func TestVaultClient_LookupToken(t *testing.T) {
	v := testutil.NewTestVault(t).Start()
	defer v.Stop()

	logger := log.New(os.Stderr, "", log.LstdFlags)
	client, err := NewVaultClient(v.Config, logger, nil)
	if err != nil {
		t.Fatalf("failed to build vault client: %v", err)
	}
	client.SetActive(true)
	defer client.Stop()

	waitForConnection(client, t)

	// Lookup ourselves
	s, err := client.LookupToken(context.Background(), v.Config.Token)
	if err != nil {
		t.Fatalf("self lookup failed: %v", err)
	}

	policies, err := PoliciesFrom(s)
	if err != nil {
		t.Fatalf("failed to parse policies: %v", err)
	}

	expected := []string{"root"}
	if !reflect.DeepEqual(policies, expected) {
		t.Fatalf("Unexpected policies; got %v; want %v", policies, expected)
	}

	// Create a token with a different set of policies
	expected = []string{"default"}
	req := vapi.TokenCreateRequest{
		Policies: expected,
	}
	s, err = v.Client.Auth().Token().Create(&req)
	if err != nil {
		t.Fatalf("failed to create child token: %v", err)
	}

	// Get the client token
	if s == nil || s.Auth == nil {
		t.Fatalf("bad secret response: %+v", s)
	}

	// Lookup new child
	s, err = client.LookupToken(context.Background(), s.Auth.ClientToken)
	if err != nil {
		t.Fatalf("child token lookup failed: %v", err)
	}

	policies, err = PoliciesFrom(s)
	if err != nil {
		t.Fatalf("failed to parse policies: %v", err)
	}

	if !reflect.DeepEqual(policies, expected) {
		t.Fatalf("Unexpected policies; got %v; want %v", policies, expected)
	}
}
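// A minimal sketch of the waitForConnection helper the Vault tests above
// rely on; it is not shown in this section, so the polling loop and the
// ConnectionEstablished method (and its signature) are assumptions about
// the client's API, not the project's actual helper.
func waitForConnection(v *vaultClient, t *testing.T) {
	deadline := time.Now().Add(10 * time.Second)
	for time.Now().Before(deadline) {
		// Assumed accessor reporting whether the client reached Vault.
		if established, _ := v.ConnectionEstablished(); established {
			return
		}
		time.Sleep(50 * time.Millisecond)
	}
	t.Fatalf("connection to test Vault was not established")
}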
func TestVaultClient_RevokeTokens_PreEstablishs(t *testing.T) {
	v := testutil.NewTestVault(t)
	logger := log.New(os.Stderr, "", log.LstdFlags)
	client, err := NewVaultClient(v.Config, logger, nil)
	if err != nil {
		t.Fatalf("failed to build vault client: %v", err)
	}
	client.SetActive(true)
	defer client.Stop()

	// Create some VaultAccessors
	vas := []*structs.VaultAccessor{
		mock.VaultAccessor(),
		mock.VaultAccessor(),
	}

	if err := client.RevokeTokens(context.Background(), vas, false); err != nil {
		t.Fatalf("RevokeTokens failed: %v", err)
	}

	// Wasn't committed
	if len(client.revoking) != 0 {
		t.Fatalf("didn't add to revoke loop")
	}

	if err := client.RevokeTokens(context.Background(), vas, true); err != nil {
		t.Fatalf("RevokeTokens failed: %v", err)
	}

	// Was committed
	if len(client.revoking) != 2 {
		t.Fatalf("didn't add to revoke loop")
	}
}
func TestInsert(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}
	s, err := mgo.Dial("")
	if !assert.NoError(t, err) {
		return
	}
	defer cleanup(s, "testinsert")()
	h := NewHandler(s, "testinsert", "test")
	items := []*resource.Item{
		{
			ID:      "1234",
			ETag:    "etag",
			Updated: now,
			Payload: map[string]interface{}{
				"id":  "1234",
				"foo": "bar",
			},
		},
	}
	err = h.Insert(context.Background(), items)
	assert.NoError(t, err)
	d := map[string]interface{}{}
	err = s.DB("testinsert").C("test").FindId("1234").One(&d)
	if !assert.NoError(t, err) {
		return
	}
	assert.Equal(t, map[string]interface{}{"foo": "bar", "_id": "1234", "_etag": "etag", "_updated": now}, d)
	// Inserting same item twice should return a conflict error
	err = h.Insert(context.Background(), items)
	assert.Equal(t, resource.ErrConflict, err)
}
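// A plausible sketch of the cleanup helper used by the mgo-backed tests in
// this section (TestInsert, TestClear). The helper is not defined here, so
// dropping the test database and closing the session are assumptions about
// its behavior, chosen to match how the tests defer it.
func cleanup(s *mgo.Session, db string) func() {
	return func() {
		// Drop everything the test wrote, then release the session.
		s.DB(db).DropDatabase()
		s.Close()
	}
}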
func TestRoutingResolve(t *testing.T) {
	dstore := dssync.MutexWrap(ds.NewMapDatastore())
	serv := mockrouting.NewServer()
	id := testutil.RandIdentityOrFatal(t)
	d := serv.ClientWithDatastore(context.Background(), id, dstore)

	resolver := NewRoutingResolver(d, 0)
	publisher := NewRoutingPublisher(d, dstore)

	privk, pubk, err := testutil.RandTestKeyPair(512)
	if err != nil {
		t.Fatal(err)
	}

	h := path.FromString("/ipfs/QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN")
	err = publisher.Publish(context.Background(), privk, h)
	if err != nil {
		t.Fatal(err)
	}

	pubkb, err := pubk.Bytes()
	if err != nil {
		t.Fatal(err)
	}

	pkhash := u.Hash(pubkb)
	res, err := resolver.Resolve(context.Background(), key.Key(pkhash).B58String())
	if err != nil {
		t.Fatal(err)
	}

	if res != h {
		t.Fatal("Got back incorrect value.")
	}
}
func TestHandlerPostListWithReference(t *testing.T) {
	s := mem.NewHandler()
	s.Insert(context.Background(), []*resource.Item{{ID: "ref", Payload: map[string]interface{}{"id": "ref"}}})
	index := resource.NewIndex()
	index.Bind("foo", schema.Schema{Fields: schema.Fields{"id": {}}}, s, resource.DefaultConf)
	bar := index.Bind("bar", schema.Schema{Fields: schema.Fields{
		"id":  {},
		"foo": {Validator: &schema.Reference{Path: "foo"}},
	}}, s, resource.DefaultConf)
	r, _ := http.NewRequest("POST", "/bar", bytes.NewBufferString(`{"id": "1", "foo": "ref"}`))
	rm := &RouteMatch{
		ResourcePath: []*ResourcePathComponent{
			&ResourcePathComponent{
				Name:     "bar",
				Resource: bar,
			},
		},
	}
	ctx := contextWithIndex(context.Background(), index)
	status, _, body := listPost(ctx, r, rm)
	assert.Equal(t, http.StatusCreated, status)
	if assert.IsType(t, &resource.Item{}, body) {
		item := body.(*resource.Item)
		assert.Equal(t, "1", item.ID)
	}
}
func TestSessionIsActive(t *testing.T) {
	u := test.URL()
	if u == nil {
		t.SkipNow()
	}

	session := sessionClient(u, t)

	// Skip test against ESXi -- SessionIsActive is not implemented
	if session.client.ServiceContent.About.ApiType != "VirtualCenter" {
		t.Skipf("Talking to %s instead of %s", session.client.ServiceContent.About.ApiType, "VirtualCenter")
	}

	err := session.Login(context.Background(), u.User)
	if err != nil {
		t.Error("Login Error: ", err)
	}

	active, err := session.SessionIsActive(context.Background())
	if err != nil || !active {
		t.Errorf("Expected %t, got %t", true, active)
		t.Errorf("Expected nil, got %v", err)
	}

	session.Logout(context.Background())

	active, err = session.SessionIsActive(context.Background())
	if err == nil || active {
		t.Errorf("Expected %t, got %t", false, active)
		t.Errorf("Expected NotAuthenticated, got %v", err)
	}
}
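// A hedged sketch of the sessionClient helper used above; it is not shown
// in this section, so constructing a session Manager over a fresh
// vim25.Client is an assumption based on how the test exercises
// Login/Logout/SessionIsActive, not the project's actual helper.
func sessionClient(u *url.URL, t *testing.T) *Manager {
	soapClient := soap.NewClient(u, true)
	vimClient, err := vim25.NewClient(context.Background(), soapClient)
	if err != nil {
		t.Fatal(err)
	}
	return NewManager(vimClient)
}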
func TestEnumerateChildren(t *testing.T) {
	bsi := bstest.Mocks(1)
	ds := NewDAGService(bsi[0])

	read := io.LimitReader(u.NewTimeSeededRand(), 1024*1024)
	root, err := imp.BuildDagFromReader(ds, chunk.NewSizeSplitter(read, 512))
	if err != nil {
		t.Fatal(err)
	}

	set := cid.NewSet()
	err = EnumerateChildren(context.Background(), ds, root.Cid(), set.Visit, false)
	if err != nil {
		t.Fatal(err)
	}

	// traverse dag and check that every link was visited
	var traverse func(n node.Node)
	traverse = func(n node.Node) {
		for _, lnk := range n.Links() {
			c := lnk.Cid
			if !set.Has(c) {
				t.Fatal("missing key in set! ", lnk.Cid.String())
			}
			child, err := ds.Get(context.Background(), c)
			if err != nil {
				t.Fatal(err)
			}
			traverse(child)
		}
	}

	traverse(root)
}
func TestAddLink(t *testing.T) {
	ds := mdtest.Mock()
	fishnode := dag.NodeWithData([]byte("fishcakes!"))

	fk, err := ds.Add(fishnode)
	if err != nil {
		t.Fatal(err)
	}

	nd := new(dag.ProtoNode)
	nnode, err := addLink(context.Background(), ds, nd, "fish", fishnode)
	if err != nil {
		t.Fatal(err)
	}

	fnprime, err := nnode.GetLinkedNode(context.Background(), ds, "fish")
	if err != nil {
		t.Fatal(err)
	}

	fnpkey := fnprime.Cid()
	if !fnpkey.Equals(fk) {
		t.Fatal("wrong child node found!")
	}
}
func TestClientOverMax(t *testing.T) {
	rs := NewServer()
	k := cid.NewCidV0(u.Hash([]byte("hello")))
	numProvidersForHelloKey := 100
	for i := 0; i < numProvidersForHelloKey; i++ {
		pi := testutil.RandIdentityOrFatal(t)
		err := rs.Client(pi).Provide(context.Background(), k)
		if err != nil {
			t.Fatal(err)
		}
	}

	max := 10
	pi := testutil.RandIdentityOrFatal(t)
	client := rs.Client(pi)

	providersFromClient := client.FindProvidersAsync(context.Background(), k, max)
	i := 0
	for range providersFromClient {
		i++
	}
	if i != max {
		t.Fatal("Too many providers returned")
	}
}
func TestClientFindProviders(t *testing.T) {
	pi := testutil.RandIdentityOrFatal(t)
	rs := NewServer()
	client := rs.Client(pi)

	k := cid.NewCidV0(u.Hash([]byte("hello")))
	err := client.Provide(context.Background(), k)
	if err != nil {
		t.Fatal(err)
	}

	// This is bad... but simulating networks is hard
	time.Sleep(time.Millisecond * 300)

	max := 100
	providersFromClient := client.FindProvidersAsync(context.Background(), k, max)
	isInClient := false
	// The loop variable previously shadowed pi, making the comparison
	// trivially true; compare the received peer against the providing
	// identity instead.
	for info := range providersFromClient {
		if info.ID == pi.ID() {
			isInClient = true
		}
	}
	if !isInClient {
		t.Fatal("Despite client providing key, client didn't receive peer when finding providers")
	}
}
// NewAuthenticatedClient creates a new vim25.Client, authenticates the user
// specified in the test URL, and returns it.
func NewAuthenticatedClient(t *testing.T) *vim25.Client {
	u := URL()
	if u == nil {
		t.SkipNow()
	}

	soapClient := soap.NewClient(u, true)
	vimClient, err := vim25.NewClient(context.Background(), soapClient)
	if err != nil {
		t.Fatal(err)
	}

	req := types.Login{
		This: *vimClient.ServiceContent.SessionManager,
	}

	req.UserName = u.User.Username()
	if pw, ok := u.User.Password(); ok {
		req.Password = pw
	}

	_, err = methods.Login(context.Background(), vimClient, &req)
	if err != nil {
		t.Fatal(err)
	}

	return vimClient
}
// scrapeRoute parses and responds to a Scrape by using t.TrackerLogic.
func (t *Frontend) scrapeRoute(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
	var err error
	start := time.Now()
	// Wrap in a closure so that err and the elapsed time are evaluated when
	// the handler returns, not when the defer statement executes.
	defer func() { recordResponseDuration("scrape", err, time.Since(start)) }()

	req, err := ParseScrape(r)
	if err != nil {
		WriteError(w, err)
		return
	}

	host, _, err := net.SplitHostPort(r.RemoteAddr)
	if err != nil {
		log.Errorln("http: unable to determine remote address for scrape:", err)
		WriteError(w, err)
		return
	}

	ip := net.ParseIP(host)
	ctx := context.WithValue(context.Background(), middleware.ScrapeIsIPv6Key, len(ip) == net.IPv6len)

	resp, err := t.logic.HandleScrape(ctx, req)
	if err != nil {
		WriteError(w, err)
		return
	}

	err = WriteScrapeResponse(w, resp)
	if err != nil {
		WriteError(w, err)
		return
	}

	go t.logic.AfterScrape(context.Background(), req, resp)
}
// announceRoute parses and responds to an Announce by using t.TrackerLogic.
func (t *Frontend) announceRoute(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
	var err error
	start := time.Now()
	// As in scrapeRoute, defer a closure so err and the elapsed time are
	// captured at return rather than at defer time.
	defer func() { recordResponseDuration("announce", err, time.Since(start)) }()

	req, err := ParseAnnounce(r, t.RealIPHeader, t.AllowIPSpoofing)
	if err != nil {
		WriteError(w, err)
		return
	}

	resp, err := t.logic.HandleAnnounce(context.Background(), req)
	if err != nil {
		WriteError(w, err)
		return
	}

	err = WriteAnnounceResponse(w, resp)
	if err != nil {
		WriteError(w, err)
		return
	}

	go t.logic.AfterAnnounce(context.Background(), req, resp)
}
func TestClear(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}
	s, err := mgo.Dial("")
	if !assert.NoError(t, err) {
		return
	}
	defer cleanup(s, "testupdate")()
	h := NewHandler(s, "testupdate", "test")
	items := []*resource.Item{
		{ID: "1", Payload: map[string]interface{}{"id": "1", "name": "a"}},
		{ID: "2", Payload: map[string]interface{}{"id": "2", "name": "b"}},
		{ID: "3", Payload: map[string]interface{}{"id": "3", "name": "c"}},
		{ID: "4", Payload: map[string]interface{}{"id": "4", "name": "d"}},
	}
	err = h.Insert(context.Background(), items)
	assert.NoError(t, err)
	lookup := resource.NewLookupWithQuery(schema.Query{
		schema.In{Field: "name", Values: []schema.Value{"c", "d"}},
	})
	deleted, err := h.Clear(context.Background(), lookup)
	assert.NoError(t, err)
	assert.Equal(t, 2, deleted)
	lookup = resource.NewLookupWithQuery(schema.Query{
		schema.Equal{Field: "id", Value: "2"},
	})
	deleted, err = h.Clear(context.Background(), lookup)
	assert.NoError(t, err)
	assert.Equal(t, 1, deleted)
}
func TestFromContext(t *testing.T) {
	assert.Equal(t, NopLogger, FromContext(nil))
	assert.Equal(t, NopLogger, FromContext(context.Background()))
	l := &logger{}
	ctx := NewContext(context.Background(), l)
	assert.Equal(t, l, FromContext(ctx))
}
func TestHandlerPostListWithSubSchemaReferenceNotFound(t *testing.T) {
	s := mem.NewHandler()
	s.Insert(context.Background(), []*resource.Item{{ID: "ref", Payload: map[string]interface{}{"id": "ref"}}})
	index := resource.NewIndex()
	index.Bind("foo", schema.Schema{Fields: schema.Fields{"id": {}}}, s, resource.DefaultConf)
	bar := index.Bind("bar", schema.Schema{Fields: schema.Fields{
		"id": {},
		"sub": {
			Schema: &schema.Schema{
				Fields: schema.Fields{
					"foo": {Validator: &schema.Reference{Path: "foo"}},
				},
			},
		},
	}}, s, resource.DefaultConf)
	r, _ := http.NewRequest("POST", "/bar", bytes.NewBufferString(`{"id": "1", "sub": {"foo": "notfound"}}`))
	rm := &RouteMatch{
		ResourcePath: []*ResourcePathComponent{
			&ResourcePathComponent{
				Name:     "bar",
				Resource: bar,
			},
		},
	}
	ctx := contextWithIndex(context.Background(), index)
	status, _, body := listPost(ctx, r, rm)
	assert.Equal(t, http.StatusNotFound, status)
	if assert.IsType(t, &Error{}, body) {
		err := body.(*Error)
		assert.Equal(t, http.StatusNotFound, err.Code)
		assert.Equal(t, "Resource reference not found for field `foo'", err.Message)
	}
}
func TestVaultClient_SetActive(t *testing.T) {
	v := testutil.NewTestVault(t).Start()
	defer v.Stop()

	logger := log.New(os.Stderr, "", log.LstdFlags)
	client, err := NewVaultClient(v.Config, logger, nil)
	if err != nil {
		t.Fatalf("failed to build vault client: %v", err)
	}
	defer client.Stop()

	waitForConnection(client, t)

	// Do a lookup and expect an error about not being active
	_, err = client.LookupToken(context.Background(), "123")
	if err == nil || !strings.Contains(err.Error(), "not active") {
		t.Fatalf("Expected not-active error: %v", err)
	}

	client.SetActive(true)

	// Do a lookup of ourselves
	_, err = client.LookupToken(context.Background(), v.RootToken)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
}
func NewGCS(name string, info map[string]string) (Backend, error) {
	b := &gcsBackend{
		name:       name,
		bucketName: info["bucket"],
	}
	keyJSON := []byte(info["key"])

	if b.bucketName == "" {
		return nil, fmt.Errorf("blobstore: missing Google Cloud Storage bucket param for %s", name)
	}
	if len(keyJSON) == 0 {
		return nil, fmt.Errorf("blobstore: missing Google Cloud Storage key JSON param for %s", name)
	}

	jwtToken, err := google.JWTConfigFromJSON(keyJSON, "https://www.googleapis.com/auth/devstorage.read_write")
	if err != nil {
		return nil, fmt.Errorf("blobstore: error loading Google Cloud Storage JSON key: %s", err)
	}
	tokenSource := jwtToken.TokenSource(context.Background())

	// Test getting an OAuth token so we can disambiguate an issue with the
	// token and an issue with the bucket permissions below.
	if _, err := tokenSource.Token(); err != nil {
		return nil, fmt.Errorf("blobstore: error getting Google Cloud Storage OAuth token: %s", err)
	}

	pemBlock, _ := pem.Decode(jwtToken.PrivateKey)
	privateKey, err := x509.ParsePKCS8PrivateKey(pemBlock.Bytes)
	if err != nil {
		return nil, fmt.Errorf("blobstore: error decoding Google Cloud Storage private key: %s", err)
	}
	rsaPrivateKey, ok := privateKey.(*rsa.PrivateKey)
	if !ok {
		return nil, fmt.Errorf("blobstore: unexpected Google Cloud Storage key type: %T", privateKey)
	}

	b.signOpts = func() *storage.SignedURLOptions {
		return &storage.SignedURLOptions{
			GoogleAccessID: jwtToken.Email,
			SignBytes: func(b []byte) ([]byte, error) {
				digest := sha256.Sum256(b)
				return rsa.SignPKCS1v15(rand.Reader, rsaPrivateKey, crypto.SHA256, digest[:])
			},
			Method:  "GET",
			Expires: time.Now().Add(10 * time.Minute),
		}
	}

	client, err := storage.NewClient(context.Background(), option.WithTokenSource(tokenSource))
	if err != nil {
		return nil, fmt.Errorf("blobstore: error creating Google Cloud Storage client: %s", err)
	}

	b.bucket = client.Bucket(b.bucketName)

	_, err = b.bucket.Attrs(context.Background())
	if err != nil {
		return nil, fmt.Errorf("blobstore: error checking Google Cloud Storage bucket %q existence, ensure that it exists and Owner access for %s is included in the bucket ACL: %q", b.bucketName, jwtToken.Email, err)
	}

	return b, nil
}
func TestLimitedStreams(t *testing.T) {
	mn, err := FullMeshConnected(context.Background(), 2)
	if err != nil {
		t.Fatal(err)
	}

	var wg sync.WaitGroup
	messages := 4
	messageSize := 500
	handler := func(s inet.Stream) {
		b := make([]byte, messageSize)
		for i := 0; i < messages; i++ {
			if _, err := io.ReadFull(s, b); err != nil {
				log.Fatal(err)
			}
			if !bytes.Equal(b[:4], []byte("ping")) {
				log.Fatal("bytes mismatch")
			}
			wg.Done()
		}
		s.Close()
	}

	hosts := mn.Hosts()
	for _, h := range mn.Hosts() {
		h.SetStreamHandler(protocol.TestingID, handler)
	}

	peers := mn.Peers()
	links := mn.LinksBetweenPeers(peers[0], peers[1])
	// 1000 bytes per second bandwidth
	bps := float64(1000)
	opts := links[0].Options()
	opts.Bandwidth = bps
	for _, link := range links {
		link.SetOptions(opts)
	}

	ctx := context.Background()
	s, err := hosts[0].NewStream(ctx, hosts[1].ID(), protocol.TestingID)
	if err != nil {
		t.Fatal(err)
	}

	filler := make([]byte, messageSize-4)
	data := append([]byte("ping"), filler...)
	before := time.Now()
	for i := 0; i < messages; i++ {
		wg.Add(1)
		if _, err := s.Write(data); err != nil {
			panic(err)
		}
	}

	wg.Wait()
	if !within(time.Since(before), time.Duration(time.Second*2), time.Second/3) {
		t.Fatal("Expected 2ish seconds but got ", time.Since(before))
	}
}
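// A small sketch of the within helper used in the timing assertion above;
// it is not defined in this section, so the tolerance-based comparison is
// an assumption about its semantics inferred from the call site.
func within(actual, expected, tolerance time.Duration) bool {
	// Accept any measurement inside expected +/- tolerance.
	diff := actual - expected
	if diff < 0 {
		diff = -diff
	}
	return diff < tolerance
}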
// TaskListener monitors etcd for new tasks
func (m *etcdMinion) TaskListener(c chan<- *task.Task) error {
	log.Printf("Task listener is watching %s\n", m.queueDir)

	rand.Seed(time.Now().UTC().UnixNano())
	b := backoff.Backoff{
		Min:    1 * time.Second,
		Max:    10 * time.Minute,
		Factor: 2.0,
		Jitter: true,
	}

	watcherOpts := &etcdclient.WatcherOptions{
		Recursive: true,
	}
	watcher := m.kapi.Watcher(m.queueDir, watcherOpts)

	for {
		resp, err := watcher.Next(context.Background())
		if err != nil {
			// Back off and retry again later
			duration := b.Duration()
			log.Printf("%s, retrying in %s\n", err, duration)
			time.Sleep(duration)
			continue
		}

		// Reset the backoff counter on successful receive
		b.Reset()

		// Ignore "delete" events when removing a task from the queue
		action := strings.ToLower(resp.Action)
		if strings.EqualFold(action, "delete") {
			continue
		}

		// Unmarshal and remove the task from the queue
		t, err := EtcdUnmarshalTask(resp.Node)
		m.kapi.Delete(context.Background(), resp.Node.Key, nil)
		if err != nil {
			log.Printf("Received invalid task %s: %s\n", resp.Node.Key, err)
			continue
		}

		// Send the task for processing
		log.Printf("Received task %s\n", t.ID)
		t.State = task.TaskStateQueued
		t.TimeReceived = time.Now().Unix()
		if err := m.SaveTaskResult(t); err != nil {
			log.Printf("Unable to save task state: %s\n", err)
			continue
		}
		c <- t
	}
}
// TODO does dht ensure won't receive self as a provider? probably not.
func TestCanceledContext(t *testing.T) {
	rs := NewServer()
	k := cid.NewCidV0(u.Hash([]byte("hello")))

	// avoid leaking goroutine, without using the context to signal
	// (we want the goroutine to keep trying to publish on a
	// cancelled context until we've tested it doesn't do anything.)
	done := make(chan struct{})
	defer func() { done <- struct{}{} }()

	t.Log("async'ly announce infinite stream of providers for key")
	i := 0
	go func() { // infinite stream
		for {
			select {
			case <-done:
				t.Log("exiting async worker")
				return
			default:
			}

			pi, err := testutil.RandIdentity()
			if err != nil {
				t.Error(err)
			}
			err = rs.Client(pi).Provide(context.Background(), k)
			if err != nil {
				t.Error(err)
			}
			i++
		}
	}()

	local := testutil.RandIdentityOrFatal(t)
	client := rs.Client(local)

	t.Log("warning: max is finite so this test is non-deterministic")
	t.Log("context cancellation could simply take lower priority")
	t.Log("and result in receiving the max number of results")
	max := 1000

	t.Log("cancel the context before consuming")
	ctx, cancelFunc := context.WithCancel(context.Background())
	cancelFunc()
	providers := client.FindProvidersAsync(ctx, k, max)

	numProvidersReturned := 0
	for range providers {
		numProvidersReturned++
	}
	t.Log(numProvidersReturned)

	if numProvidersReturned == max {
		t.Fatal("Context cancel had no effect")
	}
}
func TestAddMutation_gru2(t *testing.T) {
	key := x.DataKey("question.tag", 0x01)
	dir, err := ioutil.TempDir("", "storetest_")
	require.NoError(t, err)
	defer os.RemoveAll(dir)

	ps, err := store.NewStore(dir)
	require.NoError(t, err)

	Init(ps)
	ol := getNew(key, ps)

	{
		// Set two tag ids and merge.
		edge := &task.DirectedEdge{
			ValueId: 0x02,
			Label:   "gru",
		}
		addMutation(t, ol, edge, Set)

		edge = &task.DirectedEdge{
			ValueId: 0x03,
			Label:   "gru",
		}
		addMutation(t, ol, edge, Set)

		merged, err := ol.CommitIfDirty(context.Background())
		require.NoError(t, err)
		require.True(t, merged)
	}

	{
		// Let's set a new tag and delete the two older ones.
		edge := &task.DirectedEdge{
			ValueId: 0x02,
			Label:   "gru",
		}
		addMutation(t, ol, edge, Del)

		edge = &task.DirectedEdge{
			ValueId: 0x03,
			Label:   "gru",
		}
		addMutation(t, ol, edge, Del)

		edge = &task.DirectedEdge{
			ValueId: 0x04,
			Label:   "gru",
		}
		addMutation(t, ol, edge, Set)

		merged, err := ol.CommitIfDirty(context.Background())
		require.NoError(t, err)
		require.True(t, merged)
	}

	// Posting list should just have the new tag.
	uids := []uint64{0x04}
	require.Equal(t, uids, listToArray(t, 0, ol))
}
func TestDoubleGet(t *testing.T) {
	net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
	sg := NewTestSessionGenerator(net)
	defer sg.Close()
	bg := blocksutil.NewBlockGenerator()

	t.Log("Test one node trying to get one block from another")

	instances := sg.Instances(2)
	blocks := bg.Blocks(1)

	ctx1, cancel1 := context.WithCancel(context.Background())
	blkch1, err := instances[1].Exchange.GetBlocks(ctx1, []*cid.Cid{blocks[0].Cid()})
	if err != nil {
		t.Fatal(err)
	}

	ctx2, cancel2 := context.WithCancel(context.Background())
	defer cancel2()
	blkch2, err := instances[1].Exchange.GetBlocks(ctx2, []*cid.Cid{blocks[0].Cid()})
	if err != nil {
		t.Fatal(err)
	}

	// ensure both requests make it into the wantlist at the same time
	time.Sleep(time.Millisecond * 100)
	cancel1()

	_, ok := <-blkch1
	if ok {
		t.Fatal("expected channel to be closed")
	}

	err = instances[0].Exchange.HasBlock(blocks[0])
	if err != nil {
		t.Fatal(err)
	}

	select {
	case blk, ok := <-blkch2:
		if !ok {
			t.Fatal("expected to get the block here")
		}
		t.Log(blk)
	case <-time.After(time.Second * 5):
		t.Fatal("timed out waiting on block")
	}

	for _, inst := range instances {
		err := inst.Exchange.Close()
		if err != nil {
			t.Fatal(err)
		}
	}
}
func TestConvertToEdges(t *testing.T) {
	q1 := `_uid_:0x01 <type> _uid_:0x02 .
		_uid_:0x01 <character> _uid_:0x03 .`
	nquads, err := convertToNQuad(context.Background(), q1)
	require.NoError(t, err)
	mr, err := convertToEdges(context.Background(), nquads)
	require.NoError(t, err)
	require.EqualValues(t, len(mr.edges), 2)
}
// If there are CAs and TLS certs on disk, it tries to load and fails if there
// are any errors, even if a join token is provided.
func TestLoadSecurityConfigLoadFromDisk(t *testing.T) {
	tempdir, err := ioutil.TempDir("", "test-load-node-tls")
	require.NoError(t, err)
	defer os.RemoveAll(tempdir)
	paths := ca.NewConfigPaths(filepath.Join(tempdir, "certificates"))

	tc := cautils.NewTestCA(t)
	defer tc.Stop()

	peer, err := tc.ConnBroker.Remotes().Select()
	require.NoError(t, err)

	// Load successfully with valid passphrase
	rootCA, err := ca.CreateRootCA(ca.DefaultRootCN, paths.RootCA)
	require.NoError(t, err)
	krw := ca.NewKeyReadWriter(paths.Node, []byte("passphrase"), nil)
	require.NoError(t, err)
	_, err = rootCA.IssueAndSaveNewCertificates(krw, identity.NewID(), ca.WorkerRole, identity.NewID())
	require.NoError(t, err)

	node, err := New(&Config{
		StateDir:  tempdir,
		JoinAddr:  peer.Addr,
		JoinToken: tc.ManagerToken,
		UnlockKey: []byte("passphrase"),
	})
	require.NoError(t, err)
	securityConfig, err := node.loadSecurityConfig(context.Background())
	require.NoError(t, err)
	require.NotNil(t, securityConfig)

	// Invalid passphrase
	node, err = New(&Config{
		StateDir:  tempdir,
		JoinAddr:  peer.Addr,
		JoinToken: tc.ManagerToken,
	})
	require.NoError(t, err)
	_, err = node.loadSecurityConfig(context.Background())
	require.Equal(t, ErrInvalidUnlockKey, err)

	// Invalid CA
	rootCA, err = ca.CreateRootCA(ca.DefaultRootCN, paths.RootCA)
	require.NoError(t, err)
	node, err = New(&Config{
		StateDir:  tempdir,
		JoinAddr:  peer.Addr,
		JoinToken: tc.ManagerToken,
		UnlockKey: []byte("passphrase"),
	})
	require.NoError(t, err)
	_, err = node.loadSecurityConfig(context.Background())
	require.IsType(t, x509.UnknownAuthorityError{}, errors.Cause(err))
}
func TestPathRoundTrip(t *testing.T) {
	t.Parallel()
	ctx := SetPath(context.Background(), "hi")
	if path := Path(ctx); path != "hi" {
		t.Errorf("expected hi, got %q", path)
	}
	if path := Path(context.Background()); path != "" {
		t.Errorf("expected empty path, got %q", path)
	}
}
func ExampleEventLogger() {
	{
		log := EventLogger(nil)
		e := log.EventBegin(context.Background(), "dial")
		e.Done()
	}
	{
		log := EventLogger(nil)
		e := log.EventBegin(context.Background(), "dial")
		_ = e.Close() // implements io.Closer for convenience
	}
}
func TestNewClient(t *testing.T) {
	u := test.URL()
	if u == nil {
		t.SkipNow()
	}

	c, err := NewClient(context.Background(), u, true)
	if err != nil {
		t.Fatal(err)
	}

	f := func() error {
		var x mo.Folder
		err = mo.RetrieveProperties(context.Background(), c, c.ServiceContent.PropertyCollector, c.ServiceContent.RootFolder, &x)
		if err != nil {
			return err
		}
		if len(x.Name) == 0 {
			return errors.New("empty response")
		}
		return nil
	}

	// check cookie is valid with an sdk request
	if err := f(); err != nil {
		t.Fatal(err)
	}

	// check cookie is valid with a non-sdk request
	u.User = nil // turn off Basic auth
	u.Path = "/folder"
	r, err := c.Client.Get(u.String())
	if err != nil {
		t.Fatal(err)
	}
	if r.StatusCode != http.StatusOK {
		t.Fatal(r)
	}

	// sdk request should fail w/o a valid cookie
	c.Client.Jar = nil
	if err := f(); err == nil {
		t.Fatal("should fail")
	}

	// invalid login
	u.Path = "/sdk"
	u.User = url.UserPassword("ENOENT", "EINVAL")
	_, err = NewClient(context.Background(), u, true)
	if err == nil {
		t.Fatal("should fail")
	}
}
func TestValidateTTNAuthContext(t *testing.T) {
	for _, env := range strings.Split("ACCOUNT_SERVER_PROTO ACCOUNT_SERVER_URL", " ") {
		if os.Getenv(env) == "" {
			t.Skipf("Skipping auth server test: %s not configured", env)
		}
	}

	accountServer := fmt.Sprintf("%s://%s",
		os.Getenv("ACCOUNT_SERVER_PROTO"),
		os.Getenv("ACCOUNT_SERVER_URL"),
	)

	a := assertions.New(t)

	c := new(Component)
	c.Config.KeyDir = os.TempDir()
	c.Config.AuthServers = map[string]string{
		"ttn-account-preview": accountServer,
	}

	err := c.initAuthServers()
	a.So(err, assertions.ShouldBeNil)

	{
		// A context without any metadata should be rejected
		ctx := context.Background()
		_, err = c.ValidateTTNAuthContext(ctx)
		a.So(err, assertions.ShouldNotBeNil)
	}

	{
		// Empty metadata should be rejected
		md := metadata.Pairs()
		ctx := metadata.NewContext(context.Background(), md)
		_, err = c.ValidateTTNAuthContext(ctx)
		a.So(err, assertions.ShouldNotBeNil)
	}

	{
		// An id without a token should be rejected
		md := metadata.Pairs(
			"id", "dev",
		)
		ctx := metadata.NewContext(context.Background(), md)
		_, err = c.ValidateTTNAuthContext(ctx)
		a.So(err, assertions.ShouldNotBeNil)
	}

	{
		// A valid id and token pair should be accepted
		md := metadata.Pairs(
			"id", "dev",
			"token", "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJpc3MiOiJ0dG4tYWNjb3VudC1wcmV2aWV3Iiwic3ViIjoiZGV2IiwidHlwZSI6InJvdXRlciIsImlhdCI6MTQ3NjQzOTQzOH0.Duz-E5aMYEPY_Nf5Pky7Qmjbs1dMp9PN9nMqbSzoU079b8TPL4DH2SKcRHrrMqieB3yhJb3YaQBfY6dKWfgVz8BmTeKlGXfFrqEj91y30J7r9_VsHRzgDMJedlqXryvf0S_yD27TsJ7TMbGYyE00T4tAX3Uf6wQZDhdyHNGtdf4jtoAjzOxVAodNtXZp26LR7fFk56UstBxOxztBMzyzmAdiTG4lSyEqq7zsuJcFjmHB9MfEoD4ZT-iTRL1ohFjGuj2HN49oPyYlZAVPP7QajLyNsLnv-nDqXE_QecOjAcEq4PLNJ3DpXtX-lo8I_F1eV9yQnDdQQi4EUvxmxZWeBA",
		)
		ctx := metadata.NewContext(context.Background(), md)
		_, err = c.ValidateTTNAuthContext(ctx)
		a.So(err, assertions.ShouldBeNil)
	}
}
func TestMultiWrite(t *testing.T) {
	dserv := testu.GetDAGServ()
	n := testu.GetEmptyNode(t, dserv)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512))
	if err != nil {
		t.Fatal(err)
	}

	data := make([]byte, 4000)
	u.NewTimeSeededRand().Read(data)

	for i := 0; i < len(data); i++ {
		n, err := dagmod.WriteAt(data[i:i+1], int64(i))
		if err != nil {
			t.Fatal(err)
		}
		if n != 1 {
			t.Fatal("Somehow wrote the wrong number of bytes! (n != 1)")
		}

		size, err := dagmod.Size()
		if err != nil {
			t.Fatal(err)
		}

		if size != int64(i+1) {
			t.Fatal("Size was reported incorrectly")
		}
	}

	nd, err := dagmod.GetNode()
	if err != nil {
		t.Fatal(err)
	}

	read, err := uio.NewDagReader(context.Background(), nd, dserv)
	if err != nil {
		t.Fatal(err)
	}
	rbuf, err := ioutil.ReadAll(read)
	if err != nil {
		t.Fatal(err)
	}

	err = testu.ArrComp(rbuf, data)
	if err != nil {
		t.Fatal(err)
	}
}