// main boots the collaboration worker. Initialization order matters:
// runner first (config/logging/broker), then the kite check, then QOS
// removal, then mongo + cache, and finally the message handler
// registration before blocking on the queue.
func main() {
	r := runner.New(Name)
	if err := r.Init(); err != nil {
		log.Fatal(err)
	}

	// the handler needs a kite connection; bail out early if the runner
	// could not create one
	if r.Kite == nil {
		r.Log.Fatal("couldnt init kite")
	}

	// remove QOS, we want to consume all the messages from RMQ
	// (QOS(0) disables prefetch limiting on the underlying consumer)
	if err := r.Bongo.Broker.Sub.(*broker.Consumer).Consumer.QOS(0); err != nil {
		r.Log.Fatal("couldnt remove the QOS %# v", err)
	}

	appConfig := config.MustRead(r.Conf.Path)

	// init mongo connection; closed when main returns
	modelhelper.Initialize(appConfig.Mongo)
	defer modelhelper.Close()

	// init with defaults & ensure expireAt index — unlike the test setups,
	// the worker starts the cache GC and ensures the TTL index up front
	mongoCache := cache.NewMongoCacheWithTTL(modelhelper.Mongo.Session, cache.StartGC(), cache.MustEnsureIndexExpireAt())
	defer mongoCache.StopGC()

	handler := collaboration.New(r.Log, mongoCache, appConfig, r.Kite)
	r.SetContext(handler)

	// only listen and operate on collaboration ping messages that are fired by the handler
	r.Register(models.Ping{}).On(collaboration.FireEventName).Handle((*collaboration.Controller).Ping)
	r.Listen()
	r.Wait()
}
func TestCollaborationOperationsDeleteDriveDoc(t *testing.T) { r := runner.New("collaboration-DeleteDriveDoc-tests") err := r.Init() if err != nil { panic(err) } defer r.Close() appConfig := config.MustRead(r.Conf.Path) modelhelper.Initialize(appConfig.Mongo) defer modelhelper.Close() // init with defaults mongoCache := cache.NewMongoCacheWithTTL(modelhelper.Mongo.Session) defer mongoCache.StopGC() handler := New(r.Log, mongoCache, appConfig, r.Kite) Convey("while testing DeleteDriveDoc", t, func() { req := &models.Ping{ AccountId: 1, FileId: fmt.Sprintf("%d", rand.Int63()), } Convey("should be able to create the file", func() { f, err := createTestFile(handler) So(err, ShouldBeNil) req.FileId = f.Id Convey("should be able to delete the created file", func() { err = handler.DeleteDriveDoc(req) So(err, ShouldBeNil) }) Convey("if file id is nil response should be nil", func() { req := req req.FileId = "" err = handler.DeleteDriveDoc(req) So(err, ShouldBeNil) }) }) }) }
// TestCollaboration covers Controller.Ping, Controller.wait and
// Controller.checkIfKeyIsValid: session keys are written into the mongo
// TTL cache via SetEx and the handler decides whether a collaboration
// session is still alive, expired, or past its deadline.
//
// NOTE(review): the nested `req := req` lines copy the *pointer*, not the
// struct, so mutations in one Convey branch are visible to siblings run
// later — presumably intentional with goconvey's re-execution model, but
// worth confirming.
func TestCollaboration(t *testing.T) {
	r := runner.New("collaboration-tests")
	err := r.Init()
	if err != nil {
		panic(err)
	}
	defer r.Close()

	appConfig := config.MustRead(r.Conf.Path)
	modelhelper.Initialize(appConfig.Mongo)
	defer modelhelper.Close()

	// init with defaults
	mongoCache := cache.NewMongoCacheWithTTL(modelhelper.Mongo.Session)
	defer mongoCache.StopGC()

	handler := New(r.Log, mongoCache, appConfig, r.Kite)

	Convey("while pinging collaboration", t, func() {
		// owner
		owner := apimodels.NewAccount()
		owner.OldId = AccountOldId.Hex()
		owner, err := rest.CreateAccount(owner)
		So(err, ShouldBeNil)
		So(owner, ShouldNotBeNil)

		groupName := apimodels.RandomGroupName()

		ownerSession, err := modelhelper.FetchOrCreateSession(owner.Nick, groupName)
		So(err, ShouldBeNil)
		So(ownerSession, ShouldNotBeNil)

		// random FileId so parallel runs don't collide on cache keys
		rand.Seed(time.Now().UnixNano())
		req := &models.Ping{
			AccountId: 1,
			FileId:    fmt.Sprintf("%d", rand.Int63()),
		}

		Convey("while testing Ping", func() {
			// Ping is expected to succeed (return nil) for all of these
			// inputs — invalid pings are dropped, not rejected with an error
			Convey("reponse should be success with valid ping", func() {
				err = handler.Ping(req)
				So(err, ShouldBeNil)
			})
			Convey("reponse should be success with invalid FileId", func() {
				req.FileId = ""
				err = handler.Ping(req)
				So(err, ShouldBeNil)
			})
			Convey("reponse should be success with invalid AccountId", func() {
				req.AccountId = 0
				err = handler.Ping(req)
				So(err, ShouldBeNil)
			})
			Convey("reponse should be success with invalid session", func() {
				req := req
				// prepare an invalid session here: store a ping time that is
				// already older than the termination window
				req.CreatedAt = time.Now().UTC()
				// NOTE(review): this SetEx error is overwritten before being
				// checked — presumably fine for a test, but a So(err,
				// ShouldBeNil) in between would be safer
				err = mongoCache.SetEx(PrepareFileKey(req.FileId), ExpireSessionKeyDuration, req.CreatedAt.Add(-terminateSessionDuration))
				err = handler.Ping(req)
				So(err, ShouldBeNil)
			})
			Convey("after sleep time", func() {
				req := req
				Convey("expired session should get invalidSessoin", func() {
					// shrink the package-level timing knobs so the test
					// doesn't have to wait for real durations
					st := sleepTime
					sleepTime = time.Millisecond * 110
					tsd := terminateSessionDuration
					terminateSessionDuration = 100
					// set durations back to the original value
					defer func() {
						sleepTime = st
						terminateSessionDuration = tsd
					}()

					req.CreatedAt = time.Now().UTC()
					// prepare a valid key
					err = mongoCache.SetEx(
						PrepareFileKey(req.FileId),
						terminateSessionDuration, // expire the key after this period
						req.CreatedAt.Unix())

					// while sleeping here, redis key should be removed
					// and we can understand that the Collab session is expired
					time.Sleep(sleepTime)

					req := req
					err = handler.wait(req)
					So(err, ShouldEqual, errSessionInvalid)
				})
				Convey("deadlined session should get errDeadlineReached", func() {
					st := sleepTime
					sleepTime = time.Millisecond * 110
					dd := deadLineDuration
					deadLineDuration = 100
					// set durations back to the original value
					defer func() {
						sleepTime = st
						deadLineDuration = dd
					}()

					// no key is written here, so wait() runs into the
					// (shortened) deadline instead of finding a session
					req := req
					err := handler.wait(req)
					So(err, ShouldEqual, errDeadlineReached)
				})
			})
		})

		Convey("while testing checkIfKeyIsValid", func() {
			req := req
			req.CreatedAt = time.Now().UTC()
			// prepare a valid key
			err := mongoCache.SetEx(
				PrepareFileKey(req.FileId),
				ExpireSessionKeyDuration, // expire the key after this period
				req.CreatedAt.Unix(),     // value - unix time
			)
			So(err, ShouldBeNil)

			Convey("valid key should return nil", func() {
				err = handler.checkIfKeyIsValid(req)
				So(err, ShouldBeNil)
			})
			Convey("invalid key should return errSessionInvalid", func() {
				req := req
				// override fileId so the lookup misses the key written above
				req.FileId = fmt.Sprintf("%d", rand.Int63())
				err = handler.checkIfKeyIsValid(req)
				So(err, ShouldEqual, errSessionInvalid)
			})
			Convey("invalid (non-timestamp) value should return errSessionInvalid", func() {
				req := req
				req.CreatedAt = time.Now().UTC()
				// store a literal string instead of a unix timestamp; the
				// handler must treat an unparsable value as invalid
				err = mongoCache.SetEx(
					PrepareFileKey(req.FileId),
					ExpireSessionKeyDuration, // expire the key after this period
					"req.CreatedAt.Unix()",   // replace timestamp with unix time
				)
				err = handler.checkIfKeyIsValid(req)
				So(err, ShouldEqual, errSessionInvalid)
			})
			Convey("old ping time should return errSessionInvalid", func() {
				req := req
				req.CreatedAt = time.Now().UTC()
				// a ping time older than terminateSessionDuration is stale
				err = mongoCache.SetEx(
					PrepareFileKey(req.FileId),
					ExpireSessionKeyDuration, // expire the key after this period
					req.CreatedAt.Add(-terminateSessionDuration).Unix(),
				)
				err = handler.checkIfKeyIsValid(req)
				So(err, ShouldEqual, errSessionInvalid)
			})
			// testPingTimes presumably offsets the stored ping time by N
			// "ping periods" and asserts the expected validity — offsets -1
			// through 3 stay inside the safe window, 4 and 5 fall outside
			// (TODO confirm against testPingTimes' implementation)
			Convey("previous ping time is in safe area", func() {
				req := req
				testPingTimes(req, -1, mongoCache, handler, nil)
			})
			Convey("0 ping time is in safe area", func() {
				req := req
				testPingTimes(req, 0, mongoCache, handler, nil)
			})
			Convey("2 ping time is in safe area", func() {
				req := req
				testPingTimes(req, 2, mongoCache, handler, nil)
			})
			Convey("3 ping time is in safe area", func() {
				req := req
				testPingTimes(req, 3, mongoCache, handler, nil)
			})
			Convey("4 ping time is not in safe area - because we already reverted the time ", func() {
				req := req
				testPingTimes(req, 4, mongoCache, handler, errSessionInvalid)
			})
			Convey("5 ping time is not in safe area ", func() {
				req := req
				testPingTimes(req, 5, mongoCache, handler, errSessionInvalid)
			})
		})
	})
}
// TestCollaborationOperationsEndPrivateMessage verifies that ending a
// collaboration session deletes the private channel and its mongo
// Workspace association, and that the operation is a successful no-op
// for non-participants and for missing channels.
func TestCollaborationOperationsEndPrivateMessage(t *testing.T) {
	r := runner.New("collaboration-EndPrivateMessage-tests")
	err := r.Init()
	if err != nil {
		panic(err)
	}
	defer r.Close()

	appConfig := config.MustRead(r.Conf.Path)
	modelhelper.Initialize(appConfig.Mongo)
	defer modelhelper.Close()

	// init with defaults
	mongoCache := cache.NewMongoCacheWithTTL(modelhelper.Mongo.Session)
	defer mongoCache.StopGC()

	handler := New(r.Log, mongoCache, appConfig, r.Kite)

	Convey("while testing EndPrivateMessage", t, func() {
		req := &models.Ping{
			AccountId: 1,
			FileId:    fmt.Sprintf("%d", rand.Int63()),
		}
		Convey("should be able to create the channel first", func() {
			creator := socialapimodels.CreateAccountWithTest() // init account
			c := socialapimodels.NewChannel()                  // init channel
			c.CreatorId = creator.Id                           // set Creator id
			c.TypeConstant = socialapimodels.Channel_TYPE_COLLABORATION
			So(c.Create(), ShouldBeNil)
			cp, err := c.AddParticipant(creator.Id)
			So(err, ShouldBeNil)
			So(cp, ShouldNotBeNil)

			req.AccountId = c.CreatorId // set real account id
			req.ChannelId = c.Id        // set real channel id

			// workspace document that links the mongo side to the channel;
			// EndPrivateMessage is expected to clean this link up as well
			ws := &mongomodels.Workspace{
				ObjectId:     bson.NewObjectId(),
				OriginId:     bson.NewObjectId(),
				Name:         "My Workspace",
				Slug:         "my-workspace",
				ChannelId:    strconv.FormatInt(req.ChannelId, 10),
				MachineUID:   bson.NewObjectId().Hex(),
				MachineLabel: "koding-vm-0",
				Owner:        "cihangir",
				RootPath:     "/home/cihangir",
				IsDefault:    true,
			}
			So(modelhelper.CreateWorkspace(ws), ShouldBeNil)

			Convey("should be able to delete channel", func() {
				err = handler.EndPrivateMessage(req)
				So(err, ShouldBeNil)
				Convey("deleted channel should not be exist", func() {
					channel := socialapimodels.NewChannel()
					err := channel.ById(req.ChannelId)
					So(err, ShouldEqual, bongo.RecordNotFound)
				})
				Convey("channel id in workspace should not be exist", func() {
					ws2, err := modelhelper.GetWorkspaceByChannelId(
						strconv.FormatInt(req.ChannelId, 10),
					)
					So(err, ShouldEqual, mgo.ErrNotFound)
					So(ws2, ShouldEqual, nil)
				})
			})
			Convey("if not a participant, should not be able to delete channel", func() {
				// account 1 was never added as a participant; the call still
				// succeeds but must leave the channel untouched
				req.AccountId = 1
				err = handler.EndPrivateMessage(req)
				So(err, ShouldBeNil)
				Convey("channel should exist", func() {
					channel := socialapimodels.NewChannel()
					err := channel.ById(req.ChannelId)
					So(err, ShouldBeNil)
				})
			})
			Convey("if channel doesnt exists, should success", func() {
				req.ChannelId = 1
				err = handler.EndPrivateMessage(req)
				So(err, ShouldBeNil)
				Convey("channel should not exist", func() {
					channel := socialapimodels.NewChannel()
					err := channel.ById(req.ChannelId)
					So(err, ShouldEqual, bongo.RecordNotFound)
				})
			})
		})
	})
}
// TestCollaborationOperationsUnshareVM verifies that ending a collaboration
// session removes the right users from the shared machine: users who only
// shared the ended workspace are removed, while the owner and users who
// still share another workspace on the same machine are kept.
func TestCollaborationOperationsUnshareVM(t *testing.T) {
	r := runner.New("collaboration-UnshareVM-tests")
	err := r.Init()
	if err != nil {
		panic(err)
	}
	defer r.Close()

	appConfig := config.MustRead(r.Conf.Path)
	modelhelper.Initialize(appConfig.Mongo)
	defer modelhelper.Close()

	// init with defaults
	mongoCache := cache.NewMongoCacheWithTTL(modelhelper.Mongo.Session)
	defer mongoCache.StopGC()

	handler := New(r.Log, mongoCache, appConfig, r.Kite)

	Convey("while testing UnshareVM", t, func() {
		Convey("should be able to create the channel and workspace first", func() {
			Convey("should be able to UnshareVM", func() {
				// single-workspace case: ending the session should leave
				// only the owner on the machine
				creator, err := socialapimodels.CreateAccountInBothDbs() // init account
				So(err, ShouldBeNil)
				participant1, err := socialapimodels.CreateAccountInBothDbs()
				So(err, ShouldBeNil)
				participant2, err := socialapimodels.CreateAccountInBothDbs()
				So(err, ShouldBeNil)

				m1, m1ws1 := prepareSingleWorkspace(creator, participant1, participant2)

				channelId, err := strconv.ParseInt(m1ws1.ChannelId, 10, 64)
				So(err, ShouldBeNil)

				req1 := &models.Ping{
					AccountId: creator.Id,
					FileId:    fmt.Sprintf("%d", rand.Int63()),
					ChannelId: channelId,
				}

				toBeRemovedUsers, err := handler.findToBeRemovedUsers(req1)
				So(err, ShouldBeNil)
				So(toBeRemovedUsers, ShouldNotBeNil)

				err = handler.UnshareVM(req1, toBeRemovedUsers)
				So(err, ShouldBeNil)

				err = handler.EndPrivateMessage(req1)
				So(err, ShouldBeNil)

				Convey("remove users should not be in the machine", func() {
					mm1, err := modelhelper.GetMachineByUid(m1.Uid)
					So(err, ShouldBeNil)
					So(mm1, ShouldNotBeNil)
					// only the owner remains after unsharing
					So(len(mm1.Users), ShouldEqual, 1)
					ownerUser, err := modelhelper.GetUserByAccountId(creator.OldId)
					So(err, ShouldBeNil)
					So(mm1.Users[0].Id.Hex(), ShouldEqual, ownerUser.ObjectId.Hex())
				})
			})
			Convey("if participant and owner shares multiple workspaces", func() {
				creator, err := socialapimodels.CreateAccountInBothDbs() // init account
				So(err, ShouldBeNil)
				participant1, err := socialapimodels.CreateAccountInBothDbs()
				So(err, ShouldBeNil)
				participant2, err := socialapimodels.CreateAccountInBothDbs()
				So(err, ShouldBeNil)
				participant3, err := socialapimodels.CreateAccountInBothDbs()
				So(err, ShouldBeNil)

				// two workspaces (m2ws1, m2ws2) on the same machine m2,
				// shared between overlapping sets of participants
				_, _, m2, m2ws1, m2ws2 := prepareWorkspace(creator, participant1, participant2, participant3)

				Convey("remove from first workspace", func() {
					channelId, err := strconv.ParseInt(m2ws1.ChannelId, 10, 64)
					So(err, ShouldBeNil)
					req := &models.Ping{
						AccountId: creator.Id,
						FileId:    fmt.Sprintf("%d", rand.Int63()),
						ChannelId: channelId,
					}
					toBeRemovedUsers, err := handler.findToBeRemovedUsers(req)
					So(err, ShouldBeNil)
					So(toBeRemovedUsers, ShouldNotBeNil)
					err = handler.UnshareVM(req, toBeRemovedUsers)
					So(err, ShouldBeNil)
					err = handler.EndPrivateMessage(req)
					So(err, ShouldBeNil)
					Convey("participants should still be in the second machine", func() {
						mm2, err := modelhelper.GetMachineByUid(m2.Uid)
						So(err, ShouldBeNil)
						So(mm2, ShouldNotBeNil)
						So(len(mm2.Users), ShouldEqual, 3)
						// participant1 is not in the second WS, so it should be removed from the machine
						ownerUser, err := modelhelper.GetUserByAccountId(creator.OldId)
						So(err, ShouldBeNil)
						So(mm2.Users[0].Id.Hex(), ShouldEqual, ownerUser.ObjectId.Hex())
						participant2User, err := modelhelper.GetUserByAccountId(participant2.OldId)
						So(err, ShouldBeNil)
						So(mm2.Users[1].Id.Hex(), ShouldEqual, participant2User.ObjectId.Hex())
						participant3User, err := modelhelper.GetUserByAccountId(participant3.OldId)
						So(err, ShouldBeNil)
						So(mm2.Users[2].Id.Hex(), ShouldEqual, participant3User.ObjectId.Hex())
						Convey("after removing from second WS", func() {
							// remove from second WS too
							channelId, err := strconv.ParseInt(m2ws2.ChannelId, 10, 64)
							So(err, ShouldBeNil)
							req := &models.Ping{
								AccountId: creator.Id,
								FileId:    fmt.Sprintf("%d", rand.Int63()),
								ChannelId: channelId,
							}
							toBeRemovedUsers, err := handler.findToBeRemovedUsers(req)
							So(err, ShouldBeNil)
							So(toBeRemovedUsers, ShouldNotBeNil)
							err = handler.UnshareVM(req, toBeRemovedUsers)
							So(err, ShouldBeNil)
							err = handler.EndPrivateMessage(req)
							So(err, ShouldBeNil)
							Convey("owner and permanent should still stay", func() {
								mm2, err := modelhelper.GetMachineByUid(m2.Uid)
								So(err, ShouldBeNil)
								So(mm2, ShouldNotBeNil)
								So(len(mm2.Users), ShouldEqual, 2)
								ownerUser, err := modelhelper.GetUserByAccountId(creator.OldId)
								So(err, ShouldBeNil)
								So(mm2.Users[0].Id.Hex(), ShouldEqual, ownerUser.ObjectId.Hex())
								// NOTE(review): the variable is named participant1User
								// but it is looked up with participant3.OldId —
								// presumably participant3 is the remaining permanent
								// user and the name is just stale; confirm.
								participant1User, err := modelhelper.GetUserByAccountId(participant3.OldId)
								So(err, ShouldBeNil)
								So(mm2.Users[1].Id.Hex(), ShouldEqual, participant1User.ObjectId.Hex())
							})
						})
					})
				})
			})
		})
	})
}
// main boots the social API HTTP server: it builds the mux, opens the
// mongo connection and TTL cache, registers every API module's handlers,
// and then serves until the runner is told to stop. The deferred Close
// calls unwind in reverse registration order when main returns.
func main() {
	r := runner.New(Name)
	if err := r.Init(); err != nil {
		log.Fatal(err)
	}

	// appConfig
	c := config.MustRead(r.Conf.Path)

	mc := mux.NewConfig(Name, r.Conf.Host, r.Conf.Port)
	mc.Debug = r.Conf.Debug
	m := mux.New(mc, r.Log, r.Metrics)

	// init mongo connection
	modelhelper.Initialize(c.Mongo)
	defer modelhelper.Close()

	// init mongo cache with ensured index
	mgoCache := cache.NewMongoCacheWithTTL(modelhelper.Mongo.Session,
		cache.StartGC(),
		cache.MustEnsureIndexExpireAt(),
	)
	defer mgoCache.StopGC()

	// register every API module on the shared mux
	handlers.AddHandlers(m)
	collaboration.AddHandlers(m, mgoCache)
	paymentapi.AddHandlers(m)
	mailapi.AddHandlers(m)
	algoliaapi.AddHandlers(m, r.Log)
	account.AddHandlers(m)
	channel.AddHandlers(m)
	client.AddHandlers(m)
	message.AddHandlers(m)
	messagelist.AddHandlers(m)
	participant.AddHandlers(m)
	privatechannel.AddHandlers(m)
	reply.AddHandlers(m)
	realtimeapi.AddHandlers(m)
	presenceapi.AddHandlers(m)
	slackapi.AddHandlers(m, c)
	credential.AddHandlers(m, r.Log, c)
	emailapi.AddHandlers(m)

	// the GeoIP database is optional: log and continue without it
	mmdb, err := helper.ReadGeoIPDB(c)
	if err != nil {
		r.Log.Critical("ip persisting wont work err: %s", err.Error())
	} else {
		defer mmdb.Close()
	}

	// set default values for dev env
	if r.Conf.Environment == "dev" {
		go setDefaults(r.Log)
	}

	payment.Initialize(c)

	m.Listen()
	// shutdown server
	defer m.Close()

	r.Listen()
	r.Wait()
}
// TestCollaborationDriveService exercises the drive file lifecycle
// (create / get / delete / get-after-delete / double-delete) against the
// real drive service. The whole suite is under SkipConvey, i.e. currently
// disabled, and every service error downgrades to a skip instead of a
// failure.
//
// NOTE(review): t.Skip takes ...interface{} and does no formatting — the
// "%s" placeholders here would need t.Skipf to be interpolated. Also, the
// for/select polling loop below has no success exit: if the file stays
// retrievable it only ends via the deadline skip. Both are moot while the
// suite is SkipConvey'd, but worth fixing before re-enabling.
func TestCollaborationDriveService(t *testing.T) {
	r := runner.New("collaboration-drive-tests")
	err := r.Init()
	if err != nil {
		panic(err)
	}
	defer r.Close()

	appConfig := config.MustRead(r.Conf.Path)
	modelhelper.Initialize(appConfig.Mongo)
	defer modelhelper.Close()

	// init with defaults
	mongoCache := cache.NewMongoCacheWithTTL(modelhelper.Mongo.Session)
	defer mongoCache.StopGC()

	handler := New(r.Log, mongoCache, appConfig, r.Kite)

	SkipConvey("while pinging collaboration", t, func() {
		// owner
		owner := apimodels.NewAccount()
		owner.OldId = AccountOldId.Hex()
		owner, err := rest.CreateAccount(owner)
		So(err, ShouldBeNil)
		So(owner, ShouldNotBeNil)

		groupName := apimodels.RandomGroupName()
		ownerSession, err := modelhelper.FetchOrCreateSession(owner.Nick, groupName)
		So(err, ShouldBeNil)
		So(ownerSession, ShouldNotBeNil)

		rand.Seed(time.Now().UnixNano())
		req := &models.Ping{
			AccountId: 1,
			FileId:    fmt.Sprintf("%d", rand.Int63()),
		}

		Convey("while testing drive operations", func() {
			// pointer copy: mutations below are visible through the outer req
			req := req
			req.CreatedAt = time.Now().UTC()
			Convey("should be able to create the file", func() {
				f, err := createTestFile(handler)
				if err != nil {
					t.Skip("Err happened, skipping: %s", err.Error())
				}
				req.FileId = f.Id
				Convey("should be able to get the created file", func() {
					f2, err := handler.getFile(f.Id)
					if err != nil {
						t.Skip("Err happened, skipping: %s", err.Error())
					}
					So(f2, ShouldNotBeNil)
					Convey("should be able to delete the created file", func() {
						err = handler.deleteFile(req.FileId)
						if err != nil {
							t.Skip("Err happened, skipping: %s", err.Error())
						}
						Convey("should not be able to get the deleted file", func() {
							// poll every 100ms until the deletion propagates
							// or the test deadline fires
							deadLine := time.After(TestTimeout)
							tick := time.Tick(time.Millisecond * 100)
							for {
								select {
								case <-tick:
									f2, err := handler.getFile(f.Id)
									if err != nil {
										t.Skip("Err happened, skipping: %s", err.Error())
									}
									So(f2, ShouldBeNil)
								case <-deadLine:
									t.Skip("Could not get file after %s", TestTimeout)
								}
							}
						})
						Convey("deleting the deleted file should not give error", func() {
							err = handler.deleteFile(req.FileId)
							if err != nil {
								t.Skip("Err happened, skipping: %s", err.Error())
							}
							So(err, ShouldBeNil)
						})
					})
				})
			})
		})
	})
}