Example #1
File: client.go Project: GregWilson/kite
// makeResponseCallback prepares and returns a callback function sent to the server.
// The caller of the Tell() is blocked until the server calls this callback function.
// Sets the response and notifies the caller by sending on the done channel.
func (c *Client) makeResponseCallback(doneChan chan *response, removeCallback <-chan uint64, method string, args []interface{}) dnode.Function {
	return dnode.Callback(func(arguments *dnode.Partial) {
		// Single argument of response callback.
		var resp struct {
			Result *dnode.Partial `json:"result"`
			Err    *Error         `json:"error"`
		}

		// Notify that the callback is finished.
		defer func() {
			if resp.Err != nil {
				c.LocalKite.Log.Debug("Error received from kite: %q method: %q args: %#v err: %s", c.Kite.Name, method, args, resp.Err.Error())
				doneChan <- &response{resp.Result, resp.Err}
			} else {
				doneChan <- &response{resp.Result, nil}
			}
		}()

		// Remove the callback function from the map so we do not
		// consume memory for unused callbacks.
		if id, ok := <-removeCallback; ok {
			c.scrubber.RemoveCallback(id)
		}

		// We must only get one argument for response callback.
		arg, err := arguments.SliceOfLength(1)
		if err != nil {
			resp.Err = &Error{Type: "invalidResponse", Message: err.Error()}
			return
		}

		// Unmarshal callback response argument.
		err = arg[0].Unmarshal(&resp)
		if err != nil {
			resp.Err = &Error{Type: "invalidResponse", Message: err.Error()}
			return
		}

		// At least result or error must be sent.
		keys := make(map[string]interface{})
		err = arg[0].Unmarshal(&keys)
		_, ok1 := keys["result"]
		_, ok2 := keys["error"]
		if !ok1 && !ok2 {
			resp.Err = &Error{
				Type:    "invalidResponse",
				Message: "Server has sent invalid response arguments",
			}
			return
		}
	})
}
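
The doc comment above describes a handshake: the caller blocks on a done channel until the server invokes the response callback. A minimal, hypothetical caller-side sketch of that pattern follows; it is not kite's actual Tell implementation, the send parameter stands in for whatever transmits the dnode message, and the one-minute timeout is arbitrary.

// Hypothetical caller-side sketch (not the real Tell): build the response
// callback, hand it to a transport function together with the arguments,
// then block on doneChan until the callback fires or a timeout elapses.
func (c *Client) tellSketch(method string, args []interface{}, send func(method string, args []interface{}) error) (*response, error) {
	doneChan := make(chan *response, 1)
	removeCallback := make(chan uint64, 1)
	// The real client sends the registered callback ID on removeCallback;
	// closing it here just lets the receive inside the callback return.
	close(removeCallback)

	cb := c.makeResponseCallback(doneChan, removeCallback, method, args)

	// send is an assumed stand-in for the transport layer.
	if err := send(method, append(args, cb)); err != nil {
		return nil, err
	}

	select {
	case resp := <-doneChan:
		return resp, nil
	case <-time.After(time.Minute): // arbitrary illustrative timeout
		return nil, errors.New("timed out waiting for the response callback")
	}
}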
Example #2
// RemoteCache calls klient's remote.cache method.
//
// Note that due to how the remote/req library is set up, this function needs to
// take the callback as a separate argument for now. This will be improved
// in the future, in one way or another.
func (k *Klient) RemoteCache(r req.Cache, cb func(par *dnode.Partial)) error {
	cacheReq := struct {
		req.Cache
		Progress dnode.Function `json:"progress"`
	}{
		Cache: r,
	}

	if cb != nil {
		cacheReq.Progress = dnode.Callback(cb)
	}

	// No response from cacheFolder currently.
	_, err := k.Tell("remote.cacheFolder", cacheReq)
	return err
}
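
Since RemoteCache only wires the callback into the request, a caller that wants to track progress has to decode the dnode payload itself. Below is a hedged usage sketch; the progress fields (Percentage, Error) and the overall timeout are illustrative assumptions, not a documented schema.

// Hypothetical caller sketch for RemoteCache: decode each progress event
// into a locally defined struct and signal completion over a channel.
// The field names below are illustrative assumptions.
func cacheWithProgress(k *Klient, r req.Cache) error {
	done := make(chan error, 1)

	cb := func(par *dnode.Partial) {
		var p struct {
			Percentage int    `json:"percentage"`
			Error      string `json:"error"`
		}
		if err := par.One().Unmarshal(&p); err != nil {
			return // ignore events we cannot decode
		}

		var res error
		switch {
		case p.Error != "":
			res = errors.New(p.Error)
		case p.Percentage < 100:
			return // still in progress
		}

		select {
		case done <- res:
		default: // already signalled
		}
	}

	if err := k.RemoteCache(r, cb); err != nil {
		return err
	}

	select {
	case err := <-done:
		return err
	case <-time.After(10 * time.Minute): // arbitrary illustrative timeout
		return errors.New("timed out waiting for cache progress")
	}
}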
Example #3
File: fs_test.go Project: koding/koding
func TestWatcher(t *testing.T) {
	testDir := "testdata"

	type change struct {
		action string
		name   string
	}

	onChangeFunc := func(changes *[]change) dnode.Function {
		return dnode.Callback(func(r *dnode.Partial) {
			s := r.MustSlice()
			m := s[0].MustMap()

			e := m["event"].MustString()

			var f = &FileEntry{}
			m["file"].Unmarshal(f)

			*changes = append(*changes, change{
				name:   f.FullPath,
				action: e,
			})
		})
	}

	changes1 := make([]change, 0)
	onChange1 := onChangeFunc(&changes1)

	_, err := remote.Tell("readDirectory", struct {
		Path     string
		OnChange dnode.Function
	}{
		Path:     testDir,
		OnChange: onChange1,
	})
	if err != nil {
		t.Fatal(err)
	}

	changes2 := make([]change, 0)
	onChange2 := onChangeFunc(&changes2)

	_, err = remote2.Tell("readDirectory", struct {
		Path     string
		OnChange dnode.Function
	}{
		Path:     testDir,
		OnChange: onChange2,
	})
	if err != nil {
		t.Fatal(err)
	}

	addFile := "testdata/example3.txt"
	newFile := "testdata/example4.txt"

	t.Logf("Creating file %s", addFile)
	time.Sleep(time.Millisecond * 100)
	ioutil.WriteFile(addFile, []byte("example"), 0755)

	t.Logf("Renaming file from %s to %s", addFile, newFile)
	time.Sleep(time.Millisecond * 100)
	err = os.Rename(addFile, newFile)
	if err != nil {
		t.Error(err)
	}

	t.Logf("Removing file %s", newFile)
	time.Sleep(time.Millisecond * 100)
	err = os.Remove(newFile)
	if err != nil {
		t.Error(err)
	}

	time.Sleep(time.Millisecond * 100)

	var expected = map[string]bool{
		"added_testdata/example3.txt":   true,
		"added_testdata/example4.txt":   true,
		"removed_testdata/example3.txt": true,
		"removed_testdata/example4.txt": true,
	}

	t.Logf("changes1 %+v", changes1)
	t.Logf("changes2 %+v", changes2)

	testChanges := func(changes []change) error {
		for _, change := range changes {
			_, ok := expected[change.action+"_"+change.name]
			if !ok {
				return fmt.Errorf("%s_%s does not exist", change.action, change.name)
			}
		}

		return nil
	}

	if err := testChanges(changes1); err != nil {
		t.Errorf("watcher for remote: %s", err)
	}

	if err := testChanges(changes2); err != nil {
		t.Errorf("watcher for remote2: %s", err)
	}
}
Example #4
// Test 2 way communication between kites.
func TestKite(t *testing.T) {
	// Create a mathworker kite
	mathKite := newXhrKite("mathworker", "0.0.1")
	mathKite.Config.DisableAuthentication = true
	mathKite.HandleFunc("square", Square)
	mathKite.HandleFunc("squareCB", SquareCB)
	mathKite.HandleFunc("sleep", Sleep)
	go http.ListenAndServe("127.0.0.1:3636", mathKite)

	// Wait until it's started
	time.Sleep(time.Second)

	// Create exp2 kite
	exp2Kite := newXhrKite("exp2", "0.0.1")
	fooChan := make(chan string)
	exp2Kite.HandleFunc("foo", func(r *Request) (interface{}, error) {
		s := r.Args.One().MustString()
		t.Logf("Message received: %s\n", s)
		fooChan <- s
		return nil, nil
	})

	// exp2 connects to mathworker
	remote := exp2Kite.NewClient("http://127.0.0.1:3636/kite")
	err := remote.Dial()
	if err != nil {
		t.Fatal(err)
	}

	result, err := remote.TellWithTimeout("square", 4*time.Second, 2)
	if err != nil {
		t.Fatal(err)
	}

	number := result.MustFloat64()

	t.Logf("rpc result: %f\n", number)

	if number != 4 {
		t.Fatalf("Invalid result: %f", number)
	}

	select {
	case s := <-fooChan:
		if s != "bar" {
			t.Fatalf("Invalid message: %s", s)
		}
	case <-time.After(100 * time.Millisecond):
		t.Fatal("Did not get the message")
	}

	resultChan := make(chan float64, 1)
	resultCallback := func(args *dnode.Partial) {
		n := args.One().MustFloat64()
		resultChan <- n
	}

	result, err = remote.TellWithTimeout("squareCB", 4*time.Second, 3, dnode.Callback(resultCallback))
	if err != nil {
		t.Fatal(err)
	}

	select {
	case n := <-resultChan:
		if n != 9.0 {
			t.Fatalf("Unexpected result: %f", n)
		}
	case <-time.After(100 * time.Millisecond):
		t.Fatal("Did not get the message")
	}

	result, err = remote.TellWithTimeout("sleep", time.Second)
	if err == nil {
		t.Fatal("Did get message in 1 seconds, however the sleep method takes 2 seconds to response")
	}

	result, err = remote.Tell("sleep")
	if err != nil {
		t.Fatal(err)
	}

	if !result.MustBool() {
		t.Fatal("sleep result must be true")
	}

}
Example #5
// Test 2 way communication between kites.
func TestKite(t *testing.T) {
	// Create a mathworker kite
	mathKite := newXhrKite("mathworker", "0.0.1")
	mathKite.Config.DisableAuthentication = true
	mathKite.Config.Port = 3636
	mathKite.HandleFunc("square", Square)
	mathKite.HandleFunc("squareCB", SquareCB)
	mathKite.HandleFunc("sleep", Sleep)
	mathKite.HandleFunc("sqrt", Sqrt)
	mathKite.FinalFunc(func(r *Request, resp interface{}, err error) (interface{}, error) {
		if r.Method != "sqrt" || err != ErrNegative {
			return resp, err
		}

		a := r.Args.One().MustFloat64()

		// JSON does not marshal complex128, so for test
		// purposes we return a plain string instead.
		return fmt.Sprintf("%di", int(math.Sqrt(-a)+0.5)), nil
	})
	go mathKite.Run()
	<-mathKite.ServerReadyNotify()
	defer mathKite.Close()

	// Create exp2 kite
	exp2Kite := newXhrKite("exp2", "0.0.1")
	fooChan := make(chan string)
	exp2Kite.HandleFunc("foo", func(r *Request) (interface{}, error) {
		s := r.Args.One().MustString()
		t.Logf("Message received: %s\n", s)
		fooChan <- s
		return nil, nil
	})

	// exp2 connects to mathworker
	remote := exp2Kite.NewClient("http://127.0.0.1:3636/kite")
	err := remote.Dial()
	if err != nil {
		t.Fatal(err)
	}
	defer remote.Close()

	result, err := remote.TellWithTimeout("sqrt", 4*time.Second, -4)
	if err != nil {
		t.Fatal(err)
	}

	if s, err := result.String(); err != nil || s != "2i" {
		t.Fatalf("want 2i, got %v (%v)", result, err)
	}

	result, err = remote.TellWithTimeout("square", 4*time.Second, 2)
	if err != nil {
		t.Fatal(err)
	}

	number := result.MustFloat64()

	t.Logf("rpc result: %f\n", number)

	if number != 4 {
		t.Fatalf("Invalid result: %f", number)
	}

	select {
	case s := <-fooChan:
		if s != "bar" {
			t.Fatalf("Invalid message: %s", s)
		}
	case <-time.After(100 * time.Millisecond):
		t.Fatal("Did not get the message")
	}

	resultChan := make(chan float64, 1)
	resultCallback := func(args *dnode.Partial) {
		n := args.One().MustFloat64()
		resultChan <- n
	}

	result, err = remote.TellWithTimeout("squareCB", 4*time.Second, 3, dnode.Callback(resultCallback))
	if err != nil {
		t.Fatal(err)
	}

	select {
	case n := <-resultChan:
		if n != 9.0 {
			t.Fatalf("Unexpected result: %f", n)
		}
	case <-time.After(100 * time.Millisecond):
		t.Fatal("Did not get the message")
	}

	result, err = remote.TellWithTimeout("sleep", time.Second)
	if err == nil {
		t.Fatal("Did get message in 1 seconds, however the sleep method takes 2 seconds to response")
	}

	result, err = remote.Tell("sleep")
	if err != nil {
		t.Fatal(err)
	}

	if !result.MustBool() {
		t.Fatal("sleep result must be true")
	}

}
Example #6
func TestNoConcurrentCallbacks(t *testing.T) {
	const timeout = 2 * time.Second

	type Callback struct {
		Index int
		Func  dnode.Function
	}

	k := newXhrKite("callback", "0.0.1")
	k.Config.DisableAuthentication = true
	k.HandleFunc("call", func(r *Request) (interface{}, error) {
		if r.Args == nil {
			return nil, errors.New("empty argument")
		}

		var arg Callback
		if err := r.Args.One().Unmarshal(&arg); err != nil {
			return nil, err
		}

		if !arg.Func.IsValid() {
			return nil, errors.New("invalid argument")
		}

		if err := arg.Func.Call(arg.Index); err != nil {
			return nil, err
		}

		return true, nil
	})

	go k.Run()
	<-k.ServerReadyNotify()
	defer k.Close()

	url := fmt.Sprintf("http://127.0.0.1:%d/kite", k.Port())

	c := k.NewClient(url)
	defer c.Close()

	// TestNoConcurrentCallbacks asserts that ConcurrentCallbacks
	// is disabled by default for each new client.
	//
	// When callbacks are executed concurrently, the order
	// of indices received on the channel is random,
	// which would make this test fail.
	//
	// c.ConcurrentCallbacks = true

	if err := c.DialTimeout(timeout); err != nil {
		t.Errorf("DialTimeout(%q)=%s", url, err)
	}

	indices := make(chan int, 50)
	callback := dnode.Callback(func(arg *dnode.Partial) {
		var index int
		if err := arg.One().Unmarshal(&index); err != nil {
			t.Logf("failed to unmarshal: %s", err)
		}

		time.Sleep(time.Duration(rand.Int31n(100)) * time.Millisecond)

		indices <- index
	})

	for i := 0; i < cap(indices); i++ {
		arg := &Callback{
			Index: i + 1,
			Func:  callback,
		}

		if _, err := c.TellWithTimeout("call", timeout, arg); err != nil {
			t.Fatalf("%d: TellWithTimeout()=%s", i, err)
		}
	}

	var n, lastIndex int

	for {
		if n == cap(indices) {
			// All indices were read.
			break
		}

		select {
		case <-time.After(timeout):
			t.Fatalf("reading indices has timed out after %s (n=%d)", timeout, n)
		case index := <-indices:
			if index == 0 {
				t.Fatalf("invalid index=%d (n=%d)", index, n)
			}

			if index <= lastIndex {
				t.Fatalf("expected to receive indices in ascending order; received %d, last index %d (n=%d)", index, lastIndex, n)
			}

			lastIndex = index
			n++
		}
	}
}
Example #7
func TestWatch(t *testing.T) {
	msg := []string{"I", "love", "Coffee"}
	fakeFunc := func() (<-chan *vagrantutil.CommandOutput, error) {
		ch := make(chan *vagrantutil.CommandOutput, 3)
		for _, m := range msg {
			ch <- &vagrantutil.CommandOutput{
				Line: m,
			}
		}
		close(ch)

		return ch, nil
	}

	// Add a fake handler; what matters is how `watchCommand` behaves.
	vagrantKite.HandleFunc("fakeWatch", func(r *kite.Request) (interface{}, error) {
		fn := func(r *kite.Request, v *vagrantutil.Vagrant) (interface{}, error) {
			return handlers.watchCommand(r, "", fakeFunc)
		}
		return handlers.withPath(r, fn)
	})

	var reMsgs []string

	done := make(chan error, 1)

	success := dnode.Callback(func(r *dnode.Partial) {
		done <- nil
	})

	failure := dnode.Callback(func(r *dnode.Partial) {
		done <- errors.New(r.One().MustString())
	})

	output := dnode.Callback(func(r *dnode.Partial) {
		msg := r.One().MustString()
		reMsgs = append(reMsgs, msg)
	})

	_, err := remote.Tell("fakeWatch", struct {
		FilePath string
		Output   dnode.Function
		Success  dnode.Function
		Failure  dnode.Function
	}{
		FilePath: vagrantName,
		Success:  dnode.Function(success),
		Failure:  dnode.Function(failure),
		Output:   dnode.Function(output),
	})
	if err != nil {
		t.Fatal(err)
	}

	// wait so our callbacks can collect the messages
	select {
	case err := <-done:
		if err != nil {
			t.Fatal(err)
		}
	case <-time.After(5 * time.Second):
		t.Fatal("timed out waiting for done")
	}

	equals(t, msg, reMsgs)
}
Example #8
func TestSubscribe(t *testing.T) {
	ps := NewPubSub(logging.NewLogger("testing"))

	s := kite.New("s", "0.0.0")
	s.Config.DisableAuthentication = true

	doneC, subscribe := handlerWrapper(ps.Subscribe)
	s.HandleFunc("client.Subscribe", subscribe)

	ts := httptest.NewServer(s)
	defer ts.Close()

	c1 := kite.New("c1", "0.0.0").NewClient(fmt.Sprintf("%s/kite", ts.URL))
	c2 := kite.New("c2", "0.0.0").NewClient(fmt.Sprintf("%s/kite", ts.URL))

	err := c1.Dial()
	if err != nil {
		t.Fatal("Failed to connect to testing Kite", err)
	}
	err = c2.Dial()
	if err != nil {
		t.Fatal("Failed to connect to testing Kite", err)
	}

	// Should require arguments
	_, err = c1.Tell("client.Subscribe")
	if err == nil {
		t.Error("client.Subscribe should require args")
	}
	if err = wait(doneC, time.Second); err != nil {
		t.Fatalf("want err = nil; got %v", err)
	}

	// Should require eventName
	_, err = c1.Tell("client.Subscribe", struct {
		Data      string
		OnPublish dnode.Function
	}{
		Data:      "foo",
		OnPublish: dnode.Callback(func(f *dnode.Partial) {}),
	})
	if err == nil {
		t.Error("client.Subscribe should require EventName")
	}
	if err = wait(doneC, time.Second); err != nil {
		t.Fatalf("want err = nil; got %v", err)
	}

	// Should require onPublish
	_, err = c1.Tell("client.Subscribe", struct {
		eventName string
		Data      string
	}{
		eventName: "foo",
		Data:      "bar",
	})
	if err == nil {
		t.Error("client.Subscribe should require OnPublish")
	}
	if err = wait(doneC, time.Second); err != nil {
		t.Fatalf("want err = nil; got %v", err)
	}

	// Should require valid onPublish func
	_, err = c1.Tell("client.Subscribe", struct {
		eventName string
		onPublish string
	}{
		eventName: "foo",
		onPublish: "bar",
	})
	if err == nil {
		t.Error("client.Subscribe should require a valid OnPublish func")
	}
	if err = wait(doneC, time.Second); err != nil {
		t.Fatalf("want err = nil; got %v", err)
	}

	// Should subscribe to any given event name
	pRes, err := c1.Tell("client.Subscribe", SubscribeRequest{
		EventName: "test",
		OnPublish: dnode.Callback(func(f *dnode.Partial) {}),
	})
	if err != nil {
		t.Error(err)
	}
	if err = wait(doneC, time.Second); err != nil {
		t.Fatalf("want err = nil; got %v", err)
	}

	subs := getCopy(ps, "test")
	if len(subs) != 1 {
		t.Fatal("client.Subscribe should store a single onPublish callback")
	}

	// Should return the subIndex
	var res SubscribeResponse
	if err = pRes.Unmarshal(&res); err != nil {
		t.Errorf("client.Subscribe should return a valid response struct. err:%s", err)
	}

	if expected := 1; res.ID != expected {
		t.Errorf(
			"client.Subscribe should return the response id. Wanted:%d, Got:%d",
			expected, res.ID,
		)
	}

	// Should store the proper callback
	successC := make(chan struct{}, 1)
	pRes, err = c1.Tell("client.Subscribe", SubscribeRequest{
		EventName: "test",
		OnPublish: dnode.Callback(func(f *dnode.Partial) {
			select {
			case successC <- struct{}{}:
			case <-time.After(time.Second): // Don't leak go-routines.
			}
		}),
	})
	if err != nil {
		t.Fatal(err)
	}

	if err = wait(doneC, time.Second); err != nil {
		t.Fatalf("want err = nil; got %v", err)
	}

	subs = getCopy(ps, "test")
	if len(subs) != 2 {
		t.Fatal("client.Subscribe should store multiple onPublish callbacks")
	}

	subs[2].Call()
	if err = wait(successC, time.Second); err != nil {
		t.Fatalf("want err = nil; got %v", err)
	}

	if err = pRes.Unmarshal(&res); err != nil {
		t.Errorf("client.Subscribe should return a valid response struct. err:%s", err)
	}

	if expected := 2; res.ID != expected {
		t.Errorf(
			"client.Subscribe should return the response id. Wanted:%d, Got:%d",
			expected, res.ID,
		)
	}

	// Should allow multiple clients to subscribe
	pRes, err = c2.Tell("client.Subscribe", SubscribeRequest{
		EventName: "test",
		OnPublish: dnode.Callback(func(_ *dnode.Partial) {}),
	})
	if err != nil {
		t.Error(err)
	}
	if err = wait(doneC, time.Second); err != nil {
		t.Fatalf("want err = nil; got %v", err)
	}

	subs = getCopy(ps, "test")
	if len(subs) != 3 {
		t.Fatal("client.Subscribe should allow multiple clients to Sub")
	}

	if err = pRes.Unmarshal(&res); err != nil {
		t.Errorf("client.Subscribe should return a valid response struct. err:%s", err)
	}

	if expected := 3; res.ID != expected {
		t.Errorf(
			"client.Subscribe should return the response id. Wanted:%d, Got:%d",
			expected, res.ID,
		)
	}

	// disconnectFunc will be added to kite's OnDisconnect callback slice.
	// Since kite callbacks are synchronous, we will provide synchronization
	// with Subscriptions map.
	disconnectedC := make(chan struct{})
	s.OnDisconnect(func(_ *kite.Client) {
		select {
		case disconnectedC <- struct{}{}:
		case <-time.After(time.Second):
		}
	})

	// Should remove onPublish func after the client disconnects
	c1.Close()
	if err = wait(disconnectedC, 2*time.Second); err != nil {
		t.Fatalf("want err = nil; got %v", err)
	}

	subs = getCopy(ps, "test")
	if len(subs) != 1 {
		t.Error("client.Subscribe",
			"should remove all of a clients callbacks on Disconnect")
	}

	// Should remove the map, when all clients disconnect
	c2.Close()
	if err = wait(disconnectedC, 2*time.Second); err != nil {
		t.Fatalf("want err = nil; got %v", err)
	}

	subs = getCopy(ps, "test")
	if subs != nil {
		t.Error("client.Subscribe",
			"should remove the event map when all clients disconnect")
	}
}
Example #9
func TestUnsubscribe(t *testing.T) {
	ps := NewPubSub(logging.NewLogger("testing"))
	s := kite.New("s", "0.0.0")
	s.Config.DisableAuthentication = true

	donePubC, publish := handlerWrapper(ps.Publish)
	s.HandleFunc("client.Publish", publish)

	doneSubC, subscribe := handlerWrapper(ps.Subscribe)
	s.HandleFunc("client.Subscribe", subscribe)

	doneUnsubC, unsubscribe := handlerWrapper(ps.Unsubscribe)
	s.HandleFunc("client.Unsubscribe", unsubscribe)

	ts := httptest.NewServer(s)
	defer ts.Close()

	c1 := kite.New("c1", "0.0.0").NewClient(fmt.Sprintf("%s/kite", ts.URL))
	c2 := kite.New("c2", "0.0.0").NewClient(fmt.Sprintf("%s/kite", ts.URL))

	err := c1.Dial()
	if err != nil {
		t.Fatal("Failed to connect to testing Kite", err)
	}
	err = c2.Dial()
	if err != nil {
		t.Fatal("Failed to connect to testing Kite", err)
	}

	// Track the calls to our subs.
	callsMu := sync.Mutex{} // protects calls map.
	calls := map[string]bool{}
	var wg sync.WaitGroup
	wg.Add(3)

	// Setup our event, sub index 1
	_, err = c1.Tell("client.Subscribe", SubscribeRequest{
		EventName: "test",
		OnPublish: dnode.Callback(func(f *dnode.Partial) {
			defer wg.Done()

			callsMu.Lock()
			defer callsMu.Unlock()
			calls["c1:1"] = true
		}),
	})
	if err != nil {
		t.Fatal(err)
	}
	if err = wait(doneSubC, time.Second); err != nil {
		t.Fatalf("want err = nil; got %v", err)
	}

	// Setup our event, sub index 2
	_, err = c2.Tell("client.Subscribe", SubscribeRequest{
		EventName: "test",
		OnPublish: dnode.Callback(func(f *dnode.Partial) {
			defer wg.Done()

			callsMu.Lock()
			defer callsMu.Unlock()
			calls["c2:2"] = true
		}),
	})
	if err != nil {
		t.Fatal(err)
	}
	if err = wait(doneSubC, time.Second); err != nil {
		t.Fatalf("want err = nil; got %v", err)
	}

	// Setup our event, sub index 3
	_, err = c2.Tell("client.Subscribe", SubscribeRequest{
		EventName: "test",
		OnPublish: dnode.Callback(func(f *dnode.Partial) {
			defer wg.Done()

			callsMu.Lock()
			defer callsMu.Unlock()
			calls["c2:3"] = true
		}),
	})
	if err != nil {
		t.Fatal(err)
	}
	if err = wait(doneSubC, time.Second); err != nil {
		t.Fatalf("want err = nil; got %v", err)
	}

	// Setup our event, sub index 4
	_, err = c1.Tell("client.Subscribe", SubscribeRequest{
		EventName: "test",
		OnPublish: dnode.Callback(func(f *dnode.Partial) {
			defer wg.Done()

			callsMu.Lock()
			defer callsMu.Unlock()
			calls["c1:4"] = true
		}),
	})
	if err != nil {
		t.Fatal(err)
	}
	if err = wait(doneSubC, time.Second); err != nil {
		t.Fatalf("want err = nil; got %v", err)
	}

	// Should remove subs from client
	_, err = c2.Tell("client.Unsubscribe", UnsubscribeRequest{
		EventName: "test",
		ID:        2,
	})
	if err != nil {
		t.Fatal(err)
	}
	if err = wait(doneUnsubC, time.Second); err != nil {
		t.Fatalf("want err = nil; got %v", err)
	}

	subs := getCopy(ps, "test")
	if expected := 3; len(subs) != expected {
		t.Fatalf(
			"client.Unsubscribe should remove callbacks. Wanted:%d, Got:%d",
			expected, len(subs),
		)
	}

	// Should publish to the expected methods. The above check should
	// work for this, but just to be safe let's actually publish and make sure
	// the subs work like we expect.
	_, err = c1.Tell("client.Publish", PublishRequest{
		EventName: "test",
	})
	if err != nil {
		t.Fatal(err)
	}
	if err = wait(donePubC, time.Second); err != nil {
		t.Fatalf("want err = nil; got %v", err)
	}

	// Block, waiting for the goroutines to call the callbacks.
	wg.Wait()

	expected := map[string]bool{"c1:1": true, "c2:3": true, "c1:4": true}
	if !reflect.DeepEqual(expected, calls) {
		t.Errorf(
			"client.Unsubscribe should prevent callbacks from receving calls. Wanted:%s, Got:%s",
			expected, calls,
		)
	}
	// Reset recorded calls
	calls = map[string]bool{}
	wg.Add(2)

	// Should allow any kite to unsub given an ID (i.e., not just its own subs)
	_, err = c2.Tell("client.Unsubscribe", UnsubscribeRequest{
		EventName: "test",
		ID:        4,
	})
	if err != nil {
		t.Fatal(err)
	}
	if err = wait(doneUnsubC, time.Second); err != nil {
		t.Fatalf("want err = nil; got %v", err)
	}

	// Should publish to the expected methods.
	_, err = c1.Tell("client.Publish", PublishRequest{
		EventName: "test",
	})
	if err != nil {
		t.Fatal(err)
	}
	if err = wait(donePubC, time.Second); err != nil {
		t.Fatalf("want err = nil; got %v", err)
	}

	// Block, waiting for the goroutines to call the callbacks.
	wg.Wait()

	expected = map[string]bool{"c1:1": true, "c2:3": true}
	if !reflect.DeepEqual(expected, calls) {
		t.Errorf(
			"client.Unsubscribe should prevent callbacks from receving calls. Wanted:%s, Got:%s",
			expected, calls,
		)
	}

	// Should return ErrSubNotFound if the id does not exist.
	_, err = c2.Tell("client.Unsubscribe", UnsubscribeRequest{
		EventName: "test",
		ID:        7,
	})
	if err == nil || err.Error() != ErrSubNotFound.Error() {
		t.Errorf(
			"client.Unsubscribe: Should return the proper error when the sub is not found. Wanted:%s, Got:%s",
			ErrSubNotFound, err,
		)
	}
	if err = wait(doneUnsubC, time.Second); err != nil {
		t.Fatalf("want err = nil; got %v", err)
	}

	// Should return ErrSubNotFound if the event does not exist.
	_, err = c2.Tell("client.Unsubscribe", UnsubscribeRequest{
		EventName: "fakeEvent",
		ID:        10,
	})
	if err == nil || err.Error() != ErrSubNotFound.Error() {
		t.Errorf(
			"client.Unsubscribe: Should return the proper error when the sub is not found. Wanted:%s, Got:%s",
			ErrSubNotFound, err,
		)
	}
	if err = wait(doneUnsubC, time.Second); err != nil {
		t.Fatalf("want err = nil; got %v", err)
	}

	// Should remove the event map if no subs are left.
	_, err = c2.Tell("client.Unsubscribe", UnsubscribeRequest{
		EventName: "test",
		ID:        1,
	})
	if err != nil {
		t.Fatal(err)
	}
	if err = wait(doneUnsubC, time.Second); err != nil {
		t.Fatalf("want err = nil; got %v", err)
	}

	_, err = c2.Tell("client.Unsubscribe", UnsubscribeRequest{
		EventName: "test",
		ID:        3,
	})
	if err != nil {
		t.Fatal(err)
	}
	if err = wait(doneUnsubC, time.Second); err != nil {
		t.Fatalf("want err = nil; got %v", err)
	}

	if subs := getCopy(ps, "test"); subs != nil {
		t.Errorf(
			"client.Unsubscribe should remove the sub map if no subs are left, it did not.",
		)
	}
}
Example #10
func TestTail(t *testing.T) {
	tmpDir, tmpFile, err := makeTempAndCopy("testdata/testfile1.txt")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)

	var watchCount int
	var watchResult []string
	var watchMu sync.Mutex

	watchFunc := dnode.Callback(func(r *dnode.Partial) {
		line := r.One().MustString()

		watchMu.Lock()
		watchCount++
		watchResult = append(watchResult, line)
		watchMu.Unlock()
	})

	_, err = remote.Tell("tail", &Request{
		Path:  tmpFile,
		Watch: watchFunc,
	})
	if err != nil {
		t.Fatal(err)
	}

	fmt.Println("Waiting for the results..")
	time.Sleep(time.Second * 1)

	// Should return empty by default, since no new lines were given.
	watchMu.Lock()
	n := len(watchResult)
	watchMu.Unlock()

	if n != 0 {
		t.Errorf("WatchFunc should not be called for pre-existing lines.\nWant: 0\nGot : %d\n", n)
	}

	file, err := os.OpenFile(tmpFile, os.O_APPEND|os.O_WRONLY, 0600)
	if err != nil {
		t.Fatal(err)
	}

	file.WriteString("Tail2\n")
	file.WriteString("Tail3\n")
	file.WriteString("Tail4\n")
	file.Close()

	// wait so the watch function can pick up the tail changes
	time.Sleep(time.Second * 5)

	var modifiedLines = []string{"Tail2", "Tail3", "Tail4"}

	watchMu.Lock()
	if !reflect.DeepEqual(modifiedLines, watchResult) {
		err = fmt.Errorf("\nWatchFunc should be called for each appended line.\n"+
			"Want: %#v\nGot : %#v\n", modifiedLines, watchResult)
	}
	watchMu.Unlock()

	if err != nil {
		t.Error(err)
	}
}
Example #11
func TestTailOffset(t *testing.T) {
	tmpDir, tmpFile, err := makeTempAndCopy("testdata/testfile1.txt")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)

	// Read the last 3 lines of the file.
	offset := 3

	var watchCount int
	watchResult := []string{}
	watchFunc := dnode.Callback(func(r *dnode.Partial) {
		watchCount++
		line := r.One().MustString()
		watchResult = append(watchResult, line)
	})

	_, err = remote.Tell("tail", &Request{
		Path:       tmpFile,
		Watch:      watchFunc,
		LineOffset: offset,
	})
	if err != nil {
		t.Fatal(err)
	}

	// Write some data to file
	file, err := os.OpenFile(tmpFile, os.O_APPEND|os.O_WRONLY, 0600)
	if err != nil {
		t.Fatal(err)
	}

	_, err = file.WriteString("DataA\n")
	if err != nil {
		t.Fatal(err)
	}
	_, err = file.WriteString("DataB\n")
	if err != nil {
		t.Fatal(err)
	}
	err = file.Close()
	if err != nil {
		t.Fatal(err)
	}

	fmt.Println("....Waiting for the results..")
	time.Sleep(time.Second * 5)

	// Read the file, and get the offset lines to compare against.
	sourceText, err := ioutil.ReadFile(tmpFile)
	if err != nil {
		t.Fatal(err)
	}
	// Split the file into lines to build the expected tail.
	offsetLines := strings.Split(strings.TrimSpace(string(sourceText)), "\n")
	// Adding 2 to the offset, because we want to get
	// the offset lines + our additions.
	offsetLines = offsetLines[len(offsetLines)-(offset+2):]
	if !reflect.DeepEqual(offsetLines, watchResult) {
		t.Errorf(
			"\nWatchFunc should callback with offset lines.\nWant: %#v\nGot : %#v\n",
			offsetLines, watchResult,
		)
	}

	if watchCount != offset+2 {
		t.Errorf(
			"WatchFunc should be called for each offsetline, and any new writes.\nWanted %d calls, Got %d calls",
			offset+2, watchCount,
		)
	}
}
Example #12
// TestMultipleTail compares two log.tail calls on a single file, and ensures that
// they both receive the same input.
func TestMultipleTail(t *testing.T) {
	tmpDir, tmpFile, err := makeTempAndCopy("testdata/testfile2.txt")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)

	watchResult := []string{}
	watchFunc := dnode.Callback(func(r *dnode.Partial) {
		line := r.One().MustString()
		watchResult = append(watchResult, line)
	})

	_, err = remote.Tell("tail", &Request{
		Path:  tmpFile,
		Watch: watchFunc,
	})
	if err != nil {
		t.Fatal(err)
	}

	watchResult2 := []string{}
	watchFunc2 := dnode.Callback(func(r *dnode.Partial) {
		line := r.One().MustString()
		watchResult2 = append(watchResult2, line)
	})

	_, err = remote2.Tell("tail", &Request{
		Path:  tmpFile,
		Watch: watchFunc2,
	})
	if err != nil {
		t.Fatal(err)
	}

	time.Sleep(time.Second * 2)

	file, err := os.OpenFile(tmpFile, os.O_APPEND|os.O_WRONLY, 0600)
	if err != nil {
		t.Fatal(err)
	}
	defer file.Close()

	file.WriteString("Tail2\n")
	file.WriteString("Tail3\n")

	// wait so the watch functions can pick up the tail changes
	time.Sleep(time.Second)
	t.Logf("watchResult = %+v\n", watchResult)
	t.Logf("watchResult2 = %+v\n", watchResult2)

	// Now check the new two results
	if !reflect.DeepEqual(
		watchResult[len(watchResult)-2:],
		watchResult2[len(watchResult2)-2:],
	) {
		t.Errorf("\nWant: %v\nGot : %v\n",
			watchResult[len(watchResult)-2:],
			watchResult2[len(watchResult2)-2:],
		)
	}

	// Now let us disconnect the second connection; we should not receive any
	// new changes for watchResult2 (from watchFunc2) anymore.

	currentWatchLen := len(watchResult)
	currentWatch2Len := len(watchResult2)
	remote2.Close()

	// wait so onDisconnect gets recognized by the kite
	time.Sleep(time.Second)

	file.WriteString("Tail4\n")
	file.WriteString("Tail5\n")

	// wait so the watch function can pick up the tail changes
	time.Sleep(time.Second)

	if currentWatch2Len != len(watchResult2) {
		t.Errorf("WatchFunc2 is still triggered, got %d should have %d", len(watchResult2), currentWatch2Len)
	}

	if currentWatchLen+2 != len(watchResult) {
		t.Errorf("WatchFunc2 is not triggered, got %d should have %d", len(watchResult), currentWatchLen+2)
	}
}
Example #13
File: handlers.go Project: gotao/kite
func (k *Kontrol) handleRegister(r *kite.Request) (interface{}, error) {
	k.log.Info("Register request from: %s", r.Client.Kite)

	if r.Args.One().MustMap()["url"].MustString() == "" {
		return nil, errors.New("invalid url")
	}

	var args struct {
		URL string `json:"url"`
	}
	r.Args.One().MustUnmarshal(&args)
	if args.URL == "" {
		return nil, errors.New("empty url")
	}

	// Only accept requests with kiteKey because we need this info
	// for generating tokens for this kite.
	if r.Auth.Type != "kiteKey" {
		return nil, fmt.Errorf("Unexpected authentication type: %s", r.Auth.Type)
	}

	t, err := jwt.Parse(r.Auth.Key, kitekey.GetKontrolKey)
	if err != nil {
		return nil, err
	}

	publicKey, ok := t.Claims["kontrolKey"].(string)
	if !ok {
		return nil, errors.New("public key is not passed")
	}

	var keyPair *KeyPair
	var newKey bool

	// check if the key is valid and is stored in the key pair storage, if not
	// check if there is a new key we can use.
	keyPair, err = k.keyPair.GetKeyFromPublic(strings.TrimSpace(publicKey))
	if err != nil {
		newKey = true
		keyPair, err = k.pickKey(r)
		if err != nil {
			return nil, err // nothing to do here ..
		}
	}

	kiteURL := args.URL
	remote := r.Client

	if err := validateKiteKey(&remote.Kite); err != nil {
		return nil, err
	}

	value := &kontrolprotocol.RegisterValue{
		URL:   kiteURL,
		KeyID: keyPair.ID,
	}

	// Register first by adding the value to the storage. Return if there is
	// any error.
	if err := k.storage.Upsert(&remote.Kite, value); err != nil {
		k.log.Error("storage add '%s' error: %s", remote.Kite, err)
		return nil, errors.New("internal error - register")
	}

	every := onceevery.New(UpdateInterval)

	ping := make(chan struct{}, 1)
	closed := false

	updaterFunc := func() {
		for {
			select {
			case <-ping:
				k.log.Debug("Kite is active, got a ping %s", remote.Kite)
				every.Do(func() {
					k.log.Debug("Kite is active, updating the value %s", remote.Kite)
					err := k.storage.Update(&remote.Kite, value)
					if err != nil {
						k.log.Error("storage update '%s' error: %s", remote.Kite, err)
					}
				})
			case <-time.After(HeartbeatInterval + HeartbeatDelay):
				k.log.Debug("Kite didn't sent any heartbeat %s.", remote.Kite)
				every.Stop()
				closed = true
				return
			}
		}
	}
	go updaterFunc()

	heartbeatArgs := []interface{}{
		HeartbeatInterval / time.Second,
		dnode.Callback(func(args *dnode.Partial) {
			k.log.Debug("Kite send us an heartbeat. %s", remote.Kite)

			k.clientLocks.Get(remote.Kite.ID).Lock()
			defer k.clientLocks.Get(remote.Kite.ID).Unlock()

			select {
			case ping <- struct{}{}:
			default:
			}

			// seems we missed a heartbeat, so start it again!
			if closed {
				closed = false
				k.log.Warning("Updater was closed, but we are still getting heartbeats. Starting again %s",
					remote.Kite)

				// it might be removed because the ttl cleaner would come
				// before us, so try to add it again; the updater will then
				// continue to update it afterwards.
				k.storage.Upsert(&remote.Kite, value)
				go updaterFunc()
			}
		}),
	}

	// now trigger the remote kite so it periodically sends us a heartbeat
	remote.GoWithTimeout("kite.heartbeat", 4*time.Second, heartbeatArgs...)

	k.log.Info("Kite registered: %s", remote.Kite)

	remote.OnDisconnect(func() {
		k.log.Info("Kite disconnected: %s", remote.Kite)
		every.Stop()
	})

	// send the response back to the kite, including the new public key if one exists
	p := &protocol.RegisterResult{URL: args.URL}
	if newKey {
		p.PublicKey = keyPair.Public
	}

	return p, nil
}
Example #14
func (k *Klient) cmd(queryString, method, boxPath string) error {
	queryString, err := utils.QueryString(queryString)
	if err != nil {
		return err
	}

	k.Log.Debug("calling %q command on %q with %q", method, queryString, boxPath)

	kref, err := klient.ConnectTimeout(k.Kite, queryString, k.dialTimeout())
	if err != nil {
		k.Log.Debug("connecting to %q klient failed: %s", queryString, err)

		return err
	}

	done := make(chan error, 1)

	success := dnode.Callback(func(*dnode.Partial) {
		done <- nil
	})

	failure := dnode.Callback(func(r *dnode.Partial) {
		msg, err := r.One().String()
		if err != nil {
			err = errors.New("unknown failure")
		} else {
			err = errors.New(msg)
		}
		done <- err
	})

	lost := make(chan struct{})
	beat := make(chan struct{})
	stop := make(chan struct{})
	defer close(stop)

	go func() {
		// Heartbeat timer is initially stopped since at this
		// point we do not know whether klient supports heartbeats.
		// On first heartbeat we activate the timer and set the ch.
		var (
			t  *time.Timer
			ch <-chan time.Time
		)

		for {
			select {
			case <-stop:
				return
			case <-ch:
				lost <- struct{}{}
			case <-beat:
				if t == nil {
					t = time.NewTimer(defaultDialTimeout)
					ch = t.C
					defer t.Stop()
				}

				t.Reset(defaultDialTimeout)
			}
		}
	}()

	heartbeat := dnode.Callback(func(r *dnode.Partial) {
		beat <- struct{}{}
	})

	req := &Command{
		FilePath:  boxPath,
		Success:   success,
		Failure:   failure,
		Heartbeat: heartbeat,
	}

	if k.Debug {
		log := k.Log.New(method)
		req.Output = dnode.Callback(func(r *dnode.Partial) {
			log.Debug("%s", r.One().MustString())
		})
	}

	if _, err = kref.Client.TellWithTimeout(method, k.timeout(), req); err != nil {
		return errors.New("sending request to klient failed: " + err.Error())
	}

	select {
	case err := <-done:
		return err
	case <-time.After(k.timeout()):
		return fmt.Errorf("timed out calling %q on %q", method, queryString)
	case <-lost:
		return errors.New("connection to your KD Daemon was lost due to inactivity")
	}
}
Example #15
File: fs.go Project: koding/koding
func ReadDirectory(r *kite.Request) (interface{}, error) {
	var params ReadDirectoryOptions
	if r.Args == nil {
		return nil, errors.New("arguments are not passed")
	}

	if r.Args.One().Unmarshal(&params) != nil || params.Path == "" {
		log.Println("params", params)
		return nil, errors.New("{ path: [string], onChange: [function]}")
	}

	response := make(map[string]interface{})

	if params.OnChange.IsValid() {
		onceBody := func() { startWatcher() }
		go once.Do(onceBody)

		var eventType string
		var fileEntry *FileEntry

		changer := func(ev fsnotify.Event) {
			switch ev.Op {
			case fsnotify.Create:
				eventType = "added"
				fileEntry, _ = getInfo(ev.Name)
			case fsnotify.Remove, fsnotify.Rename:
				eventType = "removed"
				fileEntry = NewFileEntry(path.Base(ev.Name), ev.Name)
			}

			event := map[string]interface{}{
				"event": eventType,
				"file":  fileEntry,
			}

			// send back the result to the client
			params.OnChange.Call(event)
			return
		}

		// first check if we are watching the path; if not, send it to the watcher
		mu.Lock()
		userCallbacks, ok := watchCallbacks[params.Path]
		if !ok {
			// notify new paths to the watcher
			newPaths <- params.Path
			userCallbacks = make(map[string]func(fsnotify.Event), 0)
		}

		// now add the callback to the specific user.
		_, ok = userCallbacks[r.Username]
		if !ok {
			userCallbacks[r.Username] = changer
			watchCallbacks[params.Path] = userCallbacks
		}
		mu.Unlock()

		removePath := func() {
			mu.Lock()
			userCallbacks, ok := watchCallbacks[params.Path]
			if ok {
				// delete the user callback function for this path
				delete(userCallbacks, r.Username)

				// now check if there are any users left. If we have removed
				// all users, we should also stop the watcher from watching the
				// path. So notify the watcher to stop watching the path and
				// also remove it from the callbacks map
				if len(userCallbacks) == 0 {
					delete(watchCallbacks, params.Path)
					oldPaths <- params.Path
				}
			}
			mu.Unlock()
		}

		// remove the user or path when the remote client disconnects
		r.Client.OnDisconnect(removePath)

		// this callback is called whenever we receive a 'stopWatching' from the client
		response["stopWatching"] = dnode.Callback(func(r *dnode.Partial) {
			removePath()
		})
	}

	files, err := readDirectory(params.Path, params.Recursive, params.IgnoreFolders)
	if err != nil {
		return nil, err
	}

	response["files"] = files
	return response, nil
}
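
For completeness, here is a hypothetical client-side counterpart to the handler above: it subscribes through onChange and later invokes the stopWatching callback returned in the response map. The struct used to decode the result mirrors the keys set by ReadDirectory ("files", "stopWatching"), but the exact field types are illustrative assumptions.

// Hypothetical client-side sketch for ReadDirectory: register an onChange
// callback, decode the response into a struct, and stop watching by
// invoking the returned stopWatching callback.
func watchDirectorySketch(client *kite.Client, dir string) error {
	onChange := dnode.Callback(func(r *dnode.Partial) {
		// Each event carries {"event": ..., "file": ...}, as built by changer above.
	})

	res, err := client.Tell("readDirectory", struct {
		Path     string
		OnChange dnode.Function
	}{
		Path:     dir,
		OnChange: onChange,
	})
	if err != nil {
		return err
	}

	var resp struct {
		Files        []*FileEntry   `json:"files"`
		StopWatching dnode.Function `json:"stopWatching"`
	}
	if err := res.Unmarshal(&resp); err != nil {
		return err
	}

	// ... use resp.Files and handle incoming change events ...

	// Ask the server to stop watching this path for us.
	if resp.StopWatching.IsValid() {
		return resp.StopWatching.Call()
	}

	return nil
}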
Example #16
func (k *Kontrol) HandleRegister(r *kite.Request) (interface{}, error) {
	k.log.Info("Register request from: %s", r.Client.Kite)

	// Only accept requests with kiteKey because we need this info
	// for generating tokens for this kite.
	if r.Auth.Type != "kiteKey" {
		return nil, fmt.Errorf("Unexpected authentication type: %s", r.Auth.Type)
	}

	var args struct {
		URL string `json:"url"`
	}

	if err := r.Args.One().Unmarshal(&args); err != nil {
		return nil, err
	}

	if args.URL == "" {
		return nil, errors.New("empty url")
	}

	if _, err := url.Parse(args.URL); err != nil {
		return nil, fmt.Errorf("invalid register URL: %s", err)
	}

	res := &protocol.RegisterResult{
		URL: args.URL,
	}

	ex := &kitekey.Extractor{
		Claims: &kitekey.KiteClaims{},
	}

	t, err := jwt.ParseWithClaims(r.Auth.Key, ex.Claims, ex.Extract)
	if err != nil {
		return nil, err
	}

	var keyPair *KeyPair
	var origKey = ex.Claims.KontrolKey

	// check if the key is valid and is stored in the key pair storage, if not
	// check if there is a new key we can use.
	keyPair, res.KiteKey, err = k.getOrUpdateKeyPub(ex.Claims.KontrolKey, t, r)
	if err != nil {
		return nil, err
	}

	if origKey != keyPair.Public {
		// NOTE(rjeczalik): updates public key for old kites, new kites
		// expect kite key to be updated
		res.PublicKey = keyPair.Public
	}

	if err := validateKiteKey(&r.Client.Kite); err != nil {
		return nil, err
	}

	value := &kontrolprotocol.RegisterValue{
		URL:   args.URL,
		KeyID: keyPair.ID,
	}

	// Register first by adding the value to the storage. Return if there is
	// any error.
	if err := k.storage.Upsert(&r.Client.Kite, value); err != nil {
		k.log.Error("storage add '%s' error: %s", &r.Client.Kite, err)
		return nil, errors.New("internal error - register")
	}

	every := onceevery.New(UpdateInterval)

	ping := make(chan struct{}, 1)
	closed := int32(0)

	kiteCopy := r.Client.Kite

	updaterFunc := func() {
		for {
			select {
			case <-k.closed:
				return
			case <-ping:
				k.log.Debug("Kite is active, got a ping %s", &kiteCopy)
				every.Do(func() {
					k.log.Debug("Kite is active, updating the value %s", &kiteCopy)
					err := k.storage.Update(&kiteCopy, value)
					if err != nil {
						k.log.Error("storage update '%s' error: %s", &kiteCopy, err)
					}
				})
			case <-time.After(HeartbeatInterval + HeartbeatDelay):
				k.log.Debug("Kite didn't sent any heartbeat %s.", &kiteCopy)
				atomic.StoreInt32(&closed, 1)
				return
			}
		}
	}

	go updaterFunc()

	heartbeatArgs := []interface{}{
		HeartbeatInterval / time.Second,
		dnode.Callback(func(args *dnode.Partial) {
			k.log.Debug("Kite send us an heartbeat. %s", &kiteCopy)

			k.clientLocks.Get(kiteCopy.ID).Lock()
			defer k.clientLocks.Get(kiteCopy.ID).Unlock()

			select {
			case ping <- struct{}{}:
			default:
			}

			// seems we missed a heartbeat, so start it again!
			if atomic.CompareAndSwapInt32(&closed, 1, 0) {
				k.log.Warning("Updater was closed, but we are still getting heartbeats. Starting again %s", &kiteCopy)

				// it might be removed because the ttl cleaner would come
				// before us, so try to add it again; the updater will then
				// continue to update it afterwards.
				k.storage.Upsert(&kiteCopy, value)
				go updaterFunc()
			}
		}),
	}

	// now trigger the remote kite so it periodically sends us a heartbeat
	resp := r.Client.GoWithTimeout("kite.heartbeat", 4*time.Second, heartbeatArgs...)

	go func() {
		if err := (<-resp).Err; err != nil {
			k.log.Error("failed requesting heartbeats from %q kite: %s", kiteCopy.Name, err)
		}
	}()

	k.log.Info("Kite registered: %s", &r.Client.Kite)

	clientKite := r.Client.Kite.String()

	r.Client.OnDisconnect(func() {
		k.log.Info("Kite disconnected: %s", clientKite)
	})

	return res, nil
}