Example #1
func pipeThenClose(src, dst *net.TCPConn, finishChannel chan bool) {
	defer func() {
		src.CloseRead()
		dst.CloseWrite()
		finishChannel <- true
	}()

	buf := asocks.GetBuffer()
	defer asocks.GiveBuffer(buf)

	for {
		src.SetReadDeadline(time.Now().Add(60 * time.Second))
		n, err := src.Read(buf)
		if n > 0 {
			data := buf[0:n]
			encodeData(data)
			if _, err := dst.Write(data); err != nil {
				break
			}
		}
		if err != nil {
			break
		}
	}
}
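The two directions of the pipe and the drain of finishChannel are not shown in this listing; below is a minimal caller sketch, assuming a hypothetical handleConnection that owns both ends of the proxied connection (the function name and the final Close calls are assumptions, not part of the original project):
func handleConnection(client, remote *net.TCPConn) {
	// Buffered so neither pipeThenClose blocks on its final send.
	finishChannel := make(chan bool, 2)

	go pipeThenClose(client, remote, finishChannel) // client -> remote
	go pipeThenClose(remote, client, finishChannel) // remote -> client

	// Each direction sends exactly one value when it returns.
	<-finishChannel
	<-finishChannel

	client.Close()
	remote.Close()
}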
Example #2
// Copy from WebSocket to socket and vice versa.
func proxy(local *net.TCPConn, conn *websocketConn) {
	var wg sync.WaitGroup

	wg.Add(2)

	go func() {
		_, err := io.Copy(conn, local)
		if err != nil {
			Log("error copying ORPort to WebSocket")
		}
		local.CloseRead()
		conn.Close()
		wg.Done()
	}()

	go func() {
		_, err := io.Copy(local, conn)
		if err != nil {
			Log("error copying WebSocket to ORPort")
		}
		local.CloseWrite()
		conn.Close()
		wg.Done()
	}()

	wg.Wait()
}
Example #3
func proxy(cliConn *net.TCPConn, rAddr *net.TCPAddr) error {
	srvConn, err := net.DialTCP("tcp", nil, rAddr)
	if err != nil {
		cliConn.Close()
		return err
	}
	defer srvConn.Close()

	// channels to wait on the close event for each connection
	serverClosed := make(chan struct{}, 1)
	clientClosed := make(chan struct{}, 1)

	go broker(srvConn, cliConn, clientClosed)
	go broker(cliConn, srvConn, serverClosed)

	var waitFor chan struct{}
	select {
	case <-clientClosed:
		// the client closed first
		srvConn.SetLinger(0)
		srvConn.CloseRead()
		waitFor = serverClosed
	case <-serverClosed:
		cliConn.CloseRead()
		waitFor = clientClosed
	}

	<-waitFor
	return nil
}
Example #4
// Pipe bridges two TCP connections; if f is non-nil, each chunk read from src is passed through f before being written to dst.
func Pipe(dst *net.TCPConn, src *net.TCPConn, f *func([]byte) []byte) error {
	defer src.CloseRead()
	defer dst.CloseWrite()

	rb := make([]byte, 4096)

	for {
		rsize, err := src.Read(rb)
		if err != nil {
			if isRecoverable(err) {
				continue
			}
			return err
		}

		var wb []byte
		if f != nil {
			wb = (*f)(rb[:rsize])
		} else {
			wb = rb[:rsize]
		}
		wWrote := 0
		wTotal := len(wb)
		for wWrote != wTotal {
			wSize, err := dst.Write(wb[wWrote:])
			wWrote += wSize
			if err != nil {
				if isRecoverable(err) {
					continue
				}
				return err
			}
		}
	}
}
Example #5
func TCProxy(srvConn, cliConn *net.TCPConn) {
	// channels to wait on the close event for each connection
	serverClosed := make(chan struct{}, 1)
	clientClosed := make(chan struct{}, 1)

	go broker(srvConn, cliConn, clientClosed)
	go broker(cliConn, srvConn, serverClosed)

	// wait for one half of the proxy to exit, then trigger a shutdown of the
	// other half by calling CloseRead(). This will break the read loop in the
	// broker and allow us to fully close the connection cleanly without a
	// "use of closed network connection" error.
	var waitFor chan struct{}
	select {
	case <-clientClosed:
		// the client closed first and any more packets from the server aren't
		// useful, so we can optionally SetLinger(0) here to recycle the port
		// faster.
		srvConn.SetLinger(0)
		srvConn.CloseRead()
		waitFor = serverClosed
	case <-serverClosed:
		cliConn.CloseRead()
		waitFor = clientClosed
	}

	// Wait for the other connection to close.
	// This "waitFor" pattern isn't required, but gives us a way to track the
	// connection and ensure all copies terminate correctly; we can trigger
	// stats on entry and deferred exit of this function.
	<-waitFor
}
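Examples #3, #5 and #7 (and, with an extra error channel, #19) rely on a broker helper that is not reproduced in these listings. A minimal sketch of what such a helper presumably looks like, assuming the usual copy, close, then signal shape (the logging is an assumption):
// Assumed broker helper: copy src into dst until the copy ends (EOF, or the
// peer calling CloseRead breaks it), then fully close src and signal the caller.
func broker(dst, src net.Conn, srcClosed chan struct{}) {
	if _, err := io.Copy(dst, src); err != nil {
		log.Printf("Copy error: %s", err)
	}
	if err := src.Close(); err != nil {
		log.Printf("Close error: %s", err)
	}
	srcClosed <- struct{}{}
}
Because the channels in the examples are buffered with capacity 1, the final send never blocks even if the caller has already returned.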
Example #6
func tcpShim(inbound, outbound *net.TCPConn, connEvent *events.Connection, eh events.Handler) error {
	eh.Connection(connEvent)
	ch := make(chan error, 1)
	go func() {
		var err error
		defer func() { ch <- err }()
		_, err = io.Copy(inbound, outbound)
		outbound.CloseRead()
		inbound.CloseWrite()
	}()

	_, err1 := io.Copy(outbound, inbound)
	inbound.CloseRead()
	outbound.CloseWrite()

	err2 := <-ch
	inbound.Close()
	outbound.Close()

	if err1 != nil {
		return err1
	}
	return err2
}
Example #7
func (proxy *Proxy) handleConnection(in *net.TCPConn) error {
	defer in.Close()

	plainOut, err := proxy.connectionFactory()
	if err != nil {
		log.Print("could no create outgoing connection", err)
		return err
	}
	out := plainOut.(*net.TCPConn)
	defer out.Close()

	serverClosed := make(chan struct{}, 1)
	clientClosed := make(chan struct{}, 1)

	go broker(out, in, clientClosed)
	go broker(in, out, serverClosed)

	var waitFor chan struct{}

	select {
	case <-clientClosed:
		// the client closed first and any more packets from the server aren't
		// useful, so we can optionally SetLinger(0) here to recycle the port
		// faster.
		out.SetLinger(0)
		out.CloseRead()
		waitFor = serverClosed
	case <-serverClosed:
		in.CloseRead()
		waitFor = clientClosed
	}

	<-waitFor
	return nil
}
Example #8
func copyAndClose(ctx *ProxyCtx, dst, src *net.TCPConn) {
	if _, err := io.Copy(dst, src); err != nil {
		ctx.Warnf("Error copying to client: %s", err)
	}

	dst.CloseWrite()
	src.CloseRead()
}
Example #9
func (self *Session) ProxyTCP(conn *net.TCPConn, bufferSize int) {
	writeClosed := make(chan struct{})
	readClosed := make(chan struct{})
	go func() {
		<-writeClosed
		<-readClosed
		conn.Close()
		self.log("proxy closed")
	}()
	// to conn
	go func() {
		var once sync.Once
		for {
			select {
			case msg := <-self.Message:
				switch msg.Tag {
				case DATA:
					_, err := conn.Write(msg.Data)
					if err != nil {
						self.log("proxy write error %v", err)
						go once.Do(func() {
							<-time.After(time.Second * 5)
							conn.CloseWrite()
							close(writeClosed)
						})
						self.AbortRead()
					}
				case STATE:
					switch msg.State {
					case STATE_FINISH_SEND, STATE_ABORT_SEND:
						go once.Do(func() {
							<-time.After(time.Second * 5)
							conn.CloseWrite()
							close(writeClosed)
						})
					case STATE_ABORT_READ:
					case STATE_FINISH_READ:
					}
				}
			case <-self.Stopped:
				return
			}
		}
	}()
	// from conn
	for {
		buf := make([]byte, bufferSize)
		n, err := conn.Read(buf)
		if err != nil {
			conn.CloseRead()
			close(readClosed)
			self.FinishSend()
			return
		}
		self.Send(buf[:n])
	}
}
Example #10
func copyBytes(in, out *net.TCPConn) {
	glog.Infof("Copying from %v <-> %v <-> %v <-> %v",
		in.RemoteAddr(), in.LocalAddr(), out.LocalAddr(), out.RemoteAddr())
	if _, err := io.Copy(in, out); err != nil {
		glog.Errorf("I/O error: %v", err)
	}
	in.CloseRead()
	out.CloseWrite()
}
Example #11
func sendDest(source *net.TCPConn, dest []*DestList) {
	buffer := make([]byte, MIN_BUF_SIZE)
	n, _ := source.Read(buffer)
	// log.Println("recv data len:=>", n)
	for _, d := range dest {
		d.Conn.Write(buffer[:n])
		d.Conn.CloseWrite()
	}
	source.CloseRead()
}
Example #12
// Copy one side of the socket, doing a half close when it has
// finished
func copy_half(backend *Backend, dst, src *net.TCPConn, wg *sync.WaitGroup) {
	defer wg.Done()
	transferred, err := io.Copy(dst, src)
	backend.transferred += transferred
	if err != nil {
		log.Printf("Error: %s", err)
	}
	dst.CloseWrite()
	src.CloseRead()
}
Example #13
func copyBytes(direction string, dest, src *net.TCPConn, wg *sync.WaitGroup) {
	defer wg.Done()
	glog.V(4).Infof("Copying %s: %s -> %s", direction, src.RemoteAddr(), dest.RemoteAddr())
	n, err := io.Copy(dest, src)
	if err != nil {
		glog.Errorf("I/O error: %v", err)
	}
	glog.V(4).Infof("Copied %d bytes %s: %s -> %s", n, direction, src.RemoteAddr(), dest.RemoteAddr())
	dest.CloseWrite()
	src.CloseRead()
}
Example #14
func CopyBytes(in, out *net.TCPConn) {
	log.Printf("Copying from %v <-> %v <-> %v <-> %v",
		in.RemoteAddr(), in.LocalAddr(), out.LocalAddr(), out.RemoteAddr())
	_, err := io.Copy(in, out)
	if err != nil && err != io.EOF {
		log.Printf("I/O error: %v", err)
	}

	in.CloseRead()
	out.CloseWrite()
}
Example #15
func copyBytes(logLevel int, direction string, dest, src *net.TCPConn, wg *sync.WaitGroup) {
	defer wg.Done()
	if logLevel > 0 {
		log.Printf("Copying %s: %s -> %s", direction, src.RemoteAddr(), dest.RemoteAddr())
	}
	n, err := io.Copy(dest, src)
	if err != nil {
		log.Printf("I/O error: %v", err)
	}
	if logLevel > 0 {
		log.Printf("Copied %d bytes %s: %s -> %s", n, direction, src.RemoteAddr(), dest.RemoteAddr())
	}
	dest.CloseWrite()
	src.CloseRead()
}
Example #16
func (p *Proxy) proxyTCPStream(ctx context.Context, src *net.TCPConn) {
	srcRemoteAddr := src.RemoteAddr().(*net.TCPAddr)
	srcLocalAddr := src.LocalAddr().(*net.TCPAddr)

	route := p.routes.GetTable().Lookup(protocols.TCP,
		srcRemoteAddr.IP, srcLocalAddr.IP,
		uint16(srcRemoteAddr.Port), uint16(srcLocalAddr.Port))
	if route == nil {
		src.Close()
		return
	}

	go func() {
		dstAddr := net.TCPAddr{
			IP:   route.Outbound.DstIP,
			Port: int(route.Outbound.DstPort),
		}

		dst, err := net.DialTCP("tcp", nil, &dstAddr)
		if err != nil {
			src.Close()
			return
		}

		dst.SetKeepAlivePeriod(10 * time.Second)
		src.SetKeepAlivePeriod(10 * time.Second)

		go func() {
			<-ctx.Done()
			src.Close()
			dst.Close()
		}()

		go func() {
			defer dst.CloseWrite()
			defer src.CloseRead()
			io.Copy(dst, src)
		}()

		go func() {
			defer src.CloseWrite()
			defer dst.CloseRead()
			io.Copy(src, dst)
		}()
	}()
}
Example #17
func (a *Agent) proxyStream(from, to *net.TCPConn) {
	buf := make([]byte, 1024)
	for {
		n, err := from.Read(buf)
		if err != nil {
			if err != io.EOF {
				log.Printf("[err] error reading from %s to %s: %s", from, to, err)
			}
			from.CloseRead()
			to.CloseWrite()
			return
		}

		// Forward only the bytes actually read, not the whole buffer.
		_, err = to.Write(buf[:n])
		if err != nil {
			log.Printf("[err] error writing from %s to %s: %s", from, to, err)
			return
		}
	}
}
Example #18
func (fwd *forwarding) tcpShim(inbound, outbound *net.TCPConn) error {
	ch := make(chan error, 1)
	go func() {
		var err error
		defer func() { ch <- err }()
		_, err = io.Copy(inbound, outbound)
		outbound.CloseRead()
		inbound.CloseWrite()
	}()

	_, err1 := io.Copy(outbound, inbound)
	inbound.CloseRead()
	outbound.CloseWrite()

	err2 := <-ch
	inbound.Close()
	outbound.Close()

	if err1 != nil {
		return err1
	}
	return err2
}
Example #19
// proxy brokers a connection from src to dst
func proxy(dst, src *net.TCPConn) error {
	// channels to wait on the close event for each connection
	serverClosed := make(chan struct{}, 1)
	clientClosed := make(chan struct{}, 1)
	errors := make(chan error, 1)

	go broker(dst, src, clientClosed, errors)
	go broker(src, dst, serverClosed, errors)

	// wait for one half of the proxy to exit, then trigger a shutdown of the
	// other half by calling CloseRead(). This will break the read loop in the
	// broker and allow us to fully close the connection cleanly without a
	// "use of closed network connection" error.
	var waitFor chan struct{}
	select {
	case <-clientClosed:
		// the client closed first and any more packets from the server aren't
		// useful, so we can optionally SetLinger(0) here to recycle the port
		// faster.
		dst.SetLinger(0)
		dst.CloseRead()
		waitFor = serverClosed
	case <-serverClosed:
		src.CloseRead()
		waitFor = clientClosed
	case err := <-errors:
		src.CloseRead()
		dst.SetLinger(0)
		dst.CloseRead()
		return err
	}

	// Wait for the other connection to close.
	<-waitFor
	return nil
}
Example #20
func (p *Proxy) proxyConn(conn *net.TCPConn) {
	p.connMutex.Lock()
	closeConns := p.closeConns
	destAddr := p.destAddr
	p.connMutex.Unlock()
	defer func() {
		log.Debugf("closing source connection: %v", conn)
		conn.Close()
	}()

	if destAddr == nil {
		return
	}

	destConn, err := net.DialTCP("tcp", nil, destAddr)
	if err != nil {
		conn.Close()
		return
	}
	defer func() {
		log.Debugf("closing destination connection: %v", destConn)
		destConn.Close()
	}()

	var wg sync.WaitGroup
	end := make(chan bool, 1)
	wg.Add(1)
	go func() {
		defer wg.Done()
		n, _ := io.Copy(destConn, conn)
		conn.Close()
		destConn.CloseRead()
		log.Debugf("ending. copied %d bytes from source to dest", n)
	}()
	wg.Add(1)
	go func() {
		defer wg.Done()
		n, _ := io.Copy(conn, destConn)
		destConn.Close()
		conn.CloseRead()
		log.Debugf("ending. copied %d bytes from dest to source", n)
	}()

	go func() {
		wg.Wait()
		end <- true
	}()

	select {
	case <-end:
		log.Debugf("all io copy goroutines done")
		return
	case <-closeConns:
		log.Debugf("closing all connections")
		return
	}
}
Example #21
func forward(source *net.TCPConn, dest *net.TCPConn) {
	defer dest.CloseWrite()
	defer source.CloseRead()
	io.Copy(dest, source)
}
Example #22
func (thisServer *server) Handle(connection *net.TCPConn, row int) {
	var (
		fail          = fmt.Errorf("failed: (%s).server.Handle((%s<->%s), %d)", thisServer.relatedNode.Address().String(), connection.LocalAddr().String(), connection.RemoteAddr().String(), row)
		queryAsBuffer bytes.Buffer
		err           error
	)
	defer func() {
		if err := connection.Close(); err != nil {
			log.Printf("%s:\n%s", fail.Error(), err.Error())
		}
	}()
	if _, err := io.Copy(&queryAsBuffer, connection); err != nil {
		log.Printf("%s:\n%s", fail.Error(), err.Error())
		return
	}
	if err = connection.CloseRead(); err != nil {
		log.Printf("%s:\n%s", fail.Error(), err.Error())
	}
	query := Query{}
	if queryAsBuffer.Len() > 0 {
		if err = json.Unmarshal(queryAsBuffer.Bytes(), &query); err != nil {
			log.Printf("%s:\n%s", fail.Error(), err.Error())
			return
		}
	}
	response := Response{Method: query.Method}
	var (
		queryKey                       *key
		queryAddress, resultingAddress *net.TCPAddr
		resultingBytes                 []byte
	)
	switch query.Method {
	case "BaseKey":
		response.Result = thisServer.relatedNode.BaseKey().String()
	case "Name":
		response.Result = thisServer.relatedNode.Name()
	case "FindSuccessor":
		if queryKey, err = makeNewKey(query.Argument); err != nil {
			log.Printf("%s:\n%s", fail.Error(), err.Error())
			return
		}
		if resultingAddress, err = thisServer.relatedNode.FindSuccessor(queryKey); err != nil {
			log.Printf("%s:\n%s", fail.Error(), err.Error())
			return
		}
		response.Result = resultingAddress.String()
	case "Predecessor":
		if thisServer.relatedNode.Predecessor() != nil {
			response.Result = thisServer.relatedNode.Predecessor().String()
		}
	case "Successor":
		if thisServer.relatedNode.Successor() != nil {
			response.Result = thisServer.relatedNode.Successor().String()
		}
	case "ClosestPrecedingFinger":
		if queryKey, err = makeNewKey(query.Argument); err != nil {
			log.Printf("%s:\n%s", fail.Error(), err.Error())
			return
		}
		if resultingAddress, err = thisServer.relatedNode.ClosestPrecedingFinger(queryKey); err != nil {
			log.Printf("%s:\n%s", fail.Error(), err.Error())
			return
		}
		response.Result = resultingAddress.String()
	case "Notify":
		if queryAddress, err = query.parseAddress(); err != nil {
			log.Printf("%s:\n%s", fail.Error(), err.Error())
		}
		if err = thisServer.relatedNode.BeNotified(queryAddress); err != nil {
			log.Printf("%s:\n%s", fail.Error(), err.Error())
		}
	case "Store":
		data := []byte(query.Argument)
		hashOfData := hash(data)
		thisServer.relatedNode.dataStorage[hashOfData.String()] = data
		if thisServer.relatedNode.Predecessor() != nil && thisServer.relatedNode.Predecessor() != thisServer.relatedNode.Address() {
			sendBackupToPredecessor(thisServer.relatedNode.Address(), thisServer.relatedNode.Predecessor(), data)
		}
		if thisServer.relatedNode.Successor() != nil && thisServer.relatedNode.Successor() != thisServer.relatedNode.Address() {
			sendBackupToSuccessor(thisServer.relatedNode.Address(), thisServer.relatedNode.Successor(), data)
		}
	case "Delete":
		hashOfData := query.Argument
		if err = deleteBackupAtPredecessor(thisServer.relatedNode.Address(), thisServer.relatedNode.Predecessor(), hashOfData); err != nil {
			log.Printf("%s:\n%s", fail.Error(), err.Error())
			return
		}
		if err = deleteBackupAtSuccessor(thisServer.relatedNode.Address(), thisServer.relatedNode.Successor(), hashOfData); err != nil {
			log.Printf("%s:\n%s", fail.Error(), err.Error())
			return
		}
		delete(thisServer.relatedNode.dataStorage, hashOfData)
	case "BackupToSuccessor":
		var supposedPredecessor *net.TCPAddr
		if supposedPredecessor, err = net.ResolveTCPAddr("tcp", query.Source); err != nil {
			log.Printf("%s:\n%s", fail.Error(), err.Error())
			return
		}
		if thisServer.relatedNode.Predecessor() == nil || supposedPredecessor.String() != thisServer.relatedNode.Predecessor().String() {
			thisServer.relatedNode.BeNotified(supposedPredecessor)
		}
		if supposedPredecessor.String() == thisServer.relatedNode.Predecessor().String() {
			data := []byte(query.Argument)
			hashOfData := hash(data)
			thisServer.relatedNode.backupStorageOfPredecessorsData[hashOfData.String()] = data
		}
	case "DeleteBackupAtSuccessor":
		var supposedPredecessor *net.TCPAddr
		if supposedPredecessor, err = net.ResolveTCPAddr("tcp", query.Source); err != nil {
			log.Printf("%s:\n%s", fail.Error(), err.Error())
			return
		}
		if thisServer.relatedNode.Predecessor() == nil || supposedPredecessor.String() != thisServer.relatedNode.Predecessor().String() {
			thisServer.relatedNode.BeNotified(supposedPredecessor)
		}
		if supposedPredecessor.String() == thisServer.relatedNode.Predecessor().String() {
			hashOfData := query.Argument
			delete(thisServer.relatedNode.backupStorageOfPredecessorsData, hashOfData)
		}
	case "BackupToPredecessor":
		//			var supposedSuccessor *net.TCPAddr;
		//			if supposedSuccessor, err = net.ResolveTCPAddr("tcp", query.Source); err != nil {
		//				log.Printf("%s:\n%s", fail.Error(), err.Error())
		//				return
		//			}
		//			if supposedSuccessor.String() == thisServer.relatedNode.Predecessor().String() {
		data := []byte(query.Argument)
		hashOfData := hash(data)
		thisServer.relatedNode.backupStorageOfSuccessorsData[hashOfData.String()] = data
		//			}
	case "DeleteBackupAtPredecessor":
		//			var supposedSuccessor *net.TCPAddr;
		//			if supposedSuccessor, err = net.ResolveTCPAddr("tcp", query.Source); err != nil {
		//				log.Printf("%s:\n%s", fail.Error(), err.Error())
		//				return
		//			}
		//			if supposedSuccessor.String() == thisServer.relatedNode.Predecessor().String() {
		hashOfData := query.Argument
		delete(thisServer.relatedNode.backupStorageOfSuccessorsData, hashOfData)
		//			}
	case "Download":
		hashOfData := query.Argument
		if elem, ok := thisServer.relatedNode.dataStorage[hashOfData]; ok {
			response.Result = string(elem[:])
		}
	case "DownloadAlias":
		hashOfName := query.Argument
		if elem, ok := thisServer.relatedNode.aliasStorage[hashOfName]; ok {
			response.Result = string(elem[:])
		}
	case "StoreAlias":
		hashOfName := strings.SplitN(query.Argument, "::::", 2)[0]
		hashOfData := strings.SplitN(query.Argument, "::::", 2)[1]
		thisServer.relatedNode.aliasStorage[hashOfName] = hashOfData
		if thisServer.relatedNode.Predecessor() != nil && thisServer.relatedNode.Predecessor() != thisServer.relatedNode.Address() {
			sendAliasBackupToPredecessor(thisServer.relatedNode.Address(), thisServer.relatedNode.Predecessor(), query.Argument)
		}
		if thisServer.relatedNode.Successor() != nil && thisServer.relatedNode.Successor() != thisServer.relatedNode.Address() {
			sendAliasBackupToSuccessor(thisServer.relatedNode.Address(), thisServer.relatedNode.Successor(), query.Argument)
		}
	case "DeleteAlias":
		hashOfName := query.Argument
		if err = deleteAliasAtPredecessor(thisServer.relatedNode.Address(), thisServer.relatedNode.Predecessor(), hashOfName); err != nil {
			log.Printf("%s:\n%s", fail.Error(), err.Error())
			return
		}
		if err = deleteAliasAtSuccessor(thisServer.relatedNode.Address(), thisServer.relatedNode.Successor(), hashOfName); err != nil {
			log.Printf("%s:\n%s", fail.Error(), err.Error())
			return
		}
		delete(thisServer.relatedNode.aliasStorage, hashOfName)
	case "AliasBackupToPredecessor":
		//			var supposedSuccessor *net.TCPAddr;
		//			if supposedSuccessor, err = net.ResolveTCPAddr("tcp", query.Source); err != nil {
		//				log.Printf("%s:\n%s", fail.Error(), err.Error())
		//				return
		//			}
		//			if supposedSuccessor.String() == thisServer.relatedNode.Predecessor().String() {
		hashOfName := strings.SplitN(query.Argument, "::::", 2)[0]
		hashOfData := strings.SplitN(query.Argument, "::::", 2)[1]
		thisServer.relatedNode.backupStorageOfSuccessorsAliases[hashOfName] = hashOfData
		//			}
	case "DeleteAliasAtSuccessor":
		var supposedPredecessor *net.TCPAddr
		if supposedPredecessor, err = net.ResolveTCPAddr("tcp", query.Source); err != nil {
			log.Printf("%s:\n%s", fail.Error(), err.Error())
			return
		}
		if thisServer.relatedNode.Predecessor() == nil || supposedPredecessor.String() != thisServer.relatedNode.Predecessor().String() {
			thisServer.relatedNode.BeNotified(supposedPredecessor)
		}
		if supposedPredecessor.String() == thisServer.relatedNode.Predecessor().String() {
			hashOfName := query.Argument
			delete(thisServer.relatedNode.backupStorageOfPredecessorsAliases, hashOfName)
		}
	case "AliasBackupToSuccessor":
		var supposedPredecessor *net.TCPAddr
		if supposedPredecessor, err = net.ResolveTCPAddr("tcp", query.Source); err != nil {
			log.Printf("%s:\n%s", fail.Error(), err.Error())
			return
		}
		if thisServer.relatedNode.Predecessor() == nil || supposedPredecessor.String() != thisServer.relatedNode.Predecessor().String() {
			thisServer.relatedNode.BeNotified(supposedPredecessor)
		}
		if supposedPredecessor.String() == thisServer.relatedNode.Predecessor().String() {
			hashOfName := strings.SplitN(query.Argument, "::::", 2)[0]
			hashOfData := strings.SplitN(query.Argument, "::::", 2)[1]
			thisServer.relatedNode.backupStorageOfPredecessorsAliases[hashOfName] = hashOfData
		}
	case "DeleteAliasAtPredecessor":
		//			var supposedSuccessor *net.TCPAddr;
		//			if supposedSuccessor, err = net.ResolveTCPAddr("tcp", query.Source); err != nil {
		//				log.Printf("%s:\n%s", fail.Error(), err.Error())
		//				return
		//			}
		//			if supposedSuccessor.String() == thisServer.relatedNode.Predecessor().String() {
		hashOfName := query.Argument
		delete(thisServer.relatedNode.backupStorageOfSuccessorsAliases, hashOfName)
		//			}
	default:
		errHere := fmt.Errorf("failed: method not found: %s", query.Method)
		log.Printf("%s:\n%s", fail.Error(), errHere.Error())
	}
	if resultingBytes, err = json.Marshal(response); err != nil {
		log.Printf("%s:\n%s", fail.Error(), err.Error())
		return
	}
	if _, err = connection.Write(resultingBytes); err != nil {
		log.Printf("%s:\n%s", fail.Error(), err.Error())
		return
	}
	return
}
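The handler above reads the whole query with io.Copy and only then half-closes its read side, which only works if the client half-closes its write side once the query has been sent. A hedged sketch of the matching client, assuming the same Query/Response JSON framing (sendQuery and its error handling are illustrative, not part of the original code):
// Assumed client counterpart: write the JSON query, CloseWrite so the
// server's io.Copy sees EOF, then read the full JSON response.
func sendQuery(addr *net.TCPAddr, query Query) (Response, error) {
	var response Response
	connection, err := net.DialTCP("tcp", nil, addr)
	if err != nil {
		return response, err
	}
	defer connection.Close()

	payload, err := json.Marshal(query)
	if err != nil {
		return response, err
	}
	if _, err = connection.Write(payload); err != nil {
		return response, err
	}
	// Half-close: the server gets EOF on its read side but can still reply.
	if err = connection.CloseWrite(); err != nil {
		return response, err
	}

	var responseAsBuffer bytes.Buffer
	if _, err = io.Copy(&responseAsBuffer, connection); err != nil {
		return response, err
	}
	err = json.Unmarshal(responseAsBuffer.Bytes(), &response)
	return response, err
}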
Example #23
func proxy(local *net.TCPConn, ws *websocket.Conn) {
	var wg sync.WaitGroup

	wg.Add(2)

	// Local-to-WebSocket read loop.
	go func() {
		buf := make([]byte, bufSiz)
		var err error
		for {
			n, er := local.Read(buf[:])
			if n > 0 {
				ew := websocket.Message.Send(ws, buf[:n])
				if ew != nil {
					err = ew
					break
				}
			}
			if er != nil {
				err = er
				break
			}
		}
		if err != nil && err != io.EOF {
			Log("%s", err)
		}
		local.CloseRead()
		ws.Close()

		wg.Done()
	}()

	// WebSocket-to-local read loop.
	go func() {
		var buf []byte
		var err error
		for {
			er := websocket.Message.Receive(ws, &buf)
			if er != nil {
				err = er
				break
			}
			n, ew := local.Write(buf)
			if ew != nil {
				err = ew
				break
			}
			if n != len(buf) {
				err = io.ErrShortWrite
				break
			}
		}
		if err != nil && err != io.EOF {
			Log("%s", err)
		}
		local.CloseWrite()
		ws.Close()

		wg.Done()
	}()

	wg.Wait()
}
Example #24
func copyBytes(dest, src *net.TCPConn, wg *sync.WaitGroup) {
	defer wg.Done()
	io.Copy(dest, src)
	dest.CloseWrite()
	src.CloseRead()
}
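The copyBytes variants in Examples #13, #15 and #24 all leave it to the caller to start one copy per direction and wait on the WaitGroup; a minimal caller sketch under that assumption (proxyConnections and the final Close calls are hypothetical):
func proxyConnections(client, backend *net.TCPConn) {
	var wg sync.WaitGroup
	wg.Add(2)
	go copyBytes(backend, client, &wg) // client -> backend
	go copyBytes(client, backend, &wg) // backend -> client
	wg.Wait()
	// Both halves have been half-closed by copyBytes; release the sockets.
	client.Close()
	backend.Close()
}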