func (srv *Server) HandleClient(raw io.ReadWriteCloser) { defer raw.Close() /*raw, err := kiss.LLObfsServerHandshake(srv.prv.PublicKey(), raw) if err != nil { kilog.Debug("directory: client failed LLObfs handshake: %v", err.Error()) return }*/ var theirKey natrium.EdDSAPublic // "verifier" that copies down their public key copier := func(haha natrium.EdDSAPublic) bool { theirKey = haha return true } sock, err := kiss.KiSSNamedHandshake(srv.prv, copier, raw) if err != nil { kilog.Debug("directory: client failed KiSS handshake: %v", err.Error()) return } defer sock.Close() var request clntRequest err = struc.Unpack(sock, &request) if err != nil { kilog.Debug("directory: client sent malformed request: %v", err.Error()) return } //fmt.Println(request) srv.rqDispatch(sock, request) }
// ServeConn runs a single connection. // // ServeConn blocks, serving the connection until the client hangs up. func (s *Server) ServeConn(conn io.ReadWriteCloser) { // First create the yamux server to wrap this connection mux, err := yamux.Server(conn, nil) if err != nil { conn.Close() log.Printf("[ERR] plugin: %s", err) return } // Accept the control connection control, err := mux.Accept() if err != nil { mux.Close() log.Printf("[ERR] plugin: %s", err) return } // Create the broker and start it up broker := newMuxBroker(mux) go broker.Run() // Use the control connection to build the dispenser and serve the // connection. server := rpc.NewServer() server.RegisterName("Dispenser", &dispenseServer{ ProviderFunc: s.ProviderFunc, ProvisionerFunc: s.ProvisionerFunc, broker: broker, }) server.ServeConn(control) }
func doLoris(conn io.ReadWriteCloser, victimUri *url.URL, activeConnectionsCh chan<- int, requestHeader []byte) { defer conn.Close() if _, err := conn.Write(requestHeader); err != nil { log.Printf("Cannot write requestHeader=[%v]: [%s]\n", requestHeader, err) return } activeConnectionsCh <- 1 defer func() { activeConnectionsCh <- -1 }() readerStopCh := make(chan int, 1) go nullReader(conn, readerStopCh) for i := 0; i < *contentLength; i++ { select { case <-readerStopCh: return case <-time.After(*sleepInterval): } if _, err := conn.Write(sharedWriteBuf); err != nil { log.Printf("Error when writing %d byte out of %d bytes: [%s]\n", i, *contentLength, err) return } } }
func getflag(c io.ReadWriteCloser) { defer c.Close() var flag_id string fmt.Fprintln(c, "flag_id: ") _, err := fmt.Fscanln(c, &flag_id) if err != nil { return } var cookie string fmt.Fprintln(c, "token_id: ") _, err = fmt.Fscanln(c, &cookie) if err != nil { return } entry := db.Get(flag_id) if entry == nil { fmt.Fprintln(c, "flagval: no_entry_exists") log.Println("getflag: request for non-existant entry") } else if cookie == entry[0] { fmt.Fprintln(c, "flagval:", entry[1]) log.Println("getflag: got") } else { fmt.Fprintln(c, "flagval: getflag_auth_fail") log.Println("getflag: auth fail") } return }
// NewClient creates a client from an already-open connection-like value. // Dial is typically used instead. func NewClient(conn io.ReadWriteCloser) (*Client, error) { // Create the yamux client so we can multiplex mux, err := yamux.Client(conn, nil) if err != nil { conn.Close() return nil, err } // Connect to the control stream. control, err := mux.Open() if err != nil { mux.Close() return nil, err } // Create the broker and start it up broker := newMuxBroker(mux) go broker.Run() // Build the client using our broker and control channel. return &Client{ broker: broker, control: rpc.NewClient(control), }, nil }
// attach services one attach request: it waits (if necessary) for the job
// to appear, builds an AttachRequest for the backend, and relays the
// attach status bytes back to the client over conn.
func (h *attachHandler) attach(req *host.AttachReq, conn io.ReadWriteCloser) {
	defer conn.Close()
	g := grohl.NewContext(grohl.Data{"fn": "attach", "job.id": req.JobID})
	g.Log(grohl.Data{"at": "start"})
	attachWait := make(chan struct{})
	job := h.state.AddAttacher(req.JobID, attachWait)
	if job == nil {
		// Job not running yet: tell the client we're waiting, then block
		// until the state machine signals attachWait.
		defer h.state.RemoveAttacher(req.JobID, attachWait)
		if _, err := conn.Write([]byte{host.AttachWaiting}); err != nil {
			return
		}
		// TODO: add timeout
		<-attachWait
		job = h.state.GetJob(req.JobID)
	}
	// success is handed to the backend via AttachRequest.Attached; failed is
	// closed locally if Attach errors before the attach succeeded.
	success := make(chan struct{})
	failed := make(chan struct{})
	opts := &AttachRequest{
		Job:        job,
		Logs:       req.Flags&host.AttachFlagLogs != 0,
		Stream:     req.Flags&host.AttachFlagStream != 0,
		Height:     req.Height,
		Width:      req.Width,
		Attached:   success,
		ReadWriter: conn,
		Streams:    make([]string, 0, 3),
	}
	// Translate the flag bits into the stream-name list the backend expects.
	if req.Flags&host.AttachFlagStdin != 0 {
		opts.Streams = append(opts.Streams, "stdin")
	}
	if req.Flags&host.AttachFlagStdout != 0 {
		opts.Streams = append(opts.Streams, "stdout")
	}
	if req.Flags&host.AttachFlagStderr != 0 {
		opts.Streams = append(opts.Streams, "stderr")
	}
	go func() {
		select {
		case <-success:
			// NOTE(review): this assumes the backend SENDS on Attached
			// exactly once; if the backend instead closes the channel, the
			// close(success) below would double-close and panic — confirm
			// the backend contract.
			conn.Write([]byte{host.AttachSuccess})
			close(success)
		case <-failed:
		}
		close(attachWait)
	}()
	if err := h.backend.Attach(opts); err != nil {
		// Only report AttachError if the success path never fired.
		select {
		case <-success:
		default:
			close(failed)
			conn.Write(append([]byte{host.AttachError}, err.Error()...))
		}
		g.Log(grohl.Data{"status": "error", "err": err})
		return
	}
	g.Log(grohl.Data{"at": "finish"})
}
// init initializes the conn to the ircServer and start all the gouroutines // requires to run ircBot func (bot *ircBot) init(conn io.ReadWriteCloser) { glog.Infoln("Init bot", bot) quit := make(chan struct{}) receive := make(chan string) go bot.readSocket(quit, receive, conn) // Listen for incoming messages in background thread go bot.listenSendMonitor(quit, receive, conn) go func(bot *ircBot, conn io.Closer) { for { select { case <-bot.closing: err := conn.Close() if err != nil { glog.Errorln("An error occured while closing the conn of", bot, err) } close(quit) return } } }(bot, conn) bot.RLock() if bot.serverPass != "" { bot.SendRaw("PASS " + bot.serverPass) } bot.RUnlock() bot.SendRaw("PING Bonjour") }
func (g *GraphiteTcpServer) handleConnection(conn io.ReadWriteCloser) { scanner := bufio.NewScanner(conn) defer conn.Close() // Create a channel for the metrics addChan := make(chan metrics.Metric, 1000) g.backend.AddMetricChan(addChan) defer close(addChan) for scanner.Scan() { // PARSE METRIC LINES //err, m := err, m := parseGraphiteLine(scanner.Text()) if err == nil { // Send parsed metric to the back-end addChan <- m } else { conn.Write([]byte(err.Error())) } } if err := scanner.Err(); err != nil { fmt.Printf("Error while parsing text: %v", err) } }
func serve(c io.ReadWriteCloser, call []string) { defer c.Close() e := serveExec(c, call) if e != nil { common.FDumpError(c, e) } }
// Run starts reading bytes from the serial port, (re)opening it if needed
// and sends the read bytes to the channel.
// Run blocks so should be called as a go routine.
func (sc *serialToChan) Run() {
	// Ser is nil when closed.
	var ser io.ReadWriteCloser
	for {
		// Open the serial port.
		if ser == nil {
			// Open serial port.
			log.Printf("LoopSerial: Opening the port.\n")
			var err error
			// Open the serial port.
			ser, err = openSerial(sc.port)
			if err != nil {
				log.Printf("LoopSerial: Error opening the port: %s.\n", err.Error())
				ser = nil
				// Back off before retrying so a dead port doesn't spin.
				time.Sleep(5 * time.Second)
			}
		}
		// b is nil if no bytes were received.
		var b []byte
		if ser != nil {
			// Read buf from serial.
			// getBuffer/putBuffer appear to manage a buffer pool: a buffer
			// that is not sent on the channel must be returned via putBuffer.
			b = getBuffer("LoopSerial: asking for buffer.")
			n, err := ser.Read(b)
			if err != nil || n == 0 {
				// Error reading the serial port.
				log.Printf("LoopSerial: Error reading the serial port. Closing it.")
				// Return and invalidate the buffer.
				putBuffer(b, "LoopSerial: returning buffer because of read error.")
				b = nil
				// Close serial; the next iteration will try to reopen it.
				ser.Close()
				ser = nil
			} else {
				// Trim to the bytes actually read; ownership of b passes to
				// the channel receiver below.
				b = b[:n]
			}
		}
		if b != nil {
			// Send the bytes to the channel.
			sc.bytesChan <- b
			// Set b to nil to indicate that it has been sent.
			b = nil
		}
		// Check if the done channel has been closed, but don't wait.
		select {
		case <-sc.done:
			// Time to stop.
			if ser != nil {
				ser.Close()
			}
			close(sc.bytesChan)
			return
		default:
		}
	}
}
// handleMessage ingests one peer payload from conn: the entire stream is
// read and acknowledged ("OK"/"NO") up front, then decoded and propagated
// asynchronously in a background goroutine.
func (m *CircularMPI) handleMessage(conn io.ReadWriteCloser) {
	defer conn.Close()
	buf, err := ioutil.ReadAll(conn)
	if err != nil {
		m.logger.Println(err)
		conn.Write([]byte{'N', 'O'})
		return
	}
	// Ack receipt before processing; decoding happens in the background.
	conn.Write([]byte{'O', 'K'})
	go func() {
		states := make(map[string]Versioner)
		data := bytes.NewBuffer(buf)
		st, err := decodeData(data)
		if err != nil {
			// NOTE(review): a decode error is only logged; execution
			// continues with whatever decodeData returned (possibly nil) —
			// confirm this is intended.
			m.logger.Println(err)
		}
		// NOTE(review): this assignment discards the map made above, so
		// that make() is effectively dead code.
		states = st
		m.logger.Println(m.me, " Received ")
		m.cleanLocalStreams(states)
		m.prepareData(states)
		if len(states) == 0 {
			m.logger.Println("Nothing to send Going")
			return
		}
		// Ne need to clean local streams
		for key, v := range states {
			m.Dummy.Write(key, v)
		}
		m.sendData(states)
	}()
}
func handleAgent(ll *log.Log, cc io.ReadWriteCloser) { defer cc.Close() client := osprocess.New(nil) agent := rpc.RepresentAgent(cc, client) for i := 0; ; i++ { ll.Info("starting program") res, err := agent.StartProcess(&rpc.StartProcessReq{ ProgramName: fmt.Sprintf("echoer v%d i was told to do this by the supervisor", i), }) if err != nil { ll.Err(err).Error("couldn't send command to remote agent") return } ll.KV("process.id", res.ProcessID).Info("agent is running program") time.Sleep(3 * time.Second) ll.Info("stopping program") _, err = agent.StopProcess(&rpc.StopProcessReq{ProcessID: res.ProcessID, Timeout: time.Second}) if err != nil { ll.Err(err).Error("couldn't send command to remote agent") return } } }
// Accept starts a new SMTP session using io.ReadWriteCloser
func Accept(remoteAddress string, conn io.ReadWriteCloser, storage storage.Storage, messageChan chan *data.Message, hostname string, monkey monkey.ChaosMonkey) {
	defer conn.Close()
	proto := smtp.NewProtocol()
	proto.Hostname = hostname
	// If a chaos monkey dictates a link speed, wrap the raw reader/writer in
	// rate-limited equivalents; otherwise talk to the connection directly.
	var link *linkio.Link
	reader := io.Reader(conn)
	writer := io.Writer(conn)
	if monkey != nil {
		linkSpeed := monkey.LinkSpeed()
		if linkSpeed != nil {
			link = linkio.NewLink(*linkSpeed * linkio.BytePerSecond)
			reader = link.NewLinkReader(io.Reader(conn))
			writer = link.NewLinkWriter(io.Writer(conn))
		}
	}
	// NOTE(review): positional struct literal — the field order here must
	// match the Session struct declaration exactly.
	session := &Session{conn, proto, storage, messageChan, remoteAddress, false, "", link, reader, writer, monkey}
	// Wire all protocol callbacks to this session.
	proto.LogHandler = session.logf
	proto.MessageReceivedHandler = session.acceptMessage
	proto.ValidateSenderHandler = session.validateSender
	proto.ValidateRecipientHandler = session.validateRecipient
	proto.ValidateAuthenticationHandler = session.validateAuthentication
	proto.GetAuthenticationMechanismsHandler = func() []string { return []string{"PLAIN"} }
	session.logf("Starting session")
	// Greet the client, then process commands until the client hangs up or
	// the chaos monkey forces a disconnect.
	session.Write(proto.Start())
	for session.Read() == true {
		if monkey != nil && monkey.Disconnect != nil && monkey.Disconnect() {
			session.conn.Close()
			break
		}
	}
	session.logf("Session ended")
}
func (gc *Goctl) reader(c io.ReadWriteCloser) error { defer gc.logger.Info("Connection closed.") defer c.Close() gc.logger.Info("New connection.") for { buf, err := Read(c) if err != nil { gc.logger.Error("Error reading from connection.", "error", err) return err } cmd := strings.Split(string(buf), "\u0000") gc.logger.Debug("Got command.", "cmd", cmd) var resp string if h := gc.handlers[cmd[0]]; h != nil { resp = h.Run(gc, cmd[1:]) } else { resp = fmt.Sprintf("ERROR: unknown command: '%s'.", cmd[0]) } gc.logger.Debug("Responding.", "resp", resp) Write(c, []byte(resp)) } /* NOTREACHED */ }
// logs a player in from an incoming connection, creating a player
// in the world if they successfully connect
func Login(rwc io.ReadWriteCloser, ip net.Addr) {
	showTitle(rwc)
	// Give the client a fixed number of authentication attempts.
	for i := 0; i < retries; i++ {
		user, err := authenticate(rwc, ip)
		switch err {
		case nil:
			world.SpawnPlayer(rwc, user)
			return
		case ErrAuth:
			log.Printf("Failed login from %s", ip)
			// On a successful write this resets err to nil so the loop
			// retries; on a failed write the check below logs and bails.
			_, err = rwc.Write([]byte("Incorrect username or password, please try again\n"))
			if err != nil {
				// NOTE(review): this break exits the switch, not the loop;
				// the non-nil err is then caught by the check below.
				break
			}
		case ErrDupe:
			// NOTE(review): ':=' shadows the outer err here, so any failure
			// from handleDupe is discarded and the check below always sees
			// the original ErrDupe (ending the loop) — confirm intended.
			ok, err := handleDupe(user, rwc)
			if ok && err == nil {
				kick(user)
				world.SpawnPlayer(rwc, user)
				return
			}
		case ErrNotSetup:
			rwc.Close()
			return
		}
		if err != nil {
			log.Printf("Error during login of user from %s: %s", ip, err)
			return
		}
	}
}
func ProcessTroubleShooting(rwc io.ReadWriteCloser) { data := make([]byte, 0) buf := make([]byte, 1024) for { n, err := rwc.Read(buf) if nil != err { break } if len(data) == 0 { data = buf[0:n] } else { data = append(data, buf[0:n]...) } if len(data) > 1024 { fmt.Fprintf(rwc, "Too long command from input.") break } i := bytes.IndexByte(data, '\n') if -1 == i { continue } line := strings.TrimSpace(string(data[0:i])) data = data[i+1:] if len(line) == 0 { continue } err = Handle(line, rwc) if err == io.EOF { break } } rwc.Close() }
func (s *Server) handleScgiRequest(fd io.ReadWriteCloser) { var buf bytes.Buffer tmp := make([]byte, 1024) n, err := fd.Read(tmp) if err != nil || n == 0 { return } colonPos := bytes.IndexByte(tmp[0:], ':') read := n length, _ := strconv.Atoi(string(tmp[0:colonPos])) buf.Write(tmp[0:n]) for read < length { n, err := fd.Read(tmp) if err != nil || n == 0 { break } buf.Write(tmp[0:n]) read += n } req, err := readScgiRequest(&buf) if err != nil { s.Logger.Println("SCGI read error", err.Error()) return } sc := scgiConn{fd, req, make(map[string][]string), false} s.routeHandler(req, &sc) sc.finishRequest() fd.Close() }
// client runs the client side of the game protocol over server: it sends
// the initial handshake, then hands encoding/decoding off to background
// goroutines and enters the main game loop.
func client(username string, server io.ReadWriteCloser) {
	log.Print("Connected to server")
	defer server.Close()
	defer log.Print("Server disconnected")
	// gob writes go through a buffered writer; reads come straight off the
	// connection.
	buf := bufio.NewWriter(server)
	encode, decode := gob.NewEncoder(buf), gob.NewDecoder(server)
	// Send the (zero-valued) handshake first.
	// NOTE(review): buf is not flushed here; presumably clientEncode
	// flushes it before the server needs the handshake bytes — confirm.
	var handshake Handshake
	if err := encode.Encode(handshake); err != nil {
		log.Printf("Error while sending handshake: %s", err)
		return
	}
	// All outgoing packets are funneled through send, which the deferred
	// close tears down when the game loop returns.
	send := make(chan *packet.Packet)
	defer close(send)
	clientpkg.Network = send
	go clientEncode(encode, buf, send)
	go clientDecode(decode)
	go keepAlive(send)
	// Blocks until the game ends.
	clientpkg.Main()
}
func Pipe(src io.ReadWriteCloser, dst io.ReadWriteCloser) (int64, int64) { var sent, received int64 var c = make(chan bool) var o sync.Once close := func() { src.Close() dst.Close() close(c) } go func() { received, _ = io.Copy(src, dst) o.Do(close) }() go func() { sent, _ = io.Copy(dst, src) o.Do(close) }() <-c return received, sent }
func PipeThenClose(src, dst io.ReadWriteCloser) { defer dst.Close() buf := make([]byte, MaxPacketSize) for { n, err := src.Read(buf) // read may return EOF with n > 0 // should always process n > 0 bytes before handling error if n > 0 { // Note: avoid overwrite err returned by Read. if _, err := dst.Write(buf[0:n]); err != nil { log.Println("write:", err) break } } if err != nil { // Always "use of closed network connection", but no easy way to // identify this specific error. So just leave the error along for now. // More info here: https://code.google.com/p/go/issues/detail?id=4373 /* if bool(Debug) && err != io.EOF { Debug.Println("read:", err) } */ log.Println("read:", err) break } } }
func setflag(c io.ReadWriteCloser) { defer c.Close() var flag_id string fmt.Fprintln(c, "room_id: ") _, err := fmt.Fscanln(c, &flag_id) if err != nil { return } var cookie string fmt.Fprintln(c, "auth_token: ") _, err = fmt.Fscanln(c, &cookie) if err != nil { return } var flag string fmt.Fprintln(c, "flag: ") _, err = fmt.Fscanln(c, &flag) if err != nil { return } if db.Set(flag_id, []string{cookie, flag}) { fmt.Fprintln(c, "set_flag flag_set") log.Println("setflag: flag set") } else if cookie == db.Get(flag_id)[0] { db.Update(flag_id, []string{cookie, flag}) fmt.Fprintln(c, "setflag: flag_updated") log.Println("setflag: flag updated") } else { fmt.Fprintln(c, "setflag: flag_update_auth_fail") log.Println("setflag: auth fail") } }
func (p *Proxy) accept(src io.ReadWriteCloser) { p.count++ cid := p.count l := p.Fork("conn#%d", cid) l.Debugf("Open") if p.client.sshConn == nil { l.Debugf("No server connection") src.Close() return } remoteAddr := p.remote.RemoteHost + ":" + p.remote.RemotePort dst, err := chshare.OpenStream(p.client.sshConn, remoteAddr) if err != nil { l.Infof("Stream error: %s", err) src.Close() return } //then pipe s, r := chshare.Pipe(src, dst) l.Debugf("Close (sent %d received %d)", s, r) }
func acceptAndWrite(rwc io.ReadWriteCloser, t *testing.T) { // write buf := []byte(msg) for len(buf) > 0 { n, err := rwc.Write(buf) if err != nil { t.Fatalf("write: %s", err) } buf = buf[n:] } // read buf2 := make([]byte, len(msg)) free := buf2 for len(free) > 0 { n, err := rwc.Read(free) if err != nil { t.Fatalf("write/read: %s", err) } free = free[n:] } if string(buf2) != msg { t.Fatalf("write/read crc fail") } // close if err := rwc.Close(); err != nil { t.Fatalf("write/close: %s", err) } }
func handleScgiRequest(fd io.ReadWriteCloser) { var buf bytes.Buffer var tmp [1024]byte n, err := fd.Read(&tmp) if err != nil || n == 0 { return } colonPos := bytes.IndexByte(tmp[0:], ':') read := n length, _ := strconv.Atoi(string(tmp[0:colonPos])) buf.Write(tmp[0:n]) for read < length { n, err := fd.Read(&tmp) if err != nil || n == 0 { break } buf.Write(tmp[0:n]) read += n } req, err := readScgiRequest(&buf) if err != nil { log.Stderrf("SCGI read error", err.String()) return } sc := scgiConn{fd, make(map[string][]string), false} routeHandler(req, &sc) fd.Close() }
func (pf *fakePortForwarder) PortForward(name string, uid types.UID, port uint16, stream io.ReadWriteCloser) error { defer stream.Close() var wg sync.WaitGroup // client -> server wg.Add(1) go func() { defer wg.Done() // copy from stream into a buffer received := new(bytes.Buffer) io.Copy(received, stream) // store the received content pf.lock.Lock() pf.received[port] = received.String() pf.lock.Unlock() }() // server -> client wg.Add(1) go func() { defer wg.Done() // send the hardcoded data to the stream io.Copy(stream, strings.NewReader(pf.send[port])) }() wg.Wait() return nil }
func Copy(a io.ReadWriteCloser, b io.ReadWriteCloser) { // setup one-way forwarding of stream traffic io.Copy(a, b) // and close both connections when a read fails a.Close() b.Close() }
func netcat(c io.ReadWriteCloser) { out("piping stdio to connection") done := make(chan struct{}, 2) go func() { n, _ := io.Copy(c, os.Stdin) out("sent %d bytes", n) done <- struct{}{} }() go func() { n, _ := io.Copy(os.Stdout, c) out("received %d bytes", n) done <- struct{}{} }() // wait until we exit. sigc := make(chan os.Signal, 1) signal.Notify(sigc, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) select { case <-done: case <-sigc: return } c.Close() }
func fakeServer(t *testing.T, rw io.ReadWriteCloser, fakeReply fakeResponseMap) { b := bufio.NewReader(rw) _, err := rw.Write([]byte(fakeReply[""])) if err != nil { t.Errorf("fakeServer: Banner write causes %s", err) } for { line, err := b.ReadString('\n') if err != nil { if err != io.EOF { t.Errorf("fakeServer: Reading causes %s", err) } break } reply := fakeReply[strings.TrimSpace(line)] if reply == "" { break } _, err = rw.Write([]byte(reply)) if err != nil { t.Errorf("fakeServer: Writing causes %s", err) break } } rw.Close() }
func handleClient(p1, p2 io.ReadWriteCloser) { log.Println("stream opened") defer log.Println("stream closed") defer p1.Close() defer p2.Close() // start tunnel p1die := make(chan struct{}) go func() { io.Copy(p1, p2) close(p1die) }() p2die := make(chan struct{}) go func() { io.Copy(p2, p1) close(p2die) }() // wait for tunnel termination select { case <-p1die: case <-p2die: } }
func (l *Listener) handleConnection(conn io.ReadWriteCloser, received chan<- *ReceivedMessage) { defer conn.Close() parser := SMTPParser() reader := bufio.NewReader(conn) writer := bufio.NewWriter(conn) session := new(Session) session.Start().WriteTo(writer) for { line, err := reader.ReadString('\n') if err != nil { l.Printf("error reading from client:", err) break } resp := session.Advance(parser(line)) resp.WriteTo(writer) switch { case resp.IsClose(): return case resp.NeedsData(): resp, msg := session.ReadData(func() (string, error) { return reader.ReadString('\n') }) resp.WriteTo(writer) if msg != nil { received <- msg } } } }