func (proxy *UDPProxy) replyLoop(proxyConn *net.UDPConn, clientAddr *net.UDPAddr, clientKey *connTrackKey) {
    defer func() {
        proxy.connTrackLock.Lock()
        delete(proxy.connTrackTable, *clientKey)
        proxy.connTrackLock.Unlock()
        proxyConn.Close()
    }()

    readBuf := make([]byte, UDPBufSize)
    for {
        proxyConn.SetReadDeadline(time.Now().Add(UDPConnTrackTimeout))
    again:
        read, err := proxyConn.Read(readBuf)
        if err != nil {
            if err, ok := err.(*net.OpError); ok && err.Err == syscall.ECONNREFUSED {
                // This will happen if the last write failed
                // (e.g: nothing is actually listening on the
                // proxied port on the container), ignore it
                // and continue until UDPConnTrackTimeout
                // expires:
                goto again
            }
            return
        }
        for i := 0; i != read; {
            written, err := proxy.listener.WriteToUDP(readBuf[i:read], clientAddr)
            if err != nil {
                return
            }
            i += written
        }
    }
}
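// A minimal stand-alone sketch (ours, not Docker's): on Go 1.13+ the same
// ECONNREFUSED check can be written with errors.Is, which also sees through
// any *os.SyscallError wrapping inside the *net.OpError. The package and
// helper names below are illustrative assumptions.
package udpproxyutil

import (
    "errors"
    "syscall"
)

// isConnRefused reports whether err ultimately stems from ECONNREFUSED,
// no matter how many error layers wrap it.
func isConnRefused(err error) bool {
    return errors.Is(err, syscall.ECONNREFUSED)
}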
func receiveUDP(udpConn *net.UDPConn, rAddr *net.UDPAddr) {
    b := make([]byte, 1024)
    udpConn.SetReadDeadline(time.Now().Add(5 * time.Second))
    n, addr, err := udpConn.ReadFromUDP(b)
    if err != nil {
        fmt.Println(err.Error())
        return
    }
    if n > 1024 {
        fmt.Println("Buff out!")
        return
    }
    fmt.Println(udpConn.RemoteAddr().Network())
    if !addr.IP.Equal(rAddr.IP) {
        fmt.Printf("IP diff: %s-%s\n", addr.IP.String(), rAddr.IP.String())
        return
    } else if addr.Port != rAddr.Port {
        fmt.Printf("Port diff: %d-%d\n", addr.Port, rAddr.Port)
        return
    }
    fmt.Printf("Receive from %s: %s\n", udpConn.RemoteAddr().String(), b[:n])
}
// ServeUDP starts a UDP listener for the server.
// Each request is handled in a separate goroutine,
// with the Handler set in ....
func (srv *Server) ServeUDP(l *net.UDPConn) error {
    defer l.Close()
    handler := srv.Handler
    if handler == nil {
        handler = DefaultServeMux
    }
    if srv.UDPSize == 0 {
        srv.UDPSize = UDPMsgSize
    }
    for {
        m := make([]byte, srv.UDPSize)
        n, a, e := l.ReadFromUDP(m)
        if e != nil {
            return e
        }
        m = m[:n]
        if srv.ReadTimeout != 0 {
            l.SetReadDeadline(time.Now().Add(srv.ReadTimeout))
        }
        if srv.WriteTimeout != 0 {
            l.SetWriteDeadline(time.Now().Add(srv.WriteTimeout))
        }
        d, err := newConn(nil, l, a, m, handler, srv.TsigSecret)
        if err != nil {
            continue
        }
        go d.serve()
    }
    panic("not reached")
}
// serveUDP starts a UDP listener for the server.
// Each request is handled in a separate goroutine.
func (srv *Server) serveUDP(l *net.UDPConn) error {
    defer l.Close()
    handler := srv.Handler
    if handler == nil {
        handler = DefaultServeMux
    }
    if srv.UDPSize == 0 {
        srv.UDPSize = udpMsgSize
    }
    for {
        if srv.ReadTimeout != 0 {
            l.SetReadDeadline(time.Now().Add(srv.ReadTimeout))
        }
        if srv.WriteTimeout != 0 {
            l.SetWriteDeadline(time.Now().Add(srv.WriteTimeout))
        }
        m := make([]byte, srv.UDPSize)
        n, a, e := l.ReadFromUDP(m)
        if e != nil || n == 0 {
            // don't bail out, but wait for a new request
            continue
        }
        m = m[:n]
        go serve(a, handler, m, l, nil, srv.TsigSecret)
    }
    panic("dns: not reached")
}
func udpConnectionReader(conn *net.UDPConn, rcvCh chan UdpMessage, TimeoutCh chan bool) {
    for {
        buf := make([]byte, MSGsize)
        conn.SetReadDeadline(time.Now().Add(200 * time.Millisecond))
        n, rAddr, err := conn.ReadFromUDP(buf)
        buf = buf[:n]
        if err != nil {
            switch err := err.(type) {
            case net.Error:
                if err.Timeout() {
                    TimeoutCh <- true
                    continue
                }
                fmt.Println("Error in connectionReader")
                panic(err)
            }
        }

        var TempData elevatorOperation.Elevator
        DecodeMessage(&TempData, buf)
        rcvCh <- UdpMessage{Raddr: rAddr.String(), Data: TempData, Length: n}
        time.Sleep(10 * time.Millisecond)
    }
}
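// Several of the readers in this collection distinguish deadline expiry from
// real failures by asserting the error to net.Error and calling Timeout().
// A minimal stand-alone sketch of that check using errors.As; the package and
// helper names are illustrative assumptions.
package udputil

import (
    "errors"
    "net"
)

// isTimeout reports whether err is a deadline-exceeded network error, such as
// the one ReadFromUDP returns after a SetReadDeadline deadline passes.
func isTimeout(err error) bool {
    var nerr net.Error
    return errors.As(err, &nerr) && nerr.Timeout()
}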
func detect_precense(connection *net.UDPConn, masterChan chan bool) {
    buffer := make([]byte, 2048)
    for {
        t := time.Now()
        connection.SetReadDeadline(t.Add(3 * time.Second))
        _, _, err := connection.ReadFromUDP(buffer)
        if err != nil {
            fmt.Println("UDP timeout: ", err)
            masterChan <- true
            break
        }
    }
}
// setReadDeadline sets the read deadline on the given connection using the
// given timeout, a duration string understood by time.ParseDuration
// (e.g. "500ms"). The parsed duration is added to time.Now().
func setReadDeadline(connection *net.UDPConn, timeout string) error {
    duration, err := time.ParseDuration(timeout)
    if err != nil {
        return err
    }
    return connection.SetReadDeadline(time.Now().Add(duration))
}
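// A runnable usage sketch for the helper above (ours; the "250ms" value and
// the wildcard listen address are illustrative). The helper is repeated so the
// snippet builds on its own.
package main

import (
    "fmt"
    "net"
    "time"
)

func setReadDeadline(connection *net.UDPConn, timeout string) error {
    duration, err := time.ParseDuration(timeout)
    if err != nil {
        return err
    }
    return connection.SetReadDeadline(time.Now().Add(duration))
}

func main() {
    conn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
    if err != nil {
        panic(err)
    }
    defer conn.Close()

    // After this call, reads fail with a timeout error once 250ms elapse.
    if err := setReadDeadline(conn, "250ms"); err != nil {
        panic(err)
    }

    buf := make([]byte, 1500)
    _, _, err = conn.ReadFromUDP(buf)
    fmt.Println("read returned:", err) // expect a timeout; nothing is sending to us
}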
// Receive a message.
func Receive(l *net.UDPConn, buf []byte) (Message, error) {
    l.SetReadDeadline(time.Now().Add(ResponseTimeout))
    nr, _, err := l.ReadFromUDP(buf)
    if err != nil {
        return Message{}, err
    }
    return ParseMessage(buf[:nr])
}
func udpTest(Conn *net.UDPConn) bool {
    defer Conn.Close()
    buf := make([]byte, 1024)
    Conn.SetReadDeadline(time.Now().Add(1 * time.Second))
    if _, err := Conn.Write([]byte("TEST")); err != nil {
        return false
    }
    n, _, err := Conn.ReadFromUDP(buf)
    if err != nil {
        return false
    }
    return string(buf[:n]) == "OK\n"
}
// Receive a message.
func Receive(l *net.UDPConn, buf []byte) (Message, error) {
    l.SetReadDeadline(time.Now().Add(RESPONSE_TIMEOUT))
    nr, _, err := l.ReadFromUDP(buf)
    if err != nil {
        return Message{}, err
    }
    return parseMessage(buf[:nr])
}
func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, net.Addr, error) {
    conn.SetReadDeadline(time.Now().Add(timeout))
    m := make([]byte, srv.UDPSize)
    n, a, e := conn.ReadFromUDP(m)
    if e != nil || n == 0 {
        return nil, nil, ErrConn
    }
    m = m[:n]
    return m, a, nil
}
func asyncSflowListen(t *testing.T, wg *sync.WaitGroup, conn *net.UDPConn, trace *packetsTraceInfo) {
    defer wg.Done()

    var buf [maxDgramSize]byte
    t.Log("listen...")

    nbPackets := 0
    conn.SetReadDeadline(time.Now().Add(10 * time.Second))
    for {
        _, _, err := conn.ReadFromUDP(buf[:])
        if err != nil {
            neterr := err.(*net.OpError)
            if !neterr.Timeout() {
                t.Error(err.Error())
            }
            break
        }

        p := gopacket.NewPacket(buf[:], layers.LayerTypeSFlow, gopacket.Default)
        sflowLayer := p.Layer(layers.LayerTypeSFlow)
        sflowPacket, ok := sflowLayer.(*layers.SFlowDatagram)
        if !ok {
            t.Fatal("not SFlowDatagram")
            break
        }

        if sflowPacket.SampleCount > 0 {
            for _, sample := range sflowPacket.FlowSamples {
                for _, rec := range sample.Records {
                    record, ok := rec.(layers.SFlowRawPacketFlowRecord)
                    if !ok {
                        t.Fatal("1st layer is not SFlowRawPacketFlowRecord type")
                        break
                    }

                    packet := record.Header
                    nbPackets++
                    packetSize := len(packet.Data())
                    if nbPackets > len(trace.bytes) {
                        t.Fatalf("Too many packets, reference has only %d", len(trace.bytes))
                    }
                    if trace.bytes[nbPackets-1] != packetSize {
                        t.Fatalf("Packet sizes don't match: %d %d", trace.bytes[nbPackets-1], packetSize)
                    }
                }
            }
        }
    }

    if trace.packets != nbPackets {
        t.Fatalf("Number of packets doesn't match: %d %d", trace.packets, nbPackets)
    }
}
func dong(udpConn *net.UDPConn, rAddr *net.UDPAddr, chap []byte, sn uint32, timeout time.Duration) (state *State, err error) {
    b := make([]byte, 1024)
    udpConn.SetReadDeadline(time.Now().Add(timeout))
    n, addr, err := udpConn.ReadFromUDP(b)
    if err != nil {
        return nil, err
    }
    if !udpAddrEqual(addr, rAddr) || checkReply(b[:n], chap, sn) {
        return nil, errNotReplyPackage
    }
    return getStateFromBytes(b)
}
// writeReadUDP sends to_write via udp_conn to remote_addr, then reads from udp_conn into read_buf
// and returns the number of bytes read and the UDP address from which they were received.
// Errors are reported in the 3rd return value.
// If timeout is non-0 the function will return (with an error if necessary) after no more
// than that duration. Before that time, if an error occurs during sending or reading, the
// function will retry the whole operation (beginning with the write).
// For each individual retry, a random timeout between min_wait_retry and max_wait_retry is
// used (but no more than the remaining time from timeout).
func writeReadUDP(udp_conn *net.UDPConn, remote_addr *net.UDPAddr, to_write, read_buf []byte, min_wait_retry, max_wait_retry, timeout time.Duration) (int, *net.UDPAddr, error) {
    var special_err error
    var err error

    if timeout == 0 {
        timeout = 365 * 86400 * time.Second
    }
    endtime := time.Now().Add(timeout)

    if min_wait_retry <= 0 {
        min_wait_retry++
    }
    if max_wait_retry <= min_wait_retry {
        max_wait_retry = min_wait_retry + 1
    }

    for {
        _, err = udp_conn.WriteToUDP(to_write, remote_addr)
        if err == nil {
            timo := time.Duration(rand.Int63n(int64(max_wait_retry-min_wait_retry))) + min_wait_retry
            endtime2 := time.Now().Add(timo)
            if endtime2.After(endtime) {
                endtime2 = endtime
            }
            udp_conn.SetReadDeadline(endtime2)

            var n int
            var raddr *net.UDPAddr
            n, raddr, err = udp_conn.ReadFromUDP(read_buf)
            if err == nil {
                if n < 4 {
                    err = too_short
                } else {
                    return n, raddr, err
                }
            }
        }

        if e, ok := err.(*net.OpError); !ok || !e.Timeout() {
            special_err = err
        }

        if time.Now().After(endtime) {
            break
        }
    }

    if special_err != nil {
        return 0, nil, special_err
    }
    return 0, nil, err
}
func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) {
    conn.SetReadDeadline(time.Now().Add(timeout))
    m := make([]byte, srv.UDPSize)
    n, s, e := ReadFromSessionUDP(conn, m)
    if e != nil || n == 0 {
        if e != nil {
            return nil, nil, e
        }
        return nil, nil, ErrShortRead
    }
    m = m[:n]
    return m, s, nil
}
func receive(conn *net.UDPConn, timeout time.Duration) (Packet, *net.UDPAddr, error) {
    var p Packet
    b := make([]byte, 1500)
    if timeout > 0 {
        conn.SetReadDeadline(time.Now().Add(timeout))
    }
    _, remote, err := conn.ReadFromUDP(b)
    if err != nil {
        return p, remote, err
    }
    err = p.deserialize(b)
    return p, remote, err
}
func backupProcess(conn *net.UDPConn, primaryAlive bool, count *int) bool {
    for primaryAlive {
        conn.SetReadDeadline(time.Now().Add(time.Second * 2)) // takes some time to open terminal
        data := make([]byte, 16)
        length, _, err := conn.ReadFromUDP(data[0:])
        if err != nil {
            primaryAlive = false
            return primaryAlive
        }
        *count = getCount(string(data[0:length]))
        fmt.Println("Backup, count:", *count)
    }
    return true
}
func sendRequest(sock *net.UDPConn, request, response []byte) (int, error) {
    if _, err := sock.Write(request); err != nil {
        return 0, err
    }
    sock.SetReadDeadline(time.Now().Add(time.Second))
    n, err := sock.Read(response)
    if err != nil {
        if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
            return 0, fmt.Errorf("no response from tracker: %s", err)
        }
    }
    return n, err
}
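// A runnable usage sketch for a request/response helper like the one above.
// The tracker address is illustrative and the request bytes are a placeholder,
// not a real tracker protocol message; sendRequest is repeated so the snippet
// builds on its own.
package main

import (
    "fmt"
    "net"
    "time"
)

func sendRequest(sock *net.UDPConn, request, response []byte) (int, error) {
    if _, err := sock.Write(request); err != nil {
        return 0, err
    }
    sock.SetReadDeadline(time.Now().Add(time.Second))
    n, err := sock.Read(response)
    if err != nil {
        if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
            return 0, fmt.Errorf("no response from tracker: %s", err)
        }
    }
    return n, err
}

func main() {
    raddr, err := net.ResolveUDPAddr("udp", "tracker.example.org:6969") // illustrative
    if err != nil {
        panic(err)
    }
    sock, err := net.DialUDP("udp", nil, raddr)
    if err != nil {
        panic(err)
    }
    defer sock.Close()

    response := make([]byte, 2048)
    n, err := sendRequest(sock, []byte("hello"), response)
    if err != nil {
        fmt.Println("request failed:", err)
        return
    }
    fmt.Printf("got %d bytes: %q\n", n, response[:n])
}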
// SendMessage sends an SSDP search message to the multicast
// address 239.255.255.250:1900 and returns the first response.
func (this *SearchGateway) SendMessage() (result string, err error) {
    var conn *net.UDPConn
    defer func() {
        if r := recover(); r != nil {
            err = fmt.Errorf("panic err: %s", r)
        }
    }()
    remotAddr, err := net.ResolveUDPAddr("udp", "239.255.255.250:1900")
    if err != nil {
        return "", fmt.Errorf("multicast address format is incorrect: %s", err)
    }
    locaAddr, err := net.ResolveUDPAddr("udp", this.upnp.LocalHost+":0")
    if err != nil {
        return "", fmt.Errorf("local IP address is incorrect: %s", err)
    }
    conn, err = net.ListenUDP("udp", locaAddr)
    if err != nil {
        return "", fmt.Errorf("error listening on UDP: %s", err)
    }
    defer func(conn net.Conn) {
        if err := conn.Close(); err != nil {
            log.Printf("conn close err: %s", err)
        }
    }(conn)
    _, err = conn.WriteToUDP([]byte(this.searchMessage), remotAddr)
    if err != nil {
        return "", fmt.Errorf("error sending to the multicast address: %s", err)
    }
    buf := make([]byte, 1024)
    err = conn.SetReadDeadline(time.Now().Add(readTimeout))
    if err != nil {
        return "", fmt.Errorf("error setting deadline for connection: %s", err)
    }
    n, _, err := conn.ReadFromUDP(buf)
    if err != nil {
        return "", fmt.Errorf("error reading the response to the multicast search: %s", err)
    }
    return string(buf[:n]), nil
}
// GetRegistryAddressFromInterface returns the address of any registry which is
// currently active on the given interface or localhost.
func GetRegistryAddressFromInterface(intf net.Interface, localhost bool, ch chan *net.TCPAddr) {
    request := LookupInfoRequest{OPERATION_ADDRESS, "registry"}
    response := LookupAddressResponse{}
    buffer := make([]byte, PACKET_SIZE)

    var connection *net.UDPConn
    var err error
    if localhost {
        connection, err = net.ListenUDP(UDP_PROTOCOL, UDP_ANY_ADDR)
    } else {
        connection, err = net.ListenMulticastUDP(UDP_PROTOCOL, &intf, MULTICAT_ADDR)
    }
    if err != nil {
        return
    }
    defer connection.Close()

    bytes, err := json.Marshal(request)
    if err != nil {
        return
    }

    if localhost {
        _, err = connection.WriteToUDP(bytes, MULTICAT_SELF_ADDR)
    } else {
        _, err = connection.WriteToUDP(bytes, MULTICAT_ADDR)
    }
    if err != nil {
        return
    }

    connection.SetReadDeadline(time.Now().Add(time.Second))
    length, address, err := connection.ReadFromUDP(buffer)
    if err != nil {
        return
    }

    err = json.Unmarshal(buffer[:length], &response)
    if err != nil {
        return
    }

    if response.Address.Port != 0 {
        ch <- &net.TCPAddr{IP: address.IP, Port: response.Address.Port, Zone: address.Zone}
    }
}
func backup(sock *net.UDPConn, primaryAlive bool, counter *int) bool {
    for primaryAlive {
        sock.SetReadDeadline(time.Now().Add(2 * time.Second))
        data := make([]byte, 255)
        n, _, err := sock.ReadFromUDP(data[0:])
        if err != nil {
            primaryAlive = false
            return primaryAlive
        }
        SpawnProcess()
        s := strings.TrimPrefix(string(data[:n]), "Count: ")
        count, _ := strconv.Atoi(s)
        *counter = count
        fmt.Println("Backup, count:", *counter)
    }
    return true
}
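// A stand-alone sketch of the primary/backup heartbeat idiom used above: the
// backup blocks in ReadFromUDP with a short deadline and takes over as soon as
// the heartbeats stop. All names, the port, and the 2-second window are
// illustrative assumptions, not the original program's values.
package main

import (
    "fmt"
    "net"
    "time"
)

// waitForPrimary reads "Count: N" heartbeats until none arrive for 2 seconds,
// then returns the last counter value seen so the backup can resume from it.
func waitForPrimary(conn *net.UDPConn) int {
    last := 0
    buf := make([]byte, 64)
    for {
        conn.SetReadDeadline(time.Now().Add(2 * time.Second))
        n, _, err := conn.ReadFromUDP(buf)
        if err != nil {
            // A timeout (or any other read error) means the primary is gone.
            return last
        }
        fmt.Sscanf(string(buf[:n]), "Count: %d", &last)
    }
}

func main() {
    conn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 20005})
    if err != nil {
        panic(err)
    }
    defer conn.Close()
    fmt.Println("primary gone, resuming from count", waitForPrimary(conn))
}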
func listenForDiscPacket(c *net.UDPConn, timeout time.Duration) (addr net.UDPAddr, err error) {
    for {
        // Listen until our deadline
        c.SetReadDeadline(time.Now().Add(timeout))

        // Wait for a packet
        data := make([]byte, 4096)
        read, remoteAddr, err := c.ReadFromUDP(data)
        if err != nil {
            return addr, err
        }

        // Wrap our buffer in an IO object
        rd := bytes.NewReader(data[:read])

        // Parse the header
        var h discPacket
        if err := h.read(rd); err != nil {
            fmt.Printf("Header read error: %v\n", err)
            continue
        }

        // We only want server packets
        if h.Type == DISC_CLIENT {
            continue
        }

        // Return the result to our client and stop
        addr = net.UDPAddr{
            IP:   remoteAddr.IP,
            Port: int(h.Port),
            Zone: remoteAddr.Zone,
        }
        return addr, nil
    }
}
func rd(out chan string, conn *net.UDPConn, quit chan struct{}) {
    log.Printf("rd: %s", conn.LocalAddr())
    defer log.Printf("rd: done")

    const maxSize = 4096
    for {
        select {
        case <-quit:
            return
        default:
            b := make([]byte, maxSize)
            conn.SetReadDeadline(time.Now().Add(cycle))
            n, remoteAddr, err := conn.ReadFrom(b)
            if err != nil {
                continue
            }
            if n >= maxSize {
                log.Printf("%s: too big", remoteAddr)
                continue
            }
            out <- string(b[:n])
        }
    }
}
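// A stand-alone sketch of the pattern rd uses: a short read deadline turns a
// blocking read loop into one that can notice a quit channel between reads.
// Everything here except the pattern itself (names, the 250ms interval, the
// loopback addresses) is an illustrative assumption.
package main

import (
    "fmt"
    "net"
    "time"
)

// pollRead delivers datagrams on out until quit is closed. The short read
// deadline is what lets the loop notice the quit signal promptly: each read
// wakes up at least once per pollInterval even if nothing arrives.
func pollRead(conn *net.UDPConn, out chan<- string, quit <-chan struct{}) {
    const pollInterval = 250 * time.Millisecond // illustrative value
    buf := make([]byte, 4096)
    for {
        select {
        case <-quit:
            return
        default:
        }
        conn.SetReadDeadline(time.Now().Add(pollInterval))
        n, _, err := conn.ReadFrom(buf)
        if err != nil {
            continue // typically a deadline timeout; try again
        }
        out <- string(buf[:n])
    }
}

func main() {
    conn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0})
    if err != nil {
        panic(err)
    }
    defer conn.Close()

    out := make(chan string)
    quit := make(chan struct{})
    go pollRead(conn, out, quit)

    // Send ourselves one datagram so the loop has something to deliver.
    client, err := net.DialUDP("udp", nil, conn.LocalAddr().(*net.UDPAddr))
    if err != nil {
        panic(err)
    }
    defer client.Close()
    client.Write([]byte("ping"))

    fmt.Println("received:", <-out)
    close(quit)
}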
// sendAndWaitForAck sends the data in sendbuf to peer_addr (with possible resends) and waits for
// an ACK with the correct block id, if sendbuf contains a DATA message.
// Returns true if the sending was successful and the ACK was received.
func sendAndWaitForAck(udp_conn *net.UDPConn, peer_addr *net.UDPAddr, sendbuf []byte, retransmissions, dups, strays *int) bool {
    // absolute deadline when this function will return false
    deadline := time.Now().Add(total_timeout)

    readbuf := make([]byte, 4096)

    *retransmissions-- // to counter the ++ being done at the start of the loop

outer:
    for {
        // re/send
        *retransmissions++
        n, err := udp_conn.Write(sendbuf)
        if err != nil {
            util.Log(0, "ERROR! TFTP error in Write(): %v", err)
            break
        }
        if n != len(sendbuf) {
            util.Log(0, "ERROR! TFTP: Incomplete write")
            break
        }
        //util.Log(2, "DEBUG! TFTP: Sent %v bytes to %v. Waiting for ACK...", len(sendbuf), peer_addr)

        for {
            // check absolute deadline
            if time.Now().After(deadline) {
                break outer
            }

            // set deadline for next read
            timo := time.Duration(rand.Int63n(int64(max_wait_retry-min_wait_retry))) + min_wait_retry
            endtime2 := time.Now().Add(timo)
            if endtime2.After(deadline) {
                endtime2 = deadline
            }
            udp_conn.SetReadDeadline(endtime2)

            n, from, err := udp_conn.ReadFromUDP(readbuf)
            if err != nil {
                e, ok := err.(*net.OpError)
                if !ok || !e.Timeout() {
                    util.Log(0, "ERROR! TFTP ReadFromUDP() failed while waiting for ACK from %v (local address: %v): %v", udp_conn.RemoteAddr(), udp_conn.LocalAddr(), err)
                    break outer // retries make no sense => bail out
                }
                //util.Log(2, "DEBUG! TFTP timeout => resend %#v", sendbuf)
                continue outer // resend
            }

            if from.Port != peer_addr.Port {
                *strays++
                emsg := fmt.Sprintf("WARNING! TFTP server got UDP packet from incorrect source: %v instead of %v", from.Port, peer_addr.Port)
                sendError(udp_conn, from, 5, emsg) // 5 => Unknown transfer ID
                continue                           // This error is not fatal since it doesn't affect our peer
            }

            if n == 4 && readbuf[0] == 0 && readbuf[1] == 4 && // 4 => ACK
                (sendbuf[1] != 3 || // we did not send DATA
                    // or the ACK's block id is the same as the one we sent
                    (readbuf[2] == sendbuf[2] && readbuf[3] == sendbuf[3])) {
                //util.Log(2, "DEBUG! TFTP: Received ACK from %v: %#v", peer_addr, readbuf[0:n])
                return true
            }

            if readbuf[0] == 0 && readbuf[1] == 5 { // error
                util.Log(0, "ERROR! TFTP ERROR received while waiting for ACK from %v: %v", peer_addr, string(readbuf[4:n]))
                break outer // retries make no sense => bail out
            }

            // if we sent DATA but the ACK is not for the block we sent,
            // increase the dup counter. If we wanted to be strict we would need to check
            // if the block id is one less for it to be an actual dup, but
            // since the dup counter is only for reporting, we don't care.
            if sendbuf[1] == 3 && (readbuf[2] != sendbuf[2] || readbuf[3] != sendbuf[3]) {
                *dups++
                //util.Log(2, "DEBUG! TFTP duplicate ACK received: %#v => Ignored", string(readbuf[0:n]))
                // ONLY "continue", NOT "continue outer", i.e. DUPs DO NOT CAUSE A RESEND.
                // THIS PREVENTS http://en.wikipedia.org/wiki/Sorcerer's_Apprentice_Syndrome
                // When a timeout happens, it will cause a resend.
                continue
            }

            emsg := fmt.Sprintf("ERROR! TFTP server waiting for ACK from %v but got: %#v", peer_addr, string(readbuf[0:n]))
            sendError(udp_conn, from, 0, emsg) // 0 => Unspecified error
            break outer                        // retries make no sense => bail out
        }
    }

    util.Log(0, "ERROR! TFTP send not acknowledged by %v (retransmissions: %v, dups: %v, strays: %v)", peer_addr, *retransmissions, *dups, *strays)
    return false
}
    route1            db.Route
    addr              *net.UDPAddr
    fakeStatsdServer  *net.UDPConn
    fakeStatsdChan    chan string
    routingAPIProcess ifrit.Process
)

BeforeEach(func() {
    routingAPIRunner := testrunner.New(routingAPIBinPath, routingAPIArgs)
    routingAPIProcess = ginkgomon.Invoke(routingAPIRunner)

    addr, err = net.ResolveUDPAddr("udp", fmt.Sprintf("localhost:%d", 8125+GinkgoParallelNode()))
    Expect(err).ToNot(HaveOccurred())

    fakeStatsdServer, err = net.ListenUDP("udp", addr)
    Expect(err).ToNot(HaveOccurred())
    fakeStatsdServer.SetReadDeadline(time.Now().Add(15 * time.Second))

    fakeStatsdChan = make(chan string, 1)

    go func(statsChan chan string) {
        defer GinkgoRecover()
        for {
            buffer := make([]byte, 1000)
            _, err := fakeStatsdServer.Read(buffer)
            if err != nil {
                close(statsChan)
                return
            }

            scanner := bufio.NewScanner(bytes.NewBuffer(buffer))
            for scanner.Scan() {
                select {
                case statsChan <- scanner.Text():
func processor(partition int, conn *net.UDPConn, tick <-chan compactionTick, quit chan struct{}, wg *sync.WaitGroup) {
    wg.Add(1)
    defer wg.Done()

    shouldQuit := false
    buf := make([]byte, 65536+4, 65536+4)
    index := make(map[string]*deque)
    data := pb.Data{}
    totalMsg := 0

    log.Printf("new processor %d", partition)

    var file *os.File
    defer file.Close()

    getFile := func(t time.Time) (*os.File, int64) {
        if file == nil {
            _, fileName, err := makePath(t)
            if err != nil {
                log.Println("Failed to create path", fileName, err)
                return nil, 0
            }
            fileName += fmt.Sprintf("%d.%d.data", t.Unix(), partition)
            log.Printf("[%d] open new file %s for epoch %d", partition, fileName, t.Unix())
            if file, err = createFile(fileName); err != nil {
                log.Println("Failed to open file", fileName, err)
                return nil, 0
            }
            if _, err = file.WriteString(dataFileHeader); err != nil {
                log.Println("Failed to write to file", err)
                return nil, 0
            }
        }
        offset, _ := file.Seek(0, 1)
        return file, offset
    }

    ctick := <-tick

loop:
    for {
        select {
        case <-quit:
            shouldQuit = true
            log.Printf("[%d] will quit", partition)
        case ct := <-tick:
            log.Printf("[%d] send job to compactor %d\n", partition, ctick.t.Unix())
            ctick.ch <- compactionJob{partition, totalMsg, index}
            index = make(map[string]*deque)
            file.Close()
            file = nil
            totalMsg = 0
            ctick = ct
            if shouldQuit {
                log.Printf("[%d] quitting", partition)
                break loop
            }
        default:
            conn.SetReadDeadline(time.Now().Add(time.Millisecond))
            length, err := conn.Read(buf[4:])
            if err != nil {
                if nerr, ok := err.(net.Error); ok && !nerr.Timeout() {
                    log.Println("UDP read error", err)
                }
                continue
            }

            data.Reset()
            if err := data.Unmarshal(buf[4 : length+4]); err != nil {
                log.Println("Failed to decode", err)
                continue
            }

            file, offset := getFile(ctick.t)
            if file == nil {
                log.Println("Failed to get file")
                continue
            }

            intToByteArray(uint32(length), buf[0:4])
            if _, err := file.Write(buf[:length+4]); err != nil {
                log.Println("Failed to write to file", err)
                continue
            }

            tags := data.GetHeader().GetTags()
            if len(tags) > maxTagsInMessage {
                log.Println("Too many tags in message ", len(tags))
                continue
            }
            for _, tag := range tags {
                if len(tag) > maxTagLength {
                    log.Println("Too long tag")
                    continue
                }
                deque, ok := index[tag]
                if !ok {
                    deque = newDeque(partition)
                    index[tag] = deque
                }
                deque.Append(uint32(offset))
            }
            totalMsg++
        }
    }
}
func handleDownload(e Encoder, controlConn net.Conn, dataConn *net.UDPConn, t *clientTransfer) {
    var wg sync.WaitGroup
    numBlocks := int(math.Ceil(float64(t.filesize) / float64(t.config().BlockSize)))
    bs := bitset.New(uint(numBlocks))
    defer dataConn.Close()

    fo, err := os.Create(t.fullPath())
    if err != nil {
        errMsg := "Error opening file: " + err.Error()
        log.Println(errMsg)
        t.updateProgress(Progress{Type: ERROR, Message: errMsg, Percentage: 0})
    }
    defer fo.Close()

    fileWriter := make(chan Block)
    defer close(fileWriter)

    // handles writing the blocks to the file
    wg.Add(1)
    go func() {
        defer wg.Done()
        for block := range fileWriter {
            writeData(block.Data, block.Number*t.config().BlockSize, fo)
        }
    }()

    expectedBlock := 0
    gaplessToBlock := 0
    missedBlocks := 0
    receivedBlocks := 0
    lastRetransmitTime := time.Now()
    var retransmitBlocks []int

    buf := make([]byte, t.config().BlockSize+500)
    dataConn.SetReadDeadline(time.Now().Add(readTimeout))
    for {
        n, _, err := dataConn.ReadFromUDP(buf)
        dataConn.SetReadDeadline(time.Now().Add(readTimeout))
        if err != nil {
            if neterr, ok := err.(net.Error); ok && neterr.Timeout() {
                // we timed out on a read, but don't have all the data,
                // so send a retransmit request and try again
                restart := false
                if len(retransmitBlocks) <= 0 {
                    retransmitBlocks = insertRetransmitBlock(retransmitBlocks, gaplessToBlock+1)
                    restart = true
                }
                requestRetransmit(retransmitBlocks, bs, controlConn, e, restart)
                retransmitBlocks = []int{}
                dataConn.SetReadDeadline(time.Now().Add(readTimeout))
                continue
            }
            log.Println("Error reading from socket: " + err.Error())
            return
        }

        pkt, err := e.Decode(buf, n)
        if err != nil {
            log.Println(err)
            return
        }

        // write the block to file and build out the list of blocks to retransmit
        block := pkt.Payload.(Block)

        // send the block to be written
        fileWriter <- block
        bs.Set(uint(block.Number))
        receivedBlocks++

        if block.Number > expectedBlock {
            if (len(retransmitBlocks) + (block.Number - expectedBlock)) > t.config().MaxMissedLength {
                requestRetransmit(retransmitBlocks, bs, controlConn, e, true)
                retransmitBlocks = []int{}
            } else {
                for i := expectedBlock; i < block.Number; i++ {
                    retransmitBlocks = insertRetransmitBlock(retransmitBlocks, i)
                }
            }
            missedBlocks = missedBlocks + (block.Number - expectedBlock)
        }

        // if we have received all the blocks, we are done!
        if int(bs.Count()) == numBlocks {
            pkt := Packet{Type: DONE}
            data, _ := e.Encode(&pkt)
            controlConn.Write(data)
            t.updateProgress(Progress{Type: TRANSFERRING, Message: "Finalizing file", Percentage: 1})
            wg.Wait()
            return
        }

        // we will be expecting the next block number
        // in case of restart: these resent blocks are labeled original as well
        if block.Type == ORIGINAL {
            expectedBlock = block.Number + 1
        }

        // keeps track of the point up to where we have received all the blocks
        // with no missing blocks in between
        for bs.Test(uint(gaplessToBlock+1)) && gaplessToBlock < numBlocks {
            gaplessToBlock++
        }

        // if we meet our retransmit criteria, send a message to the server
        if shouldRetransmit(bs.Count(), lastRetransmitTime) {
            // send the error rate
            sendErrorRate(receivedBlocks, missedBlocks, controlConn, e)
            // request the retransmit
            requestRetransmit(retransmitBlocks, bs, controlConn, e, false)
            retransmitBlocks = []int{}
            lastRetransmitTime = time.Now()
            missedBlocks = 0
            receivedBlocks = 0
        }

        // finally, update progress
        t.updateProgress(Progress{Type: TRANSFERRING, Message: "Downloading...", Percentage: float64(bs.Count()) / float64(numBlocks)})
    }
}
// queryServer queries the given server and returns the response and an error in case something went wrong.
func (s *Server) queryServer(request []byte) (response *cubecode.Packet, err error) {
    // connect to the server at port+1 (port is the port you connect to in game;
    // sauerbraten listens on the next higher port for info queries)
    var conn *net.UDPConn
    conn, err = net.DialUDP("udp", nil, s.addr)
    if err != nil {
        return
    }
    defer conn.Close()

    // set up a buffered reader
    bufconn := bufio.NewReader(conn)

    // send the request to the server
    _, err = conn.Write(request)
    if err != nil {
        return
    }

    // receive the response from the server, with a read timeout
    rawResponse := make([]byte, MaxPacketLength)
    var bytesRead int
    conn.SetReadDeadline(time.Now().Add(s.timeOut))
    bytesRead, err = bufconn.Read(rawResponse)
    if err != nil {
        return
    }

    // trim response to what's actually from the server
    packet := cubecode.NewPacket(rawResponse[:bytesRead])

    if bytesRead < 5 {
        err = errors.New("extinfo: invalid response: too short")
        return
    }

    // do some basic checks on the response
    infoType, err := packet.ReadByte()
    if err != nil {
        return
    }
    command, err := packet.ReadByte() // only valid if infoType == EXTENDED_INFO
    if err != nil {
        return
    }

    if infoType == InfoTypeExtended {
        var version, commandError byte
        if command == ExtInfoTypeClientInfo {
            if bytesRead < 6 {
                err = errors.New("extinfo: invalid response: too short")
                return
            }
            version = rawResponse[4]
            commandError = rawResponse[5]
        } else {
            version = rawResponse[3]
            commandError = rawResponse[4]
        }

        if infoType != request[0] || command != request[1] {
            err = errors.New("extinfo: invalid response: response does not match request")
            return
        }

        // this package only supports extinfo protocol version 105
        if version != ExtInfoVersion {
            err = errors.New("extinfo: wrong version: expected " + strconv.Itoa(int(ExtInfoVersion)) + ", got " + strconv.Itoa(int(version)))
            return
        }

        if commandError == ExtInfoError {
            switch command {
            case ExtInfoTypeClientInfo:
                err = errors.New("extinfo: no client with cn " + strconv.Itoa(int(request[2])))
            case ExtInfoTypeTeamScores:
                err = errors.New("extinfo: server is not running a team mode")
            }
            return
        }
    }

    // if not a response to EXTENDED_INFO_CLIENT_INFO, we are done
    if infoType != InfoTypeExtended || command != ExtInfoTypeClientInfo {
        offset := 0
        if infoType == InfoTypeExtended {
            switch command {
            case ExtInfoTypeUptime:
                offset = 4
            case ExtInfoTypeTeamScores:
                offset = 5
            }
        }
        response = cubecode.NewPacket(rawResponse[offset:])
        return
    }

    // handle response to EXTENDED_INFO_CLIENT_INFO
    // some server mods silently fail to implement responses → fail gracefully
    if len(rawResponse) < 7 || rawResponse[6] != ClientInfoResponseTypeCNs {
        err = errors.New("extinfo: invalid response")
        return
    }

    // get CNs out of the response, ignoring the first 7 bytes, which were:
    // EXTENDED_INFO, EXTENDED_INFO_CLIENT_INFO, CN from request, EXTENDED_INFO_ACK, EXTENDED_INFO_VERSION, EXTENDED_INFO_NO_ERROR, EXTENDED_INFO_CLIENT_INFO_RESPONSE_CNS
    clientNums := rawResponse[7:]
    numberOfClients, err := countClientNums(clientNums)
    if err != nil {
        return
    }

    // for each client, receive a packet and append it to a new slice
    clientInfos := make([]byte, 0, MaxPacketLength*numberOfClients)
    for i := 0; i < numberOfClients; i++ {
        // read from connection
        clientInfo := make([]byte, MaxPacketLength)
        conn.SetReadDeadline(time.Now().Add(s.timeOut))
        _, err = bufconn.Read(clientInfo)
        if err != nil {
            return
        }
        // append bytes to slice
        clientInfos = append(clientInfos, clientInfo...)
    }

    response = cubecode.NewPacket(clientInfos)
    return
}
func processor(id int, conn *net.UDPConn, tick <-chan compaction_tick, quit, done chan struct{}) {
    buf := make([]byte, 65536, 65536)
    index := make(map[string]*deque)
    data := &Data{}
    total := 0
    t := (<-tick).t

    log.Printf("new processor %d", id)

    var file *os.File

    getFile := func(t time.Time) (*os.File, int64) {
        if file == nil {
            epoch := t.Unix()
            fileName := fmt.Sprintf("%s%d-%d", filePath, epoch, id)
            log.Printf("[%d] open new file %s for epoch %d", id, fileName, epoch)
            var err error
            file, err = os.OpenFile(fileName, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0664)
            if err != nil {
                log.Println("Failed to open file "+fileName, err)
                return nil, 0
            }
            return file, 0
        }
        offset, _ := file.Seek(0, 0)
        return file, offset
    }

loop:
    for {
        select {
        case ctick := <-tick:
            log.Printf("[%d] send job to compactor\n", id)
            ctick.ch <- compaction_job{id, total, index}
            total = 0
            t = ctick.t
            index = make(map[string]*deque)
            if file != nil {
                file.Close()
                file = nil
            }
            select {
            case <-quit:
                break loop
            default:
            }
        default:
            conn.SetReadDeadline(time.Now().Add(time.Millisecond))
            length, err := conn.Read(buf)
            if err != nil {
                if !err.(net.Error).Timeout() {
                    log.Println("UDP read error", err)
                }
                continue
            }

            if err := proto.Unmarshal(buf[:length], data); err != nil {
                log.Println("Failed to decode", err)
                continue
            }

            file, offset := getFile(t)
            if file == nil {
                log.Println("Failed to get file")
                continue
            }

            if _, err := file.Write(buf); err != nil {
                log.Println("Failed to write to file", err)
                continue
            }
            // file.Sync()

            tags := data.GetHeader().GetTags()
            if len(tags) > max_tags_in_message {
                log.Println("Too many tags in message")
                continue
            }
            for _, tag := range tags {
                if len(tag) > max_tag_length {
                    log.Println("Too long tag")
                    continue
                }
                deque, ok := index[tag]
                if !ok {
                    deque = newDeque(id)
                    index[tag] = deque
                }
                deque.Append(uint32(offset))
            }
            total++
        }
    }

    done <- struct{}{}
}
func GatherCandidates(sock *net.UDPConn, outIpList string, udpAddr string) ([]candidate, error) {
    laddr := sock.LocalAddr().(*net.UDPAddr)
    ret := []candidate{}
    switch {
    case laddr.IP.IsLoopback():
        return nil, errors.New("Connecting over loopback not supported")
    case laddr.IP.IsUnspecified():
        addrs, err := net.InterfaceAddrs()
        if err != nil {
            return nil, err
        }
        for _, addr := range addrs {
            ip, ok := addr.(*net.IPNet)
            if ok && ip.IP.IsGlobalUnicast() {
                ret = append(ret, candidate{&net.UDPAddr{IP: ip.IP, Port: laddr.Port}})
            }
        }
    default:
        ret = append(ret, candidate{laddr})
    }

    addip := func(ipStr string, port int) {
        ip := net.ParseIP(ipStr)
        if port == 0 {
            port = laddr.Port
        }
        bHave := false
        for _, info := range ret {
            if info.Addr.IP.Equal(ip) && info.Addr.Port == port {
                bHave = true
                break
            }
        }
        if !bHave {
            ret = append(ret, candidate{&net.UDPAddr{IP: ip, Port: port}})
        }
    }

    if udpAddr != "" {
        addr, err := net.ResolveUDPAddr("udp", udpAddr)
        if err != nil {
            fmt.Println("Can't resolve udp address: ", err)
            return nil, err
        }

        p2pAddr := ""
        for i := 0; i < 5; i++ {
            sock.WriteToUDP([]byte("makehole"), addr)
            buf := make([]byte, 100)
            sock.SetReadDeadline(time.Now().Add(1 * time.Second))
            n, _, err := sock.ReadFromUDP(buf)
            if err != nil {
                fmt.Println("Can't ReadFromUDP: ", err, addr.String())
                continue
            }
            p2pAddr = string(buf[0:n])
            fmt.Println("read: ", p2pAddr)
            break
        }

        if len(p2pAddr) > 0 {
            tmparr := strings.Split(p2pAddr, ":")
            if len(tmparr) == 2 {
                strip, strport := tmparr[0], tmparr[1]
                ip := net.ParseIP(strip)
                port, _ := strconv.Atoi(strport)
                ret = append(ret, candidate{&net.UDPAddr{IP: ip, Port: port}})
            }
        }
    }

    arr := strings.Split(outIpList, ";")
    for _, ip := range arr {
        addip(ip, 0)
    }

    return ret, nil
}