// echoServer listens for SCTP packets on *Address and echoes each received
// datagram back to its sender. Message is accepted for signature
// compatibility but is not used. The function blocks until a read error
// occurs; a failure to listen terminates the process with exit code -1.
//
// Fixes: the original listened on *settings.Address, ignoring the Address
// parameter, and echoed/printed the entire 2048-byte buffer rather than
// only the n bytes actually received.
func echoServer(Address *string, Message *string) {
	_ = Message // retained for interface compatibility; intentionally unused

	c, err := net.ListenPacket("sctp", *Address)
	if err != nil {
		log.Printf("Error listening: %v", err)
		os.Exit(-1)
	}
	defer c.Close()
	// Log once, not on every loop iteration as the original did.
	log.Printf("Listening on %s", *Address)
	for {
		msg := make([]byte, 2048)
		n, addr, err := c.ReadFrom(msg)
		if err != nil {
			log.Printf("Error: %v ", err)
			break
		}
		// Only the received prefix is meaningful; the rest of the
		// buffer is zero padding.
		fmt.Println("Message: " + string(msg[:n]))
		if _, err := c.WriteTo(msg[:n], addr); err != nil {
			log.Printf("Error: %v ", err)
			break
		}
	}
}
// serveTestUdp is a test helper that loops reading datagrams from in and,
// for each one received, replies to the sender with the hex-decoded form of
// pdu_txt. It exits (marking waiter done) on the first read or write error;
// a hex-decode failure is logged and the loop continues without replying.
func serveTestUdp(in net.PacketConn, pdu_txt string, waiter *sync.WaitGroup) {
	defer waiter.Done()
	buf := make([]byte, 10000)
	for {
		_, from, err := in.ReadFrom(buf)
		if err != nil {
			fmt.Println("[test] read failed", err.Error())
			break
		}
		fmt.Println("[test] recv ok, send at next step")
		pdu, err := hex.DecodeString(pdu_txt)
		if err != nil {
			fmt.Println("[test]", err.Error())
			continue
		}
		_, err = in.WriteTo(pdu, from)
		if err != nil {
			fmt.Println("[test] write failed", err.Error())
			break
		}
	}
}
// sendPacket builds an ICMPv4 Echo Request carrying payload and sends it to
// destination over conn. The 8-byte ICMP header encodes the low 16 bits of
// the process id as the identifier and seq as the sequence number (both
// big-endian), and the Internet checksum (RFC 1071) is computed over the
// whole packet. A write failure panics.
func sendPacket(conn net.PacketConn, destination *net.IPAddr, payload []byte, seq int) {
	packet := make([]byte, len(payload)+8)
	packet[0] = 8 // ICMP type: echo request
	packet[1] = 0 // ICMP code: 0
	// bytes 2-3 are the checksum field; they stay zero while summing.
	packet[4] = uint8(os.Getpid() >> 8)
	packet[5] = uint8(os.Getpid() & 0xff)
	packet[6] = uint8(seq >> 8)
	packet[7] = uint8(seq & 0xff)
	copy(packet[8:], payload)

	// RFC 1071 Internet checksum. The 16-bit words are summed in
	// byte-swapped order and the result is stored byte-swapped; because
	// one's-complement addition is byte-order independent this produces
	// the same network-order checksum.
	cklen := len(packet)
	cksum := uint32(0)
	for i := 0; i < cklen-1; i += 2 {
		cksum += uint32(packet[i+1])<<8 | uint32(packet[i])
	}
	if cklen&1 == 1 {
		cksum += uint32(packet[cklen-1])
	}
	// Fold the carries back into 16 bits (two folds suffice here).
	cksum = (cksum >> 16) + (cksum & 0xffff)
	cksum = cksum + (cksum >> 16)
	// The checksum field was zero during summation, so plain assignment
	// replaces the original misleading `^=` (xor with zero == assign).
	packet[2] = uint8(^cksum & 0xff)
	packet[3] = uint8(^cksum >> 8)

	_, err := conn.WriteTo(packet, destination)
	if err != nil {
		panic(err)
	}
}
// WriteFile receives one TFTP DATA packet on conn into the caller-supplied
// buffer packet, validates it, writes its payload to w, and ACKs it.
//
// Validation: the packet must carry the DATA opcode and its block number
// (big-endian, bytes 2:4) must equal tid; on a mismatch a TFTP error
// (code 5, "Unknown transfer id") is sent to remoteAddress before returning.
// On success the payload (bytes 4..n) is written to w and an ACK for tid is
// sent back to the packet's actual source address (replyAddr, which may
// differ from remoteAddress).
//
// Returns the number of bytes read from the socket, the sender's address,
// and the first error encountered (wrapped with context).
// NOTE(review): the SendError return value is discarded — best-effort by
// design, presumably; confirm against the rest of the TFTP package.
func WriteFile(w io.Writer, conn net.PacketConn, remoteAddress net.Addr, packet []byte, tid uint16) (int, net.Addr, error) {
	// Read data packet
	n, replyAddr, err := conn.ReadFrom(packet)
	if err != nil {
		return n, replyAddr, fmt.Errorf("Error reading packet: %v", err)
	}
	opcode, err := GetOpCode(packet)
	if err != nil {
		return n, replyAddr, fmt.Errorf("Error getting opcode: %v", err)
	}
	if opcode != OpDATA {
		return n, replyAddr, fmt.Errorf("Expected DATA packet, got %v\n", opcode)
	}
	// Block number sits in bytes 2:4 of the DATA packet.
	packetTID := binary.BigEndian.Uint16(packet[2:4])
	if packetTID != tid {
		SendError(5, "Unknown transfer id", conn, remoteAddress)
		return n, replyAddr, fmt.Errorf("Expected TID %d, got %d\n", tid, packetTID)
	}
	// Write data to disk
	_, err = w.Write(packet[4:n])
	if err != nil {
		return n, replyAddr, fmt.Errorf("Error writing: %v", err)
	}
	ack := CreateAckPacket(tid)
	_, err = conn.WriteTo(ack, replyAddr)
	if err != nil {
		return n, replyAddr, fmt.Errorf("Error writing ACK packet: %v", err)
	}
	return n, replyAddr, nil
}
// sendAbort sends an ABORT packet to the specified peer.
// The 4-byte packet is the ABORT type byte followed by the peer's 3-byte
// connection tag. The WriteTo error is deliberately ignored (abort is
// best-effort), and the pooled packet buffer is released via Free only
// after the write completes.
func sendAbort(c net.PacketConn, addr net.Addr, tag []byte) {
	p := NewPacket(4)
	p.D[0] = byte(PacketAbort)
	copy(p.D[1:4], tag)
	c.WriteTo(p.D, addr)
	p.Free()
}
// RFC 3489: Clients SHOULD retransmit the request starting with an interval // of 100ms, doubling every retransmit until the interval reaches 1.6s. // Retransmissions continue with intervals of 1.6s until a response is // received, or a total of 9 requests have been sent. func (v *packet) send(conn net.PacketConn, addr net.Addr) (*packet, error) { timeout := 100 for i := 0; i < 9; i++ { length, err := conn.WriteTo(v.bytes(), addr) if err != nil { return nil, err } if length != len(v.bytes()) { return nil, errors.New("Error in sending data.") } conn.SetReadDeadline(time.Now().Add(time.Duration(timeout) * time.Millisecond)) if timeout < 1600 { timeout *= 2 } packetBytes := make([]byte, 1024) length, _, err = conn.ReadFrom(packetBytes) if err == nil { return newPacketFromBytes(packetBytes[0:length]) } else { if !err.(net.Error).Timeout() { return nil, err } } } return nil, nil }
func SendError(code uint16, message string, conn net.PacketConn, remoteAddress net.Addr) error { errPacket := CreateErrorPacket(0, message) _, err := conn.WriteTo(errPacket, remoteAddress) if err != nil { return fmt.Errorf("Error writing error packet: %v", err) } return nil }
// benchmarkReadWriteNetUDP performs a single WriteTo/ReadFrom round trip on
// c, sending wb to dst and reading the reply into rb. Any I/O failure
// aborts the benchmark via b.Fatal.
func benchmarkReadWriteNetUDP(b *testing.B, c net.PacketConn, wb, rb []byte, dst net.Addr) {
	_, err := c.WriteTo(wb, dst)
	if err != nil {
		b.Fatal(err)
	}
	_, _, err = c.ReadFrom(rb)
	if err != nil {
		b.Fatal(err)
	}
}
func MGCPCommandRespondEcho(msg MGCPCommand, conn net.PacketConn, addr net.Addr) { str_reply := fmt.Sprintf("%s", msg) if _, err := conn.WriteTo([]byte(str_reply), addr); err != nil { fmt.Println(err) } }
func (p *Packet) Send(c net.PacketConn, addr net.Addr) error { buf, err := p.Encode() if err != nil { return err } _, err = c.WriteTo(buf, addr) return err }
// benchmarkReadWriteNetUDP sends a fixed greeting to dst over c and reads
// one reply, failing the benchmark on any I/O error.
func benchmarkReadWriteNetUDP(b *testing.B, c net.PacketConn, dst net.Addr) {
	payload := []byte("HELLO-R-U-THERE")
	_, err := c.WriteTo(payload, dst)
	if err != nil {
		b.Fatalf("net.PacketConn.WriteTo failed: %v", err)
	}
	reply := make([]byte, 128)
	_, _, err = c.ReadFrom(reply)
	if err != nil {
		b.Fatalf("net.PacketConn.ReadFrom failed: %v", err)
	}
}
func (p *Packet) Send(c net.PacketConn, addr net.Addr) error { var buf [4096]byte n, _, err := p.Encode(buf[:]) if err != nil { return err } n, err = c.WriteTo(buf[:n], addr) return err }
// packetEcho reads datagrams from c and writes each one back to its sender.
// It returns (closing c on the way out) at the first read or write error.
func packetEcho(c net.PacketConn) {
	defer c.Close()
	b := make([]byte, 65536)
	for {
		n, from, err := c.ReadFrom(b)
		if err != nil {
			return
		}
		_, err = c.WriteTo(b[:n], from)
		if err != nil {
			return
		}
	}
}
func MGCPCommandRespondErr(msg MGCPCommand, conn net.PacketConn, addr net.Addr) { var reply MGCPResponse reply.ResponseCode = "504" reply.TransID = msg.TransID reply.ResponseStr = "ERR" str_reply := fmt.Sprintf("%s", reply) if _, err := conn.WriteTo([]byte(str_reply), addr); err != nil { fmt.Println(err) } }
// handleClient answers a single datagram on conn with the current wall-clock
// time. A read error silently drops the request; the reply's write result
// is ignored.
func handleClient(conn net.PacketConn) {
	buf := make([]byte, 512)
	_, addr, err := conn.ReadFrom(buf)
	if err != nil {
		return
	}
	now := time.Now()
	conn.WriteTo([]byte(fmt.Sprint(now)), addr)
}
// ReadFileLoop will read from r in blockSize chunks, sending each chunk to through conn // to remoteAddr. After each send it will wait for an ACK packet. It will loop until // EOF on r. func ReadFileLoop(r io.Reader, conn net.PacketConn, remoteAddr net.Addr, blockSize int) (int, error) { var tid uint16 var bytesRead int buffer := make([]byte, blockSize) ackBuf := make([]byte, 4) for { tid++ n, err := r.Read(buffer) if err == io.EOF { // We're done break } if err != nil { return bytesRead, fmt.Errorf("Error reading data: %v", err) } bytesRead += n packet := createDataPacket(tid, buffer[:n]) n, err = conn.WriteTo(packet, remoteAddr) if err != nil { return bytesRead, fmt.Errorf("Error writing data packet: %v", err) } // Read ack i, _, err := conn.ReadFrom(ackBuf) if err != nil { return bytesRead, fmt.Errorf("Error reading ACK packet: %v", err) } if i != 4 { return bytesRead, fmt.Errorf("Expected 4 bytes read for ACK packet, got %d", i) } ackTid, err := ParseAckPacket(ackBuf) if err != nil { return bytesRead, fmt.Errorf("Error parsing ACK packet: %v", err) } if ackTid != tid { return bytesRead, fmt.Errorf("ACK tid: %d, does not match expected: %d", ackTid, tid) } } return bytesRead, nil }
// Serve accepts incoming TFTP read requests on the listener l, // creating a new service goroutine for each. The service goroutines // use handler to get a byte stream and send it to the client. func Serve(l net.PacketConn, handler Handler) { Log("Listening on %s", l.LocalAddr()) buf := make([]byte, 512) for { n, addr, err := l.ReadFrom(buf) if err != nil { Log("Reading from socket: %s", err) continue } req, err := parseRRQ(addr, buf[:n]) if err != nil { Debug("parseRRQ: %s", err) l.WriteTo(mkError(err), addr) continue } go transfer(addr, req, handler) } }
// Send stdin packets to addr via conn until EOF, then signal master with // readShutdown. // If we experience a short write, we just warn about it and discard the // unwritten data; this is basically required by packet-based connection // types, since two packets != one big packet. func packetSend(conn net.PacketConn, addr net.Addr, master chan shutdowns) { buf := make([]byte, bufsize) for { nr, err := os.Stdin.Read(buf[0:]) if err != nil { // We do not attempt to write partial data on a // stdin read error. if err != io.EOF { warnln("stdin read error:", err) } break } nw, err := conn.WriteTo(buf[0:nr], addr) if err != nil { warnln("network write error to", addr, ":", err) break } if nr != nw { warnf("short write to %s: %d written of %d\n", addr, nw, nr) } } master <- readShutdown }
// Main boots a doozer node. It wires the store, proposer, consensus manager,
// garbage collection, membership cleaning, the TCP serving loop, the optional
// web UI, a UDP send pump (out channel -> udpConn), and finally runs the UDP
// receive loop forever, feeding packets into the consensus `in` channel and
// updating peer liveness. If cl is nil this node founds a brand-new cluster;
// otherwise it syncs from the existing cluster via cl before activating.
// The function only returns when the UDP socket reports os.EINVAL
// (socket closed).
func Main(clusterName, self, buri, secret string, cl *doozer.Conn, udpConn net.PacketConn, listener, webListener net.Listener, pulseInterval, fillDelay, kickTimeout int64) {
	listenAddr := listener.Addr().String()
	canWrite := make(chan bool, 1)
	in := make(chan consensus.Packet, 50)
	out := make(chan consensus.Packet, 50)
	st := store.New()
	pr := &proposer{
		seqns: make(chan int64, alpha),
		props: make(chan *consensus.Prop),
		st:    st,
	}
	// calSrv starts the per-CAL services (GC pulse/clean and the consensus
	// manager) once this node's starting seqn is known.
	calSrv := func(start int64) {
		go gc.Pulse(self, st.Seqns, pr, pulseInterval)
		go gc.Clean(st, 360000, time.Tick(1e9))
		consensus.NewManager(self, start, alpha, in, out, st.Ops, pr.seqns, pr.props, fillDelay, st)
	}
	if cl == nil {
		// we are the only node in a new cluster
		set(st, "/ctl/name", clusterName, store.Missing)
		set(st, "/ctl/node/"+self+"/addr", listenAddr, store.Missing)
		set(st, "/ctl/node/"+self+"/hostname", os.Getenv("HOSTNAME"), store.Missing)
		set(st, "/ctl/node/"+self+"/version", Version, store.Missing)
		set(st, "/ctl/cal/0", self, store.Missing)
		calSrv(<-st.Seqns)
		// Skip ahead alpha steps so that the registrar can provide a
		// meaningful cluster.
		for i := 0; i < alpha; i++ {
			st.Ops <- store.Op{1 + <-st.Seqns, store.Nop}
		}
		canWrite <- true
	} else {
		// Joining an existing cluster: advertise ourselves, then replay
		// the cluster's state into the local store before activating.
		setC(cl, "/ctl/node/"+self+"/addr", listenAddr, store.Clobber)
		setC(cl, "/ctl/node/"+self+"/hostname", os.Getenv("HOSTNAME"), store.Clobber)
		setC(cl, "/ctl/node/"+self+"/version", Version, store.Clobber)
		rev, err := cl.Rev()
		if err != nil {
			panic(err)
		}
		stop := make(chan bool, 1)
		go follow(st, cl, rev+1, stop)
		info, err := cl.Walk("/**", rev, 0, -1)
		if err != nil {
			panic(err)
		}
		for _, ev := range info {
			// store.Clobber is okay here because the event
			// has already passed through another store
			mut := store.MustEncodeSet(ev.Path, string(ev.Body), store.Clobber)
			st.Ops <- store.Op{ev.Rev, mut}
		}
		st.Flush()
		ch, err := st.Wait(store.Any, rev+1)
		if err == nil {
			<-ch
		}
		go func() {
			n := activate(st, self, cl)
			calSrv(n)
			advanceUntil(cl, st.Seqns, n+alpha)
			stop <- true
			canWrite <- true
			if buri != "" {
				// Register this node in the boot namespace.
				b, err := doozer.DialUri(buri)
				if err != nil {
					panic(err)
				}
				setC(
					b,
					"/ctl/ns/"+clusterName+"/"+self,
					listenAddr,
					store.Missing,
				)
			}
		}()
	}
	shun := make(chan string, 3) // sufficient for a cluster of 7
	go member.Clean(shun, st, pr)
	go server.ListenAndServe(listener, canWrite, st, pr, secret)
	if secret == "" && webListener != nil {
		web.Store = st
		web.ClusterName = clusterName
		go web.Serve(webListener)
	}
	// UDP send pump: drain the consensus out channel onto the wire.
	go func() {
		for p := range out {
			addr, err := net.ResolveUDPAddr(p.Addr)
			if err != nil {
				log.Println(err)
				continue
			}
			n, err := udpConn.WriteTo(p.Data, addr)
			if err != nil {
				log.Println(err)
				continue
			}
			if n != len(p.Data) {
				log.Println("packet len too long:", len(p.Data))
				continue
			}
		}
	}()
	lv := liveness{
		timeout: kickTimeout,
		ival:    kickTimeout / 2,
		times:   make(map[string]int64),
		self:    self,
		shun:    shun,
	}
	// UDP receive loop: runs until the socket is closed (os.EINVAL).
	for {
		t := time.Nanoseconds()
		buf := make([]byte, maxUDPLen)
		n, addr, err := udpConn.ReadFrom(buf)
		if err == os.EINVAL {
			return
		}
		if err != nil {
			log.Println(err)
			continue
		}
		buf = buf[:n]
		// Update liveness time stamp for this addr
		lv.times[addr.String()] = t
		lv.check(t)
		in <- consensus.Packet{addr.String(), buf}
	}
}
// process the connection state SHUTDOWN-ACK-SENT func (c *connection) state_shutdownAckSent(pc net.PacketConn, tq *timerQueue, abortTimer *timer, cd *connectionData) connectionState { // Read now returns io.EOF c.L.Lock() if c.ReadErr == nil { c.ReadErr = io.EOF } unblockRead := c.readData.p != nil c.readData.p = nil c.L.Unlock() if unblockRead { c.readData.Done() } // no longer ACK data cd.DisableAcks = true // disable T2-rtx tq.StopTimer(&cd.RetransmitTimer) var ( incoming []Packet first = true retransmitsLeft = 5 retransmit = timer{ Cond: &c.Cond, } ) for { // retransmit SHUTDOWN-ACK packet if first || retransmit.Signaled() { first = false // process resends of the SHUTDOWN packet retransmitsLeft-- if retransmitsLeft <= 0 { return connectionAbort } // build SHUTDOWN packet p := NewPacket(4) p.D[0] = byte(PacketShutdownAck) copy(p.D[1:4], cd.ConnectionTag[:]) // Send to peer _, err := pc.WriteTo(p.D, c.addr) if err != nil { c.L.Lock() c.ReadErr = err c.WriteErr = err c.L.Unlock() return connectionAbort } // Start T2-rtx to resend SHUTDOWN-ACK tq.StopTimer(&retransmit) tq.StartTimer(&retransmit, 400*time.Millisecond) } // wait for connection to become ready c.L.Lock() for len(c.Incoming) == 0 && !retransmit.Signaled() && !abortTimer.Signaled() { c.Wait() } incoming = append(incoming, c.Incoming...) c.Incoming = c.Incoming[:0] c.L.Unlock() // has the abort timer been signaled if abortTimer.Signaled() { return connectionAbort } // have we found a SHUTDOWN-COMPLETE packet? shutdownComplete := false for _, p := range incoming { if PacketType(p.D[0]) == PacketShutdownComplete { shutdownComplete = true } p.Free() } // on SHUTDOWN-COMPLETE, transition to CLOSED if shutdownComplete { return connectionClosed } } }
// process the connection state SHUTDOWN-SENT
//
// state_shutdownSent stops ACKing data and (re)sends a SHUTDOWN packet
// carrying the current acknowledged-byte count. A resend with an unchanged
// ack count burns one of 5 retransmit credits (400ms T2-rtx); a changed ack
// count resends for free. While waiting it keeps processing incoming data.
// Transitions: peer SHUTDOWN -> SHUTDOWN-ACK-SENT; peer SHUTDOWN-ACK ->
// send SHUTDOWN-COMPLETE and CLOSED; abort timer, exhausted retransmits, or
// any I/O/processing error -> ABORT.
func (c *connection) state_shutdownSent(pc net.PacketConn, tq *timerQueue, abortTimer *timer, cd *connectionData) connectionState {
	// no longer ACK data
	cd.DisableAcks = true
	// disable T2-rtx
	tq.StopTimer(&cd.RetransmitTimer)
	retransmit := timer{
		Cond: &c.Cond,
	}
	var (
		// currentAck starts one below the real value to force the
		// first send.
		currentAck = cd.AcknowlegedBytes() - 1
		incoming   []Packet
	)
	retransmitsLeft := 5
	for {
		// if we have a new outgoing sequence, (re)send SHUTDOWN packet
		if seq := cd.AcknowlegedBytes(); seq != currentAck || retransmit.Signaled() {
			// process resends of the SHUTDOWN packet
			if seq == currentAck {
				retransmitsLeft--
				if retransmitsLeft <= 0 {
					return connectionAbort
				}
			}
			// build SHUTDOWN packet
			currentAck = cd.AcknowlegedBytes()
			p := NewPacket(8)
			p.D[0] = byte(PacketShutdown)
			copy(p.D[1:4], cd.ConnectionTag[:])
			binary.LittleEndian.PutUint32(p.D[4:8], currentAck)
			// Send to peer
			_, err := pc.WriteTo(p.D, c.addr)
			if err != nil {
				c.L.Lock()
				c.ReadErr = err
				c.WriteErr = err
				c.L.Unlock()
				return connectionAbort
			}
			// Start T2-rtx to resend SHUTDOWN
			tq.StopTimer(&retransmit)
			tq.StartTimer(&retransmit, 400*time.Millisecond)
		}
		// wait for connection to become ready
		timerAborted, receivedShutdown := false, false
		c.L.Lock()
		for c.shouldWaitOnCV(tq, cd) && !retransmit.Signaled() {
			timerAborted = abortTimer.Signaled()
			receivedShutdown = cd.ReceivedShutdown
			if timerAborted || receivedShutdown {
				break
			}
			c.Wait()
		}
		incoming = append(incoming, c.Incoming...)
		c.Incoming = c.Incoming[:0]
		c.L.Unlock()
		// on SHUTDOWN, transition to SHUTDOWN-ACK-SENT
		if cd.ReceivedShutdown {
			return connectionShutdownAckSent
		}
		// on SHUTDOWN-ACK, transition to CLOSED
		if cd.ReceivedShutdownAck {
			// Read now returns io.EOF
			c.L.Lock()
			if c.ReadErr == nil {
				c.ReadErr = io.EOF
			}
			unblockRead := c.readData.p != nil
			c.readData.p = nil
			c.L.Unlock()
			if unblockRead {
				c.readData.Done()
			}
			// send SHUTDOWN-COMPLETE (best-effort; write error ignored)
			p := NewPacket(4)
			p.D[0] = byte(PacketShutdownComplete)
			copy(p.D[1:4], cd.ConnectionTag[:])
			pc.WriteTo(p.D, c.addr)
			return connectionClosed
		}
		if timerAborted {
			return connectionAbort
		}
		// NOTE(review): the early returns above leave any batched
		// `incoming` packets un-Freed — confirm whether that leak is
		// acceptable on the shutdown paths.
		// process incoming data
		err := cd.Process(pc, c.addr, tq, incoming, nil)
		for _, p := range incoming {
			p.Free()
		}
		incoming = incoming[:0]
		if err != nil {
			c.L.Lock()
			c.ReadErr = err
			c.WriteErr = err
			c.L.Unlock()
			return connectionAbort
		}
		// deliver application data
		c.deliverApplicationData(cd)
	}
}
// Main boots a doozer node (read/write and read-only secrets variant). It
// wires the store, proposer, consensus manager (struct-configured with a
// 10ms ticker), GC (history horizon hi), membership cleaning, the TCP
// serving loop, the optional web UI, a UDP send pump (out -> udpConn), and
// then runs the UDP receive loop forever, feeding packets into `in` and
// tracking peer liveness. If cl is nil this node founds a new cluster;
// otherwise it clones the existing cluster's tree via doozer.Walk before
// activating. Returns only when the UDP socket reports os.EINVAL (closed).
func Main(clusterName, self, buri, rwsk, rosk string, cl *doozer.Conn, udpConn net.PacketConn, listener, webListener net.Listener, pulseInterval, fillDelay, kickTimeout int64, hi int64) {
	listenAddr := listener.Addr().String()
	canWrite := make(chan bool, 1)
	in := make(chan consensus.Packet, 50)
	out := make(chan consensus.Packet, 50)
	st := store.New()
	pr := &proposer{
		seqns: make(chan int64, alpha),
		props: make(chan *consensus.Prop),
		st:    st,
	}
	// calSrv starts the per-CAL services once the starting seqn is known.
	calSrv := func(start int64) {
		go gc.Pulse(self, st.Seqns, pr, pulseInterval)
		go gc.Clean(st, hi, time.Tick(1e9))
		var m consensus.Manager
		m.Self = self
		m.DefRev = start
		m.Alpha = alpha
		m.In = in
		m.Out = out
		m.Ops = st.Ops
		m.PSeqn = pr.seqns
		m.Props = pr.props
		m.TFill = fillDelay
		m.Store = st
		m.Ticker = time.Tick(10e6)
		go m.Run()
	}
	if cl == nil {
		// we are the only node in a new cluster
		set(st, "/ctl/name", clusterName, store.Missing)
		set(st, "/ctl/node/"+self+"/addr", listenAddr, store.Missing)
		set(st, "/ctl/node/"+self+"/hostname", os.Getenv("HOSTNAME"), store.Missing)
		set(st, "/ctl/node/"+self+"/version", Version, store.Missing)
		set(st, "/ctl/cal/0", self, store.Missing)
		calSrv(<-st.Seqns)
		// Skip ahead alpha steps so that the registrar can provide a
		// meaningful cluster.
		for i := 0; i < alpha; i++ {
			st.Ops <- store.Op{1 + <-st.Seqns, store.Nop}
		}
		canWrite <- true
	} else {
		// Joining an existing cluster: advertise ourselves, then clone
		// the cluster's tree into the local store before activating.
		setC(cl, "/ctl/node/"+self+"/addr", listenAddr, store.Clobber)
		setC(cl, "/ctl/node/"+self+"/hostname", os.Getenv("HOSTNAME"), store.Clobber)
		setC(cl, "/ctl/node/"+self+"/version", Version, store.Clobber)
		rev, err := cl.Rev()
		if err != nil {
			panic(err)
		}
		stop := make(chan bool, 1)
		go follow(st, cl, rev+1, stop)
		// Any walk error is fatal; the watcher goroutine panics on the
		// first value received before close(errs).
		errs := make(chan os.Error)
		go func() {
			e, ok := <-errs
			if ok {
				panic(e)
			}
		}()
		doozer.Walk(cl, rev, "/", cloner{st.Ops, cl}, errs)
		close(errs)
		st.Flush()
		ch, err := st.Wait(store.Any, rev+1)
		if err == nil {
			<-ch
		}
		go func() {
			n := activate(st, self, cl)
			calSrv(n)
			advanceUntil(cl, st.Seqns, n+alpha)
			stop <- true
			canWrite <- true
			if buri != "" {
				// Register this node in the boot namespace.
				b, err := doozer.DialUri(buri, "")
				if err != nil {
					panic(err)
				}
				setC(
					b,
					"/ctl/ns/"+clusterName+"/"+self,
					listenAddr,
					store.Missing,
				)
			}
		}()
	}
	shun := make(chan string, 3) // sufficient for a cluster of 7
	go member.Clean(shun, st, pr)
	go server.ListenAndServe(listener, canWrite, st, pr, rwsk, rosk)
	// The web UI is only exposed when the node has no access secrets.
	if rwsk == "" && rosk == "" && webListener != nil {
		web.Store = st
		web.ClusterName = clusterName
		go web.Serve(webListener)
	}
	// UDP send pump: drain the consensus out channel onto the wire.
	go func() {
		for p := range out {
			addr, err := net.ResolveUDPAddr("udp", p.Addr)
			if err != nil {
				log.Println(err)
				continue
			}
			n, err := udpConn.WriteTo(p.Data, addr)
			if err != nil {
				log.Println(err)
				continue
			}
			if n != len(p.Data) {
				log.Println("packet len too long:", len(p.Data))
				continue
			}
		}
	}()
	lv := liveness{
		timeout: kickTimeout,
		ival:    kickTimeout / 2,
		times:   make(map[string]int64),
		self:    self,
		shun:    shun,
	}
	// UDP receive loop: runs until the socket is closed (os.EINVAL).
	for {
		t := time.Nanoseconds()
		buf := make([]byte, maxUDPLen)
		n, addr, err := udpConn.ReadFrom(buf)
		if err == os.EINVAL {
			return
		}
		if err != nil {
			log.Println(err)
			continue
		}
		buf = buf[:n]
		// Update liveness time stamp for this addr
		lv.times[addr.String()] = t
		lv.check(t)
		in <- consensus.Packet{addr.String(), buf}
	}
}
// receives packets from the receive loop and dispatches them
// to their corresponding connections
//
// processLoop is the central dispatcher: it drains batches of raw packets
// from pld.Q, performs the cookie-based handshake (INIT -> signed COOKIE ->
// COOKIE-ECHO -> COOKIE-ACK) for new connections, routes all other packets
// to established connections by their 24-bit tag, and returns each drained
// buffer to the receive loop. Connection lifetimes are refcounted on wg;
// when the last connection closes, the socket and timer queue are closed.
// The loop exits after pld.err is set, aborting any remaining connections.
func processLoop(c net.PacketConn, l *listener, rld *receiveLoopData, pld *processLoopData) {
	var (
		quit bool
		wg   = new(sync.WaitGroup)
		tq   = newTimerQueue()
		// connections maps 24-bit connection tags to live connections,
		// guarded by the embedded RWMutex.
		connections = struct {
			sync.RWMutex
			M map[uint32]*connection
		}{
			M: make(map[uint32]*connection),
		}
	)
	// keep a refcount to `c` for ourselves
	wg.Add(1)
	defer wg.Done()
	// wait for all pending connections, then close the socket
	go func() {
		wg.Wait()
		c.Close()
		tq.Close()
	}()
	// generate a new key for cookies
	cookieKey := make([]byte, sha1.Size)
	if _, err := crand.Read(cookieKey); err != nil {
		l.L.Lock()
		l.err = fmt.Errorf("Failed to generate cookie secret: %v", err)
		l.L.Unlock()
		l.Broadcast()
		return
	}
	sig := hmac.New(sha1.New, cookieKey)
	// generate addler32(localaddr)
	var localAddrSum [4]byte
	binary.LittleEndian.PutUint32(localAddrSum[:], adler32.Checksum([]byte(c.LocalAddr().String())))
	// shouldWait reports whether the loop should block: no error, no
	// queued packets, and not merely draining the last connections.
	shouldWait := func() bool {
		waitOnConnections := pld.rejectNewConnections && pld.remainingConnections == 0
		return pld.err == nil && len(pld.Q) == 0 && !waitOnConnections
	}
	var packets []processLoopPacket
	for !quit {
		// wait for a packet to become available
		pld.L.Lock()
		for shouldWait() {
			pld.Wait()
		}
		rejectNewConnections := pld.rejectNewConnections
		pldErr := pld.err
		quit = pldErr != nil
		// Swap the queued batch out under the lock; packets' old
		// storage becomes the new (empty) queue.
		packets, pld.Q = pld.Q, packets[:0]
		remainingConnections := pld.remainingConnections
		pld.L.Unlock()
		if rejectNewConnections && remainingConnections == 0 {
			quit = true
		}
		// process packets
		for ii := range packets {
			buffer, addr := packets[ii].D, packets[ii].A
			switch PacketType(buffer[0]) {
			case PacketInit:
				// are we accepting new connections?
				if rejectNewConnections {
					continue
				}
				// verify length
				const MinInitPacketLength = 9
				if len(buffer) < MinInitPacketLength {
					continue
				}
				// verify protocol magic
				if !bytes.Equal(buffer[1:5], protocolMagic) {
					continue
				}
				// verify version
				if buffer[5] != version1 {
					sendAbort(c, addr, buffer[6:9])
					continue
				}
				// Build a stateless signed cookie: type, 3 random
				// bytes, the client's tag, version, expiry, then a
				// 20-byte HMAC-SHA1 over bytes 1:12 plus the local
				// address checksum.
				var outgoing [32]byte
				now := time.Now()
				outgoing[0] = byte(PacketCookie)
				if _, err := crand.Read(outgoing[1:4]); err != nil {
					return
				}
				copy(outgoing[4:7], buffer[6:9])
				outgoing[7] = version1
				binary.LittleEndian.PutUint32(outgoing[8:12], uint32(now.Add(5*time.Second).Unix()+1))
				sig.Reset()
				sig.Write(outgoing[1:12])
				sig.Write(localAddrSum[:])
				// Sum appends into outgoing's backing array, filling
				// outgoing[12:32] in place.
				sig.Sum(outgoing[1:12])
				c.WriteTo(outgoing[:], addr)
			case PacketCookieEcho:
				// are we accepting new connections
				if rejectNewConnections {
					continue
				}
				// verify length
				const CookieEchoPacketLength = 32
				if len(buffer) != CookieEchoPacketLength {
					continue
				}
				// verify signature
				sig.Reset()
				sig.Write(buffer[1:12])
				sig.Write(localAddrSum[:])
				if !hmac.Equal(sig.Sum(nil), buffer[12:]) {
					continue
				}
				// verify version
				if buffer[7] != version1 {
					sendAbort(c, addr, buffer[1:4])
					continue
				}
				// vetify timeout
				now := time.Now()
				if time.Unix(int64(binary.LittleEndian.Uint32(buffer[8:12])), 0).Before(now) {
					sendAbort(c, addr, buffer[1:4])
					continue
				}
				// decode connection tag to uin32
				tagId := binary.LittleEndian.Uint32(buffer[3:7]) >> 8
				connections.RLock()
				_, ok := connections.M[tagId]
				connections.RUnlock()
				// create new connection
				if !ok {
					// create connection
					wg.Add(1)
					connections.Lock()
					// The teardown callback removes the connection
					// from the map, drops the wg refcount, and
					// signals when the last connection is gone.
					conn := newConnection(c, addr, tq, buffer[1:4], func() {
						connections.Lock()
						delete(connections.M, tagId)
						connections.Unlock()
						wg.Done()
						pld.L.Lock()
						pld.remainingConnections--
						remain := pld.remainingConnections
						pld.L.Unlock()
						if remain == 0 {
							pld.Signal()
						}
					})
					connections.M[tagId] = conn
					connections.Unlock()
					pld.L.Lock()
					pld.remainingConnections++
					pld.L.Unlock()
					l.L.Lock()
					l.pending = append(l.pending, conn)
					l.L.Unlock()
					l.Signal()
				}
				// send COOKIE-ACK
				var outgoing [4]byte
				outgoing[0] = byte(PacketCookieAck)
				copy(outgoing[1:4], buffer[1:4])
				c.WriteTo(outgoing[:], addr)
			default:
				// parse connection tag
				tagId := binary.LittleEndian.Uint32(buffer[0:4]) >> 8
				connections.RLock()
				conn, ok := connections.M[tagId]
				connections.RUnlock()
				// discard packets addressed to unknown connections
				if !ok {
					sendAbort(c, addr, buffer[1:4])
					continue
				}
				// copy the packet data so we can reuse the buffer
				p := NewPacket(len(buffer))
				copy(p.D, buffer)
				// queue the packet onto the connection
				conn.L.Lock()
				conn.Incoming = append(conn.Incoming, p)
				conn.L.Unlock()
				conn.Signal()
			}
		}
		// return the packet to the receive loop
		rld.L.Lock()
		for ii := range packets {
			rld.buffers = append(rld.buffers, packets[ii].D)
		}
		rld.L.Unlock()
		rld.Signal()
		// if we were signaled to quit, terminate existing connections
		if pldErr != nil {
			connections.Lock()
			m := connections.M
			connections.M = make(map[uint32]*connection)
			connections.Unlock()
			for _, c := range m {
				c.closeWithError(pldErr)
			}
		}
	}
}
// Main boots a doozer node (doozer.Client walk/watch variant). It wires the
// store, proposer, GC, consensus manager, membership cleaning, the TCP
// server, the optional web UI, a UDP send pump (out -> udpConn), and then
// runs the UDP receive loop forever, feeding packets into `in` and tracking
// peer liveness. If cl is nil this node founds a new cluster; otherwise it
// replays the existing cluster's tree via Walk and tails it via Watch until
// activation, when the watch is canceled. Returns only when the UDP socket
// reports os.EINVAL (closed).
func Main(clusterName, self, baddr string, cl *doozer.Client, udpConn net.PacketConn, listener, webListener net.Listener, pulseInterval, fillDelay, kickTimeout int64) {
	listenAddr := listener.Addr().String()
	var activateSeqn int64
	useSelf := make(chan bool, 1)
	st := store.New()
	pr := &proposer{
		seqns: make(chan int64, alpha),
		props: make(chan *consensus.Prop),
		st:    st,
	}
	// calSrv starts the per-CAL garbage-collection services.
	calSrv := func() {
		go gc.Pulse(self, st.Seqns, pr, pulseInterval)
		go gc.Clean(st, 360000, time.Tick(1e9))
	}
	if cl == nil {
		// we are the only node in a new cluster
		set(st, "/ctl/name", clusterName, store.Missing)
		set(st, "/ctl/node/"+self+"/addr", listenAddr, store.Missing)
		set(st, "/ctl/node/"+self+"/hostname", os.Getenv("HOSTNAME"), store.Missing)
		set(st, "/ctl/node/"+self+"/version", Version, store.Missing)
		set(st, "/ctl/cal/0", self, store.Missing)
		calSrv()
		close(useSelf)
	} else {
		// Joining an existing cluster: advertise ourselves, replay the
		// cluster tree (walk) while tailing new events (watch).
		setC(cl, "/ctl/node/"+self+"/addr", listenAddr, store.Clobber)
		setC(cl, "/ctl/node/"+self+"/hostname", os.Getenv("HOSTNAME"), store.Clobber)
		setC(cl, "/ctl/node/"+self+"/version", Version, store.Clobber)
		rev, err := cl.Rev()
		if err != nil {
			panic(err)
		}
		walk, err := cl.Walk("/**", &rev, nil, nil)
		if err != nil {
			panic(err)
		}
		watch, err := cl.Watch("/**", rev+1)
		if err != nil {
			panic(err)
		}
		go follow(st.Ops, watch.C)
		follow(st.Ops, walk.C)
		st.Flush()
		ch, err := st.Wait(rev + 1)
		if err == nil {
			<-ch
		}
		go func() {
			activateSeqn = activate(st, self, cl)
			calSrv()
			advanceUntil(cl, st.Seqns, activateSeqn+alpha)
			// Once activated we apply our own ops; stop tailing.
			err := watch.Cancel()
			if err != nil {
				panic(err)
			}
			close(useSelf)
			if baddr != "" {
				// Register this node in the boot namespace.
				b := doozer.New("<boot>", baddr)
				setC(
					b,
					"/ctl/ns/"+clusterName+"/"+self,
					listenAddr,
					store.Missing,
				)
			}
		}()
	}
	start := <-st.Seqns
	cmw := st.Watch(store.Any)
	in := make(chan consensus.Packet, 50)
	out := make(chan consensus.Packet, 50)
	consensus.NewManager(self, start, alpha, in, out, st.Ops, pr.seqns, pr.props, cmw, fillDelay, st)
	if cl == nil {
		// Skip ahead alpha steps so that the registrar can provide a
		// meaningful cluster.
		for i := start + 1; i < start+alpha+1; i++ {
			st.Ops <- store.Op{i, store.Nop}
		}
	}
	shun := make(chan string, 3) // sufficient for a cluster of 7
	go member.Clean(shun, st, pr)
	sv := &server.Server{listenAddr, st, pr, self, alpha}
	go sv.Serve(listener, useSelf)
	if webListener != nil {
		web.Store = st
		web.ClusterName = clusterName
		go web.Serve(webListener)
	}
	// UDP send pump: drain the consensus out channel onto the wire.
	go func() {
		for p := range out {
			addr, err := net.ResolveUDPAddr(p.Addr)
			if err != nil {
				log.Println(err)
				continue
			}
			n, err := udpConn.WriteTo(p.Data, addr)
			if err != nil {
				log.Println(err)
				continue
			}
			if n != len(p.Data) {
				log.Println("packet len too long:", len(p.Data))
				continue
			}
		}
	}()
	lv := liveness{
		timeout: kickTimeout,
		ival:    kickTimeout / 2,
		times:   make(map[string]int64),
		self:    self,
		shun:    shun,
	}
	// UDP receive loop: runs until the socket is closed (os.EINVAL).
	for {
		t := time.Nanoseconds()
		buf := make([]byte, maxUDPLen)
		n, addr, err := udpConn.ReadFrom(buf)
		if err == os.EINVAL {
			return
		}
		if err != nil {
			log.Println(err)
			continue
		}
		buf = buf[:n]
		// Update liveness time stamp for this addr
		lv.times[addr.String()] = t
		lv.check(t)
		in <- consensus.Packet{addr.String(), buf}
	}
}