// parseMsg splits a framed message into command, sequence number, and
// payload. The frame layout is [command][seq][payload], where the two
// leading fields are fixed-size encoded ints. Errors are returned to the
// caller (which checks them in this package) rather than aborting here.
func parseMsg(msg []byte) (command ZmqCommand, seq int, payload []byte, err error) {
	intsz := sizeInt()
	commandi, err := decodeInt(msg[0:intsz])
	if err != nil {
		return 0, 0, nil, fmt.Errorf("could not parse command: %v", err)
	}
	command = ZmqCommand(commandi)
	seq, err = decodeInt(msg[intsz : 2*intsz])
	if err != nil {
		return 0, 0, nil, fmt.Errorf("could not parse seq #: %v", err)
	}
	payload = msg[2*intsz:]
	return command, seq, payload, nil
}
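// Illustrative only: round-trip a frame through parseMsg. This sketch
// assumes the encodeInt helper shown below and the ACK command constant
// referenced by the client code in this package.
func exampleParseMsgRoundTrip() {
	frame := append(encodeInt(int(ACK)), encodeInt(42)...)
	frame = append(frame, []byte("payload")...)
	command, seq, payload, err := parseMsg(frame)
	if err != nil || command != ACK || seq != 42 || string(payload) != "payload" {
		panic("round-trip failed")
	}
}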
// SetNotifier installs the ProcessedNotifier used to signal completed
// writes. The client's send loop must never block, so a blocking notifier
// is rejected outright.
func (src *Client) SetNotifier(n stream.ProcessedNotifier) *Client {
	if n.Blocking() {
		slog.Fatalf("Can't use a blocking Notifier")
	}
	src.notifier = n
	return src
}
func sendMsgNoBlock(sndCh chan<- stream.Object, command ZmqCommand, seq int, payload []byte) {
	select {
	case sndCh <- [][]byte{encodeInt(int(command)), encodeInt(seq), payload}:
	default:
		slog.Fatalf("%v", "Should be non-blocking send")
	}
}
func getPartition(p cube.Partition) Partition {
	switch pt := p.(type) {
	case cube.TimePartition:
		return &TimePartition{&pt}
	default:
		slog.Fatalf("Unknown Partition Type %v", reflect.TypeOf(pt))
	}
	panic("Never Here")
}
func NewDynamicBBManager(bbHosts []string) *DynamicBBManager {
	dm := DynamicBBManager{make(map[time.Time]Era), make([]time.Time, 0), bbHosts, time.Now()}
	if err := dm.pullLatestEra(); err != nil || len(dm.Eras) == 0 {
		slog.Fatalf("Cannot get a valid era %v", err)
	}

	// Keep updating with periodic info
	go dm.keepErasCurrent()

	return &dm
}
// encodeInt encodes a non-negative int as a little-endian uint32.
func encodeInt(val int) []byte {
	if val < 0 {
		panic("Can't encode negative val")
	}
	buf := new(bytes.Buffer)
	err := binary.Write(buf, binary.LittleEndian, uint32(val))
	if err != nil {
		slog.Fatalf("Could not encode binary %v", err)
	}
	return buf.Bytes()
}
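// parseMsg relies on decodeInt and sizeInt, which are not shown in this
// section. A minimal sketch consistent with encodeInt above (4-byte
// little-endian uint32) might look like the following; the real
// implementations may differ.
func sizeIntSketch() int { return 4 }

func decodeIntSketch(b []byte) (int, error) {
	var v uint32
	err := binary.Read(bytes.NewReader(b), binary.LittleEndian, &v)
	return int(v), err
}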
// ExecErr converts args with the driver's default converter and executes
// the statement. Since this variant returns an error, conversion failures
// are propagated to the caller instead of aborting via slog.Fatalf.
func (e *Executor) ExecErr(sql string, args ...interface{}) (driver.Result, error) {
	exec := e.conn.(driver.Execer)
	dargs := make([]driver.Value, len(args))
	for n, arg := range args {
		var err error
		dargs[n], err = driver.DefaultParameterConverter.ConvertValue(arg)
		if err != nil {
			return nil, fmt.Errorf("sql: converting Exec argument #%d's type: %v", n, err)
		}
	}
	return exec.Exec(sql, dargs)
}
func (src *UnixgramSource) Run() error {
	// The socket has to run from the same goroutine because it is not
	// thread safe; a memory barrier is executed when goroutines are moved
	// between threads.
	// Reference: https://groups.google.com/forum/#!topic/golang-nuts/eABYrBA5LEk
	defer close(src.Out())

	// If the socket exists, rm it.
	syscall.Unlink(src.path)

	socket, err := net.ListenPacket("unixgram", src.path)
	if err != nil {
		slog.Fatalf("Listen: %v", err)
		return err
	}
	defer socket.Close()

	// Allow other processes to write here
	os.Chmod(src.path, 0777)

	count := 0
	sent := 0
	lf := []byte{'\n'}
	for {
		count++
		buf := make([]byte, MAX_READ_SIZE)
		nr, _, err := socket.ReadFrom(buf)
		if err != nil {
			return err
		}

		// Now, tokenize on \n, writing out each part of the slice as a
		// separate message.
		for _, msg := range bytes.Split(buf[:nr], lf) {
			if len(msg) > 0 {
				wi := src.decodeNginxLog(msg)
				sent++
				src.Out() <- msg[:wi]
			}
		}

		select {
		case <-src.StopNotifier:
			slog.Infof("Closing: count %d Sent: %d", count, sent)
			return nil
		default:
		}
	}
}
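// For context, a process on the other side of this source sends datagrams of
// newline-delimited log lines. A minimal sketch of such a writer, assuming a
// hypothetical socket path "/tmp/nginx.sock":
func exampleUnixgramWriter() error {
	conn, err := net.Dial("unixgram", "/tmp/nginx.sock")
	if err != nil {
		return err
	}
	defer conn.Close()
	// Two messages in one datagram; Run() splits them on '\n'.
	_, err = conn.Write([]byte("log line one\nlog line two\n"))
	return err
}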
func (e *Executor) UpsertCubes(p cube.Partition, c []cube.Cuber) {
	tx, err := e.conn.Begin()
	if err != nil {
		slog.Fatalf("Error starting transaction %v", err)
	}
	part := getPartition(p)
	//TODO: have a cache of existing partition tables...dont recreate if not necessary
	e.Exec(e.table.CreatePartitionTableSql(part))
	e.Exec(e.table.CreateTemporaryCopyTableSql(part))
	cy := pq.NewCopierFromConn(e.conn)
	if err = cy.Start(e.table.CopyTableSql(part)); err != nil {
		slog.Fatalf("Error starting copy %v", err)
	}
	// Note: the loop variable is named cb to avoid shadowing the cube package.
	for _, cb := range c {
		if err = cy.Send(e.table.CopyDataFull(cb)); err != nil {
			slog.Fatalf("Error copying %v", err)
		}
	}
	if err = cy.Close(); err != nil {
		slog.Fatalf("Error ending copy %v", err)
	}
	e.Exec(e.table.MergeCopySql(part))
	if err = tx.Commit(); err != nil {
		slog.Fatalf("Error committing tx %v", err)
	}
}
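// The copy-then-merge pattern above typically expands to SQL along these
// lines. This is illustrative only: the real statements come from the
// e.table methods and depend on the actual schema; table and key names here
// are hypothetical.
//
//	CREATE TABLE IF NOT EXISTS cubes_20060102 () INHERITS (cubes);
//	CREATE TEMP TABLE cubes_copy (LIKE cubes_20060102) ON COMMIT DROP;
//	COPY cubes_copy FROM STDIN;  -- driven by cy.Start / cy.Send / cy.Close
//	-- merge: update rows that already exist, insert the rest
//	UPDATE cubes_20060102 t SET ... FROM cubes_copy c WHERE t.key = c.key;
//	INSERT INTO cubes_20060102
//	    SELECT * FROM cubes_copy c
//	    WHERE NOT EXISTS (SELECT 1 FROM cubes_20060102 t WHERE t.key = c.key);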
func (c *OrderedChain) Add(o Operator) Chain {
	parallel, ok := o.(ParallelizableOperator)
	if ok && parallel.IsParallel() {
		if !parallel.IsOrdered() {
			parallel = parallel.MakeOrdered()
			if !parallel.IsOrdered() {
				slog.Fatalf("%s", "Couldn't make parallel operator ordered")
			}
		}
		c.SimpleChain.Add(parallel)
	} else {
		c.SimpleChain.Add(o)
	}
	return c
}
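// Add only relies on the methods used above, so the interface implied by the
// call sites is roughly the following. This is a sketch reconstructed from
// usage, not necessarily the package's exact definition:
//
//	type ParallelizableOperator interface {
//		Operator
//		IsParallel() bool
//		IsOrdered() bool
//		MakeOrdered() ParallelizableOperator
//	}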
func (src *Client) connect() error {
	defer func() { src.retries++ }()
	conn, err := net.Dial("tcp", src.addr)
	if err != nil {
		slog.Errorf("Cannot establish a connection with %s %v", src.addr, err)
		return err
	}

	wg_sub := &sync.WaitGroup{}
	defer wg_sub.Wait()

	rcvChData := make(chan stream.Object, 10)
	receiver := source.NewIOReaderSourceLengthDelim(conn)
	receiver.SetOut(rcvChData)
	rcvChCloseNotifier := make(chan bool)
	wg_sub.Add(1)
	go func() {
		defer wg_sub.Done()
		defer close(rcvChCloseNotifier)
		if err := receiver.Run(); err != nil {
			slog.Errorf("Error in client receiver: %v", err)
		}
	}()
	// The receiver will be closed by the sender after it is done sending,
	// or closed via a hard stop.

	writeNotifier := stream.NewNonBlockingProcessedNotifier(2)
	sndChData := make(chan stream.Object, src.hwm)
	sndChCloseNotifier := make(chan bool)
	defer close(sndChData)
	sender := sink.NewMultiPartWriterSink(conn)
	sender.CompletedNotifier = writeNotifier
	sender.SetIn(sndChData)
	wg_sub.Add(1)
	go func() {
		defer receiver.Stop() // close receiver
		defer wg_sub.Done()
		defer close(sndChCloseNotifier)
		if err := sender.Run(); err != nil {
			slog.Errorf("Error in client sender: %v", err)
		}
	}()
	// The sender is closed by closing the sndChData channel or by a hard stop.

	// Re-send anything buffered but unacknowledged on the last connection.
	if src.buf.Len() > 0 {
		leftover := src.buf.Reset()
		for i, value := range leftover {
			sendData(sndChData, value, i+1)
		}
	}

	timer := src.resetAckTimer()
	closing := false
	//defer log.Println("Exiting client loop")
	opName := stream.Name(src)
	writesNotCompleted := uint(0)
	for {
		upstreamCh := src.In()
		if !src.buf.CanAdd() || closing {
			// Disable upstream listening
			upstreamCh = nil
		}
		if closing && src.buf.Len() == 0 {
			sendClose(sndChData, 100)
			return nil
		}
		select {
		case msg, ok := <-upstreamCh:
			if !ok {
				// Soft close: make sure everything was sent
				closing = true
			} else {
				bytes := msg.([]byte)
				seq, err := src.buf.Add(bytes)
				if err != nil {
					slog.Fatalf("Error adding item to buffer %v", err)
					return err
				}
				sendData(sndChData, bytes, seq)
				writesNotCompleted += 1
				metrics.Gm.Event(&opName) // These are batched
				//slog.Logf(logger.Levels.Debug, "Sent batch -- length %d seq %d", len(bytes), seq)
			}
		case cnt := <-writeNotifier.NotificationChannel():
			writesNotCompleted -= cnt
			if timer == nil {
				slog.Debugf("Setting timer %v, %v", time.Now(), time.Now().UnixNano())
				timer = src.resetAckTimer()
			}
		case obj, ok := <-rcvChData:
			slog.Debugf("in Rcv: %v", ok)
			if !ok {
				return errors.New("connection to server was broken in receive direction")
			}
			command, seq, _, err := parseMsg(obj.([]byte))
			if err != nil {
				slog.Fatalf("%v", err)
			}
			if command == ACK {
				if src.processAck(seq) {
					timer = src.resetAckTimer()
				}
			} else {
				slog.Fatalf("Unknown Command: %v", command)
			}
		case <-rcvChCloseNotifier:
			// The connection threw an EOF to the reader?
			return errors.New("in select: receive closed")
		case <-sndChCloseNotifier:
			return errors.New("connection to server was broken in send direction")
		case <-timer:
			return fmt.Errorf("timeout waiting for ack, %d %v %v", len(rcvChData), time.Now(), time.Now().UnixNano())
		case <-src.StopNotifier:
			sender.Stop()
			return nil
		}
	}
}
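// resetAckTimer is not shown in this section. Given how connect uses it
// (assigned to a <-chan time.Time, nil when disarmed, re-armed after acks),
// a plausible sketch is a time.After wrapper over a configured ack timeout;
// the real method may track deadlines or retries differently. ackTimeout is
// hypothetical.
//
//	func (src *Client) resetAckTimer() <-chan time.Time {
//		if src.buf.Len() == 0 {
//			return nil // nothing outstanding, no timeout armed
//		}
//		return time.After(ackTimeout)
//	}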
func (op *BatcherOperator) Run() error {
	defer close(op.Out())

	// batchExpired puts a lower bound on how often flushes occur.
	var batchExpired <-chan time.Time

	// INVARIANT: if container.HasItems() then it will be flushed eventually.
	//
	// We create a state machine over 3 boolean states:
	//	hi  = container.HasItems()
	//	wcb = op.DownstreamWillCallback()
	//	bne = (batchExpired != nil) (batch not expired)
	//
	// Invariants required:
	//	LIMITED_DRCB: repeated DRCB calls will eventually cause wcb == false
	//	!wcb can only become wcb after a FLUSH
	//	CAN_FLUSH_OR_WAIT: either DownstreamWillCallback or DownstreamCanAcceptFlush is true
	//
	// Analysis of the cases where hi == true:
	//	wcb && bne =>
	//		Case IN   => wcb && bne [Case BE or DRCB will eventually happen]
	//		Case BE   => PROGRESS 1 || wcb && !bne
	//		Case DRCB => wcb && bne [can't recurse indefinitely by LIMITED_DRCB] || !wcb && bne
	//	wcb && !bne =>
	//		Case IN   => wcb && !bne [case DRCB will eventually happen]
	//		Case BE   => impossible
	//		Case DRCB =>
	//			DownstreamCanAcceptFlush => PROGRESS 2
	//			else: wcb && bne || wcb && !bne [can't recurse indefinitely by LIMITED_DRCB] || !wcb && bne
	//	!wcb && bne =>
	//		Case IN   => !wcb && bne [case BE will eventually happen]
	//		Case BE   =>
	//			!DownstreamCanAcceptFlush => impossible [CAN_FLUSH_OR_WAIT]
	//			else => PROGRESS 2
	//		Case DRCB => impossible (!wcb)
	//	!wcb && !bne => impossible (all cases disallow this)
	//
	// Liveness: has items => either batchExpired != nil or DownstreamWillCallback.
	for {
		in := op.In()
		if op.container.IsFull() {
			if op.DownstreamCanAcceptFlush() {
				// PROGRESS 1
				op.Flush()
				batchExpired = time.After(op.minWaitBetweenFlushes)
			} else {
				if !op.DownstreamWillCallback() && batchExpired == nil {
					panic("Batcher deadlocked. Should not happen")
				}
				in = nil
			}
		}
		select {
		// Case IN
		case obj, ok := <-in:
			if ok {
				op.container.Add(obj)
				if !op.DownstreamWillCallback() && op.container.HasItems() && batchExpired == nil {
					// Used by the first item
					batchExpired = time.After(op.minWaitAfterFirstItem)
				}
				// IMPOSSIBLE: hi && !wcb && !bne
			} else {
				if op.container.HasItems() {
					op.LastFlush()
				}
				if op.container.HasItems() {
					slog.Fatalf("Last flush did not empty container, some stuff will never be sent")
				}
				slog.Debugf("Batch Operator %s flushed %d", op.name, op.total_flushes)
				return nil
			}
		// Case BE
		case <-batchExpired:
			batchExpired = nil
			if op.DownstreamCanAcceptFlush() {
				// PROGRESS 1
				op.Flush()
				batchExpired = time.After(op.minWaitBetweenFlushes)
			}
			if !op.DownstreamWillCallback() && op.container.HasItems() && batchExpired == nil {
				batchExpired = time.After(op.minWaitForLeftover)
			}
			// IMPOSSIBLE: hi && !wcb && !bne
		case <-op.StopNotifier:
			// INVARIANT and PROGRESS violated. Hard stop.
			return nil
		// Case DRCB
		case count := <-op.processedDownstream.NotificationChannel():
			if op.outstanding == 0 {
				panic("Should never happen, will cause underflow")
			}
			op.outstanding -= count
			if op.DownstreamCanAcceptFlush() && op.container.HasItems() && batchExpired == nil {
				op.Flush()
				batchExpired = time.After(op.minWaitBetweenFlushes)
			}
			if !op.DownstreamWillCallback() && op.container.HasItems() && batchExpired == nil {
				batchExpired = time.After(op.minWaitForLeftover)
			}
			// IMPOSSIBLE: hi && !wcb && !bne
		}
	}
}
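// DownstreamWillCallback and DownstreamCanAcceptFlush are not shown in this
// section. From the bookkeeping above (DRCB notifications decrement
// op.outstanding, which Flush presumably increments), plausible sketches are
// below; the real predicates may bound outstanding flushes differently, and
// maxOutstanding is hypothetical.
//
//	func (op *BatcherOperator) DownstreamWillCallback() bool {
//		return op.outstanding > 0 // a pending flush will notify us
//	}
//
//	func (op *BatcherOperator) DownstreamCanAcceptFlush() bool {
//		return op.outstanding < op.maxOutstanding
//	}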