Example 1
func (src *Client) Run() error {
	src.running = true
	defer func() {
		src.running = false
	}()

	slog.Gm.Register(stream.Name(src))
	go func(op string, s *Client) { // Update the queue depth on input for each phase; runs for the life of the process
		for {
			slog.Gm.Update(&op, s.GetInDepth())
			time.Sleep(1 * time.Second)
		}
	}(stream.Name(src), src)

	for src.retries < RETRY_MAX {
		err := src.connect()
		if err == nil {
			slog.Logf(logger.Levels.Warn, "Connection ended without an error")
			return nil
		}
		slog.Logf(logger.Levels.Error, "Connection failed with error, retrying: %s", err)
		time.Sleep(1 * time.Second)
	}
	slog.Logf(logger.Levels.Error, "Connection failed retries exceeded. Leftover: %d", src.buf.Len())
	return nil // TODO: retries were exhausted; arguably this should return an error
}
Example 2
func (s *WeightedEra) NormalizeAndPopulateMap() {
	total := float32(0)
	scalar := float32(1)
	for _, n := range s.nodes {
		wn := n.(*WeightedNode)
		total += float32(wn.weight)
	}

	if total == 0 {
		slog.Logf(logger.Levels.Error, "Total Node Weight 0")
		return
	}

	slog.Logf(logger.Levels.Debug, "Total Node Weight %f", total)
	if total < MAX_WEIGHT {
		// Scale weights up
		scalar = MAX_WEIGHT / total
		total = MAX_WEIGHT
	}

	lastPosit := 0
	for _, n := range s.nodes {
		wn := n.(*WeightedNode)
		wn.weight = uint32(((float32(wn.weight) * scalar) / total) * MAX_WEIGHT)
		slog.Logf(logger.Levels.Debug, "New Weight %d", wn.weight)
		// Assign this node the next wn.weight consecutive slots of the map.
		end := lastPosit + int(wn.weight)
		for i := lastPosit; i < end && i < MAX_WEIGHT; i++ {
			s.nodeMap[i] = wn
			lastPosit++
		}
	}

}
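The populated nodeMap supports weighted random selection: a uniform draw below MAX_WEIGHT lands on a node with probability proportional to its normalized weight. A minimal sketch under that assumption; Pick is our own name, not part of the source:

// Hypothetical helper (not in the original source); uses "math/rand".
// Slots left unassigned by integer truncation yield a nil entry, so
// callers should be prepared to retry or fall back.
func (s *WeightedEra) Pick() *WeightedNode {
	return s.nodeMap[rand.Intn(MAX_WEIGHT)]
}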
Example 3
func (src *NextReaderSource) Run() error {
	//This operator always stops the read nexter before exiting,
	//but the Stop can't be deferred here: after a hard stop, readnexter.Stop() has already been called.

	defer close(src.Out())
	var count uint32
	slog.Logf(logger.Levels.Debug, "Reading up to %d tuples", src.MaxItems)
	for {
		b, eofReached, err := src.readnexter.ReadNext()
		//if I've been stopped, exit no matter what I've read
		select {
		case <-src.StopNotifier:
			//In this case readNexter was stopped
			return nil
		default:
		}
		if err != nil {
			slog.Logf(logger.Levels.Error, "Reader encountered error %v", err)
			src.readnexter.Stop()
			return err
		} else if len(b) > 0 {
			count++
			src.Out() <- b
		}
		if eofReached || (count >= src.MaxItems) {
			slog.Logf(logger.Levels.Debug, "Got eof in Next Reader Source %d, %d", count, src.MaxItems)
			src.readnexter.Stop()
			return nil
		}
	}

}
Example 4
func (c *DynamicBBManager) keepErasCurrent() {
	for {
		time.Sleep(60 * time.Second)
		slog.Logf(logger.Levels.Debug, "Updating to new era")
		err := c.pullLatestEra()
		if err != nil {
			slog.Logf(logger.Levels.Error, "Cannot get a valid era %v", err)
		}
	}
}
Example 5
func (e *Executor) Exec(sql string, args ...interface{}) driver.Result {
	res, err := e.ExecErr(sql, args...)
	if err != nil {
		slog.Logf(logger.Levels.Error, "Sql: %v Err: %v", sql, err)
	}
	return res
}
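Exec funnels errors into the log so call sites stay short. A usage sketch; the executor value and the SQL are illustrative, and res may be nil if the statement failed:

// Hypothetical call site: any error was already logged inside Exec.
res := executor.Exec("UPDATE counters SET n = n + 1 WHERE id = ?", 42)
if res != nil {
	if rows, err := res.RowsAffected(); err == nil {
		slog.Logf(logger.Levels.Debug, "Updated %d rows", rows)
	}
}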
Example 6
func (op *FanoutOperator) Run() error {
	defer op.runner.WaitGroup().Wait()
	op.runner.AsyncRunAll()

	defer func() {
		for _, out := range op.outputs {
			close(out)
		}
	}()

	for {
		select {
		case obj, ok := <-op.In():
			if ok {
				for _, out := range op.outputs {
					out <- obj
				}
			} else {
				return nil
			}
		case <-op.StopNotifier:
			op.runner.HardStop()
			return nil
		case <-op.runner.CloseNotifier():
			slog.Logf(logger.Levels.Error, "Unexpected child close in fanout op")
			op.runner.HardStop()
			return errors.New("Unexpected child close")
		}
	}
}
Example 7
func (w *CallbackWorker) Validate(inCh chan stream.Object, typeName string) bool {

	calltype := w.callback.Type()

	slog.Logf(logger.Levels.Info, "Checking %s", typeName)

	//TODO: forbid struct results pass pointers to structs instead

	if calltype.Kind() != reflect.Func {
		slog.Fatalf("%s: `Processor` should be %s but got %s", typeName, reflect.Func, calltype.Kind())
	}
	if calltype.NumIn() != 1 {
		slog.Fatalf("%s: `Processor` should have 1 parameter but it has %d parameters", typeName, calltype.NumIn())
	}
	/*if !intype.AssignableTo(calltype.In(0)) {
		log.Panicf("%s: `Processor` should have a parameter or type %s but is %s", typeName, calltype.In(0), intype)
	}*/
	if calltype.NumOut() != 1 {
		slog.Fatalf("%s `Processor` should return 1 value but it returns %d values", typeName, calltype.NumOut())
	}
	if calltype.Out(0).Kind() != reflect.Slice {
		slog.Fatalf("%s `Processor` should return a slice but return %s", typeName, calltype.Out(0).Kind())
	}
	/*if calltype.Out(0).Elem() != outtype {
		log.Panicf("%s `Processor` should return a slice of %s but is %s", typeName, outtype, calltype.Out(0).Elem())
	}*/
	return true
}
Example 8
func (sink MultiPartWriterSink) Run() error {
	defer func() {
		closer, ok := sink.writer.(io.Closer)
		if ok {
			closer.Close()
		}
	}()

	for {
		select {
		case msg, ok := <-sink.In():
			if ok {
				if err := sink.writeValue(msg.([][]byte), sink.writer); err != nil {
					slog.Logf(logger.Levels.Error, "Writer got error %v", err)
					return err
				}
				if sink.CompletedNotifier != nil {
					sink.CompletedNotifier.Notify(1)
				}
			} else {
				return nil
			}
		case <-sink.StopNotifier:
			return nil
		}
	}
}
Example 9
/* A stop is a hard stop as per the Operator interface */
func (c *SimpleChain) Stop() error {
	if !c.sentstop {
		c.sentstop = true
		slog.Logf(logger.Levels.Warn, "In hard close")
		c.runner.HardStop()
	}
	return nil
}
Example 10
func (c *SimpleChain) Wait() error {
	slog.Logf(logger.Levels.Info, "Waiting for closenotify %s", c.Name)
	<-c.runner.CloseNotifier()
	select {
	case err := <-c.runner.ErrorChannel():
		slog.Logf(logger.Levels.Warn, "Hard Close in SimpleChain %s %v", c.Name, err)
		c.Stop()
	default:
		slog.Logf(logger.Levels.Info, "Soft Close in SimpleChain %s", c.Name)
		c.SoftStop()
	}
	slog.Logf(logger.Levels.Info, "Waiting for wg")
	c.runner.WaitGroup().Wait()
	slog.Logf(logger.Levels.Info, "Exiting SimpleChain")

	return nil
}
Example 11
func (c *SimpleChain) SoftStop() error {
	if !c.sentstop {
		c.sentstop = true
		slog.Logf(logger.Levels.Warn, "In soft close")
		ops := c.runner.Operators()
		ops[0].Stop()
	}
	return nil
}
Example 12
func JsonGeneralDecoder() func([]byte, interface{}) {
	fn := func(input []byte, to_populate interface{}) {
		err := json.Unmarshal(input, to_populate)
		if err != nil {
			slog.Logf(logger.Levels.Error, "Error unmarshaling json: %v %v\n", err.Error(), string(input))
		}
	}
	return fn
}
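A usage sketch for the returned closure; the payload and target struct are illustrative:

// Hypothetical usage: decode a JSON payload into a caller-owned struct.
decode := JsonGeneralDecoder()
var msg struct {
	Name string `json:"name"`
}
decode([]byte(`{"name":"example"}`), &msg)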
Example 13
func (c *SimpleChain) Add(o Operator) Chain {
	ops := c.runner.Operators()
	if len(ops) > 0 {
		slog.Logf(logger.Levels.Info, "Setting input channel of %s", Name(o))
		last := ops[len(ops)-1]
		lastOutCh := last.(Out).Out()
		o.(In).SetIn(lastOutCh)
	}

	out, ok := o.(Out)
	if ok {
		slog.Logf(logger.Levels.Info, "Setting output channel of %s", Name(o))
		ch := make(chan Object, CHAN_SLACK)
		out.SetOut(ch)
	}

	c.runner.Add(o)
	return c
}
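Since Add wires each new operator's input to the previous operator's output and returns the chain, a pipeline can be built by chaining calls. A sketch; NewSimpleChain and the three operators are stand-ins, not names confirmed by the source:

// Hypothetical pipeline wiring: source -> transform -> sink.
chain := NewSimpleChain()
chain.Add(mySource).Add(myTransform).Add(mySink)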
Example 14
func (c *DynamicBBManager) pullLatestEra() error {
	for _, url := range c.BBHosts {
		resp, err := http.Get(url)
		if err != nil {
			slog.Logf(logger.Levels.Error, "Network GET Error %v", err)
			continue
		}
		bbbody, err := ioutil.ReadAll(resp.Body)
		resp.Body.Close() // don't leak the connection
		if err != nil {
			slog.Logf(logger.Levels.Error, "Read Error %v", err)
			continue
		}
		// Try parsing this.
		bbr := BBResult{}
		if err := json.Unmarshal(bbbody, &bbr); err != nil {
			slog.Logf(logger.Levels.Error, "Unmarshal Error %v", err)
			continue
		}
		ctime := time.Now()

		we := NewWeightedEra()
		for _, node := range bbr.Nodes {
			n := NewWeightedNode(node.Name, node.Ip, strconv.Itoa(node.Port), node.Disk_free, node.Load)
			slog.Logf(logger.Levels.Debug, "Transport LOG INFO %v", n)
			we.Add(n)
		}

		// Once all the nodes are in for this era, re-weight the era
		we.NormalizeAndPopulateMap()
		c.Eras[ctime] = we
		c.CurrentTime = ctime
		c.ErasAdded = append(c.ErasAdded, ctime)

		// And remove any super old eras, keeping the map and slice in sync
		if len(c.ErasAdded) > MAX_ERAS_SAVED {
			delete(c.Eras, c.ErasAdded[0])
			c.ErasAdded = c.ErasAdded[1:]
		}

		// Once we have hit one BB server with no error, no need to try any others.
		return nil
	}
	return errors.New("no BB host returned a valid era")
}
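For readability, this is the shape BBResult would need for the field accesses above to compile. It is inferred, not taken from the source; in particular the types of Disk_free and Load are guesses:

// Hypothetical shape of BBResult, inferred from usage in pullLatestEra.
type BBResult struct {
	Nodes []struct {
		Name      string
		Ip        string
		Port      int
		Disk_free uint64  // assumed type
		Load      float64 // assumed type
	}
}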
Example 15
func (src *UnixgramSource) Run() error {
	//the socket has to be used from a single goroutine because it is not thread safe
	//(a memory barrier is executed when goroutines are moved between threads)
	//reference: https://groups.google.com/forum/#!topic/golang-nuts/eABYrBA5LEk
	defer close(src.Out())

	// If the socket exists, rm it.
	syscall.Unlink(src.path)

	socket, err := net.ListenPacket("unixgram", src.path)
	if err != nil {
		slog.Fatalf("Listen: %v", err)
		return err
	}

	defer socket.Close()

	// Allow other processes to write here
	os.Chmod(src.path, 0777)

	count := 0
	sent := 0
	lf := []byte{'\n'}

	for {
		count++

		buf := make([]byte, MAX_READ_SIZE)
		nr, _, err := socket.ReadFrom(buf)
		if err != nil {
			return err
		}

		// Now, tokenize on \n, writing out each part of the slice as
		// a separate message
		for _, msg := range bytes.Split(buf[:nr], lf) {
			if len(msg) > 0 {
				wi := src.decodeNginxLog(msg)
				sent++
				src.Out() <- msg[:wi]
			}
		}

		select {
		case <-src.StopNotifier:
			slog.Logf(logger.Levels.Info, "Closing: count ", count, "Sent:", sent)
			return nil
		default:
		}
	}
}
Example 16
func NewJsonEncodeRop() stream.Operator {
	generator := func() interface{} {
		fn := func(in interface{}) [][]byte {
			out, err := json.Marshal(in)
			if err != nil {
				slog.Logf(logger.Levels.Error, "Error marshaling json %v\t%+v", err, in)
			}
			return [][]byte{out}
		}
		return fn
	}

	return mapper.NewOpFactory(generator, "NewJsonEncodeRop")
}
Example 17
func (p lengthDelimMultiPartValueWriter) writeValue(msgs [][]byte, writer io.Writer) error {
	total := 0
	for _, msg := range msgs {
		total += len(msg)
	}

	err := binary.Write(writer, binary.LittleEndian, uint32(total))
	if err != nil {
		return err
	}
	for _, msg := range msgs {
		_, err = writer.Write(msg)
		if err != nil {
			return err
		}
	}
	slog.Logf(logger.Levels.Debug, "Write Returned %v, %v", time.Now(), time.Now().UnixNano())
	return nil
}
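The matching read side would decode the same framing: a little-endian uint32 total length followed by the concatenated parts. A minimal sketch under that assumption; readValue is our own name, not part of the source, and uses "encoding/binary" and "io". Note the part boundaries are not encoded, so the reader recovers one combined payload:

// Hypothetical reader for the framing produced by writeValue above.
func readValue(reader io.Reader) ([]byte, error) {
	var total uint32
	if err := binary.Read(reader, binary.LittleEndian, &total); err != nil {
		return nil, err
	}
	payload := make([]byte, total)
	if _, err := io.ReadFull(reader, payload); err != nil {
		return nil, err
	}
	return payload, nil
}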
Example 18
func (r *Runner) AsyncRun(op Operator) {
	r.wg.Add(1)
	go func() {
		defer r.wg.Done()
		err := op.Run()
		if err != nil {
			slog.Logf(logger.Levels.Error, "Got an err from a child in runner: %v", err)
			select {
			case r.errors <- err:
			default:
			}
		}
		//on first exit, the cn channel is closed
		select {
		case <-r.closenotifier: //if already closed no-op
		default:
			close(r.closenotifier)
		}
	}()
}
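One caveat in AsyncRun: if two children exit at the same instant, both goroutines can reach the default branch and close closenotifier twice, which panics. A common guard is sync.Once; a sketch, with closeOnce being our own type rather than part of the source:

// Hypothetical double-close guard; uses "sync".
type closeOnce struct {
	once sync.Once
	ch   chan struct{}
}

// Close may be called concurrently from any number of goroutines.
func (c *closeOnce) Close() { c.once.Do(func() { close(c.ch) }) }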
Example 19
func (op *DistributeOperator) Run() error {
	defer op.runner.WaitGroup().Wait()
	defer func() {
		for _, out := range op.outputs {
			close(out)
		}
	}()

	for {
		select {
		case obj, ok := <-op.In():
			if ok {
				key := op.mapper(obj)
				ch, ok := op.outputs[key]
				if !ok {
					op.createBranch(key)
					ch, ok = op.outputs[key]
					if !ok {
						slog.Fatalf("couldn't find channel right after key create")
					}

				}
				ch <- obj
			} else {
				return nil
			}
		case <-op.StopNotifier:
			op.runner.HardStop()
			return nil
		case <-op.runner.CloseNotifier():
			slog.Logf(logger.Levels.Error, "Unexpected child close in distribute op")
			op.runner.HardStop()
			return errors.New("Unexpected distribute child close")
		}
	}
}
Example 20
func (src *Client) connect() error {
	defer func() {
		src.retries++
	}()

	conn, err := net.Dial("tcp", src.addr)
	if err != nil {
		slog.Logf(logger.Levels.Error, "Cannot establish a connection with %s %v", src.addr, err)
		return err
	}

	wg_sub := &sync.WaitGroup{}
	defer wg_sub.Wait()

	rcvChData := make(chan stream.Object, 10)
	receiver := source.NewIOReaderSourceLengthDelim(conn)
	receiver.SetOut(rcvChData)
	rcvChCloseNotifier := make(chan bool)
	wg_sub.Add(1)
	go func() {
		defer wg_sub.Done()
		defer close(rcvChCloseNotifier)
		err := receiver.Run()
		if err != nil {
			slog.Logf(logger.Levels.Error, "Error in client reciever: %v", err)
		}
	}()
	//the receiver is closed by the sender once the sender is done sending (via a hard stop)

	writeNotifier := stream.NewNonBlockingProcessedNotifier(2)
	sndChData := make(chan stream.Object, src.hwm)
	sndChCloseNotifier := make(chan bool)
	defer close(sndChData)
	sender := sink.NewMultiPartWriterSink(conn)
	sender.CompletedNotifier = writeNotifier
	sender.SetIn(sndChData)
	wg_sub.Add(1)
	go func() {
		defer receiver.Stop() //close receiver
		defer wg_sub.Done()
		defer close(sndChCloseNotifier)
		err := sender.Run()
		if err != nil {
			slog.Logf(logger.Levels.Error, "Error in client sender: %v", err)
		}
	}()
	//sender closed by closing the sndChData channel or by a hard stop

	if src.buf.Len() > 0 {
		leftover := src.buf.Reset()
		for i, value := range leftover {
			sendData(sndChData, value, i+1)
		}
	}

	timer := src.resetAckTimer()

	closing := false

	//defer log.Println("Exiting client loop")
	opName := stream.Name(src)
	writesNotCompleted := uint(0)
	for {
		upstreamCh := src.In()
		if !src.buf.CanAdd() || closing {
			//disable upstream listening
			upstreamCh = nil
		}
		if closing && src.buf.Len() == 0 {
			sendClose(sndChData, 100)
			return nil
		}
		select {
		case msg, ok := <-upstreamCh:
			if !ok {
				//softClose
				//make sure everything was sent
				closing = true
			} else {
				bytes := msg.([]byte)
				seq, err := src.buf.Add(bytes)
				if err != nil {
					slog.Fatalf("Error adding item to buffer %v", err)
					return err
				}
				sendData(sndChData, bytes, seq)
				writesNotCompleted += 1
				slog.Gm.Event(&opName) // These are batched
				//slog.Logf(logger.Levels.Debug, "Sent batch -- length %d seq %d", len(bytes), seq)
			}
		case cnt := <-writeNotifier.NotificationChannel():
			writesNotCompleted -= cnt
			if timer == nil {
				slog.Logf(logger.Levels.Debug, "Seting timer %v, %v", time.Now(), time.Now().UnixNano())
				timer = src.resetAckTimer()
			}
		case obj, ok := <-rcvChData:
			slog.Logf(logger.Levels.Debug, "in Rcv: %v", ok)
			if !ok {
				return errors.New("Connection to Server was Broken in Recieve Direction")
			}

			command, seq, _, err := parseMsg(obj.([]byte))
			if err != nil {
				slog.Fatalf("%v", err)
			}
			if command == ACK {
				if src.processAck(seq) {
					timer = src.resetAckTimer()
				}
			} else {
				slog.Fatalf("Unknown Command: %v", command)
			}
		case <-rcvChCloseNotifier:
			//connection threw an eof to the reader?
			return errors.New("In Select: Recieve Closed")
		case <-sndChCloseNotifier:
			return errors.New("Connection to Server was Broken in Send Direction")
		case <-timer:
			return fmt.Errorf("Time Out Waiting For Ack, %d %v %v", len(rcvChData), time.Now(), time.Now().UnixNano())
		case <-src.StopNotifier:
			sender.Stop()
			return nil
		}
	}
}
Example 21
func (src Server) handleConnection(conn net.Conn) {
	wg_sub := &sync.WaitGroup{}
	defer wg_sub.Wait()

	opName := stream.Name(src)
	sndChData := make(chan stream.Object, 100)
	sndChCloseNotifier := make(chan bool, 1)
	defer close(sndChData)
	//side effect: this will close conn on exit
	sender := sink.NewMultiPartWriterSink(conn)
	sender.SetIn(sndChData)
	wg_sub.Add(1)
	go func() {
		defer wg_sub.Done()
		defer close(sndChCloseNotifier)
		err := sender.Run()
		if err != nil {
			slog.Logf(logger.Levels.Error, "Error in server sender %v", err)
		}
	}()
	defer sender.Stop()

	//this will actually close conn too
	rcvChData := make(chan stream.Object, 100)
	receiver := source.NewIOReaderSourceLengthDelim(conn)
	receiver.SetOut(rcvChData)
	rcvChCloseNotifier := make(chan bool, 1)
	wg_sub.Add(1)
	go func() {
		defer wg_sub.Done()
		defer close(rcvChCloseNotifier)
		err := receiver.Run()
		if err != nil {
			slog.Logf(logger.Levels.Error, "Error in server reciever %v", err)
		}
	}()
	defer receiver.Stop()

	lastGotAck := 0
	lastSentAck := 0
	var timer <-chan time.Time
	for {
		select {
		case obj, ok := <-rcvChData:

			if !ok {
				//send last ack back??
				slog.Logf(logger.Levels.Error, "Receive Channel Closed Without Close Message")
				return
			}
			command, seq, payload, err := parseMsg(obj.([]byte))
			slog.Gm.Event(&opName)

			if err == nil {
				if command == DATA {
					lastGotAck = seq
					if (lastGotAck - lastSentAck) > src.hwm/2 {
						sendAck(sndChData, lastGotAck)
						lastSentAck = lastGotAck
						timer = nil
					} else if timer == nil {
						slog.Logf(logger.Levels.Debug, "Setting timer %v", time.Now())
						timer = time.After(100 * time.Millisecond)
					}
					src.Out() <- payload
				} else if command == CLOSE {
					if lastGotAck > lastSentAck {
						sendAck(sndChData, lastGotAck)
					}
					slog.Logf(logger.Levels.Info, "%s", "Server got close")
					return
				} else {
					slog.Fatalf("%v", "Server Got Unknown Command")
				}
			} else {
				slog.Fatalf("Server could not parse packet: %v", err)
			}
		case <-rcvChCloseNotifier:
			if len(rcvChData) > 0 {
				continue //drain channel before exiting
			}
			slog.Logf(logger.Levels.Error, "Client asked for a close on recieve- should not happen, timer is nil = %v, %v", (timer == nil), time.Now())
			return
		case <-sndChCloseNotifier:
			slog.Logf(logger.Levels.Error, "%v", "Server asked for a close on send - should not happen")
			return
		case <-timer:
			sendAck(sndChData, lastGotAck)
			lastSentAck = lastGotAck
			timer = nil
		case <-src.StopNotifier:
			return
		}

	}
}
Example 22
func (w *EfficientWorker) Validate(inCh chan stream.Object, typeName string) bool {
	slog.Logf(logger.Levels.Info, "Checking %s", typeName)
	return true
}
Example 23
func (src Server) Run() error {
	defer close(src.Out())

	ln, err := net.Listen("tcp", src.addr)
	if err != nil {
		slog.Logf(logger.Levels.Error, "Error listening %v", err)
		return err
	}

	wg_sub := &sync.WaitGroup{}
	defer wg_sub.Wait()

	//If soft close is enabled, server will exit after last connection exits.
	scl := make(chan bool)
	wg_scl := &sync.WaitGroup{}
	first_connection := true

	wg_sub.Add(1)
	go func() {
		defer wg_sub.Done()
		hardCloseListener(src.StopNotifier, scl, ln)
	}()

	slog.Gm.Register(stream.Name(src))
	for {
		conn, err := ln.Accept()
		if err != nil {
			hardClose := false
			softClose := false
			select {
			case _, ok := <-src.StopNotifier:
				if !ok {
					hardClose = true
				}
			case _, ok := <-scl:
				if !ok {
					softClose = true
				}
			default:
			}
			if !hardClose && !softClose {
				slog.Logf(logger.Levels.Error, "Accept Error %v", err)
			}
			return nil
		}
		wg_sub.Add(1)
		wg_scl.Add(1)
		if first_connection {
			first_connection = false
			//close scl after all connections exit (need to make sure wg_scl > 1 before launching. Launched once)
			if src.EnableSoftClose {
				wg_sub.Add(1)
				go func() {
					defer wg_sub.Done()
					softCloserRunner(scl, wg_scl)
				}()
			}
		}
		go func() {
			defer wg_sub.Done()
			defer wg_scl.Done()
			defer conn.Close() //handleConnection already closes conn via its reader and writer, but closing again here is good practice
			src.handleConnection(conn)
		}()
	}

}
Example 24
func sendAck(sndCh chan<- stream.Object, seq int) {
	slog.Logf(logger.Levels.Debug, "Sending back ack %d", seq)
	sendMsg(sndCh, ACK, seq, []byte{})
}
Example 25
func sendClose(sndCh chan<- stream.Object, seq int) {
	slog.Logf(logger.Levels.Debug, "Sending Close %d", seq)
	sendMsg(sndCh, CLOSE, seq, []byte{})
}
Example 26
func (op *BatcherOperator) Run() error {
	defer close(op.Out())

	/* batchExpired puts a lower bound on how often flushes occur */
	var batchExpired <-chan time.Time

	//INVARIANT: if container.HasItems() then it will be flushed eventually
	//We create a state machine with 3 boolean states, state hi = container.HasItems(), wcb = op.DownstreamWillCallback(), bne = (batchExpired != nil) (batch not expired)
	//invariants required:
	//     INVARIANT LIMITED_DRCB => repeated DRCB calls will eventually cause wcb == false
	//     INVARIANT !wcb can only become wcb after a FLUSH
	//	   INVARIANT CAN FLUSH OR WAIT => either DownstreamWillCallback or DownstreamCanAcceptFlush is true
	//let's analyse the cases where hi == true:

	// wcb  && bne =>
	// Case IN => wcb && bne [Case BE or DRCB will eventually happen]
	// Case BE => PROGRESS 1 || wcb && !bne
	// Case DRCB => wcb && bne [can't recurse indefinitely by LIMITED_DRCB] || !wcb && bne

	// wcb && !bne =>
	// Case IN => wcb && !bne [case DRCB will eventually happen]
	// Case BE => impossible
	// Case DRCB =>
	//		DownstreamCanAcceptFlush => PROGRESS 2
	//		else: wcb && bne || wcb && !bne [can't recurse indef by LIMITED_DRCB] || !wcb && bne

	//!wcb && bne
	// case IN => !wcb && bne [case BE will eventually happen]
	// case BE =>
	//		!DownstreamCanAcceptFlush => impossible [INVARIANT CANFLUSH_OR_WAIT]
	//		else => PROGRESS 2
	//case DRCB => impossible (!wcb)

	//!wcb && !bne => impossible (all cases disallow this)

	//liveness: has items => either batch_expired != nil or DownstreamWillCallback
	for {
		select {
		//case IN
		case obj, ok := <-op.In():
			if ok {
				op.container.Add(obj)
				if !op.DownstreamWillCallback() && op.container.HasItems() && batchExpired == nil { //used by first item
					batchExpired = time.After(op.minWaitAfterFirstItem)
				}
				//IMPOSSIBLE: hi && !wcb && !bne
			} else {
				if op.container.HasItems() {
					op.LastFlush()
				}
				if op.container.HasItems() {
					slog.Fatalf("Last flush did not empty container, some stuff will never be sent")
				}
				slog.Logf(logger.Levels.Debug, "Batch Operator ", op.name, " flushed ", op.total_flushes)
				return nil
			}
		//case BE
		case <-batchExpired:
			batchExpired = nil
			if op.DownstreamCanAcceptFlush() {
				//PROGRESS 1
				op.Flush()
				batchExpired = time.After(op.minWaitBetweenFlushes)
			}
			if !op.DownstreamWillCallback() && op.container.HasItems() && batchExpired == nil {
				batchExpired = time.After(op.minWaitForLeftover)
			}
			//impossible: hi && !wcb && !bne
		case <-op.StopNotifier:
			//INVARIANT and PROGRESS Violated. Hard Stop
			return nil
		//case DRCB
		case count := <-op.processedDownstream.NotificationChannel():
			op.outstanding -= count
			if op.DownstreamCanAcceptFlush() && op.container.HasItems() && batchExpired == nil {
				op.Flush()
				batchExpired = time.After(op.minWaitBetweenFlushes)
			}
			if !op.DownstreamWillCallback() && op.container.HasItems() && batchExpired == nil {
				batchExpired = time.After(op.minWaitForLeftover)
			}
			//impossible: hi && !wcb && !bne
		}
	}
}