Example #1
func (w *CallbackWorker) Validate(inCh chan stream.Object, typeName string) bool {

	calltype := w.callback.Type()

	slog.Logf(logger.Levels.Info, "Checking %s", typeName)

	//TODO: forbid struct results; pass pointers to structs instead

	if calltype.Kind() != reflect.Func {
		slog.Fatalf("%s: `Processor` should be %s but got %s", typeName, reflect.Func, calltype.Kind())
	}
	if calltype.NumIn() != 1 {
		slog.Fatalf("%s: `Processor` should have 1 parameter but it has %d parameters", typeName, calltype.NumIn())
	}
	/*if !intype.AssignableTo(calltype.In(0)) {
		log.Panicf("%s: `Processor` should have a parameter of type %s but is %s", typeName, calltype.In(0), intype)
	}*/
	if calltype.NumOut() != 1 {
		slog.Fatalf("%s: `Processor` should return 1 value but it returns %d values", typeName, calltype.NumOut())
	}
	if calltype.Out(0).Kind() != reflect.Slice {
		slog.Fatalf("%s: `Processor` should return a slice but returns %s", typeName, calltype.Out(0).Kind())
	}
	/*if calltype.Out(0).Elem() != outtype {
		log.Panicf("%s `Processor` should return a slice of %s but is %s", typeName, outtype, calltype.Out(0).Elem())
	}*/
	return true
}
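For reference, a callback with the shape below would pass the checks above: exactly one parameter and a single slice return value. The type and function names are illustrative and not taken from the source.

// Hypothetical callback that satisfies Validate: one input parameter,
// one slice return value (pointer element types, per the TODO above).
type inputEvent struct{ Raw []byte }
type outputEvent struct{ Parsed string }

func processEvents(in *inputEvent) []*outputEvent {
	// transform one input into zero or more outputs
	return []*outputEvent{{Parsed: string(in.Raw)}}
}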
Example #2
func parseMsg(msg []byte) (command ZmqCommand, seq int, payload []byte, err error) {
	// expected layout: [command][seq][payload], with command and seq as fixed-width ints
	intsz := sizeInt()
	commandi, err := decodeInt(msg[0:intsz])
	if err != nil {
		slog.Fatalf("Could not parse command %v", err)
	}
	command = ZmqCommand(commandi)
	seq, err = decodeInt(msg[intsz:(intsz + intsz)])
	if err != nil {
		slog.Fatalf("Could not parse seq # %v", err)
	}
	payload = msg[2*intsz:]
	return
}
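parseMsg relies on sizeInt and decodeInt, which are not shown in these examples. A minimal sketch, assuming the little-endian uint32 encoding used by encodeInt in Example #6 (and the bytes and encoding/binary packages), could look like this; the real implementations may differ.

// Assumed width of an encoded integer (uint32).
func sizeInt() int { return 4 }

// decodeInt reads a little-endian uint32, mirroring encodeInt.
func decodeInt(b []byte) (int, error) {
	var v uint32
	err := binary.Read(bytes.NewReader(b), binary.LittleEndian, &v)
	return int(v), err
}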
Example #3
func sendMsgNoBlock(sndCh chan<- stream.Object, command ZmqCommand, seq int, payload []byte) {
	select {
	case sndCh <- [][]byte{encodeInt(int(command)), encodeInt(seq), payload}:
	default:
		slog.Fatalf("%v", "Should be non-blocking send")
	}
}
Example #4
func (src *Client) SetNotifier(n stream.ProcessedNotifier) *Client {
	if n.Blocking() {
		slog.Fatalf("Can't use a blocking Notifier")
	}
	src.notifier = n
	return src
}
Example #5
func getPartition(p cube.Partition) Partition {
	switch pt := p.(type) {
	case cube.TimePartition:
		return &TimePartition{&pt}
	default:
		slog.Fatalf("Unknown Partition Type %v", reflect.TypeOf(pt))
	}
	panic("Never Here")
}
Example #6
func encodeInt(val int) []byte {
	if val < 0 {
		panic("Can't encode negative val")
	}
	buf := new(bytes.Buffer)
	err := binary.Write(buf, binary.LittleEndian, uint32(val))
	if err != nil {
		slog.Fatalf("Could not encode binary %v", err)
	}
	return buf.Bytes()
}
Example #7
func NewDynamicBBManager(bbHosts []string) *DynamicBBManager {
	dm := DynamicBBManager{make(map[time.Time]Era), make([]time.Time, 0, 0), bbHosts, time.Now()}
	err := dm.pullLatestEra()
	if err != nil || len(dm.Eras) == 0 {
		slog.Fatalf("Cannot get a valid era %v", err)
	}

	// Keep updating with periodic info
	go dm.keepErasCurrent()
	return &dm
}
Example #8
func (e *Executor) ExecErr(sql string, args ...interface{}) (driver.Result, error) {
	exec := e.conn.(driver.Execer)

	dargs := make([]driver.Value, len(args))
	for n, arg := range args {
		var err error
		dargs[n], err = driver.DefaultParameterConverter.ConvertValue(arg)
		if err != nil {
			slog.Fatalf("sql: converting Exec argument #%d's type: %v", n, err)
		}
	}
	return exec.Exec(sql, dargs)
}
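A hypothetical call site for ExecErr is sketched below; the executor value, SQL statement, and arguments are illustrative and not part of the source.

// Illustrative usage only: e is an *Executor wrapping an open driver.Conn.
res, err := e.ExecErr("INSERT INTO events (name, ts) VALUES ($1, $2)", "login", time.Now())
if err != nil {
	slog.Fatalf("insert failed %v", err)
}
n, _ := res.RowsAffected()
slog.Logf(logger.Levels.Info, "inserted %d row(s)", n)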
Example #9
func (src *UnixgramSource) Run() error {
	//the socket has to run from the same goroutine because it is not thread safe
	//a memory barrier is executed when goroutines are moved between threads
	//reference: https://groups.google.com/forum/#!topic/golang-nuts/eABYrBA5LEk
	defer close(src.Out())

	// If the socket exists, rm it.
	syscall.Unlink(src.path)

	socket, err := net.ListenPacket("unixgram", src.path)
	if err != nil {
		slog.Fatalf("Listen: %v", err)
		return err
	}

	defer socket.Close()

	// Allow other processes to write here
	os.Chmod(src.path, 0777)

	count := 0
	sent := 0
	lf := []byte{'\n'}

	for {
		count++

		buf := make([]byte, MAX_READ_SIZE)
		nr, _, err := socket.ReadFrom(buf)
		if err != nil {
			return err
		}

		// Now, tokenize on \n, writing out each part of the slice as
		// a separate message
		for _, msg := range bytes.Split(buf[:nr], lf) {
			if len(msg) > 0 {
				wi := src.decodeNginxLog(msg)
				sent++
				src.Out() <- msg[:wi]
			}
		}

		select {
		case <-src.StopNotifier:
			slog.Logf(logger.Levels.Info, "Closing: count ", count, "Sent:", sent)
			return nil
		default:
		}
	}
}
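For context, a producer feeding this source could write newline-delimited messages to the same unixgram socket, as in the sketch below; the socket path and messages are illustrative and error handling is minimal.

// Hypothetical producer for the socket read by UnixgramSource; the source
// splits each datagram on '\n' and emits the parts separately.
conn, err := net.Dial("unixgram", "/var/run/example.sock")
if err != nil {
	slog.Fatalf("Dial: %v", err)
}
defer conn.Close()
if _, err := conn.Write([]byte("first message\nsecond message\n")); err != nil {
	slog.Fatalf("Write: %v", err)
}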
Example #10
func (e *Executor) UpsertCubes(p cube.Partition, c []cube.Cuber) {
	tx, err := e.conn.Begin()
	if err != nil {
		slog.Fatalf("Error starting transaction %v", err)
	}

	part := getPartition(p)

	//TODO: have a cache of existing partition tables... don't recreate if not necessary
	e.Exec(e.table.CreatePartitionTableSql(part))

	e.Exec(e.table.CreateTemporaryCopyTableSql(part))
	cy := pq.NewCopierFromConn(e.conn)
	err = cy.Start(e.table.CopyTableSql(part))
	if err != nil {
		slog.Fatalf("Error starting copy %v", err)
	}

	for _, cube := range c {
		err = cy.Send(e.table.CopyDataFull(cube))
		if err != nil {
			slog.Fatalf("Error copying %v", err)
		}
	}

	err = cy.Close()
	if err != nil {
		slog.Fatalf("Error Ending Copy %v", err)
	}

	e.Exec(e.table.MergeCopySql(part))

	err = tx.Commit()
	if err != nil {
		slog.Fatalf("Error Committing tx %v ", err)
	}

}
Example #11
func (c *OrderedChain) Add(o Operator) Chain {
	parallel, ok := o.(ParallelizableOperator)
	if ok {
		if !parallel.IsOrdered() {
			parallel = parallel.MakeOrdered()
			if !parallel.IsOrdered() {
				slog.Fatalf("%s", "Couldn't make parallel operator ordered")
			}
		}
		c.SimpleChain.Add(parallel)
	} else {
		c.SimpleChain.Add(o)
	}
	return c
}
Example #12
func (op *DistributeOperator) Run() error {
	defer op.runner.WaitGroup().Wait()
	defer func() {
		for _, out := range op.outputs {
			close(out)
		}
	}()

	for {
		select {
		case obj, ok := <-op.In():
			if ok {
				key := op.mapper(obj)
				ch, ok := op.outputs[key]
				if !ok {
					op.createBranch(key)
					ch, ok = op.outputs[key]
					if !ok {
						slog.Fatalf("couldn't find channel right after key create")
					}

				}
				ch <- obj
			} else {
				return nil
			}
		case <-op.StopNotifier:
			op.runner.HardStop()
			return nil
		case <-op.runner.CloseNotifier():
			slog.Logf(logger.Levels.Error, "Unexpected child close in distribute op")
			op.runner.HardStop()
			return errors.New("Unexpected distribute child close")
		}
	}
}
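The mapper used above is supplied elsewhere. A hypothetical mapper that partitions []byte records by their first byte is sketched below; the key type and payload type are assumptions, not confirmed by these examples.

// Hypothetical mapper: route each record to a branch keyed by its first byte.
mapper := func(obj stream.Object) string {
	rec, ok := obj.([]byte)
	if !ok || len(rec) == 0 {
		return "default"
	}
	return string(rec[:1])
}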
Example #13
func (src *Client) connect() error {
	defer func() {
		src.retries++
	}()

	conn, err := net.Dial("tcp", src.addr)
	if err != nil {
		slog.Logf(logger.Levels.Error, "Cannot establish a connection with %s %v", src.addr, err)
		return err
	}

	wg_sub := &sync.WaitGroup{}
	defer wg_sub.Wait()

	rcvChData := make(chan stream.Object, 10)
	receiver := source.NewIOReaderSourceLengthDelim(conn)
	receiver.SetOut(rcvChData)
	rcvChCloseNotifier := make(chan bool)
	wg_sub.Add(1)
	go func() {
		defer wg_sub.Done()
		defer close(rcvChCloseNotifier)
		err := receiver.Run()
		if err != nil {
			slog.Logf(logger.Levels.Error, "Error in client reciever: %v", err)
		}
	}()
	//the receiver is closed by the sender after it is done sending, or via a hard stop

	writeNotifier := stream.NewNonBlockingProcessedNotifier(2)
	sndChData := make(chan stream.Object, src.hwm)
	sndChCloseNotifier := make(chan bool)
	defer close(sndChData)
	sender := sink.NewMultiPartWriterSink(conn)
	sender.CompletedNotifier = writeNotifier
	sender.SetIn(sndChData)
	wg_sub.Add(1)
	go func() {
		defer receiver.Stop() //close receiver
		defer wg_sub.Done()
		defer close(sndChCloseNotifier)
		err := sender.Run()
		if err != nil {
			slog.Logf(logger.Levels.Error, "Error in client sender: %v", err)
		}
	}()
	//sender closed by closing the sndChData channel or by a hard stop

	if src.buf.Len() > 0 {
		leftover := src.buf.Reset()
		for i, value := range leftover {
			sendData(sndChData, value, i+1)
		}
	}

	timer := src.resetAckTimer()

	closing := false

	//defer log.Println("Exiting client loop")
	opName := stream.Name(src)
	writesNotCompleted := uint(0)
	for {
		upstreamCh := src.In()
		if !src.buf.CanAdd() || closing {
			//disable upstream listening
			upstreamCh = nil
		}
		if closing && src.buf.Len() == 0 {
			sendClose(sndChData, 100)
			return nil
		}
		select {
		case msg, ok := <-upstreamCh:
			if !ok {
				//softClose
				//make sure everything was sent
				closing = true
			} else {
				bytes := msg.([]byte)
				seq, err := src.buf.Add(bytes)
				if err != nil {
					slog.Fatalf("Error adding item to buffer %v", err)
					return err
				}
				sendData(sndChData, bytes, seq)
				writesNotCompleted += 1
				slog.Gm.Event(&opName) // These are batched
				//slog.Logf(logger.Levels.Debug, "Sent batch -- length %d seq %d", len(bytes), seq)
			}
		case cnt := <-writeNotifier.NotificationChannel():
			writesNotCompleted -= cnt
			if timer == nil {
				slog.Logf(logger.Levels.Debug, "Seting timer %v, %v", time.Now(), time.Now().UnixNano())
				timer = src.resetAckTimer()
			}
		case obj, ok := <-rcvChData:
			slog.Logf(logger.Levels.Debug, "in Rcv: %v", ok)
			if !ok {
				return errors.New("Connection to Server was Broken in Recieve Direction")
			}

			command, seq, _, err := parseMsg(obj.([]byte))
			if err != nil {
				slog.Fatalf("%v", err)
			}
			if command == ACK {
				if src.processAck(seq) {
					timer = src.resetAckTimer()
				}
			} else {
				slog.Fatalf("Unknown Command: %v", command)
			}
		case <-rcvChCloseNotifier:
			//connection threw an eof to the reader?
			return errors.New("In Select: Recieve Closed")
		case <-sndChCloseNotifier:
			return errors.New("Connection to Server was Broken in Send Direction")
		case <-timer:
			return fmt.Errorf("Time Out Waiting For Ack, %d %v %v", len(rcvChData), time.Now(), time.Now().UnixNano())
		case <-src.StopNotifier:
			sender.Stop()
			return nil
		}
	}
}
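The sendData, sendAck, and sendClose helpers used by the client and server are not shown in these examples. A hedged sketch, assuming they mirror the [command][seq][payload] framing of sendMsgNoBlock in Example #3, could be:

// Assumed helpers: frame a command, sequence number, and payload as the
// multipart message that parseMsg expects. The real helpers may differ,
// for example by blocking or by reusing sendMsgNoBlock.
func sendData(ch chan<- stream.Object, payload []byte, seq int) {
	ch <- [][]byte{encodeInt(int(DATA)), encodeInt(seq), payload}
}

func sendAck(ch chan<- stream.Object, seq int) {
	ch <- [][]byte{encodeInt(int(ACK)), encodeInt(seq), []byte{}}
}

func sendClose(ch chan<- stream.Object, seq int) {
	ch <- [][]byte{encodeInt(int(CLOSE)), encodeInt(seq), []byte{}}
}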
Example #14
func (src Server) handleConnection(conn net.Conn) {
	wg_sub := &sync.WaitGroup{}
	defer wg_sub.Wait()

	opName := stream.Name(src)
	sndChData := make(chan stream.Object, 100)
	sndChCloseNotifier := make(chan bool, 1)
	defer close(sndChData)
	//side effect: this will close conn on exit
	sender := sink.NewMultiPartWriterSink(conn)
	sender.SetIn(sndChData)
	wg_sub.Add(1)
	go func() {
		defer wg_sub.Done()
		defer close(sndChCloseNotifier)
		err := sender.Run()
		if err != nil {
			slog.Logf(logger.Levels.Error, "Error in server sender %v", err)
		}
	}()
	defer sender.Stop()

	//this will actually close conn too
	rcvChData := make(chan stream.Object, 100)
	receiver := source.NewIOReaderSourceLengthDelim(conn)
	receiver.SetOut(rcvChData)
	rcvChCloseNotifier := make(chan bool, 1)
	wg_sub.Add(1)
	go func() {
		defer wg_sub.Done()
		defer close(rcvChCloseNotifier)
		err := receiver.Run()
		if err != nil {
			slog.Logf(logger.Levels.Error, "Error in server reciever %v", err)
		}
	}()
	defer receiver.Stop()

	lastGotAck := 0
	lastSentAck := 0
	var timer <-chan time.Time
	timer = nil
	for {
		select {
		case obj, ok := <-rcvChData:

			if !ok {
				//send last ack back??
				slog.Logf(logger.Levels.Error, "Receive Channel Closed Without Close Message")
				return
			}
			command, seq, payload, err := parseMsg(obj.([]byte))
			slog.Gm.Event(&opName)

			if err == nil {
				if command == DATA {
					lastGotAck = seq
					if (lastGotAck - lastSentAck) > src.hwm/2 {
						sendAck(sndChData, lastGotAck)
						lastSentAck = lastGotAck
						timer = nil
					} else if timer == nil {
						slog.Logf(logger.Levels.Debug, "Setting timer %v", time.Now())
						timer = time.After(100 * time.Millisecond)
					}
					src.Out() <- payload
				} else if command == CLOSE {
					if lastGotAck > lastSentAck {
						sendAck(sndChData, lastGotAck)
					}
					slog.Logf(logger.Levels.Info, "%s", "Server got close")
					return
				} else {
					slog.Fatalf("%v", "Server Got Unknown Command")
				}
			} else {
				slog.Fatalf("Server could not parse packet: %v", err)
			}
		case <-rcvChCloseNotifier:
			if len(rcvChData) > 0 {
				continue //drain channel before exiting
			}
			slog.Logf(logger.Levels.Error, "Client asked for a close on recieve- should not happen, timer is nil = %v, %v", (timer == nil), time.Now())
			return
		case <-sndChCloseNotifier:
			slog.Logf(logger.Levels.Error, "%v", "Server asked for a close on send - should not happen")
			return
		case <-timer:
			sendAck(sndChData, lastGotAck)
			lastSentAck = lastGotAck
			timer = nil
		case <-src.StopNotifier:
			return
		}

	}
}
Example #15
func (op *BatcherOperator) Run() error {
	defer close(op.Out())

	/* batchExpired puts a lower bound on how often flushes occur */
	var batchExpired <-chan time.Time
	batchExpired = nil

	//INVARIANT: if container.HasItems() then it will be flushed eventually
	//We create a state machine with 3 boolean states, state hi = container.HasItems(), wcb = op.DownstreamWillCallback(), bne = (batchExpired != nil) (batch not expired)
	//invariants required:
	//     INVARIANT LIMITED_DRCB => repeated DRCB calls will eventually cause wcb == false
	//     INVARIANT !wcb can only become wcb after a FLUSH
	//	   INVARIANT CAN FLUSH OR WAIT => either DownstreamWillCallback or DownstreamCanAcceptFlush is true
	//lets analyse cases where hi == true:

	// wcb  && bne =>
	// Case IN => wcb && bne [Case BE or DRCB will eventually happen]
	// Case BE => PROGRESS 1 || wcb && !bne
	// Case DRCB => wcb && bne [can't recurse indefinitely by LIMITED_DRCB] || !wcb && bne

	// wcb && !bne =>
	// Case IN => wcb && !bne [case DRCB will eventually happen]
	// Case BE => impossible
	// Case DRCB =>
	//		DownstreamCanAcceptFlush => PROGRESS 2
	//		else: wcb && bne || wcb && !bne [can't recurse indef by LIMITED_DRCB] || !wcb && bne

	//!wcb && bne
	// case IN => !wcb && bne [case BE will eventually happen]
	// case BE =>
	//		!DownstreamCanAcceptFlush => impossible [INVARIANT CANFLUSH_OR_WAIT]
	//		else => PROGRESS 2
	//case DRCB => impossible (!wcb)

	//!wcb && !bne => impossible (all cases disallow this)

	//liveness: has items => either batch_expired != nil or DownstreamWillCallback
	for {
		select {
		//case IN
		case obj, ok := <-op.In():
			if ok {
				op.container.Add(obj)
				if !op.DownstreamWillCallback() && op.container.HasItems() && batchExpired == nil { //used by first item
					batchExpired = time.After(op.minWaitAfterFirstItem)
				}
				//IMPOSSIBLE: hi && !wcb && !bne
			} else {
				if op.container.HasItems() {
					op.LastFlush()
				}
				if op.container.HasItems() {
					slog.Fatalf("Last flush did not empty container, some stuff will never be sent")
				}
				slog.Logf(logger.Levels.Debug, "Batch Operator ", op.name, " flushed ", op.total_flushes)
				return nil
			}
		//case BE
		case <-batchExpired:
			batchExpired = nil
			if op.DownstreamCanAcceptFlush() {
				//PROGRESS 1
				op.Flush()
				batchExpired = time.After(op.minWaitBetweenFlushes)
			}
			if !op.DownstreamWillCallback() && op.container.HasItems() && batchExpired == nil {
				batchExpired = time.After(op.minWaitForLeftover)
			}
			//impossible: hi && !wcb && !bne
		case <-op.StopNotifier:
			//INVARIANT and PROGRESS Violated. Hard Stop
			return nil
		//case DRCB
		case count := <-op.processedDownstream.NotificationChannel():
			op.outstanding -= count
			if op.DownstreamCanAcceptFlush() && op.container.HasItems() && batchExpired == nil {
				op.Flush()
				batchExpired = time.After(op.minWaitBetweenFlushes)
			}
			if !op.DownstreamWillCallback() && op.container.HasItems() && batchExpired == nil {
				batchExpired = time.After(op.minWaitForLeftover)
			}
			//impossible: hi && !wcb && !bne
		}
	}
}
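DownstreamWillCallback and DownstreamCanAcceptFlush are not defined in these examples. One reading that is consistent with the invariants above, assuming outstanding counts flushes not yet acknowledged downstream and a hypothetical maxOutstanding bound, is sketched below; the actual definitions may differ.

// Hedged sketch only; maxOutstanding is hypothetical and not from the source.
func (op *BatcherOperator) DownstreamWillCallback() bool {
	// a downstream callback (DRCB) arrives only while a flush is outstanding
	return op.outstanding > 0
}

func (op *BatcherOperator) DownstreamCanAcceptFlush() bool {
	return op.outstanding < op.maxOutstanding
}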