// processPending replays buffered results against the harness's local
// SQLite in sequence-number order, stopping at the first gap. An
// execution failure or an output mismatch loses the run.
func (h *Harness) processPending(results map[int]*result) {
	for {
		result, ok := results[h.nextSequenceNumber]
		if !ok {
			return
		}

		output, err := h.sql.Execute("harness", result.query)
		if err != nil {
			h.losef("[%d] Could not execute statement that %v claimed was fine: %s", h.nextSequenceNumber, result.node, err)
		}

		if !bytes.Equal(result.body, output.Stdout) {
			h.losef(`[%d] Received incorrect output from %v for query %s

Output: %s

Correct output: %s`, h.nextSequenceNumber, result.node, result.query, util.FmtOutput(result.body), util.FmtOutput(output.Stdout))
		} else {
			state.RecordCorrectQuery()
			log.Printf(`[harness] [%d] Received correct output from %v for query %s

Output: %s`, h.nextSequenceNumber, result.node, result.query, util.FmtOutput(result.body))
		}

		// Update bookkeeping
		delete(results, h.nextSequenceNumber)
		h.nextSequenceNumber++
	}
}
Example #2
// writeTo pumps buffered stream data into sock, honoring the
// connection's kill switch and the nil-buffer close sentinel.
func (stream *stream) writeTo(sock *fullDuplex) {
	for {
		var buf []byte
		select {
		case <-stream.conn.kill:
			stream.closeLater(sock)
			return
		case buf = <-stream.out:
		}
		// This is the special "close" notification
		if buf == nil {
			sock.CloseWrite()
			return
		}

		n, err := sock.Write(buf)
		state.RecordWrite(n)
		log.Debugf("Wrote %s %d bytes: %#v", stream.l, n, string(buf))
		if err != nil || n != len(buf) {
			if err == nil {
				err = io.ErrShortWrite // avoid logging a nil error on a short write
			}
			log.Printf("writeTo %s: %s", stream.l, err)
			break
		}
	}

	// Propagate the close to the other end after the simulated link delay
	time.Sleep(stream.conn.link.Delay())
	stream.other(sock).CloseRead()
}
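
The fullDuplex type is not shown in these examples, but the CloseWrite/CloseRead pair above mirrors the half-close operations that *net.TCPConn provides. A minimal sketch of the same shutdown pattern on plain TCP connections (illustrative only, not the harness's actual proxy type):

package proxy

import (
	"io"
	"log"
	"net"
)

// pipe copies one direction of a proxied connection, then performs the
// half-close handshake used by writeTo above: stop sending on dst and
// stop reading on src while the opposite direction keeps flowing.
func pipe(dst, src *net.TCPConn) {
	if _, err := io.Copy(dst, src); err != nil {
		log.Printf("pipe: %s", err)
	}
	dst.CloseWrite() // the peer sees EOF but can still send to us
	src.CloseRead()  // we are done consuming this direction
}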
Example #3
// AfterParse validates flags and finalizes derived configuration once
// flag.Parse has run.
func AfterParse() {
	conf.args = flag.Args()

	if conf.seed == 0 {
		conf.seed = randomSeed()
	}

	if conf.nodeCount < 2 {
		log.Fatal("You need at least two agents")
	}

	if conf.sqlcluster == "" {
		conf.sqlcluster = sqlclusterPath()
		if conf.sqlcluster == "" {
			log.Fatal("No `sqlcluster' binary provided, and could not find one heuristically. Please provide a `-r <sqlcluster>' argument, or move your stripe-ctf.com/sqlcluster Go package into your GOPATH.")
		} else {
			log.Printf("Found sqlcluster binary at %s", conf.sqlcluster)
		}
	}

	if conf.containerRoot == "" {
		conf.containerRoot = conf.root
	}

	if conf.containerIds == "" {
		conf.parsedContainerIds = make([]string, 0)
	} else {
		conf.parsedContainerIds = strings.Split(conf.containerIds, ",")
	}

	log.SetVerbose(conf.verbose)
	log.Printf("Using seed %v", conf.seed)

	// Always seed the default RNG. It controls comparatively little, so
	// we don't bother making it reproducible via conf.seed
	rand.Seed(randomSeed())
	conf.wg = exit.NewWaitGroup()

	conf.gotRequest = make(chan int, 1)

	recordConfig()
}
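
randomSeed is not shown in these examples; a typical implementation, assuming the seed is an int64, simply reads the wall clock:

package state

import "time"

// Sketch only: derive a fresh seed from the current time. The real
// randomSeed may differ; an int64 seed is assumed here.
func randomSeed() int64 {
	return time.Now().UnixNano()
}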
Example #4

// issueQuery sends query to node over HTTP and forwards the response
// body to the harness's result channel. It reports whether the request
// round-trip succeeded.
func (h *Harness) issueQuery(reqid int, node *agent.Agent, query string) bool {
	log.Debugf("[harness] Making request to %v: %#v", node, query)

	b := strings.NewReader(query)
	url := node.ConnectionString + "/sql"

	start := time.Now()
	resp, err := unixClient.Post(url, "application/octet-stream", b)
	end := time.Now()

	if err != nil {
		log.Printf("[harness] Sleeping 100ms after request error from %s (in response to %#v): %s", node, query, err)
		return false
	}

	body, err := ioutil.ReadAll(resp.Body)
	resp.Body.Close()

	if err != nil {
		log.Printf("[harness] Error reading body from %v (in response to %#v): %s", node, query, err)
		return false
	}

	if resp.StatusCode != 200 {
		log.Printf("[harness] Sleeping 100ms after HTTP %d status code from %s (in response to %#v): %s", resp.StatusCode, node, query, body)
		return false
	}

	log.Debugf("[harness] Received response to %v (%#v): %s", node, query, body)

	h.result <- &result{
		reqid: reqid,
		node:  node,
		query: query,
		start: start,
		end:   end,
		resp:  body,
	}
	return true
}
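
unixClient is likewise not defined here. Since each agent is addressed via its ConnectionString, one plausible construction is an http.Client that dials a Unix domain socket; the socket path below is a hypothetical placeholder:

package harness

import (
	"context"
	"net"
	"net/http"
)

// Sketch: an http.Client that sends every request over a Unix domain
// socket. "/tmp/octopus.sock" is a placeholder; a real client would
// derive the path from the node's ConnectionString.
var unixClient = &http.Client{
	Transport: &http.Transport{
		DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
			var d net.Dialer
			return d.DialContext(ctx, "unix", "/tmp/octopus.sock")
		},
	},
}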
Example #5
// Execute runs command through the sqlite3 CLI while holding the SQL
// mutex, stamping each invocation with an increasing sequence number.
func (sql *SQL) Execute(tag string, command string) (*Output, error) {
	// TODO: make sure I can catch non-lock issues
	sql.mutex.Lock()
	defer sql.mutex.Unlock()

	defer func() { sql.sequenceNumber++ }()
	if tag == "primary" || log.Verbose() {
		log.Printf("[%s] [%d] Executing %#v", tag, sql.sequenceNumber, command)
	}

	subprocess := exec.Command("sqlite3", sql.path)
	subprocess.Stdin = strings.NewReader(command + ";")

	var stdout, stderr bytes.Buffer
	subprocess.Stdout = &stdout
	subprocess.Stderr = &stderr

	if err := subprocess.Start(); err != nil {
		log.Panic(err)
	}

	var o, e []byte

	if err := subprocess.Wait(); err != nil {
		exitstatus := getExitstatus(err)
		switch {
		case exitstatus < 0:
			log.Panic(err)
		case exitstatus == 1, exitstatus == 2:
			// Exit statuses 1 and 2 indicate a SQL-level error; surface
			// sqlite3's stderr as the query's output for comparison.
			o = stderr.Bytes()
			e = nil
		}
	} else {
		o = stdout.Bytes()
		e = stderr.Bytes()
	}

	output := &Output{
		Stdout:         o,
		Stderr:         e,
		SequenceNumber: sql.sequenceNumber,
	}

	return output, nil
}
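
getExitstatus is not included either. A sketch consistent with the switch above, where a negative value signals that no exit code could be recovered (for example, the process was killed by a signal):

package sql

import (
	"os/exec"
	"syscall"
)

// Sketch: unwrap an error returned by (*exec.Cmd).Wait into a numeric
// exit status, or -1 when the status is unavailable. Execute treats a
// negative value as fatal.
func getExitstatus(err error) int {
	exitErr, ok := err.(*exec.ExitError)
	if !ok {
		return -1
	}
	status, ok := exitErr.Sys().(syscall.WaitStatus)
	if !ok || !status.Exited() {
		return -1
	}
	return status.ExitStatus()
}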
Example #6

// Rules for sequence numbers:
//
// - Gaps are temporarily OK, but not in the long run.
func (h *Harness) resultHandler() {
	results := make(map[int]*result)
	for {
		result := <-h.result
		sequenceNumber, body, err := h.parseResponse(result.resp)
		if err != nil {
			h.lose(err.Error())
			return
		}
		result.body = body

		if sequenceNumber < h.nextSequenceNumber {
			h.losef(`[%d] Received an already-processed sequence number from %v in response to %s

Output: %s`, sequenceNumber, result.node, result.query, util.FmtOutput(result.resp))
			return
		}

		if old, ok := results[sequenceNumber]; ok {
			h.losef(`[%d] Received a still-pending sequence number from %v in response to %s

Output: %s

This sequence number was originally received in response to %s

Original output: %s`, sequenceNumber, result.node, result.query, util.FmtOutput(result.resp), old.query, util.FmtOutput(old.resp))
			return
		}

		if sequenceNumber > h.nextSequenceNumber {
			log.Printf("[%d] Result from %v waiting on sequence number %d", sequenceNumber, result.node, h.nextSequenceNumber)
		}

		// Notify SPOF monkey that we got a valid request
		select {
		case state.GotRequest() <- result.reqid:
		default:
		}

		results[sequenceNumber] = result
		h.processPending(results)
	}
}
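
Together, resultHandler and processPending form a small reorder buffer: results that arrive out of order wait in the map until the missing sequence number shows up, which is exactly the "gaps are temporarily OK" rule. The same pattern in isolation, with string payloads standing in for results:

package main

import "fmt"

// A stripped-down reorder buffer: park out-of-order arrivals, drain in
// order, stop at the first gap.
func main() {
	pending := make(map[int]string)
	next := 0
	deliver := func(seq int, payload string) {
		pending[seq] = payload
		for {
			p, ok := pending[next]
			if !ok {
				return // gap: wait for the missing sequence number
			}
			fmt.Println(p)
			delete(pending, next)
			next++
		}
	}
	deliver(1, "b") // parked: sequence 0 has not arrived yet
	deliver(0, "a") // prints "a" then "b"
}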
Example #7
func main() {
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, `
Usage: %s [options] [...args]

Octopus will run your sqlcluster and issue queries to it. It simulates
a lossy network, and to make things even more fun, will turn loose the
following chaotic monkeys:

  NET MONKEYS
  - latency: adjusts base latency between nodes
  - jitter: adjusts latency variance between nodes
  - lagsplit: spikes latency along a network partition
  - link: destroys individual links between nodes
  - netsplit: destroys links along a network partition

  NODE MONKEYS
  - freeze: stops nodes (by sending SIGTSTP)
  - murder: ruthlessly murders nodes (by sending SIGTERM)

Any positional arguments given to Octopus will be passed through to
SQLCluster. This is mostly useful for passing a -v flag, like so:

  ./octopus -- -v

OPTIONS:
`, os.Args[0])
		flag.PrintDefaults()
	}
	state.AddFlags()
	flag.Parse()
	state.AfterParse()

	// Exit cleanly on SIGINT or SIGTERM. The channel is buffered, as
	// signal.Notify requires.
	sigchan := make(chan os.Signal, 1)
	signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)

	d := director.NewDirector()
	if state.Dryrun() {
		d.Dryrun()
		return
	}

	// Create the working directory
	if err := os.MkdirAll(state.Root(), os.ModeDir|0755); err != nil {
		log.Fatal(err)
	}

	d.Start()

	go func() {
		select {
		case <-sigchan:
			log.Println("Terminating due to signal")
		case <-state.WaitGroup().Quit:
			// Someone else requested an exit
		case <-time.After(state.Duration()):
			// Time's up!
			log.Printf("The allotted %s have elapsed. Exiting!", state.Duration())
		}
		state.WaitGroup().Exit()
	}()

	h := harness.New(d.Agents())
	h.Start()

	d.StartMonkeys()

	<-state.WaitGroup().Quit

	if state.Write() != "" {
		results := state.JSONResults()
		if err := ioutil.WriteFile(state.Write(), results, 0644); err != nil {
			log.Fatalf("Could not write results: %s", err)
		}
	} else {
		results := state.PrettyPrintResults()
		log.Println(results)
	}

	state.WaitGroup().Exit()
}