Code example #1
File: http2_test.go  Project: stellar/bridge-server
func checkIfConnectionToServerIsHTTP2(t *testing.T, wg *sync.WaitGroup, c chan os.Signal) {

	defer wg.Done()

	tr := &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}

	err := http2.ConfigureTransport(tr)

	if err != nil {
		t.Fatal("Unable to upgrade client transport to HTTP/2")
	}

	client := http.Client{Transport: tr}
	r, err := client.Get(fmt.Sprintf("https://localhost:%d", port))

	c <- os.Interrupt

	if err != nil {
		t.Fatalf("Error encountered while connecting to test server: %s", err)
	}

	if r.Proto != "HTTP/2.0" {
		t.Fatalf("Expected HTTP/2 connection to server, but connection was using %s", r.Proto)
	}
}
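
The test above presumes a server that already negotiates HTTP/2. For reference, a minimal sketch of the server-side counterpart using http2.ConfigureServer (not taken from stellar/bridge-server; the handler, address, and certificate paths are placeholders):

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{
		Addr: ":8443",
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			// Echo the negotiated protocol; an HTTP/2 request reports "HTTP/2.0".
			w.Write([]byte(r.Proto))
		}),
	}
	// Advertise "h2" via ALPN so TLS clients can negotiate HTTP/2.
	if err := http2.ConfigureServer(srv, nil); err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}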
Code example #2
File: boomer.go  Project: rakyll/boom
func (b *Boomer) runWorker(n int) {
	var throttle <-chan time.Time
	if b.Qps > 0 {
		throttle = time.Tick(time.Duration(1e6/(b.Qps)) * time.Microsecond)
	}

	tr := &http.Transport{
		TLSClientConfig: &tls.Config{
			InsecureSkipVerify: true,
		},
		DisableCompression: b.DisableCompression,
		DisableKeepAlives:  b.DisableKeepAlives,
		// TODO(jbd): Add dial timeout.
		TLSHandshakeTimeout: time.Duration(b.Timeout) * time.Millisecond,
		Proxy:               http.ProxyURL(b.ProxyAddr),
	}
	if b.H2 {
		http2.ConfigureTransport(tr)
	} else {
		tr.TLSNextProto = make(map[string]func(string, *tls.Conn) http.RoundTripper)
	}
	client := &http.Client{Transport: tr}
	for i := 0; i < n; i++ {
		if b.Qps > 0 {
			<-throttle
		}
		b.makeRequest(client)
	}
}
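
The b.H2 branch above is the central pattern on this page: http2.ConfigureTransport opts a transport in to HTTP/2, while a non-nil, empty TLSNextProto map pins it to HTTP/1.x. A self-contained sketch of that toggle (a hypothetical helper, not part of rakyll/boom; the URL is a placeholder and InsecureSkipVerify is for testing only):

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"

	"golang.org/x/net/http2"
)

// newClient returns an HTTPS client that either negotiates HTTP/2 or stays
// on HTTP/1.x, mirroring the b.H2 branch above.
func newClient(useH2 bool) (*http.Client, error) {
	tr := &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // test-only
	}
	if useH2 {
		if err := http2.ConfigureTransport(tr); err != nil {
			return nil, err
		}
	} else {
		// A non-nil, empty map disables the automatic HTTP/2 upgrade.
		tr.TLSNextProto = make(map[string]func(string, *tls.Conn) http.RoundTripper)
	}
	return &http.Client{Transport: tr}, nil
}

func main() {
	client, err := newClient(true)
	if err != nil {
		panic(err)
	}
	resp, err := client.Get("https://localhost:8443")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Proto) // "HTTP/2.0" when the upgrade succeeded
}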
Code example #3
File: setup.go  Project: petertseng/p2
func (c *PreparerConfig) getClient(
	cxnTimeout time.Duration,
	insecureSkipVerify bool,
) (*http.Client, error) {
	tlsConfig, err := netutil.GetTLSConfig(c.CertFile, c.KeyFile, c.CAFile)
	if err != nil {
		return nil, err
	}
	tlsConfig.InsecureSkipVerify = insecureSkipVerify
	transport := &http.Transport{
		TLSClientConfig: tlsConfig,
		// same dialer as http.DefaultTransport
		Dial: (&net.Dialer{
			Timeout:   cxnTimeout,
			KeepAlive: cxnTimeout,
		}).Dial,
	}
	if c.HTTP2 {
		if err = http2.ConfigureTransport(transport); err != nil {
			return nil, err
		}
	} else {
		// Disable http2. As the docs for http.Transport tell us,
		// "If TLSNextProto is nil, HTTP/2 support is enabled automatically."
		// And as the Go 1.6 release notes tell us,
		// "Programs that must disable HTTP/2 can do so by setting Transport.TLSNextProto
		// to a non-nil, empty map."
		transport.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
	}
	return &http.Client{Transport: transport}, nil
}
Code example #4
File: openshift-copies.go  Project: containers/image
// tlsCacheGet is a modified copy of k8s.io/kubernetes/pkg/client/transport.tlsTransportCache.get.
func tlsCacheGet(config *restConfig) (http.RoundTripper, error) {
	// REMOVED: any actual caching

	// Get the TLS options for this client config
	tlsConfig, err := tlsConfigFor(config)
	if err != nil {
		return nil, err
	}
	// The options didn't require a custom TLS config
	if tlsConfig == nil {
		return http.DefaultTransport, nil
	}

	// REMOVED: Call to k8s.io/apimachinery/pkg/util/net.SetTransportDefaults; instead of the generic machinery and conditionals, hard-coded the result here.
	t := &http.Transport{
		// http.ProxyFromEnvironment doesn't respect CIDRs and that makes it impossible to exclude things like pod and service IPs from proxy settings
		// ProxierWithNoProxyCIDR allows CIDR rules in NO_PROXY
		Proxy:               newProxierWithNoProxyCIDR(http.ProxyFromEnvironment),
		TLSHandshakeTimeout: 10 * time.Second,
		TLSClientConfig:     tlsConfig,
		Dial: (&net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
		}).Dial,
	}
	// Allow clients to disable http2 if needed.
	if s := os.Getenv("DISABLE_HTTP2"); len(s) == 0 {
		_ = http2.ConfigureTransport(t)
	}
	return t, nil
}
Code example #5
File: attack.go  Project: alphagov/paas-cf
// HTTP2 returns a functional option which enables or disables HTTP/2 support
// on requests performed by an Attacker.
func HTTP2(enabled bool) func(*Attacker) {
	return func(a *Attacker) {
		if tr := a.client.Transport.(*http.Transport); enabled {
			http2.ConfigureTransport(tr)
		} else {
			tr.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
		}
	}
}
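
The assertion a.client.Transport.(*http.Transport) above panics if a custom RoundTripper has been installed. A defensive variant (a hypothetical sketch, not the project's code; assumes the usual net/http, crypto/tls, and golang.org/x/net/http2 imports) uses the comma-ok form and leaves unknown transports untouched:

// configureHTTP2 toggles HTTP/2 support only when the client's transport
// really is an *http.Transport.
func configureHTTP2(client *http.Client, enabled bool) {
	tr, ok := client.Transport.(*http.Transport)
	if !ok {
		return // custom RoundTripper; nothing to reconfigure
	}
	if enabled {
		http2.ConfigureTransport(tr)
	} else {
		tr.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
	}
}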
Code example #6
File: http.go  Project: Cloven/minikube
// SetTransportDefaults applies the defaults from http.DefaultTransport
// for the Proxy, Dial, and TLSHandshakeTimeout fields if unset
func SetTransportDefaults(t *http.Transport) *http.Transport {
	t = SetOldTransportDefaults(t)
	// Allow HTTP2 clients but default off for now
	if s := os.Getenv("ENABLE_HTTP2"); len(s) > 0 {
		if err := http2.ConfigureTransport(t); err != nil {
			glog.Warningf("Transport failed http2 configuration: %v", err)
		}
	}
	return t
}
Code example #7
File: http.go  Project: humblec/kubernetes
// SetTransportDefaults applies the defaults from http.DefaultTransport
// for the Proxy, Dial, and TLSHandshakeTimeout fields if unset
func SetTransportDefaults(t *http.Transport) *http.Transport {
	t = SetOldTransportDefaults(t)
	// Allow clients to disable http2 if needed.
	if s := os.Getenv("DISABLE_HTTP2"); len(s) > 0 {
		glog.Infof("HTTP2 has been explicitly disabled")
	} else {
		if err := http2.ConfigureTransport(t); err != nil {
			glog.Warningf("Transport failed http2 configuration: %v", err)
		}
	}
	return t
}
Code example #8
File: network.go  Project: spazbite187/snatchtls
// GetHTTPClient returns a configured HTTP client struct.
func GetHTTPClient(tlsConfig *tls.Config) http.Client {
	tr := &http.Transport{
		TLSClientConfig:       tlsConfig,
		DisableCompression:    true,
		TLSHandshakeTimeout:   Timeout,
		ResponseHeaderTimeout: Timeout,
	}
	// enable HTTP/2
	err := http2.ConfigureTransport(tr)
	if err != nil {
		log.Printf("Unable to enable HTTP/2 \n%s", err)
	}
	client := http.Client{
		Transport: tr,
		Timeout:   Timeout,
	}
	return client
}
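
Note that a ConfigureTransport failure above is only logged, so callers of GetHTTPClient silently fall back to HTTP/1.1. A short usage sketch (hypothetical; the URL argument is a placeholder and the usual net/http, crypto/tls, and log imports are assumed) that surfaces which protocol was actually negotiated:

// checkProto performs a GET with the configured client and reports the
// negotiated protocol version.
func checkProto(tlsConfig *tls.Config, url string) error {
	client := GetHTTPClient(tlsConfig)
	resp, err := client.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.Proto != "HTTP/2.0" {
		log.Printf("expected HTTP/2, got %s (ConfigureTransport may have failed)", resp.Proto)
	}
	return nil
}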
Code example #9
File: cluster.go  Project: mhurne/vault
// refreshRequestForwardingConnection ensures that the client/transport are
// alive and that the current active address value matches the most
// recently-known address.
func (c *Core) refreshRequestForwardingConnection(clusterAddr string) error {
	c.requestForwardingConnectionLock.Lock()
	defer c.requestForwardingConnectionLock.Unlock()

	// It's nil but we don't have an address anyways, so exit
	if c.requestForwardingConnection == nil && clusterAddr == "" {
		return nil
	}

	// NOTE: We don't fast path the case where we have a connection because the
	// address is the same, because the cert/key could have changed if the
	// active node ended up being the same node. Before we hit this function in
	// Leader() we'll have done a hash on the advertised info to ensure that we
	// won't hit this function unnecessarily anyways.

	// Disabled, potentially
	if clusterAddr == "" {
		c.requestForwardingConnection = nil
		return nil
	}

	tlsConfig, err := c.ClusterTLSConfig()
	if err != nil {
		c.logger.Printf("[ERR] core/refreshRequestForwardingConnection: error fetching cluster tls configuration: %v", err)
		return err
	}
	tp := &http.Transport{
		TLSClientConfig: tlsConfig,
	}
	err = http2.ConfigureTransport(tp)
	if err != nil {
		c.logger.Printf("[ERR] core/refreshRequestForwardingConnection: error configuring transport: %v", err)
		return err
	}
	c.requestForwardingConnection = &activeConnection{
		Client: &http.Client{
			Transport: tp,
		},
		clusterAddr: clusterAddr,
	}

	return nil
}
Code example #10
File: proxy.go  Project: xingfeng2510/etcd
// NewHandler creates a new HTTP handler, listening on the given transport,
// which will proxy requests to an etcd cluster.
// The handler will periodically update its view of the cluster.
func NewHandler(t *http.Transport, urlsFunc GetProxyURLs, failureWait time.Duration, refreshInterval time.Duration) http.Handler {
	if t.TLSClientConfig != nil {
		// Enable http2, see Issue 5033.
		err := http2.ConfigureTransport(t)
		if err != nil {
			plog.Infof("Error enabling Transport HTTP/2 support: %v", err)
		}
	}

	p := &reverseProxy{
		director:  newDirector(urlsFunc, failureWait, refreshInterval),
		transport: t,
	}

	mux := http.NewServeMux()
	mux.Handle("/", p)
	mux.HandleFunc("/v2/config/local/proxy", p.configHandler)

	return mux
}
Code example #11
File: client.go  Project: pasinskim/mender
// Client initialization
func New(conf Config) (*ApiClient, error) {

	var client *http.Client
	if conf == (Config{}) {
		client = newHttpClient()
	} else {
		var err error
		client, err = newHttpsClient(conf)
		if err != nil {
			return nil, err
		}
	}

	if client.Transport == nil {
		client.Transport = &http.Transport{}
	}

	if err := http2.ConfigureTransport(client.Transport.(*http.Transport)); err != nil {
		log.Warnf("failed to enable HTTP/2 for client: %v", err)
	}

	return &ApiClient{*client}, nil
}
Code example #12
File: v1compat_handler.go  Project: lhchavez/quark
func registerV1CompatHandlers(mux *http.ServeMux, db *sql.DB) {
	runs, err := context().QueueManager.Get("default")
	if err != nil {
		panic(err)
	}
	guids, err := v1CompatGetPendingRuns(context(), db)
	if err != nil {
		panic(err)
	}
	if err := v1CompatInjectRuns(context(), runs, db, guids); err != nil {
		panic(err)
	}
	context().Log.Info("Injected pending runs", "count", len(guids))

	transport := &http.Transport{
		Dial: (&net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
		}).Dial,
		TLSHandshakeTimeout:   10 * time.Second,
		ExpectContinueTimeout: 1 * time.Second,
	}
	if !*insecure {
		cert, err := ioutil.ReadFile(context().Config.TLS.CertFile)
		if err != nil {
			panic(err)
		}
		certPool := x509.NewCertPool()
		certPool.AppendCertsFromPEM(cert)
		keyPair, err := tls.LoadX509KeyPair(
			context().Config.TLS.CertFile,
			context().Config.TLS.KeyFile,
		)
		transport.TLSClientConfig = &tls.Config{
			Certificates: []tls.Certificate{keyPair},
			RootCAs:      certPool,
			ClientAuth:   tls.RequireAndVerifyClientCert,
		}
		if err != nil {
			panic(err)
		}
		if err := http2.ConfigureTransport(transport); err != nil {
			panic(err)
		}
	}

	client := &http.Client{Transport: transport}

	finishedRunsChan := make(chan *grader.RunInfo, 1)
	context().InflightMonitor.PostProcessor.AddListener(finishedRunsChan)
	go v1CompatRunPostProcessor(db, finishedRunsChan, client)

	mux.Handle("/", http.FileServer(&wrappedFileSystem{
		fileSystem: &assetfs.AssetFS{
			Asset:     Asset,
			AssetDir:  AssetDir,
			AssetInfo: AssetInfo,
			Prefix:    "data",
		},
	}))

	mux.Handle("/metrics", prometheus.Handler())

	mux.HandleFunc("/grader/status/", func(w http.ResponseWriter, r *http.Request) {
		ctx := context()
		w.Header().Set("Content-Type", "text/json; charset=utf-8")
		runData := ctx.InflightMonitor.GetRunData()
		status := graderStatusResponse{
			Status: "ok",
			RunningQueue: graderStatusQueue{
				Runners: []string{},
				Running: make([]graderRunningStatus, len(runData)),
			},
		}

		for i, data := range runData {
			status.RunningQueue.Running[i].RunnerName = data.Runner
			status.RunningQueue.Running[i].ID = data.ID
		}
		for _, queueInfo := range ctx.QueueManager.GetQueueInfo() {
			for _, l := range queueInfo.Lengths {
				status.RunningQueue.RunQueueLength += l
			}
		}
		encoder := json.NewEncoder(w)
		w.Header().Set("Content-Type", "text/json; charset=utf-8")
		if err := encoder.Encode(&status); err != nil {
			ctx.Log.Error("Error writing /grader/status/ response", "err", err)
		}
	})

	mux.HandleFunc("/run/grade/", func(w http.ResponseWriter, r *http.Request) {
		ctx := context()
		decoder := json.NewDecoder(r.Body)
		defer r.Body.Close()

		var request runGradeRequest
		if err := decoder.Decode(&request); err != nil {
			ctx.Log.Error("Error receiving grade request", "err", err)
			w.WriteHeader(http.StatusBadRequest)
			return
		}
		ctx.Log.Info("/run/grade/", "request", request)
		if err := v1CompatInjectRuns(ctx, runs, db, request.GUIDs); err != nil {
			w.WriteHeader(http.StatusInternalServerError)
		}
		w.Header().Set("Content-Type", "text/json; charset=utf-8")
		fmt.Fprintf(w, "{\"status\":\"ok\"}")
	})

	mux.HandleFunc("/broadcast/", func(w http.ResponseWriter, r *http.Request) {
		ctx := context()
		decoder := json.NewDecoder(r.Body)
		defer r.Body.Close()

		var message broadcaster.Message
		if err := decoder.Decode(&message); err != nil {
			ctx.Log.Error("Error receiving broadcast request", "err", err)
			w.WriteHeader(http.StatusBadRequest)
			return
		}
		ctx.Log.Info("/broadcast/", "message", message)
		if err := v1CompatBroadcast(ctx, client, &message); err != nil {
			ctx.Log.Error("Error sending broadcast message", "err", err)
		}
		w.Header().Set("Content-Type", "text/json; charset=utf-8")
		fmt.Fprintf(w, "{\"status\":\"ok\"}")
	})

	mux.HandleFunc("/reload-config/", func(w http.ResponseWriter, r *http.Request) {
		ctx := context()
		ctx.Log.Info("/reload-config/")
		w.Header().Set("Content-Type", "text/json; charset=utf-8")
		fmt.Fprintf(w, "{\"status\":\"ok\"}")
	})
}
Code example #13
File: forwarding_test.go  Project: quixoten/vault
func testHTTP_Forwarding_Stress_Common(t *testing.T, rpc, parallel bool, num uint64) {
	testPlaintext := "the quick brown fox"
	testPlaintextB64 := "dGhlIHF1aWNrIGJyb3duIGZveA=="

	if rpc {
		os.Setenv("VAULT_USE_GRPC_REQUEST_FORWARDING", "1")
	} else {
		os.Setenv("VAULT_USE_GRPC_REQUEST_FORWARDING", "")
	}

	handler1 := http.NewServeMux()
	handler2 := http.NewServeMux()
	handler3 := http.NewServeMux()

	coreConfig := &vault.CoreConfig{
		LogicalBackends: map[string]logical.Factory{
			"transit": transit.Factory,
		},
	}

	// Chicken-and-egg: Handler needs a core. So we create handlers first, then
	// add routes chained to a Handler-created handler.
	cores := vault.TestCluster(t, []http.Handler{handler1, handler2, handler3}, coreConfig, true)
	for _, core := range cores {
		defer core.CloseListeners()
	}
	handler1.Handle("/", Handler(cores[0].Core))
	handler2.Handle("/", Handler(cores[1].Core))
	handler3.Handle("/", Handler(cores[2].Core))

	// make it easy to get access to the active
	core := cores[0].Core
	vault.TestWaitActive(t, core)

	root := cores[0].Root

	wg := sync.WaitGroup{}

	funcs := []string{"encrypt", "decrypt", "rotate", "change_min_version"}
	keys := []string{"test1", "test2", "test3"}

	hosts := []string{
		fmt.Sprintf("https://127.0.0.1:%d/v1/transit/", cores[1].Listeners[0].Address.Port),
		fmt.Sprintf("https://127.0.0.1:%d/v1/transit/", cores[2].Listeners[0].Address.Port),
	}

	transport := &http.Transport{
		TLSClientConfig: cores[0].TLSConfig,
	}
	http2.ConfigureTransport(transport)

	client := &http.Client{
		Transport: transport,
		CheckRedirect: func(*http.Request, []*http.Request) error {
			return fmt.Errorf("redirects not allowed in this test")
		},
	}

	//core.Logger().Printf("[TRACE] mounting transit")
	req, err := http.NewRequest("POST", fmt.Sprintf("https://127.0.0.1:%d/v1/sys/mounts/transit", cores[0].Listeners[0].Address.Port),
		bytes.NewBuffer([]byte("{\"type\": \"transit\"}")))
	if err != nil {
		t.Fatal(err)
	}
	req.Header.Set(AuthHeaderName, root)
	_, err = client.Do(req)
	if err != nil {
		t.Fatal(err)
	}
	//core.Logger().Printf("[TRACE] done mounting transit")

	var totalOps uint64
	var successfulOps uint64
	var key1ver int64 = 1
	var key2ver int64 = 1
	var key3ver int64 = 1
	var numWorkers uint64 = 50
	var numWorkersStarted uint64
	var waitLock sync.Mutex
	waitCond := sync.NewCond(&waitLock)

	// This is the goroutine loop
	doFuzzy := func(id int, parallel bool) {
		var myTotalOps uint64
		var mySuccessfulOps uint64
		var keyVer int64 = 1
		// Check for panics, otherwise notify we're done
		defer func() {
			if err := recover(); err != nil {
				core.Logger().Error("got a panic: %v", err)
				t.Fail()
			}
			atomic.AddUint64(&totalOps, myTotalOps)
			atomic.AddUint64(&successfulOps, mySuccessfulOps)
			wg.Done()
		}()

		// Holds the latest encrypted value for each key
		latestEncryptedText := map[string]string{}

		client := &http.Client{
			Transport: transport,
		}

		var chosenFunc, chosenKey, chosenHost string

		myRand := rand.New(rand.NewSource(int64(id) * 400))

		doReq := func(method, url string, body io.Reader) (*http.Response, error) {
			req, err := http.NewRequest(method, url, body)
			if err != nil {
				return nil, err
			}
			req.Header.Set(AuthHeaderName, root)
			resp, err := client.Do(req)
			if err != nil {
				return nil, err
			}
			return resp, nil
		}

		doResp := func(resp *http.Response) (*api.Secret, error) {
			if resp == nil {
				return nil, fmt.Errorf("nil response")
			}
			defer resp.Body.Close()

			// Make sure we weren't redirected
			if resp.StatusCode > 300 && resp.StatusCode < 400 {
				return nil, fmt.Errorf("got status code %d, resp was %#v", resp.StatusCode, *resp)
			}

			result := &api.Response{Response: resp}
			err := result.Error()
			if err != nil {
				return nil, err
			}

			secret, err := api.ParseSecret(result.Body)
			if err != nil {
				return nil, err
			}

			return secret, nil
		}

		for _, chosenHost := range hosts {
			for _, chosenKey := range keys {
				// Try to write the key to make sure it exists
				_, err := doReq("POST", chosenHost+"keys/"+fmt.Sprintf("%s-%t", chosenKey, parallel), bytes.NewBuffer([]byte("{}")))
				if err != nil {
					panic(err)
				}
			}
		}

		if !parallel {
			chosenHost = hosts[id%len(hosts)]
			chosenKey = fmt.Sprintf("key-%t-%d", parallel, id)

			_, err := doReq("POST", chosenHost+"keys/"+chosenKey, bytes.NewBuffer([]byte("{}")))
			if err != nil {
				panic(err)
			}
		}

		atomic.AddUint64(&numWorkersStarted, 1)

		waitCond.L.Lock()
		for atomic.LoadUint64(&numWorkersStarted) != numWorkers {
			waitCond.Wait()
		}
		waitCond.L.Unlock()
		waitCond.Broadcast()

		core.Logger().Trace("Starting goroutine", "id", id)

		startTime := time.Now()
		for {
			// Stop after 10 seconds
			if time.Now().Sub(startTime) > 10*time.Second {
				return
			}

			myTotalOps++

			// Pick a function and a key
			chosenFunc = funcs[myRand.Int()%len(funcs)]
			if parallel {
				chosenKey = fmt.Sprintf("%s-%t", keys[myRand.Int()%len(keys)], parallel)
				chosenHost = hosts[myRand.Int()%len(hosts)]
			}

			switch chosenFunc {
			// Encrypt our plaintext and store the result
			case "encrypt":
				//core.Logger().Printf("[TRACE] %s, %s, %d", chosenFunc, chosenKey, id)
				resp, err := doReq("POST", chosenHost+"encrypt/"+chosenKey, bytes.NewBuffer([]byte(fmt.Sprintf("{\"plaintext\": \"%s\"}", testPlaintextB64))))
				if err != nil {
					panic(err)
				}

				secret, err := doResp(resp)
				if err != nil {
					panic(err)
				}

				latest := secret.Data["ciphertext"].(string)
				if latest == "" {
					panic(fmt.Errorf("bad ciphertext"))
				}
				latestEncryptedText[chosenKey] = secret.Data["ciphertext"].(string)

				mySuccessfulOps++

			// Decrypt the ciphertext and compare the result
			case "decrypt":
				ct := latestEncryptedText[chosenKey]
				if ct == "" {
					mySuccessfulOps++
					continue
				}

				//core.Logger().Printf("[TRACE] %s, %s, %d", chosenFunc, chosenKey, id)
				resp, err := doReq("POST", chosenHost+"decrypt/"+chosenKey, bytes.NewBuffer([]byte(fmt.Sprintf("{\"ciphertext\": \"%s\"}", ct))))
				if err != nil {
					panic(err)
				}

				secret, err := doResp(resp)
				if err != nil {
					// This could well happen since the min version is jumping around
					if strings.Contains(err.Error(), keysutil.ErrTooOld) {
						mySuccessfulOps++
						continue
					}
					panic(err)
				}

				ptb64 := secret.Data["plaintext"].(string)
				pt, err := base64.StdEncoding.DecodeString(ptb64)
				if err != nil {
					panic(fmt.Errorf("got an error decoding base64 plaintext: %v", err))
				}
				if string(pt) != testPlaintext {
					panic(fmt.Errorf("got bad plaintext back: %s", pt))
				}

				mySuccessfulOps++

			// Rotate to a new key version
			case "rotate":
				//core.Logger().Printf("[TRACE] %s, %s, %d", chosenFunc, chosenKey, id)
				_, err := doReq("POST", chosenHost+"keys/"+chosenKey+"/rotate", bytes.NewBuffer([]byte("{}")))
				if err != nil {
					panic(err)
				}
				if parallel {
					switch chosenKey {
					case "test1":
						atomic.AddInt64(&key1ver, 1)
					case "test2":
						atomic.AddInt64(&key2ver, 1)
					case "test3":
						atomic.AddInt64(&key3ver, 1)
					}
				} else {
					keyVer++
				}

				mySuccessfulOps++

			// Change the min version, which also tests the archive functionality
			case "change_min_version":
				var latestVersion int64 = keyVer
				if parallel {
					switch chosenKey {
					case "test1":
						latestVersion = atomic.LoadInt64(&key1ver)
					case "test2":
						latestVersion = atomic.LoadInt64(&key2ver)
					case "test3":
						latestVersion = atomic.LoadInt64(&key3ver)
					}
				}

				setVersion := (myRand.Int63() % latestVersion) + 1

				//core.Logger().Printf("[TRACE] %s, %s, %d, new min version %d", chosenFunc, chosenKey, id, setVersion)

				_, err := doReq("POST", chosenHost+"keys/"+chosenKey+"/config", bytes.NewBuffer([]byte(fmt.Sprintf("{\"min_decryption_version\": %d}", setVersion))))
				if err != nil {
					panic(err)
				}

				mySuccessfulOps++
			}
		}
	}

	atomic.StoreUint64(&numWorkers, num)

	// Spawn some of these workers for 10 seconds
	for i := 0; i < int(atomic.LoadUint64(&numWorkers)); i++ {
		wg.Add(1)
		//core.Logger().Printf("[TRACE] spawning %d", i)
		go doFuzzy(i+1, parallel)
	}

	// Wait for them all to finish
	wg.Wait()

	if totalOps == 0 || totalOps != successfulOps {
		t.Fatalf("total/successful ops zero or mismatch: %d/%d; rpc: %t, parallel: %t, num %d", totalOps, successfulOps, rpc, parallel, num)
	}
	t.Logf("total operations tried: %d, total successful: %d; rpc: %t, parallel: %t, num %d", totalOps, successfulOps, rpc, parallel, num)
}
Code example #14
File: main.go  Project: rsc/tmp
func main() {
	flag.Parse()

	ready := make(chan struct{})
	go func() {
		<-ready

		var client helloworld.GreeterClient
		if *useGRPC {
			opts := []grpc.DialOption{
				grpc.WithBlock(),
				grpc.WithTimeout(3 * time.Second),
				grpc.WithInsecure(),
			}
			conn, err := grpc.Dial(*addr, opts...)
			if err != nil {
				log.Fatalf("grpc.Dial: %v", err)
			}
			client = helloworld.NewGreeterClient(conn)
		} else {
			t := (http.DefaultTransport.(*http.Transport))
			t.TLSClientConfig = &tls.Config{
				InsecureSkipVerify: true,
			}
			if *useHTTP2 {
				if err := http2.ConfigureTransport(t); err != nil {
					log.Fatal(err)
				}
			}
		}

		ctx := context.Background()

		for i := 0; i < *numRuns; i++ {
			randomBytes := make([]byte, *msgSize)
			n, err := rand.Read(randomBytes)
			if err != nil {
				log.Fatal(err)
			}
			if n != *msgSize {
				log.Fatal("didn't read enough bytes")
			}
			msg := string(randomBytes)

			t1 := time.Now()
			var proto string
			if *useGRPC {
				_, err = client.SayHello(ctx, &helloworld.HelloRequest{Name: msg})
				proto = "GRPC"
			} else {
				var resp *http.Response
				resp, err = http.Post("https://"+*addr, "text/plain", bytes.NewReader(randomBytes))
				proto = "HTTP"
				if resp != nil {
					proto = resp.Proto
					resp.Body.Close()
				}
			}
			if *verbose {
				fmt.Println()
			}
			fmt.Printf("%v\t%v\t%v\n", time.Now().Sub(t1), *latency, proto)
			if err != nil {
				log.Fatal(err)
			}
		}

		os.Exit(0)
	}()

	var server *grpc.Server
	if *useGRPC {
		server = grpc.NewServer()
		helloworld.RegisterGreeterServer(server, greeter{})
	}
	l, err := net.Listen("tcp", *addr)
	if err != nil {
		log.Fatal(err)
	}
	rate := Rate{Latency: *latency}
	l = &Listener{l, rate, rate}
	close(ready)
	if *useGRPC {
		log.Fatal(server.Serve(l))
	} else {
		var config tls.Config
		var err error
		if *useHTTP2 {
			config.NextProtos = []string{"h2"}
		}
		config.Certificates = make([]tls.Certificate, 1)
		config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
		if err != nil {
			log.Fatal(err)
		}
		srv := &http.Server{Addr: *addr, TLSConfig: &config, Handler: http.HandlerFunc(validate)}
		tlsListener := tls.NewListener(l, &config)
		log.Fatal(srv.Serve(tlsListener))
	}
}
Code example #15
File: clientutil.go  Project: marsmensch/blobstash
func setupHTTP2() {
	if err := http2.ConfigureTransport(transport.(*http.Transport)); err != nil {
		fmt.Printf("HTTP2 ERROR: %+v", err)
	}
}
Code example #16
File: main.go  Project: lhchavez/quark
func main() {
	rand.Seed(time.Now().UTC().UnixNano())
	flag.Parse()

	if err := loadContext(); err != nil {
		panic(err)
	}

	ctx := globalContext.Load().(*common.Context)
	expvar.Publish("config", &globalContext.Load().(*common.Context).Config)
	inputManager = common.NewInputManager(ctx)
	inputPath := path.Join(ctx.Config.Runner.RuntimePath, "input")
	go inputManager.PreloadInputs(
		inputPath,
		runner.NewRunnerCachedInputFactory(inputPath),
		&ioLock,
	)
	transport := &http.Transport{
		Dial: (&net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
		}).Dial,
		TLSHandshakeTimeout:   10 * time.Second,
		ExpectContinueTimeout: 1 * time.Second,
	}
	if !*insecure {
		cert, err := ioutil.ReadFile(ctx.Config.TLS.CertFile)
		if err != nil {
			panic(err)
		}
		certPool := x509.NewCertPool()
		certPool.AppendCertsFromPEM(cert)
		keyPair, err := tls.LoadX509KeyPair(
			ctx.Config.TLS.CertFile,
			ctx.Config.TLS.KeyFile,
		)
		transport.TLSClientConfig = &tls.Config{
			Certificates: []tls.Certificate{keyPair},
			RootCAs:      certPool,
			ClientAuth:   tls.RequireAndVerifyClientCert,
		}
		if err != nil {
			panic(err)
		}
		if err := http2.ConfigureTransport(transport); err != nil {
			panic(err)
		}
	}

	client := &http.Client{Transport: transport}

	baseURL, err := url.Parse(ctx.Config.Runner.GraderURL)
	if err != nil {
		panic(err)
	}

	setupMetrics(ctx)
	ctx.Log.Info("omegaUp runner ready to serve")

	go func() {
		for {
			results, err := runner.RunHostBenchmark(
				ctx,
				inputManager,
				&minijail,
				&ioLock,
			)
			if err != nil {
				ctx.Log.Error("Failed to run benchmark", "err", err)
			} else {
				ctx.Log.Info("Benchmark successful", "results", results)
			}
			gaugesUpdate(results)
			time.Sleep(time.Duration(1) * time.Minute)
		}
	}()

	var sleepTime float32 = 1

	for {
		if err := processRun(ctx, client, baseURL); err != nil {
			if err, ok := err.(net.Error); ok && err.Timeout() {
				// Timeouts are expected. Just retry.
				sleepTime = 1
				continue
			}
			ctx.Log.Error("error grading run", "err", err)
			// Randomized exponential backoff.
			time.Sleep(time.Duration(rand.Float32()*sleepTime) * time.Second)
			if sleepTime < 64 {
				sleepTime *= 2
			}
		} else {
			sleepTime = 1
		}
	}
}