func TestIsDatabaseCreatedOdbcDriver2(t *testing.T) {
	dbName := "cf-broker-testing.create-db"

	sqlClient, err := sql.Open("odbc", buildConnectionString(odbcPars))
	if err != nil {
		t.Fatalf("Failed to open odbc connection, %v", err)
	}
	defer sqlClient.Close()

	sqlClient.Exec("drop database [" + dbName + "]")

	logger = lagertest.NewTestLogger("process-controller")
	mssqlProv := NewMssqlProvisioner(logger, "odbc", odbcPars)
	err = mssqlProv.Init()
	if err != nil {
		t.Errorf("Provisioner init error, %v", err)
	}
	err = mssqlProv.CreateDatabase(dbName)
	if err != nil {
		t.Errorf("Database create error, %v", err)
	}

	// Act
	exists, err := mssqlProv.IsDatabaseCreated(dbName)

	// Assert
	if err != nil {
		t.Errorf("Check for database error, %v", err)
	}
	if !exists {
		t.Errorf("Check for database error, expected true, but received false")
	}

	defer sqlClient.Exec("drop database [" + dbName + "]")
}
Example #2
func start(creator RunnerCreator, network, addr string, argv ...string) *RunningGarden {
	tmpDir := filepath.Join(
		os.TempDir(),
		fmt.Sprintf("test-garden-%d", ginkgo.GinkgoParallelNode()),
	)

	if GraphRoot == "" {
		GraphRoot = filepath.Join(tmpDir, "graph")
	}

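	// Each Ginkgo parallel node gets its own graph directory so concurrently running nodes don't collide.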
	graphPath := filepath.Join(GraphRoot, fmt.Sprintf("node-%d", ginkgo.GinkgoParallelNode()))

	r := &RunningGarden{
		GraphRoot: GraphRoot,
		GraphPath: graphPath,
		tmpdir:    tmpDir,
		logger:    lagertest.NewTestLogger("garden-runner"),

		Client: client.New(connection.New(network, addr)),
	}

	c := cmd(tmpDir, graphPath, network, addr, GardenBin, BinPath, RootFSPath, argv...)
	r.process = ifrit.Invoke(creator.Create(c))
	r.Pid = c.Process.Pid

	return r
}
func TestDeleteDatabaseOdbcDriver(t *testing.T) {
	dbName := "cf-broker-testing.delete-db"

	sqlClient, err := sql.Open("odbc", buildConnectionString(odbcPars))
	if err != nil {
		t.Fatalf("Failed to open odbc connection, %v", err)
	}
	defer sqlClient.Close()

	sqlClient.Exec("drop database [" + dbName + "]")

	logger = lagertest.NewTestLogger("process-controller")
	mssqlProv := NewMssqlProvisioner(logger, "odbc", odbcPars)
	err = mssqlProv.Init()
	if err != nil {
		t.Errorf("Database init error, %v", err)
	}
	defer mssqlProv.Close()

	err = mssqlProv.CreateDatabase(dbName)
	if err != nil {
		t.Errorf("Database create error, %v", err)
	}

	// Act

	err = mssqlProv.DeleteDatabase(dbName)

	// Assert
	if err != nil {
		t.Errorf("Database delete error, %v", err)
	}

	row := sqlClient.QueryRow("SELECT count(*) FROM sys.databases where name = ?", dbName)
	dbCount := 0
	row.Scan(&dbCount)
	if dbCount != 0 {
		t.Errorf("Database %s was not deleted", dbName)
	}
}
Example #4
func start(network, addr string, argv ...string) *RunningGarden {
	tmpDir := filepath.Join(
		os.TempDir(),
		fmt.Sprintf("test-garden-%d", ginkgo.GinkgoParallelNode()),
	)
	Expect(os.MkdirAll(tmpDir, 0755)).To(Succeed())

	if GraphRoot == "" {
		GraphRoot = filepath.Join(tmpDir, "graph")
	}

	graphPath := filepath.Join(GraphRoot, fmt.Sprintf("node-%d", ginkgo.GinkgoParallelNode()))
	stateDirPath := filepath.Join(tmpDir, "state")
	depotPath := filepath.Join(tmpDir, "containers")
	snapshotsPath := filepath.Join(tmpDir, "snapshots")

	if err := os.MkdirAll(stateDirPath, 0755); err != nil {
		Expect(err).ToNot(HaveOccurred())
	}

	if err := os.MkdirAll(depotPath, 0755); err != nil {
		Expect(err).ToNot(HaveOccurred())
	}

	if err := os.MkdirAll(snapshotsPath, 0755); err != nil {
		Expect(err).ToNot(HaveOccurred())
	}

	MustMountTmpfs(graphPath)

	r := &RunningGarden{
		GraphRoot:     GraphRoot,
		GraphPath:     graphPath,
		StateDirPath:  stateDirPath,
		DepotPath:     depotPath,
		SnapshotsPath: snapshotsPath,
		tmpdir:        tmpDir,
		logger:        lagertest.NewTestLogger("garden-runner"),

		Client: client.New(connection.New(network, addr)),
	}

	c := cmd(stateDirPath, depotPath, snapshotsPath, graphPath, network, addr, GardenBin, BinPath, RootFSPath, argv...)
	r.runner = ginkgomon.New(ginkgomon.Config{
		Name:              "garden-linux",
		Command:           c,
		AnsiColorCode:     "31m",
		StartCheck:        "garden-linux.started",
		StartCheckTimeout: 30 * time.Second,
	})

	r.process = ifrit.Invoke(r.runner)
	r.Pid = c.Process.Pid

	return r
}
Example #5
func NewETCDHelper(serializationFormat *format.Format, cryptor encryption.Cryptor, client etcd.StoreClient) *ETCDHelper {
	logger := lagertest.NewTestLogger("etcd-helper")

	return &ETCDHelper{
		client:     client,
		format:     serializationFormat,
		serializer: format.NewSerializer(cryptor),
		logger:     logger,
	}
}
Example #6
func generateConfig(natsPort, statusPort, proxyPort uint16) *config.Config {
	logger := lagertest.NewTestLogger("test")
	c := config.DefaultConfig(logger)

	c.Port = proxyPort
	c.Index = 2
	c.TraceKey = "my_trace_key"

	// Hardcode the IP to localhost to avoid leaving the machine while running tests
	c.Ip = "127.0.0.1"

	c.StartResponseDelayInterval = 10 * time.Millisecond
	c.PublishStartMessageIntervalInSeconds = 10
	c.PruneStaleDropletsInterval = 0
	c.DropletStaleThreshold = 0
	c.PublishActiveAppsInterval = 0
	c.Zone = "z1"

	c.EndpointTimeout = 500 * time.Millisecond

	c.Status = config.StatusConfig{
		Port: statusPort,
		User: "******",
		Pass: "******",
	}

	c.Nats = []config.NatsConfig{
		{
			Host: "localhost",
			Port: natsPort,
			User: "******",
			Pass: "******",
		},
	}

	c.Logging = config.LoggingConfig{
		File:          "/dev/stdout",
		Level:         "info",
		MetronAddress: "localhost:3457",
		JobName:       "router_test_z1_0",
	}

	c.OAuth = token_fetcher.OAuthConfig{
		TokenEndpoint: "http://localhost",
		Port:          8080,
	}

	c.RouteServiceSecret = "kCvXxNMB0JO2vinxoru9Hg=="

	return c
}
Example #7
func Start(bin, iodaemonBin, nstarBin string, argv ...string) *RunningGarden {
	network := "unix"
	addr := fmt.Sprintf("/tmp/garden_%d.sock", GinkgoParallelNode())
	tmpDir := filepath.Join(
		os.TempDir(),
		fmt.Sprintf("test-garden-%d", ginkgo.GinkgoParallelNode()),
	)

	if GraphRoot == "" {
		GraphRoot = filepath.Join(tmpDir, "graph")
	}

	graphPath := filepath.Join(GraphRoot, fmt.Sprintf("node-%d", ginkgo.GinkgoParallelNode()))
	depotDir := filepath.Join(tmpDir, "containers")

	MustMountTmpfs(graphPath)

	r := &RunningGarden{
		DepotDir: depotDir,

		GraphRoot: GraphRoot,
		GraphPath: graphPath,
		tmpdir:    tmpDir,
		logger:    lagertest.NewTestLogger("garden-runner"),

		Client: client.New(connection.New(network, addr)),
	}

	c := cmd(tmpDir, depotDir, graphPath, network, addr, bin, iodaemonBin, nstarBin, TarPath, RootFSPath, argv...)
	r.process = ifrit.Invoke(&ginkgomon.Runner{
		Name:              "guardian",
		Command:           c,
		AnsiColorCode:     "31m",
		StartCheck:        "guardian.started",
		StartCheckTimeout: 30 * time.Second,
	})

	r.Pid = c.Process.Pid

	return r
}
func TestIsDatabaseCreatedOdbcDriver(t *testing.T) {
	dbName := "cf-broker-testing.nonexisting-db"

	logger = lagertest.NewTestLogger("process-controller")
	mssqlProv := NewMssqlProvisioner(logger, "odbc", odbcPars)
	err := mssqlProv.Init()
	if err != nil {
		t.Errorf("Provisioner init error, %v", err)
	}

	// Act
	exists, err := mssqlProv.IsDatabaseCreated(dbName)

	// Assert
	if err != nil {
		t.Errorf("Check for database error, %v", err)
	}
	if exists {
		t.Errorf("Check for database error, expected false, but received true")
	}
}
func TestCreateUserOdbcDriver(t *testing.T) {
	dbName := "cf-broker-testing.create-db"
	userName := "cf-broker-testing.create-user"

	sqlClient, err := sql.Open("odbc", buildConnectionString(odbcPars))
	if err != nil {
		t.Fatalf("Failed to open odbc connection, %v", err)
	}
	defer sqlClient.Close()

	sqlClient.Exec("drop database [" + dbName + "]")

	logger = lagertest.NewTestLogger("process-controller")
	mssqlProv := NewMssqlProvisioner(logger, "odbc", odbcPars)
	err = mssqlProv.Init()

	if err != nil {
		t.Errorf("Provisioner init error, %v", err)
	}

	err = mssqlProv.CreateDatabase(dbName)
	if err != nil {
		t.Errorf("Database create error, %v", err)
	}

	// Act
	err = mssqlProv.CreateUser(dbName, userName, "passwordAa_0")

	// Assert
	if err != nil {
		t.Errorf("User create error, %v", err)
	}

	defer sqlClient.Exec("drop database [" + dbName + "]")

	row := sqlClient.QueryRow(fmt.Sprintf("select count(*) from [%s].sys.database_principals where name = ?", dbName), userName)
	dbCount := 0
	row.Scan(&dbCount)
	if dbCount == 0 {
		t.Errorf("User was not created")
	}
}
func TestCreateDatabaseMssqlDriver(t *testing.T) {
	dbName := "cf-broker-testing.create-db"

	sqlClient, err := sql.Open("mssql", buildConnectionString(mssqlPars))
	if err != nil {
		t.Fatalf("Failed to open mssql connection, %v", err)
	}
	defer sqlClient.Close()

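	// sql.Open only validates its arguments; Ping actually contacts the server, so skip the test when it is unreachable.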
	err = sqlClient.Ping()
	if err != nil {
		t.Skipf("Could not connect with pure mssql driver to %v", mssqlPars)
		return
	}

	sqlClient.Exec("drop database [" + dbName + "]")

	logger = lagertest.NewTestLogger("process-controller")
	mssqlProv := NewMssqlProvisioner(logger, "mssql", mssqlPars)
	err = mssqlProv.Init()
	if err != nil {
		t.Errorf("Provisioner init error, %v", err)
	}

	// Act
	err = mssqlProv.CreateDatabase(dbName)

	// Assert
	if err != nil {
		t.Errorf("Database create error, %v", err)
	}
	defer sqlClient.Exec("drop database [" + dbName + "]")

	row := sqlClient.QueryRow("SELECT count(*) FROM sys.databases where name = ?", dbName)
	dbCount := 0
	row.Scan(&dbCount)
	if dbCount == 0 {
		t.Errorf("Database was not created")
	}
}
	. "github.com/onsi/gomega"
	"github.com/pivotal-golang/lager/lagertest"
)

var _ = Describe("Middleware", func() {
	var (
		client       *http.Client
		ts           *httptest.Server
		dummyHandler http.HandlerFunc
		testSink     *lagertest.TestSink
	)

	BeforeEach(func() {

		// logger
		logger := lagertest.NewTestLogger("dummy-api")

		// dummy handler
		dummyHandler = func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprintf(w, "Dummy handler")
		}

		// wrap dummy handler in logwrap
		dummyHandler = handlers.LogWrap(dummyHandler, logger)

		// test server
		ts = httptest.NewServer(dummyHandler)

		client = &http.Client{}

		// test sink
Example #12
	Describe("ConfigureHost", func() {
		var (
			vethCreator    *fakedevices.FaveVethCreator
			linkConfigurer *fakedevices.FakeLink
			bridger        *fakedevices.FakeBridge

			configurer     *network.NetworkConfigurer
			existingBridge *net.Interface
			config         *network.HostConfig
		)

		BeforeEach(func() {
			vethCreator = &fakedevices.FaveVethCreator{}
			linkConfigurer = &fakedevices.FakeLink{AddIPReturns: make(map[string]error)}
			bridger = &fakedevices.FakeBridge{}
			configurer = &network.NetworkConfigurer{Veth: vethCreator, Link: linkConfigurer, Bridge: bridger, Logger: lagertest.NewTestLogger("test")}

			existingBridge = &net.Interface{Name: "bridge"}

			config = &network.HostConfig{}

		})

		JustBeforeEach(func() {
			linkConfigurer.InterfaceByNameFunc = func(name string) (*net.Interface, bool, error) {
				if name == "bridge" {
					return existingBridge, true, nil
				}

				return nil, false, nil
			}
Example #13
	peerAddr = "127.0.0.1:1234"
	drain = make(chan struct{})

	fakeEngine = new(enginefakes.FakeEngine)
	fakeWorkerClient = new(workerfakes.FakeClient)

	fakeSchedulerFactory = new(jobserverfakes.FakeSchedulerFactory)

	var err error

	cliDownloadsDir, err = ioutil.TempDir("", "cli-downloads")
	Expect(err).NotTo(HaveOccurred())

	constructedEventHandler = &fakeEventHandlerFactory{}

	logger := lagertest.NewTestLogger("callbacks")

	sink = lager.NewReconfigurableSink(lager.NewWriterSink(GinkgoWriter, lager.DEBUG), lager.DEBUG)
	logger.RegisterSink(sink)

	handler, err := api.NewHandler(
		logger,

		externalURL,

		wrappa.NewAPIAuthWrappa(true, authValidator, userContextReader),

		fakeTokenGenerator,
		providerFactory,
		oAuthBaseURL,
Example #14
var _ = Describe("HealthRunner", func() {

	var (
		healthPort     int
		logger         *lagertest.TestLogger
		healthRunner   health.Runner
		healthProcess  ifrit.Process
		startupTimeout = 5 * time.Second
	)

	BeforeEach(func() {

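		// Offset the port by the Ginkgo parallel node index so concurrently running test nodes don't clash.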
		healthPort = 10000 + GinkgoParallelNode()

		logger = lagertest.NewTestLogger("HealthRunner Test")
		healthRunner = health.NewRunner(uint(healthPort), logger)
		healthProcess = ifrit.Invoke(healthRunner)
		isReady := healthProcess.Ready()
		Eventually(isReady, startupTimeout).Should(BeClosed(), "Error starting Health Runner")
	})

	AfterEach(func() {
		healthProcess.Signal(os.Kill)
		err := <-healthProcess.Wait()
		Expect(err).ToNot(HaveOccurred())
	})

	Context("when the runner is running", func() {
		It("accepts connections on health port", func() {
			conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", healthPort))
Example #15
	"time"

	"github.com/onsi/gomega/ghttp"
)

var _ = Describe("backend", func() {
	var server *ghttp.Server
	var dotNetBackend garden.Backend
	var serverUri *url.URL
	var logger *lagertest.TestLogger
	var client *dotnet.Client

	BeforeEach(func() {
		server = ghttp.NewServer()
		logger = lagertest.NewTestLogger("backend")
		serverUri, _ = url.Parse(server.URL())
		client = dotnet.NewClient(logger, serverUri)
		dotNetBackend, _ = backend.NewDotNetBackend(client, logger)
	})

	AfterEach(func() {
		// shut down the server between tests
		if server.HTTPTestServer != nil {
			server.Close()
		}
	})

	Describe("Capacity", func() {
		BeforeEach(func() {
			server.AppendHandlers(
var _ = Describe("RoutesHandler", func() {
	var (
		routesHandler    *handlers.RoutesHandler
		request          *http.Request
		responseRecorder *httptest.ResponseRecorder
		database         *fake_db.FakeDB
		logger           *lagertest.TestLogger
		validator        *fake_validator.FakeRouteValidator
		token            *fake_token.FakeToken
	)

	BeforeEach(func() {
		database = &fake_db.FakeDB{}
		validator = &fake_validator.FakeRouteValidator{}
		token = &fake_token.FakeToken{}
		logger = lagertest.NewTestLogger("routing-api-test")
		routesHandler = handlers.NewRoutesHandler(token, 50, validator, database, logger)
		responseRecorder = httptest.NewRecorder()
	})

	Describe(".List", func() {
		It("response with a 200 OK", func() {
			request = handlers.NewTestRequest("")

			routesHandler.List(responseRecorder, request)

			Expect(responseRecorder.Code).To(Equal(http.StatusOK))
		})

		It("checks for route.admin scope", func() {
			request = handlers.NewTestRequest("")
Example #17
	. "github.com/sclevine/agouti/matchers"

	"github.com/cloudfoundry/gunk/urljoiner"
	"github.com/concourse/atc"
	"github.com/concourse/atc/db"
	"github.com/concourse/atc/event"
)

var _ = Describe("One-off Builds", func() {
	var atcProcess ifrit.Process
	var dbListener *pq.Listener
	var atcPort uint16
	var pipelineDBFactory db.PipelineDBFactory

	BeforeEach(func() {
		dbLogger := lagertest.NewTestLogger("test")
		postgresRunner.Truncate()
		dbConn = postgresRunner.Open()
		dbListener = pq.NewListener(postgresRunner.DataSourceName(), time.Second, time.Minute, nil)
		bus := db.NewNotificationsBus(dbListener, dbConn)
		sqlDB = db.NewSQL(dbLogger, dbConn, bus)
		pipelineDBFactory = db.NewPipelineDBFactory(dbLogger, dbConn, bus, sqlDB)

		atcProcess, atcPort = startATC(atcBin, 1)
	})

	AfterEach(func() {
		ginkgomon.Interrupt(atcProcess)

		Expect(dbConn.Close()).To(Succeed())
		Expect(dbListener.Close()).To(Succeed())
	var pipelineDBFactory db.PipelineDBFactory

	var pipelinesDB *fakes.FakePipelinesDB

	BeforeEach(func() {
		postgresRunner.Truncate()

		dbConn = db.Wrap(postgresRunner.Open())

		listener = pq.NewListener(postgresRunner.DataSourceName(), time.Second, time.Minute, nil)
		Eventually(listener.Ping, 5*time.Second).ShouldNot(HaveOccurred())
		bus := db.NewNotificationsBus(listener, dbConn)

		pipelinesDB = new(fakes.FakePipelinesDB)

		pipelineDBFactory = db.NewPipelineDBFactory(lagertest.NewTestLogger("test"), dbConn, bus, pipelinesDB)
	})

	AfterEach(func() {
		err := dbConn.Close()
		Expect(err).NotTo(HaveOccurred())

		err = listener.Close()
		Expect(err).NotTo(HaveOccurred())
	})

	Describe("default pipeline", func() {
		It("is the first one returned from the DB", func() {
			savedPipelineOne := db.SavedPipeline{
				ID: 1,
				Pipeline: db.Pipeline{
Example #19
		resourceLimits garden.ResourceLimits
		server         *ghttp.Server
		hijacker       HijackStreamer
		network        string
		address        string
	)

	BeforeEach(func() {
		server = ghttp.NewServer()
		network = "tcp"
		address = server.HTTPTestServer.Listener.Addr().String()
		hijacker = NewHijackStreamer(network, address)
	})

	JustBeforeEach(func() {
		connection = NewWithHijacker(network, address, hijacker, lagertest.NewTestLogger("test-connection"))
	})

	BeforeEach(func() {
		rlimits := &garden.ResourceLimits{
			As:         uint64ptr(1),
			Core:       uint64ptr(2),
			Cpu:        uint64ptr(4),
			Data:       uint64ptr(5),
			Fsize:      uint64ptr(6),
			Locks:      uint64ptr(7),
			Memlock:    uint64ptr(8),
			Msgqueue:   uint64ptr(9),
			Nice:       uint64ptr(10),
			Nofile:     uint64ptr(11),
			Nproc:      uint64ptr(12),
Example #20
				ContainerSpec: garden.ContainerSpec{
					Handle:    "some-handle",
					GraceTime: time.Second * 1,
				},
			},
			fake_port_pool.New(1000),
			fakeRunner,
			fakeCgroups,
			fakeQuotaManager,
			fakeBandwidthManager,
			new(fake_process_tracker.FakeProcessTracker),
			new(networkFakes.FakeFilter),
			new(fake_iptables_manager.FakeIPTablesManager),
			new(fake_network_statisticser.FakeNetworkStatisticser),
			fakeOomWatcher,
			lagertest.NewTestLogger("linux-container-limits-test"),
		)
	})

	Describe("Limiting bandwidth", func() {
		limits := garden.BandwidthLimits{
			RateInBytesPerSecond:      128,
			BurstRateInBytesPerSecond: 256,
		}

		It("sets the limit via the bandwidth manager with the new limits", func() {
			err := container.LimitBandwidth(limits)
			Expect(err).ToNot(HaveOccurred())

			Expect(fakeBandwidthManager.EnforcedLimits).To(ContainElement(limits))
		})
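// uint64ptr returns a pointer to n, handy for populating the optional *uint64 fields of garden.ResourceLimits.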
func uint64ptr(n uint64) *uint64 {
	return &n
}

var _ = Describe("container", func() {
	var server *ghttp.Server
	var container garden.Container
	var logger *lagertest.TestLogger
	var client *dotnet.Client
	var externalIP string

	BeforeEach(func() {
		server = ghttp.NewServer()
		externalIP = "10.11.12.13"
		logger = lagertest.NewTestLogger("container")
		serverUri, _ := url.Parse(server.URL())
		client = dotnet.NewClient(logger, serverUri)
		container = netContainer.NewContainer(client, "containerhandle", logger)
	})

	AfterEach(func() {
		// shut down the server between tests
		if server.HTTPTestServer != nil {
			server.Close()
		}
	})

	Describe("Info", func() {
		Describe("for a valid handle", func() {
			BeforeEach(func() {
Example #22
var _ = Describe("SQL DB Teams", func() {
	var dbConn db.Conn
	var listener *pq.Listener

	var database db.DB

	BeforeEach(func() {
		postgresRunner.Truncate()

		dbConn = db.Wrap(postgresRunner.Open())
		listener = pq.NewListener(postgresRunner.DataSourceName(), time.Second, time.Minute, nil)

		Eventually(listener.Ping, 5*time.Second).ShouldNot(HaveOccurred())
		bus := db.NewNotificationsBus(listener, dbConn)

		database = db.NewSQL(lagertest.NewTestLogger("test"), dbConn, bus)

		database.DeleteTeamByName(atc.DefaultTeamName)
	})

	AfterEach(func() {
		err := dbConn.Close()
		Expect(err).NotTo(HaveOccurred())

		err = listener.Close()
		Expect(err).NotTo(HaveOccurred())
	})

	Describe("the default team", func() {
		Describe("it exists", func() {
			BeforeEach(func() {
Example #23
	dummyRequest *http.Request
	lh           light.Handler
)

var _ = Describe("Light", func() {
	var expectedLightState light.LightState
	var expectedReturn []byte
	var err error

	BeforeEach(func() {
		expectedLightState = light.LightState{
			StateKnown: false,
			LightOn:    false,
		}

		fakeLogger = lagertest.NewTestLogger("light test")
		fakeGpio = new(gpio_fakes.FakeGpio)
		fakeResponseWriter = new(test_helpers_fakes.FakeResponseWriter)

		lh = light.NewHandler(
			fakeLogger,
			fakeGpio,
			gpioLightPin,
		)

		dummyRequest = new(http.Request)
	})

	Describe("Reading state", func() {
		Context("When reading light state returns with error", func() {
			BeforeEach(func() {
Example #24
func (r *Runner) Run(signals <-chan os.Signal, ready chan<- struct{}) error {
	logger := lagertest.NewTestLogger("garden-runner")

	if err := os.MkdirAll(r.tmpdir, 0755); err != nil {
		return err
	}

	depotPath := filepath.Join(r.tmpdir, "containers")
	snapshotsPath := filepath.Join(r.tmpdir, "snapshots")

	if err := os.MkdirAll(depotPath, 0755); err != nil {
		return err
	}

	if err := os.MkdirAll(snapshotsPath, 0755); err != nil {
		return err
	}

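	// appendDefaultFlag appends the flag (and its value, when non-empty) only if the caller has not already supplied that flag in r.argv.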
	var appendDefaultFlag = func(ar []string, key, value string) []string {
		for _, a := range r.argv {
			if a == key {
				return ar
			}
		}

		if value != "" {
			return append(ar, key, value)
		} else {
			return append(ar, key)
		}
	}

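	// hasFlag reports whether the given flag already appears in the argument list.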
	var hasFlag = func(ar []string, key string) bool {
		for _, a := range ar {
			if a == key {
				return true
			}
		}

		return false
	}

	gardenArgs := make([]string, len(r.argv))
	copy(gardenArgs, r.argv)

	gardenArgs = appendDefaultFlag(gardenArgs, "--listenNetwork", r.network)
	gardenArgs = appendDefaultFlag(gardenArgs, "--listenAddr", r.addr)
	gardenArgs = appendDefaultFlag(gardenArgs, "--bin", r.binPath)
	if r.rootFSPath != "" { //rootfs is an optional parameter
		gardenArgs = appendDefaultFlag(gardenArgs, "--rootfs", r.rootFSPath)
	}
	gardenArgs = appendDefaultFlag(gardenArgs, "--depot", depotPath)
	gardenArgs = appendDefaultFlag(gardenArgs, "--snapshots", snapshotsPath)
	gardenArgs = appendDefaultFlag(gardenArgs, "--graph", r.graphPath)
	gardenArgs = appendDefaultFlag(gardenArgs, "--logLevel", "debug")
	gardenArgs = appendDefaultFlag(gardenArgs, "--networkPool", fmt.Sprintf("10.250.%d.0/24", ginkgo.GinkgoParallelNode()))
	gardenArgs = appendDefaultFlag(gardenArgs, "--portPoolStart", strconv.Itoa(51000+(1000*ginkgo.GinkgoParallelNode())))
	gardenArgs = appendDefaultFlag(gardenArgs, "--portPoolSize", "1000")
	gardenArgs = appendDefaultFlag(gardenArgs, "--tag", strconv.Itoa(ginkgo.GinkgoParallelNode()))

	btrfsIsSupported := strings.EqualFold(os.Getenv("BTRFS_SUPPORTED"), "true")
	hasDisabledFlag := hasFlag(gardenArgs, "-disableQuotas=true")

	if !btrfsIsSupported && !hasDisabledFlag {
		// Disable quotas when BTRFS is not supported
		gardenArgs = appendDefaultFlag(gardenArgs, "--disableQuotas", "")
	}

	gardenArgs = appendDefaultFlag(gardenArgs, "--debugAddr", fmt.Sprintf(":808%d", ginkgo.GinkgoParallelNode()))

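	// signal records the most recent signal received so the Cleanup callback knows whether shutdown was triggered by SIGQUIT.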
	var signal os.Signal

	r.Command = exec.Command(r.bin, gardenArgs...)

	process := ifrit.Invoke(&ginkgomon.Runner{
		Name:              "garden-linux",
		Command:           r.Command,
		AnsiColorCode:     "31m",
		StartCheck:        "garden-linux.started",
		StartCheckTimeout: 30 * time.Second,
		Cleanup: func() {
			if signal == syscall.SIGQUIT {
				logger.Info("cleanup-subvolumes")

				// remove contents of subvolumes before deleting the subvolume
				if err := os.RemoveAll(r.graphPath); err != nil {
					logger.Error("remove graph", err)
				}

				if btrfsIsSupported {
					// need to remove subvolumes before cleaning graphpath
					subvolumesOutput, err := exec.Command("btrfs", "subvolume", "list", r.graphRoot).CombinedOutput()
					logger.Debug(fmt.Sprintf("listing-subvolumes: %s", string(subvolumesOutput)))
					if err != nil {
						logger.Fatal("listing-subvolumes-error", err)
					}
					for _, line := range strings.Split(string(subvolumesOutput), "\n") {
						fields := strings.Fields(line)
						if len(fields) < 1 {
							continue
						}
						subvolumeRelativePath := fields[len(fields)-1]
						subvolumeAbsolutePath := filepath.Join(r.graphRoot, subvolumeRelativePath)
						if strings.Contains(subvolumeAbsolutePath, r.graphPath) {
							if b, err := exec.Command("btrfs", "subvolume", "delete", subvolumeAbsolutePath).CombinedOutput(); err != nil {
								logger.Fatal(fmt.Sprintf("deleting-subvolume: %s", string(b)), err)
							}
						}
					}

					if err := os.RemoveAll(r.graphPath); err != nil {
						logger.Error("remove graph again", err)
					}
				}

				logger.Info("cleanup-tempdirs")
				if err := os.RemoveAll(r.tmpdir); err != nil {
					logger.Error("cleanup-tempdirs-failed", err, lager.Data{"tmpdir": r.tmpdir})
				} else {
					logger.Info("tempdirs-removed")
				}
			}
		},
	})

	close(ready)

	for {
		select {
		case signal = <-signals:
			// SIGQUIT means clean up: destroy the containers, stop the garden process (via SIGTERM), and remove the temporary directories
			// SIGKILL, SIGTERM and SIGINT are passed through to the garden process
			if signal == syscall.SIGQUIT {
				logger.Info("received-signal SIGQUIT")
				if err := r.destroyContainers(); err != nil {
					logger.Error("destroy-containers-failed", err)
					return err
				}
				logger.Info("destroyed-containers")
				process.Signal(syscall.SIGTERM)
			} else {
				logger.Info("received-signal", lager.Data{"signal": signal})
				process.Signal(signal)
			}

		case waitErr := <-process.Wait():
			logger.Info("process-exited")
			return waitErr
		}
	}
}
Example #25
	lastLogLine := func() lager.LogFormat {
		if len(brokerLogger.Logs()) == 0 {
			// better way to raise error?
			err := errors.New("expected some log lines but there were none!")
			Expect(err).NotTo(HaveOccurred())
		}

		return brokerLogger.Logs()[0]
	}

	BeforeEach(func() {
		fakeServiceBroker = &fakes.FakeServiceBroker{
			InstanceLimit: 3,
		}
		brokerLogger = lagertest.NewTestLogger("broker-api")
		brokerAPI = brokerapi.New(fakeServiceBroker, brokerLogger, credentials)
	})

	Describe("respose headers", func() {
		makeRequest := func() *httptest.ResponseRecorder {
			recorder := httptest.NewRecorder()
			request, _ := http.NewRequest("GET", "/v2/catalog", nil)
			request.SetBasicAuth(credentials.Username, credentials.Password)
			brokerAPI.ServeHTTP(recorder, request)
			return recorder
		}

		It("has a Content-Type header", func() {
			response := makeRequest()
Example #26
	)

	BeforeEach(func() {

		fakeVerifier1 = new(afakes.FakeVerifier)
		fakeVerifier2 = new(afakes.FakeVerifier)

		httpClient = &http.Client{}
		verifierBasket = NewVerifierBasket(fakeVerifier1, fakeVerifier2)
	})

	It("fails to verify if none of the passed in verifiers return true", func() {
		fakeVerifier1.VerifyReturns(false, nil)
		fakeVerifier2.VerifyReturns(false, nil)

		result, err := verifierBasket.Verify(lagertest.NewTestLogger("test"), httpClient)
		Expect(err).ToNot(HaveOccurred())
		Expect(result).To(BeFalse())
	})

	It("verifies if any of the embedded verifiers return true", func() {
		fakeVerifier1.VerifyReturns(false, nil)
		fakeVerifier2.VerifyReturns(true, nil)

		result, err := verifierBasket.Verify(lagertest.NewTestLogger("test"), httpClient)
		Expect(err).ToNot(HaveOccurred())
		Expect(result).To(BeTrue())

		fakeVerifier1.VerifyReturns(true, nil)
		fakeVerifier2.VerifyReturns(false, nil)
	. "github.com/onsi/gomega"
	"github.com/pivotal-golang/lager/lagertest"
)

var _ = Describe("RouterGroupsHandler", func() {

	var (
		routerGroupHandler *handlers.RouterGroupsHandler
		request            *http.Request
		responseRecorder   *httptest.ResponseRecorder
		token              *fake_token.FakeToken
		logger             *lagertest.TestLogger
	)

	BeforeEach(func() {
		logger = lagertest.NewTestLogger("test-router-group")
		token = &fake_token.FakeToken{}
		routerGroupHandler = handlers.NewRouteGroupsHandler(token, logger)
		responseRecorder = httptest.NewRecorder()
	})

	Describe("ListRouterGroups", func() {
		It("responds with 200 OK and returns default router group details", func() {
			var err error
			request, err = http.NewRequest("GET", routing_api.ListRouterGroups, nil)
			Expect(err).NotTo(HaveOccurred())
			routerGroupHandler.ListRouterGroups(responseRecorder, request)
			Expect(responseRecorder.Code).To(Equal(http.StatusOK))
			payload := responseRecorder.Body.String()
			Expect(payload).To(MatchJSON(`[
			{
Example #28
	"github.com/concourse/atc/engine/fakes"
)

var _ = Describe("DBEngine", func() {
	var (
		logger lager.Logger

		fakeEngineA *fakes.FakeEngine
		fakeEngineB *fakes.FakeEngine
		fakeBuildDB *fakes.FakeBuildDB

		dbEngine Engine
	)

	BeforeEach(func() {
		logger = lagertest.NewTestLogger("test")

		fakeEngineA = new(fakes.FakeEngine)
		fakeEngineA.NameReturns("fake-engine-a")

		fakeEngineB = new(fakes.FakeEngine)
		fakeEngineB.NameReturns("fake-engine-b")

		fakeBuildDB = new(fakes.FakeBuildDB)

		dbEngine = NewDBEngine(Engines{fakeEngineA, fakeEngineB}, fakeBuildDB)
	})

	Describe("CreateBuild", func() {
		var (
			build db.Build
Example #29
	Expect(workPoolCreateError).ToNot(HaveOccurred())

	encryptionKey, err := encryption.NewKey("label", "passphrase")
	Expect(err).NotTo(HaveOccurred())
	keyManager, err := encryption.NewKeyManager(encryptionKey, nil)
	Expect(err).NotTo(HaveOccurred())
	cryptor = encryption.NewCryptor(keyManager, rand.Reader)
})

var _ = AfterSuite(func() {
	etcdRunner.Stop()
	consulRunner.Stop()
})

var _ = BeforeEach(func() {
	logger = lagertest.NewTestLogger("test")

	fakeAuctioneerClient = new(auctioneerfakes.FakeClient)
	etcdRunner.Reset()

	consulRunner.Reset()
	consulSession = consulRunner.NewSession("a-session")

	etcdClient := etcdRunner.Client()
	etcdClient.SetConsistency(etcdclient.STRONG_CONSISTENCY)
	storeClient = etcd.NewStoreClient(etcdClient)
	fakeStoreClient = &fakes.FakeStoreClient{}
	consulHelper = test_helpers.NewConsulHelper(consulSession)
	serviceClient = bbs.NewServiceClient(consulSession, clock)
	fakeTaskCompletionClient = new(faketaskworkpool.FakeTaskCompletionClient)
	fakeRepClientFactory = new(repfakes.FakeClientFactory)
Example #30
		Expect(err).ToNot(HaveOccurred())

		config = test_util.SpecConfig(statusPort, proxyPort, natsPort)
		config.EnableSSL = true
		config.SSLPort = 4443 + uint16(gConfig.GinkgoConfig.ParallelNode)
		config.SSLCertificate = cert
		config.CipherSuites = []uint16{tls.TLS_RSA_WITH_AES_256_CBC_SHA}
		config.EnablePROXY = true

		// set pid file
		f, err := ioutil.TempFile("", "gorouter-test-pidfile-")
		Expect(err).ToNot(HaveOccurred())
		config.PidFile = f.Name()

		mbusClient = natsRunner.MessageBus
		logger = lagertest.NewTestLogger("router-test")
		registry = rregistry.NewRouteRegistry(logger, config, new(fakes.FakeRouteRegistryReporter))
		varz = vvarz.NewVarz(registry)
		logcounter := schema.NewLogCounter()
		proxy := proxy.NewProxy(proxy.ProxyArgs{
			EndpointTimeout: config.EndpointTimeout,
			Logger:          logger,
			Ip:              config.Ip,
			TraceKey:        config.TraceKey,
			Registry:        registry,
			Reporter:        varz,
			AccessLogger:    &access_log.NullAccessLogger{},
		})

		router, err = NewRouter(logger, config, proxy, mbusClient, registry, varz, logcounter, nil)