Example #1
0
// TestDeploy verifies that a valid machine deployment is accepted and that
// the compiled spec is persisted to the cluster row in the database.
func TestDeploy(t *testing.T) {
	conn := db.New()
	s := server{dbConn: conn}

	deployment := `
	{"Machines":[
		{"Provider":"Amazon",
		"Role":"Master",
		"Size":"m4.large"
	}, {"Provider":"Amazon",
		"Role":"Worker",
		"Size":"m4.large"
	}]}`

	_, err := s.Deploy(context.Background(),
		&pb.DeployRequest{Deployment: deployment})
	assert.NoError(t, err)

	// Read back the spec that Deploy committed.
	var storedSpec string
	conn.Transact(func(view db.Database) error {
		clst, getErr := view.GetCluster()
		assert.NoError(t, getErr)
		storedSpec = clst.Spec
		return nil
	})

	// The stored spec must compile to the same stitch as the request.
	expected, err := stitch.FromJSON(deployment)
	assert.NoError(t, err)

	got, err := stitch.FromJSON(storedSpec)
	assert.NoError(t, err)

	assert.Equal(t, expected, got)
}
Example #2
0
// TestVagrantDeployment verifies that deploying to the Vagrant provider
// returns the in-development warning error, while the spec is still
// committed to the database so the stitch continues to run.
func TestVagrantDeployment(t *testing.T) {
	conn := db.New()
	s := server{dbConn: conn}

	vagrantDeployment := `
	{"Machines":[
		{"Provider":"Vagrant",
		"Role":"Master",
		"Size":"m4.large"
	}, {"Provider":"Vagrant",
		"Role":"Worker",
		"Size":"m4.large"
	}]}`
	vagrantErrMsg := "The Vagrant provider is in development." +
		" The stitch will continue to run, but" +
		" probably won't work correctly."

	_, err := s.Deploy(context.Background(),
		&pb.DeployRequest{Deployment: vagrantDeployment})

	// assert.Error's third argument is only msgAndArgs (text printed on
	// failure), so the original call never actually checked the message.
	// EqualError asserts the error text matches exactly.
	assert.EqualError(t, err, vagrantErrMsg)

	// Even though Deploy errored, the spec should have been committed.
	var spec string
	conn.Transact(func(view db.Database) error {
		clst, err := view.GetCluster()
		assert.NoError(t, err)
		spec = clst.Spec
		return nil
	})

	exp, err := stitch.FromJSON(vagrantDeployment)
	assert.NoError(t, err)

	actual, err := stitch.FromJSON(spec)
	assert.NoError(t, err)

	assert.Equal(t, exp, actual)
}
Example #3
0
// updatePolicy compiles spec and applies the resulting policy to the
// database view. Connections are synced on every node; placements and
// containers are synced on the master only. Invalid specs are logged
// and otherwise ignored.
func updatePolicy(view db.Database, role db.Role, spec string) {
	parsed, err := stitch.FromJSON(spec)
	if err != nil {
		log.WithError(err).Warn("Invalid spec.")
		return
	}

	updateConnections(view, parsed)

	if role != db.Master {
		return
	}

	updatePlacements(view, parsed)

	// The container table is aspirational -- it's the set of containers that
	// should exist.  In the workers, however, the container table is just
	// what's running locally.  That's why we only sync the database
	// containers on the master.
	updateContainers(view, parsed)
}
Example #4
0
// updateTxn refreshes the cluster's namespace from its compiled spec and
// then runs the machine and ACL sync passes against the view. It returns
// an error if the cluster row is missing or the spec fails to compile.
func updateTxn(view db.Database) error {
	cluster, err := view.GetCluster()
	if err != nil {
		return err
	}

	// Name the result `compiled` (matching updatePolicy) rather than
	// `stitch`, which shadowed the stitch package.
	compiled, err := stitch.FromJSON(cluster.Spec)
	if err != nil {
		return err
	}

	cluster.Namespace = compiled.Namespace
	view.Commit(cluster)

	machineTxn(view, compiled)
	aclTxn(view, compiled)
	return nil
}
Example #5
0
// Deploy compiles the requested deployment, rejects it if it exceeds the
// minion limit, and commits the compiled spec to the cluster table,
// inserting a new cluster row if none exists. Vagrant deployments are
// committed but return a warning error (see XXX below).
//
// ctx is currently unused (renamed from `cts`, a typo for the
// conventional name).
func (s server) Deploy(ctx context.Context, deployReq *pb.DeployRequest) (
	*pb.DeployReply, error) {

	// `compiled` rather than `stitch`, which shadowed the stitch package.
	compiled, err := stitch.FromJSON(deployReq.Deployment)
	if err != nil {
		return &pb.DeployReply{}, err
	}

	if len(compiled.Machines) > ipdef.MaxMinionCount {
		return &pb.DeployReply{}, fmt.Errorf("cannot boot more than %d "+
			"machines", ipdef.MaxMinionCount)
	}

	err = s.dbConn.Transact(func(view db.Database) error {
		cluster, err := view.GetCluster()
		if err != nil {
			// No cluster row yet; create one for this deployment.
			cluster = view.InsertCluster()
		}

		cluster.Spec = compiled.String()
		view.Commit(cluster)
		return nil
	})
	if err != nil {
		return &pb.DeployReply{}, err
	}

	// XXX: Remove this error when the Vagrant provider is done.
	// Note the spec has already been committed above, so the stitch runs
	// despite the returned error. The message text is asserted by
	// TestVagrantDeployment and must not change.
	for _, machine := range compiled.Machines {
		if machine.Provider == db.Vagrant {
			err = errors.New("The Vagrant provider is in development." +
				" The stitch will continue to run, but" +
				" probably won't work correctly.")
			return &pb.DeployReply{}, err
		}
	}

	return &pb.DeployReply{}, nil
}