func (s *MySuite) TestToVerifyXmlContentForDataTableDrivenExecution(c *C) {
	value := gauge_messages.ProtoItem_TableDrivenScenario
	scenario := gauge_messages.ProtoScenario{Failed: proto.Bool(false), ScenarioHeading: proto.String("Scenario")}
	scenario1 := gauge_messages.ProtoScenario{Failed: proto.Bool(false), ScenarioHeading: proto.String("Scenario")}
	item := &gauge_messages.ProtoItem{TableDrivenScenario: &gauge_messages.ProtoTableDrivenScenario{Scenarios: []*gauge_messages.ProtoScenario{&scenario, &scenario1}}, ItemType: &value}
	spec := &gauge_messages.ProtoSpec{SpecHeading: proto.String("HEADING"), FileName: proto.String("FILENAME"), Items: []*gauge_messages.ProtoItem{item}}
	specResult := &gauge_messages.ProtoSpecResult{ProtoSpec: spec, ScenarioCount: proto.Int(1), Failed: proto.Bool(false)}
	suiteResult := &gauge_messages.ProtoSuiteResult{SpecResults: []*gauge_messages.ProtoSpecResult{specResult}}
	message := &gauge_messages.SuiteExecutionResult{SuiteResult: suiteResult}

	builder := &XmlBuilder{currentId: 0}
	bytes, err := builder.getXmlContent(message)
	var suites JUnitTestSuites
	xml.Unmarshal(bytes, &suites)

	c.Assert(err, Equals, nil)
	c.Assert(len(suites.Suites), Equals, 1)
	c.Assert(suites.Suites[0].Errors, Equals, 0)
	c.Assert(suites.Suites[0].Failures, Equals, 0)
	c.Assert(suites.Suites[0].Package, Equals, "FILENAME")
	c.Assert(suites.Suites[0].Name, Equals, "HEADING")
	c.Assert(suites.Suites[0].Tests, Equals, 2)
	c.Assert(suites.Suites[0].Timestamp, Equals, builder.suites.Suites[0].Timestamp)
	c.Assert(suites.Suites[0].SystemError.Contents, Equals, "")
	c.Assert(suites.Suites[0].SystemOutput.Contents, Equals, "")
	c.Assert(len(suites.Suites[0].TestCases), Equals, 2)
	c.Assert(suites.Suites[0].TestCases[0].Name, Equals, "Scenario 0")
	c.Assert(suites.Suites[0].TestCases[1].Name, Equals, "Scenario 1")
}
func (s *MySuite) TestToVerifyXmlContentForFailingExecutionResult(c *C) {
	value := gauge_messages.ProtoItem_Scenario
	item := &gauge_messages.ProtoItem{Scenario: &gauge_messages.ProtoScenario{Failed: proto.Bool(true), ScenarioHeading: proto.String("Scenario1")}, ItemType: &value}
	spec := &gauge_messages.ProtoSpec{SpecHeading: proto.String("HEADING"), FileName: proto.String("FILENAME"), Items: []*gauge_messages.ProtoItem{item}}
	specResult := &gauge_messages.ProtoSpecResult{ProtoSpec: spec, ScenarioCount: proto.Int(1), Failed: proto.Bool(true), ScenarioFailedCount: proto.Int(1)}
	suiteResult := &gauge_messages.ProtoSuiteResult{SpecResults: []*gauge_messages.ProtoSpecResult{specResult}}
	message := &gauge_messages.SuiteExecutionResult{SuiteResult: suiteResult}

	builder := &XmlBuilder{currentId: 0}
	bytes, err := builder.getXmlContent(message)
	var suites JUnitTestSuites
	xml.Unmarshal(bytes, &suites)

	c.Assert(err, Equals, nil)
	c.Assert(len(suites.Suites), Equals, 1)
	// spec1 || testSuite
	c.Assert(suites.Suites[0].Errors, Equals, 0)
	c.Assert(suites.Suites[0].Failures, Equals, 1)
	c.Assert(suites.Suites[0].Package, Equals, "FILENAME")
	c.Assert(suites.Suites[0].Name, Equals, "HEADING")
	c.Assert(suites.Suites[0].Tests, Equals, 1)
	c.Assert(suites.Suites[0].Timestamp, Equals, builder.suites.Suites[0].Timestamp)
	c.Assert(suites.Suites[0].SystemError.Contents, Equals, "")
	c.Assert(suites.Suites[0].SystemOutput.Contents, Equals, "")
	// scenario1 of spec1 || testCase
	c.Assert(len(suites.Suites[0].TestCases), Equals, 1)
	c.Assert(suites.Suites[0].TestCases[0].Classname, Equals, "HEADING")
	c.Assert(suites.Suites[0].TestCases[0].Name, Equals, "Scenario1")
	c.Assert(suites.Suites[0].TestCases[0].Failure.Message, Equals, "")
	c.Assert(suites.Suites[0].TestCases[0].Failure.Contents, Equals, "")
}
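The two report-builder tests above wrap every literal with the golang/protobuf helpers (proto.Bool, proto.String, proto.Int) because proto2-generated message structs expose optional fields as pointers. A minimal standalone sketch of that convention, assuming only the github.com/golang/protobuf/proto package; the variable names are illustrative.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// proto.Int takes an int and returns *int32, the pointer type that
	// proto2 code generation uses for optional int32 fields; proto.Bool
	// and proto.String are the analogous helpers for bool and string.
	count := proto.Int(1)
	failed := proto.Bool(false)
	heading := proto.String("HEADING")
	fmt.Println(*count, *failed, *heading) // prints: 1 false HEADING
}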
Example #3
func (rpc *themisRPC) commitRow(tbl, row []byte, mutations []*columnMutation,
	prewriteTs, commitTs uint64, primaryOffset int) error {
	req := &ThemisCommitRequest{}
	req.ThemisCommit = &ThemisCommit{
		Row:          row,
		PrewriteTs:   pb.Uint64(prewriteTs),
		CommitTs:     pb.Uint64(commitTs),
		PrimaryIndex: pb.Int(primaryOffset),
	}

	for _, m := range mutations {
		req.ThemisCommit.Mutations = append(req.ThemisCommit.Mutations, m.toCell())
	}
	var res ThemisCommitResponse
	err := rpc.call("commitRow", tbl, row, req, &res)
	if err != nil {
		return errors.Trace(err)
	}
	ok := res.GetResult()
	if !ok {
		if primaryOffset == -1 {
			return errors.Errorf("commit secondary failed, tbl: %s row: %q ts: %d", tbl, row, commitTs)
		}
		return errors.Errorf("commit primary failed, tbl: %s row: %q ts: %d", tbl, row, commitTs)
	}
	return nil
}
Example #4
// EncodeMaster encodes information about a ResourceMaster as a protobuf.
func (m *ResourceMaster) EncodeMaster() ([]byte, error) {
	p := &ResourceMasterInfo{
		PrinName:          proto.String(m.ProgramName),
		BaseDirectoryName: proto.String(m.BaseDirectory),
		NumFileInfos:      proto.Int(len(m.Resources)),
	}
	return proto.Marshal(p)
}
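For context, a decoding counterpart is sketched below. DecodeMasterInfo is a hypothetical helper, not taken from the project; it relies only on proto.Unmarshal and the same generated ResourceMasterInfo type used above.

// DecodeMasterInfo is a hypothetical inverse of EncodeMaster: it unmarshals
// the bytes produced above back into a ResourceMasterInfo message.
func DecodeMasterInfo(data []byte) (*ResourceMasterInfo, error) {
	info := &ResourceMasterInfo{}
	if err := proto.Unmarshal(data, info); err != nil {
		return nil, err
	}
	return info, nil
}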
Example #5
func (rpc *themisRPC) prewriteRow(tbl []byte, row []byte, mutations []*columnMutation, prewriteTs uint64, primaryLockBytes []byte, secondaryLockBytes []byte, primaryOffset int) (Lock, error) {
	var cells []*proto.Cell
	request := &ThemisPrewriteRequest{
		PrewriteTs:    pb.Uint64(prewriteTs),
		PrimaryLock:   primaryLockBytes,
		SecondaryLock: secondaryLockBytes,
		PrimaryIndex:  pb.Int(primaryOffset),
	}
	request.ThemisPrewrite = &ThemisPrewrite{
		Row: row,
	}
	if primaryLockBytes == nil {
		request.PrimaryLock = []byte("")
	}
	if secondaryLockBytes == nil {
		request.SecondaryLock = []byte("")
	}
	for _, m := range mutations {
		cells = append(cells, m.toCell())
	}
	request.ThemisPrewrite.Mutations = cells

	var res ThemisPrewriteResponse
	err := rpc.call("prewriteRow", tbl, row, request, &res)
	if err != nil {
		return nil, errors.Trace(err)
	}
	b := res.ThemisPrewriteResult
	if b == nil {
		// If the lock is empty, it means we got the lock; otherwise someone else
		// has locked this row, and that lock is returned in the RPC result.
		return nil, nil
	}
	// Oops, someone else has already locked this row.

	commitTs := b.GetNewerWriteTs()
	if commitTs != 0 {
		log.Errorf("write conflict, encounter write with larger timestamp than prewriteTs=%d, commitTs=%d, row=%s", prewriteTs, commitTs, string(row))
		return nil, kv.ErrLockConflict
	}

	l, err := parseLockFromBytes(b.ExistLock)
	if err != nil {
		return nil, errors.Trace(err)
	}

	col := &hbase.ColumnCoordinate{
		Table: tbl,
		Row:   row,
		Column: hbase.Column{
			Family: b.Family,
			Qual:   b.Qualifier,
		},
	}
	l.SetCoordinate(col)
	return l, nil
}
Example #6
// EncodeResource creates a protobuf that represents a resource.
// TODO(tmroeder): map the types and statuses to protobuf enums properly.
func (r *Resource) EncodeResource() ([]byte, error) {
	m := &ResourceInfo{
		Name:     proto.String(r.Name),
		Type:     proto.Int32(int32(r.Type)),
		Status:   proto.Int32(int32(r.Status)),
		Location: proto.String(r.Location),
		Size:     proto.Int(r.Size),
		Owner:    proto.String(r.Owner),
	}
	return proto.Marshal(m)
}
Example #7
func (rpc *themisRPC) batchCommitSecondaryRows(tbl []byte, rowMs map[string]*rowMutation, prewriteTs, commitTs uint64) error {
	req := &ThemisBatchCommitSecondaryRequest{}

	i := 0
	var lastRow []byte
	req.ThemisCommit = make([]*ThemisCommit, len(rowMs))
	for row, rowM := range rowMs {
		var cells []*proto.Cell
		for col, m := range rowM.mutations {
			cells = append(cells, toCellFromRowM(col, m))
		}

		req.ThemisCommit[i] = &ThemisCommit{
			Row:          []byte(row),
			Mutations:    cells,
			PrewriteTs:   pb.Uint64(prewriteTs),
			CommitTs:     pb.Uint64(commitTs),
			PrimaryIndex: pb.Int(-1),
		}
		i++
		lastRow = []byte(row)
	}

	var res ThemisBatchCommitSecondaryResponse
	err := rpc.call("batchCommitSecondaryRows", tbl, lastRow, req, &res)
	if err != nil {
		return errors.Trace(err)
	}
	log.Info("call batch commit secondary rows", len(req.ThemisCommit))

	cResult := res.BatchCommitSecondaryResult
	if cResult != nil && len(cResult) > 0 {
		errorInfo := "commit failed, tbl:" + string(tbl)
		for _, r := range cResult {
			errorInfo += (" row:" + string(r.Row))
		}
		return errors.New(fmt.Sprintf("%s, commitTs:%d", errorInfo, commitTs))
	}
	return nil
}
Example #8
func (s *migrationSourceWs) Do() shared.OperationResult {
	<-s.allConnected

	criuType := CRIUType_CRIU_RSYNC.Enum()
	if !s.live {
		criuType = nil
	}

	idmaps := make([]*IDMapType, 0)

	if s.idmapset != nil {
		for _, ctnIdmap := range s.idmapset.Idmap {
			idmap := IDMapType{
				Isuid:    proto.Bool(ctnIdmap.Isuid),
				Isgid:    proto.Bool(ctnIdmap.Isgid),
				Hostid:   proto.Int(ctnIdmap.Hostid),
				Nsid:     proto.Int(ctnIdmap.Nsid),
				Maprange: proto.Int(ctnIdmap.Maprange),
			}

			idmaps = append(idmaps, &idmap)
		}
	}

	header := MigrationHeader{
		Fs:    MigrationFSType_RSYNC.Enum(),
		Criu:  criuType,
		Idmap: idmaps,
	}

	if err := s.send(&header); err != nil {
		s.sendControl(err)
		return shared.OperationError(err)
	}

	if err := s.recv(&header); err != nil {
		s.sendControl(err)
		return shared.OperationError(err)
	}

	if *header.Fs != MigrationFSType_RSYNC {
		err := fmt.Errorf("Formats other than rsync not understood")
		s.sendControl(err)
		return shared.OperationError(err)
	}

	if s.live {
		if header.Criu == nil {
			err := fmt.Errorf("Got no CRIU socket type for live migration")
			s.sendControl(err)
			return shared.OperationError(err)
		} else if *header.Criu != CRIUType_CRIU_RSYNC {
			err := fmt.Errorf("Formats other than criu rsync not understood")
			s.sendControl(err)
			return shared.OperationError(err)
		}

		checkpointDir, err := ioutil.TempDir("", "lxd_migration_")
		if err != nil {
			s.sendControl(err)
			return shared.OperationError(err)
		}
		defer os.RemoveAll(checkpointDir)

		opts := lxc.CheckpointOptions{Stop: true, Directory: checkpointDir, Verbose: true}
		err = s.container.Checkpoint(opts)

		if err2 := CollectCRIULogFile(s.container, checkpointDir, "migration", "dump"); err2 != nil {
			shared.Debugf("Error collecting checkpoint log file %s", err)
		}

		if err != nil {
			log := GetCRIULogErrors(checkpointDir, "dump")

			err = fmt.Errorf("checkpoint failed:\n%s", log)
			s.sendControl(err)
			return shared.OperationError(err)
		}

		/*
		 * We do this serially right now, but there's really no reason for us
		 * to; since we have separate websockets, we can do it in parallel if
		 * we wanted to. However, assuming we're network bound, there's really
		 * no reason to do these in parallel. In the future when we're using
		 * p.haul's protocol, it will make sense to do these in parallel.
		 */
		if err := RsyncSend(shared.AddSlash(checkpointDir), s.criuConn); err != nil {
			s.sendControl(err)
			return shared.OperationError(err)
		}
	}

	fsDir := s.container.ConfigItem("lxc.rootfs")[0]
	if err := RsyncSend(shared.AddSlash(fsDir), s.fsConn); err != nil {
		s.sendControl(err)
		return shared.OperationError(err)
	}

	msg := MigrationControl{}
	if err := s.recv(&msg); err != nil {
		s.disconnect()
		return shared.OperationError(err)
	}

	// TODO: should we add some config here about automatically restarting
	// the container on migrate failure? What about the failures above?
	if !*msg.Success {
		return shared.OperationError(fmt.Errorf(*msg.Message))
	}

	return shared.OperationSuccess
}
Example #9
func (self *specValidator) validateStep(step *parser.Step) *stepValidationError {
	message := &gauge_messages.Message{MessageType: gauge_messages.Message_StepValidateRequest.Enum(),
		StepValidateRequest: &gauge_messages.StepValidateRequest{StepText: proto.String(step.Value), NumberOfParameters: proto.Int(len(step.Args))}}
	response, err := conn.GetResponseForMessageWithTimeout(message, self.runner.Connection, config.RunnerRequestTimeout())
	if err != nil {
		return &stepValidationError{step: step, message: err.Error(), fileName: self.specification.FileName}
	}
	if response.GetMessageType() == gauge_messages.Message_StepValidateResponse {
		validateResponse := response.GetStepValidateResponse()
		if !validateResponse.GetIsValid() {
			return &stepValidationError{step: step, fileName: self.specification.FileName, errorType: validateResponse.ErrorType, message: *validateResponse.ErrorMessage}
		}
		return nil
	} else {
		return &stepValidationError{step: step, fileName: self.specification.FileName, errorType: &invalidResponse, message: "Invalid response from runner for Validation request"}
	}
}
Example #10
func (s *migrationSourceWs) Do(migrateOp *operation) error {
	<-s.allConnected

	criuType := CRIUType_CRIU_RSYNC.Enum()
	if !s.live {
		criuType = nil

		err := s.container.StorageStart()
		if err != nil {
			return err
		}

		defer s.container.StorageStop()
	}

	idmaps := make([]*IDMapType, 0)

	idmapset := s.container.IdmapSet()
	if idmapset != nil {
		for _, ctnIdmap := range idmapset.Idmap {
			idmap := IDMapType{
				Isuid:    proto.Bool(ctnIdmap.Isuid),
				Isgid:    proto.Bool(ctnIdmap.Isgid),
				Hostid:   proto.Int(ctnIdmap.Hostid),
				Nsid:     proto.Int(ctnIdmap.Nsid),
				Maprange: proto.Int(ctnIdmap.Maprange),
			}

			idmaps = append(idmaps, &idmap)
		}
	}

	driver, fsErr := s.container.Storage().MigrationSource(s.container)
	/* the protocol says we have to send a header no matter what, so let's
	 * do that, but then immediately send an error.
	 */
	snapshots := []*Snapshot{}
	snapshotNames := []string{}
	if fsErr == nil {
		fullSnaps := driver.Snapshots()
		for _, snap := range fullSnaps {
			snapshots = append(snapshots, snapshotToProtobuf(snap))
			snapshotNames = append(snapshotNames, shared.ExtractSnapshotName(snap.Name()))
		}
	}

	myType := s.container.Storage().MigrationType()
	header := MigrationHeader{
		Fs:            &myType,
		Criu:          criuType,
		Idmap:         idmaps,
		SnapshotNames: snapshotNames,
		Snapshots:     snapshots,
	}

	if err := s.send(&header); err != nil {
		s.sendControl(err)
		return err
	}

	if fsErr != nil {
		s.sendControl(fsErr)
		return fsErr
	}

	if err := s.recv(&header); err != nil {
		s.sendControl(err)
		return err
	}

	if *header.Fs != myType {
		myType = MigrationFSType_RSYNC
		header.Fs = &myType

		driver, _ = rsyncMigrationSource(s.container)
	}

	// All failure paths need to do a few things to correctly handle errors before returning.
	// Unfortunately, handling errors is not well-suited to defer as the code depends on the
	// status of driver and the error value.  The error value is especially tricky due to the
	// common case of creating a new err variable (intentional or not) due to scoping and use
	// of ":=".  Capturing err in a closure for use in defer would be fragile, which defeats
	// the purpose of using defer.  An abort function reduces the odds of mishandling errors
	// without introducing the fragility of closing on err.
	abort := func(err error) error {
		driver.Cleanup()
		s.sendControl(err)
		return err
	}

	if err := driver.SendWhileRunning(s.fsConn, migrateOp); err != nil {
		return abort(err)
	}

	restoreSuccess := make(chan bool, 1)
	dumpSuccess := make(chan error, 1)
	if s.live {
		if header.Criu == nil {
			return abort(fmt.Errorf("Got no CRIU socket type for live migration"))
		} else if *header.Criu != CRIUType_CRIU_RSYNC {
			return abort(fmt.Errorf("Formats other than criu rsync not understood"))
		}

		checkpointDir, err := ioutil.TempDir("", "lxd_checkpoint_")
		if err != nil {
			return abort(err)
		}

		if lxc.VersionAtLeast(2, 0, 4) {
			/* What happens below is slightly convoluted. Due to various
			 * complications with networking, there's no easy way for criu
			 * to exit and leave the container in a frozen state for us to
			 * somehow resume later.
			 *
			 * Instead, we use what criu calls an "action-script", which is
			 * basically a callback that lets us know when the dump is
			 * done. (Unfortunately, we can't pass arguments, just an
			 * executable path, so we write a custom action script with the
			 * real command we want to run.)
			 *
			 * This script then hangs until the migration operation either
			 * finishes successfully or fails, and exits 1 or 0, which
			 * causes criu to either leave the container running or kill it
			 * as we asked.
			 */
			dumpDone := make(chan bool, 1)
			actionScriptOpSecret, err := shared.RandomCryptoString()
			if err != nil {
				os.RemoveAll(checkpointDir)
				return abort(err)
			}

			actionScriptOp, err := operationCreate(
				operationClassWebsocket,
				nil,
				nil,
				func(op *operation) error {
					result := <-restoreSuccess
					if !result {
						return fmt.Errorf("restore failed, failing CRIU")
					}
					return nil
				},
				nil,
				func(op *operation, r *http.Request, w http.ResponseWriter) error {
					secret := r.FormValue("secret")
					if secret == "" {
						return fmt.Errorf("missing secret")
					}

					if secret != actionScriptOpSecret {
						return os.ErrPermission
					}

					c, err := shared.WebsocketUpgrader.Upgrade(w, r, nil)
					if err != nil {
						return err
					}

					dumpDone <- true

					closeMsg := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")
					return c.WriteMessage(websocket.CloseMessage, closeMsg)
				},
			)
			if err != nil {
				os.RemoveAll(checkpointDir)
				return abort(err)
			}

			if err := writeActionScript(checkpointDir, actionScriptOp.url, actionScriptOpSecret); err != nil {
				os.RemoveAll(checkpointDir)
				return abort(err)
			}

			_, err = actionScriptOp.Run()
			if err != nil {
				os.RemoveAll(checkpointDir)
				return abort(err)
			}

			go func() {
				dumpSuccess <- s.container.Migrate(lxc.MIGRATE_DUMP, checkpointDir, "migration", true, true)
				os.RemoveAll(checkpointDir)
			}()

			select {
			/* the checkpoint failed, let's just abort */
			case err = <-dumpSuccess:
				return abort(err)
			/* the dump finished, let's continue on to the restore */
			case <-dumpDone:
				shared.LogDebugf("Dump finished, continuing with restore...")
			}
		} else {
			defer os.RemoveAll(checkpointDir)
			if err := s.container.Migrate(lxc.MIGRATE_DUMP, checkpointDir, "migration", true, false); err != nil {
				return abort(err)
			}
		}

		/*
		 * We do this serially right now, but there's really no reason for us
		 * to; since we have separate websockets, we can do it in parallel if
		 * we wanted to. However, assuming we're network bound, there's really
		 * no reason to do these in parallel. In the future when we're using
		 * p.haul's protocol, it will make sense to do these in parallel.
		 */
		if err := RsyncSend(shared.AddSlash(checkpointDir), s.criuConn, nil); err != nil {
			return abort(err)
		}

		if err := driver.SendAfterCheckpoint(s.fsConn); err != nil {
			return abort(err)
		}
	}

	driver.Cleanup()

	msg := MigrationControl{}
	if err := s.recv(&msg); err != nil {
		s.disconnect()
		return err
	}

	if s.live {
		restoreSuccess <- *msg.Success
		err := <-dumpSuccess
		if err != nil {
			shared.LogErrorf("dump failed after successful restore?: %q", err)
		}
	}

	if !*msg.Success {
		return fmt.Errorf(*msg.Message)
	}

	return nil
}
Example #11
func (s *migrationSourceWs) Do(op *operation) error {
	<-s.allConnected

	criuType := CRIUType_CRIU_RSYNC.Enum()
	if !s.live {
		criuType = nil

		err := s.container.StorageStart()
		if err != nil {
			return err
		}

		defer s.container.StorageStop()
	}

	idmaps := make([]*IDMapType, 0)

	idmapset := s.container.IdmapSet()
	if idmapset != nil {
		for _, ctnIdmap := range idmapset.Idmap {
			idmap := IDMapType{
				Isuid:    proto.Bool(ctnIdmap.Isuid),
				Isgid:    proto.Bool(ctnIdmap.Isgid),
				Hostid:   proto.Int(ctnIdmap.Hostid),
				Nsid:     proto.Int(ctnIdmap.Nsid),
				Maprange: proto.Int(ctnIdmap.Maprange),
			}

			idmaps = append(idmaps, &idmap)
		}
	}

	driver, fsErr := s.container.Storage().MigrationSource(s.container)
	/* the protocol says we have to send a header no matter what, so let's
	 * do that, but then immediately send an error.
	 */
	snapshots := []string{}
	if fsErr == nil {
		fullSnaps := driver.Snapshots()
		for _, snap := range fullSnaps {
			snapshots = append(snapshots, shared.ExtractSnapshotName(snap.Name()))
		}
	}

	myType := s.container.Storage().MigrationType()
	header := MigrationHeader{
		Fs:        &myType,
		Criu:      criuType,
		Idmap:     idmaps,
		Snapshots: snapshots,
	}

	if err := s.send(&header); err != nil {
		s.sendControl(err)
		return err
	}

	if fsErr != nil {
		s.sendControl(fsErr)
		return fsErr
	}

	if err := s.recv(&header); err != nil {
		s.sendControl(err)
		return err
	}

	if *header.Fs != myType {
		myType = MigrationFSType_RSYNC
		header.Fs = &myType

		driver, _ = rsyncMigrationSource(s.container)
	}

	defer driver.Cleanup()

	if err := driver.SendWhileRunning(s.fsConn); err != nil {
		s.sendControl(err)
		return err
	}

	if s.live {
		if header.Criu == nil {
			err := fmt.Errorf("Got no CRIU socket type for live migration")
			s.sendControl(err)
			return err
		} else if *header.Criu != CRIUType_CRIU_RSYNC {
			err := fmt.Errorf("Formats other than criu rsync not understood")
			s.sendControl(err)
			return err
		}

		checkpointDir, err := ioutil.TempDir("", "lxd_checkpoint_")
		if err != nil {
			s.sendControl(err)
			return err
		}
		defer os.RemoveAll(checkpointDir)

		opts := lxc.CheckpointOptions{Stop: true, Directory: checkpointDir, Verbose: true}
		err = s.container.Checkpoint(opts)

		if err2 := CollectCRIULogFile(s.container, checkpointDir, "migration", "dump"); err2 != nil {
			shared.Debugf("Error collecting checkpoint log file %s", err)
		}

		if err != nil {
			log, err2 := GetCRIULogErrors(checkpointDir, "dump")

			/* couldn't find the CRIU log file which means we
			 * didn't even get that far; give back the liblxc
			 * error. */
			if err2 != nil {
				log = err.Error()
			}

			err = fmt.Errorf("checkpoint failed:\n%s", log)
			s.sendControl(err)
			return err
		}

		/*
		 * We do this serially right now, but there's really no reason for us
		 * to; since we have separate websockets, we can do it in parallel if
		 * we wanted to. However, assuming we're network bound, there's really
		 * no reason to do these in parallel. In the future when we're using
		 * p.haul's protocol, it will make sense to do these in parallel.
		 */
		if err := RsyncSend(shared.AddSlash(checkpointDir), s.criuConn); err != nil {
			s.sendControl(err)
			return err
		}

		if err := driver.SendAfterCheckpoint(s.fsConn); err != nil {
			s.sendControl(err)
			return err
		}
	}

	msg := MigrationControl{}
	if err := s.recv(&msg); err != nil {
		s.disconnect()
		return err
	}

	// TODO: should we add some config here about automatically restarting
	// the container on migrate failure? What about the failures above?
	if !*msg.Success {
		return fmt.Errorf(*msg.Message)
	}

	return nil
}
Example #12
// Analyze runs android lint on all Android projects that are implicitly
// referenced by the list of files on the depot path. In case of an error, it
// returns partial results.
func (ala Analyzer) Analyze(ctx *ctxpb.ShipshapeContext) ([]*notepb.Note, error) {
	var notes []*notepb.Note

	// Get the list of Android Projects
	projects := getAndroidProjects(ctx.FilePath)

	for prj := range projects {
		tempReport, err := ioutil.TempFile("", report)
		if err != nil {
			return notes, fmt.Errorf("Could not create temp report file %s: %v", report, err)
		}
		defer os.Remove(tempReport.Name())

		// TODO(ciera): Add project options (--classpath) when we have build information.
		// TODO(clconway): The path to the binary should be configurable, especially since
		// the "lint" command name is overloaded.
		cmd := exec.Command(lintBin,
			"--showall",
			"--quiet",
			"--exitcode",
			"--xml", tempReport.Name(),
			prj)
		out, err := cmd.CombinedOutput()

		log.Printf("lint output is %q", out)

		switch err := err.(type) {
		case nil:
			// no issues. Do nothing.
		case *exec.ExitError:
			if err.Error() != exitStatus {
				return notes, fmt.Errorf("Unexpected error code from android lint: %v", err)
			}

			// Get the results from xml
			data, xmlErr := ioutil.ReadFile(tempReport.Name())
			if xmlErr != nil {
				return notes, fmt.Errorf("could not read %s : %v", tempReport.Name(), xmlErr)
			}

			var issues IssuesList
			xmlErr = xml.Unmarshal(data, &issues)
			if xmlErr != nil {
				return notes, fmt.Errorf("could not unmarshal XML from %s: %v", tempReport.Name(), xmlErr)
			}

			// Create a bunch of notes to return, one per line from the output.
			for _, issue := range issues.Issues {
				notes = append(notes, &notepb.Note{
					Category:    proto.String(ala.Category()),
					Subcategory: proto.String(issue.Subcategory),
					Description: proto.String(issue.Message),
					Location: &notepb.Location{
						SourceContext: ctx.SourceContext,
						Path:          proto.String(filepath.Join(prj, issue.Location.File)),
						Range: &rangepb.TextRange{
							StartLine:   proto.Int(issue.Location.Line),
							StartColumn: proto.Int(issue.Location.Column),
						},
					},
				})
			}
		case *exec.Error:
			return notes, err
		default:
			return notes, err
		}
	}

	return notes, nil
}
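The IssuesList type consumed by xml.Unmarshal above is defined elsewhere in the analyzer. Purely as an illustration of the shape it would need, a sketch follows; the field names mirror how issue.Subcategory, issue.Message, and issue.Location are accessed above, while the xml attribute tags are an assumption about the lint --xml report layout rather than something confirmed by this snippet.

// Hypothetical sketch of the report structure consumed above; the xml tags
// are assumptions about android lint's XML output, not taken from this code.
type IssuesList struct {
	Issues []Issue `xml:"issue"`
}

type Issue struct {
	Subcategory string   `xml:"id,attr"`
	Message     string   `xml:"message,attr"`
	Location    Location `xml:"location"`
}

type Location struct {
	File   string `xml:"file,attr"`
	Line   int    `xml:"line,attr"`
	Column int    `xml:"column,attr"`
}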
Example #13
func (specInfoGatherer *SpecInfoGatherer) createConceptInfos() []*gauge_messages.ConceptInfo {
	conceptInfos := make([]*gauge_messages.ConceptInfo, 0)
	for _, concept := range specInfoGatherer.getDictionary().ConceptsMap {
		stepValue := parser.CreateStepValue(concept.ConceptStep)
		conceptInfos = append(conceptInfos, &gauge_messages.ConceptInfo{StepValue: parser.ConvertToProtoStepValue(&stepValue), Filepath: proto.String(concept.FileName), LineNumber: proto.Int(concept.ConceptStep.LineNo)})
	}
	return conceptInfos
}
Example #14
func genField(f *ast.Field) (*pb.FieldDescriptorProto, *pb.DescriptorProto, error) {
	fdp := &pb.FieldDescriptorProto{
		Name:   proto.String(f.Name),
		Number: proto.Int32(int32(f.Tag)),
	}
	switch {
	case f.Required:
		fdp.Label = pb.FieldDescriptorProto_LABEL_REQUIRED.Enum()
	case f.Repeated:
		fdp.Label = pb.FieldDescriptorProto_LABEL_REPEATED.Enum()
	default:
		// default is optional
		fdp.Label = pb.FieldDescriptorProto_LABEL_OPTIONAL.Enum()
	}
	if f.KeyTypeName != "" {
		mname := camelCase(f.Name) + "Entry"
		vmsg := &ast.Message{
			Name: mname,
			Fields: []*ast.Field{
				{
					TypeName: f.KeyTypeName,
					Type:     f.KeyType,
					Name:     "key",
					Tag:      1,
				},
				{
					TypeName: f.TypeName,
					Type:     f.Type,
					Name:     "value",
					Tag:      2,
				},
			},
			Up: f.Up,
		}
		vmsg.Fields[0].Up = vmsg
		vmsg.Fields[1].Up = vmsg
		xdp, err := genMessage(vmsg)
		if err != nil {
			return nil, nil, fmt.Errorf("internal error: %v", err)
		}
		xdp.Options = &pb.MessageOptions{
			MapEntry: proto.Bool(true),
		}
		fdp.Type = pb.FieldDescriptorProto_TYPE_MESSAGE.Enum()
		fdp.TypeName = proto.String(qualifiedName(vmsg))
		return fdp, xdp, nil
	}
	switch t := f.Type.(type) {
	case ast.FieldType:
		pt, ok := fieldTypeMap[t]
		if !ok {
			return nil, nil, fmt.Errorf("internal error: no mapping from ast.FieldType %v", t)
		}
		fdp.Type = pt.Enum()
	case *ast.Message:
		if !t.Group {
			fdp.Type = pb.FieldDescriptorProto_TYPE_MESSAGE.Enum()
		} else {
			fdp.Type = pb.FieldDescriptorProto_TYPE_GROUP.Enum()
			// The field name is lowercased by protoc.
			*fdp.Name = strings.ToLower(*fdp.Name)
		}
		fdp.TypeName = proto.String(qualifiedName(t))
	case *ast.Enum:
		fdp.Type = pb.FieldDescriptorProto_TYPE_ENUM.Enum()
		fdp.TypeName = proto.String(qualifiedName(t))
	default:
		return nil, nil, fmt.Errorf("internal error: bad ast.Field.Type type %T", f.Type)
	}
	if ext, ok := f.Up.(*ast.Extension); ok {
		fdp.Extendee = proto.String(qualifiedName(ext.ExtendeeType))
	}
	if f.HasDefault {
		fdp.DefaultValue = proto.String(f.Default)
	}
	if f.Oneof != nil {
		n := 0
		for _, oo := range f.Oneof.Up.Oneofs {
			if oo == f.Oneof {
				break
			}
			n++
		}
		fdp.OneofIndex = proto.Int(n)
	}

	return fdp, nil, nil
}
Example #15
func (s *SpecInfoGatherer) GetConceptInfos() []*gauge_messages.ConceptInfo {
	s.waitGroup.Wait()

	conceptInfos := make([]*gauge_messages.ConceptInfo, 0)
	s.mutex.Lock()
	for _, conceptList := range s.conceptsCache {
		for _, concept := range conceptList {
			stepValue := parser.CreateStepValue(concept.ConceptStep)
			conceptInfos = append(conceptInfos, &gauge_messages.ConceptInfo{StepValue: gauge.ConvertToProtoStepValue(&stepValue), Filepath: proto.String(concept.FileName), LineNumber: proto.Int(concept.ConceptStep.LineNo)})
		}
	}
	s.mutex.Unlock()
	return conceptInfos
}
Example #16
func (s *migrationSourceWs) Do(op *operation) error {
	<-s.allConnected

	criuType := CRIUType_CRIU_RSYNC.Enum()
	if !s.live {
		criuType = nil
		defer s.container.StorageStop()
	}

	idmaps := make([]*IDMapType, 0)

	idmapset := s.container.IdmapSet()
	if idmapset != nil {
		for _, ctnIdmap := range idmapset.Idmap {
			idmap := IDMapType{
				Isuid:    proto.Bool(ctnIdmap.Isuid),
				Isgid:    proto.Bool(ctnIdmap.Isgid),
				Hostid:   proto.Int(ctnIdmap.Hostid),
				Nsid:     proto.Int(ctnIdmap.Nsid),
				Maprange: proto.Int(ctnIdmap.Maprange),
			}

			idmaps = append(idmaps, &idmap)
		}
	}

	sources, fsErr := s.container.Storage().MigrationSource(s.container)
	/* the protocol says we have to send a header no matter what, so let's
	 * do that, but then immediately send an error.
	 */
	snapshots := []string{}
	if fsErr == nil {
		/* A bit of a special case here: doing lxc launch
		 * host2:c1/snap1 host1:container we're sending a snapshot, but
		 * it ends up as the container on the other end. So, we want to
		 * send it as the main container (i.e. ignore its IsSnapshot()).
		 */
		if len(sources) > 1 {
			for _, snap := range sources {
				if !snap.IsSnapshot() {
					continue
				}
				name := shared.ExtractSnapshotName(snap.Name())
				snapshots = append(snapshots, name)
			}
		}
	}

	myType := s.container.Storage().MigrationType()
	header := MigrationHeader{
		Fs:        &myType,
		Criu:      criuType,
		Idmap:     idmaps,
		Snapshots: snapshots,
	}

	if err := s.send(&header); err != nil {
		s.sendControl(err)
		return err
	}

	if fsErr != nil {
		s.sendControl(fsErr)
		return fsErr
	}

	if err := s.recv(&header); err != nil {
		s.sendControl(err)
		return err
	}

	// TODO: actually fall back on rsync.
	if *header.Fs != myType {
		err := fmt.Errorf("mismatched storage types not supported yet")
		s.sendControl(err)
		return err
	}

	if s.live {
		if header.Criu == nil {
			err := fmt.Errorf("Got no CRIU socket type for live migration")
			s.sendControl(err)
			return err
		} else if *header.Criu != CRIUType_CRIU_RSYNC {
			err := fmt.Errorf("Formats other than criu rsync not understood")
			s.sendControl(err)
			return err
		}

		checkpointDir, err := ioutil.TempDir("", "lxd_checkpoint_")
		if err != nil {
			s.sendControl(err)
			return err
		}
		defer os.RemoveAll(checkpointDir)

		opts := lxc.CheckpointOptions{Stop: true, Directory: checkpointDir, Verbose: true}
		err = s.container.Checkpoint(opts)

		if err2 := CollectCRIULogFile(s.container, checkpointDir, "migration", "dump"); err2 != nil {
			shared.Debugf("Error collecting checkpoint log file %s", err)
		}

		if err != nil {
			log := GetCRIULogErrors(checkpointDir, "dump")

			err = fmt.Errorf("checkpoint failed:\n%s", log)
			s.sendControl(err)
			return err
		}

		/*
		 * We do this serially right now, but there's really no reason for us
		 * to; since we have separate websockets, we can do it in parallel if
		 * we wanted to. However, assuming we're network bound, there's really
		 * no reason to do these in parallel. In the future when we're using
		 * p.haul's protocol, it will make sense to do these in parallel.
		 */
		if err := RsyncSend(shared.AddSlash(checkpointDir), s.criuConn); err != nil {
			s.sendControl(err)
			return err
		}
	}

	for _, source := range sources {
		shared.Debugf("sending fs object %s", source.Name())
		if err := source.Send(s.fsConn); err != nil {
			s.sendControl(err)
			return err
		}
	}

	msg := MigrationControl{}
	if err := s.recv(&msg); err != nil {
		s.disconnect()
		return err
	}

	// TODO: should we add some config here about automatically restarting
	// the container on migrate failure? What about the failures above?
	if !*msg.Success {
		return fmt.Errorf(*msg.Message)
	}

	return nil
}
Example #17
func newPhoneNumber(cc int, natNum uint64) *PhoneNumber {
	p := &PhoneNumber{}
	p.CountryCode = proto.Int(cc)
	p.NationalNumber = proto.Uint64(natNum)
	return p
}
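A short usage sketch for the constructor above, assuming only the fmt package; the concrete digits are illustrative, and the dereferences rely on the proto2 pointer fields set by proto.Int and proto.Uint64.

// formatPhoneNumber is an illustrative helper that reads back the fields
// set by newPhoneNumber; the number below is made up for the example.
func formatPhoneNumber() string {
	p := newPhoneNumber(44, 7912345678)
	return fmt.Sprintf("+%d %d", *p.CountryCode, *p.NationalNumber) // "+44 7912345678"
}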
Example #18
func (self *specValidator) validateStep(step *step) {
	message := &gauge_messages.Message{MessageType: gauge_messages.Message_StepValidateRequest.Enum(),
		StepValidateRequest: &gauge_messages.StepValidateRequest{StepText: proto.String(step.value), NumberOfParameters: proto.Int(len(step.args))}}
	response, err := conn.GetResponseForMessageWithTimeout(message, self.runner.connection, config.RunnerRequestTimeout())
	if err != nil {
		self.stepValidationErrors = append(self.stepValidationErrors, &stepValidationError{step: step, message: err.Error(), fileName: self.specification.fileName})
		return
	}
	if response.GetMessageType() == gauge_messages.Message_StepValidateResponse {
		validateResponse := response.GetStepValidateResponse()
		if !validateResponse.GetIsValid() {
			self.stepValidationErrors = append(self.stepValidationErrors, &stepValidationError{step: step, message: validateResponse.GetErrorMessage(), fileName: self.specification.fileName})
		}
	} else {
		self.stepValidationErrors = append(self.stepValidationErrors, &stepValidationError{step: step, message: "Invalid response from runner for Validation request", fileName: self.specification.fileName})
	}
}
Example #19
func (v *specValidator) validateStep(s *gauge.Step) *StepValidationError {
	m := &gauge_messages.Message{MessageType: gauge_messages.Message_StepValidateRequest.Enum(),
		StepValidateRequest: &gauge_messages.StepValidateRequest{StepText: proto.String(s.Value), NumberOfParameters: proto.Int(len(s.Args))}}
	r, err := conn.GetResponseForMessageWithTimeout(m, v.runner.Connection, config.RunnerRequestTimeout())
	if err != nil {
		return NewValidationError(s, err.Error(), v.specification.FileName, nil)
	}
	if r.GetMessageType() == gauge_messages.Message_StepValidateResponse {
		res := r.GetStepValidateResponse()
		if !res.GetIsValid() {
			msg := getMessage(res.ErrorType.String())
			return NewValidationError(s, msg, v.specification.FileName, res.ErrorType)
		}
		return nil
	}
	return NewValidationError(s, "Invalid response from runner for Validation request", v.specification.FileName, &invalidResponse)
}
Example #20
func (agent *rephraseRefactorer) createParameterPositions(orderMap map[int]int) []*gauge_messages.ParameterPosition {
	paramPositions := make([]*gauge_messages.ParameterPosition, 0)
	for k, v := range orderMap {
		paramPositions = append(paramPositions, &gauge_messages.ParameterPosition{NewPosition: proto.Int(k), OldPosition: proto.Int(v)})
	}
	return paramPositions
}
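One caveat worth noting: Go randomizes map iteration order, so the slice returned above comes back in no particular order, and a caller that needs stable ordering has to sort it. A hedged sketch follows; sortParameterPositions is an illustrative helper (not part of Gauge) that uses the standard sort package and the NewPosition pointer field populated by proto.Int above.

// sortParameterPositions orders the positions produced above by their new
// position; it exists only to illustrate that map iteration order is random.
func sortParameterPositions(positions []*gauge_messages.ParameterPosition) {
	sort.Slice(positions, func(i, j int) bool {
		return *positions[i].NewPosition < *positions[j].NewPosition
	})
}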
Example #21
func newNegotiate() *pb.Negotiate {
	return &pb.Negotiate{
		Magic:   proto.String("cluster"),
		Version: proto.Int(1),
	}
}