Example #1
File: worker_test.go Project: bac/juju
func (s *workerSuite) TestLogSending(c *gc.C) {
	const logCount = 5
	logsCh := make(chan *logsender.LogRecord, logCount)

	// Start the logsender worker.
	worker := logsender.New(logsCh, s.logSenderAPI())
	defer func() {
		worker.Kill()
		c.Check(worker.Wait(), jc.ErrorIsNil)
	}()

	// Send some logs, also building up what should appear in the
	// database.
	var expectedDocs []bson.M
	for i := 0; i < logCount; i++ {
		ts := time.Now()
		location := fmt.Sprintf("loc%d", i)
		message := fmt.Sprintf("%d", i)

		logsCh <- &logsender.LogRecord{
			Time:     ts,
			Module:   "logsender-test",
			Location: location,
			Level:    loggo.INFO,
			Message:  message,
		}

		expectedDocs = append(expectedDocs, bson.M{
			"t": ts.UnixNano(),
			"e": s.State.ModelUUID(),
			"r": version.Current.String(),
			"n": s.machineTag.String(),
			"m": "logsender-test",
			"l": location,
			"v": int(loggo.INFO),
			"x": message,
		})
	}

	// Wait for the logs to appear in the database.
	var docs []bson.M
	logsColl := s.State.MongoSession().DB("logs").C("logs")
	for a := testing.LongAttempt.Start(); a.Next(); {
		err := logsColl.Find(bson.M{"m": "logsender-test"}).All(&docs)
		c.Assert(err, jc.ErrorIsNil)
		if len(docs) == logCount {
			break
		}
	}

	// Check that the logs are correct.
	c.Assert(docs, gc.HasLen, logCount)
	for i := 0; i < logCount; i++ {
		doc := docs[i]
		delete(doc, "_id")
		c.Assert(doc, gc.DeepEquals, expectedDocs[i])
	}
}
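The wait loop above retries the Mongo query via testing.LongAttempt until the expected number of log documents shows up, and the assertion after the loop catches a timeout. As a standalone illustration of the same poll-until-condition idea without the juju/testing helpers, here is a minimal sketch; the helper name waitFor and the timings are hypothetical and not part of the juju code.

package main

import (
	"fmt"
	"time"
)

// waitFor polls cond every interval until it returns true or the timeout
// expires. It plays the role testing.LongAttempt plays in the test above.
func waitFor(cond func() bool, timeout, interval time.Duration) bool {
	deadline := time.Now().Add(timeout)
	for {
		if cond() {
			return true
		}
		if time.Now().After(deadline) {
			return false
		}
		time.Sleep(interval)
	}
}

func main() {
	start := time.Now()
	ok := waitFor(func() bool {
		// In the test this would be the query counting the inserted
		// log documents; here a timer stands in for it.
		return time.Since(start) > 50*time.Millisecond
	}, time.Second, 10*time.Millisecond)
	fmt.Println("condition met:", ok)
}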
Example #2
func (s *workerSuite) TestLockedGate(c *gc.C) {

	// Set a bad password to induce an error if we connect.
	s.apiInfo.Password = "******"

	// Run a logsender worker.
	logsCh := make(chan *logsender.LogRecord)
	worker := logsender.New(logsCh, lockedGate{}, s.agent())

	// At the end of the test, make sure we never tried to connect.
	defer func() {
		worker.Kill()
		c.Check(worker.Wait(), jc.ErrorIsNil)
	}()

	// Give it a chance to ignore the gate and read the log channel.
	select {
	case <-time.After(testing.ShortWait):
	case logsCh <- &logsender.LogRecord{}:
		c.Fatalf("read log channel without waiting for gate")
	}
}
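The select at the end of the test is the usual trick for asserting that a channel operation does not happen within a short window: if the send on logsCh succeeds before the timer fires, the worker read the channel too early and the test fails. A self-contained sketch of that pattern, with hypothetical names and timings:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Nobody reads from ch, so the send below must not complete.
	ch := make(chan int)

	select {
	case <-time.After(50 * time.Millisecond):
		// Expected branch: the timeout fires because the send never went through.
		fmt.Println("ok: channel was not read within the window")
	case ch <- 1:
		// In a gocheck test this branch would call c.Fatalf(...).
		fmt.Println("unexpected: something read the channel")
	}
}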
Example #3
func (s *workerSuite) TestDroppedLogs(c *gc.C) {
	logsCh := make(logsender.LogRecordCh)

	// Start the logsender worker.
	worker := logsender.New(logsCh, s.logSenderAPI())
	defer func() {
		worker.Kill()
		c.Check(worker.Wait(), jc.ErrorIsNil)
	}()

	// Send a log record which indicates some messages after it were
	// dropped.
	ts := time.Now().Truncate(time.Millisecond)
	logsCh <- &logsender.LogRecord{
		Time:         ts,
		Module:       "aaa",
		Location:     "loc",
		Level:        loggo.INFO,
		Message:      "message0",
		DroppedAfter: 42,
	}

	// Send another log record with no drops indicated.
	logsCh <- &logsender.LogRecord{
		Time:     time.Now(),
		Module:   "zzz",
		Location: "loc",
		Level:    loggo.INFO,
		Message:  "message1",
	}

	// Wait for the logs to appear in the database.
	var docs []bson.M
	logsColl := s.State.MongoSession().DB("logs").C("logs")
	for a := testing.LongAttempt.Start(); a.Next(); {
		if !a.HasNext() {
			c.Fatal("timed out waiting for logs")
		}
		err := logsColl.Find(nil).Sort("m").All(&docs)
		c.Assert(err, jc.ErrorIsNil)
		// Expect the 2 messages sent along with a message about
		// dropped messages.
		if len(docs) == 3 {
			break
		}
	}

	// Check that the log records sent are present as well as an additional
	// message in between indicating that some messages were dropped.
	c.Assert(docs[0]["x"], gc.Equals, "message0")
	delete(docs[1], "_id")
	c.Assert(docs[1], gc.DeepEquals, bson.M{
		"t": ts, // Should share timestamp with previous message.
		"e": s.State.EnvironUUID(),
		"n": s.machineTag.String(),
		"m": "juju.worker.logsender",
		"l": "",
		"v": int(loggo.WARNING),
		"x": "42 log messages dropped due to lack of API connectivity",
	})
	c.Assert(docs[2]["x"], gc.Equals, "message1")
}
Example #4
File: unit.go Project: frankban/juju-tmp
func (a *UnitAgent) APIWorkers() (_ worker.Worker, err error) {
	agentConfig := a.CurrentConfig()
	dataDir := agentConfig.DataDir()
	hookLock, err := cmdutil.HookExecutionLock(dataDir)
	if err != nil {
		return nil, err
	}
	st, entity, err := OpenAPIState(agentConfig, a)
	if err != nil {
		return nil, err
	}
	unitTag, err := names.ParseUnitTag(entity.Tag())
	if err != nil {
		return nil, errors.Trace(err)
	}
	// Ensure that the environment uuid is stored in the agent config.
	// Luckily the API has it recorded for us after we connect.
	if agentConfig.Environment().Id() == "" {
		err := a.ChangeConfig(func(setter agent.ConfigSetter) error {
			environTag, err := st.EnvironTag()
			if err != nil {
				return errors.Annotate(err, "no environment uuid set on api")
			}

			return setter.Migrate(agent.MigrateParams{
				Environment: environTag,
			})
		})
		if err != nil {
			logger.Warningf("unable to save environment uuid: %v", err)
			// Not really fatal, just annoying.
		}
	}

	defer func() {
		if err != nil {
			st.Close()
			reportClosedUnitAPI(st)
		}
	}()

	// Before starting any workers, ensure we record the Juju version this unit
	// agent is running.
	currentTools := &tools.Tools{Version: version.Current}
	apiStateUpgrader := a.getUpgrader(st)
	if err := apiStateUpgrader.SetVersion(agentConfig.Tag().String(), currentTools.Version); err != nil {
		return nil, errors.Annotate(err, "cannot set unit agent version")
	}

	runner := worker.NewRunner(cmdutil.ConnectionIsFatal(logger, st), cmdutil.MoreImportant)

	// start proxyupdater first to ensure proxy settings are correct
	runner.StartWorker("proxyupdater", func() (worker.Worker, error) {
		return proxyupdater.New(st.Environment(), false), nil
	})
	if feature.IsDbLogEnabled() {
		runner.StartWorker("logsender", func() (worker.Worker, error) {
			return logsender.New(a.bufferedLogs, agentConfig.APIInfo()), nil
		})
	}
	runner.StartWorker("upgrader", func() (worker.Worker, error) {
		return upgrader.NewAgentUpgrader(
			st.Upgrader(),
			agentConfig,
			agentConfig.UpgradedToVersion(),
			func() bool { return false },
			a.initialAgentUpgradeCheckComplete,
		), nil
	})
	runner.StartWorker("logger", func() (worker.Worker, error) {
		return workerlogger.NewLogger(st.Logger(), agentConfig), nil
	})
	runner.StartWorker("uniter", func() (worker.Worker, error) {
		uniterFacade, err := st.Uniter()
		if err != nil {
			return nil, errors.Trace(err)
		}
		uniterParams := uniter.UniterParams{
			uniterFacade,
			unitTag,
			leadership.NewClient(st),
			dataDir,
			hookLock,
			uniter.NewMetricsTimerChooser(),
			uniter.NewUpdateStatusTimer(),
			nil,
		}
		return uniter.NewUniter(&uniterParams), nil
	})

	runner.StartWorker("apiaddressupdater", func() (worker.Worker, error) {
		uniterFacade, err := st.Uniter()
		if err != nil {
			return nil, errors.Trace(err)
		}
		return apiaddressupdater.NewAPIAddressUpdater(uniterFacade, a), nil
	})
	if !featureflag.Enabled(feature.DisableRsyslog) {
		runner.StartWorker("rsyslog", func() (worker.Worker, error) {
			return cmdutil.NewRsyslogConfigWorker(st.Rsyslog(), agentConfig, rsyslog.RsyslogModeForwarding)
		})
	}
	return cmdutil.NewCloseWorker(logger, runner, st), nil
}
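APIWorkers declares a named error return (`_ worker.Worker, err error`) so that the deferred function can close the API state and report it if any later step fails. A minimal standalone sketch of that cleanup idiom; the resource type and function names here are hypothetical, not part of the juju code.

package main

import (
	"errors"
	"fmt"
)

type conn struct{}

func (c *conn) Close() { fmt.Println("connection closed") }

// setup opens a connection and then performs further steps. Because err is a
// named return, the deferred function observes the final error value and can
// release the connection when anything after the open fails.
func setup(failLater bool) (_ *conn, err error) {
	c := &conn{}
	defer func() {
		if err != nil {
			c.Close()
		}
	}()

	if failLater {
		return nil, errors.New("later step failed")
	}
	return c, nil
}

func main() {
	if _, err := setup(true); err != nil {
		fmt.Println("setup:", err)
	}
}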