func (s *fslockSuite) TestMessageAcrossLocks(c *gc.C) {
	dir := c.MkDir()
	lock1, err := fslock.NewLock(dir, "testing")
	c.Assert(err, gc.IsNil)
	lock2, err := fslock.NewLock(dir, "testing")
	c.Assert(err, gc.IsNil)

	err = lock1.Lock("very busy")
	c.Assert(err, gc.IsNil)

	c.Assert(lock2.Message(), gc.Equals, "very busy")
}
func (s *fslockSuite) TestIsLockHeldTwoLocks(c *gc.C) {
	dir := c.MkDir()
	lock1, err := fslock.NewLock(dir, "testing", s.lockConfig)
	c.Assert(err, gc.IsNil)
	lock2, err := fslock.NewLock(dir, "testing", s.lockConfig)
	c.Assert(err, gc.IsNil)

	err = lock1.Lock("")
	c.Assert(err, gc.IsNil)
	c.Assert(lock2.IsLockHeld(), gc.Equals, false)
}
func (s *fslockSuite) TestLockWithTimeoutLocked(c *gc.C) {
	dir := c.MkDir()
	lock1, err := fslock.NewLock(dir, "testing", s.lockConfig)
	c.Assert(err, gc.IsNil)
	lock2, err := fslock.NewLock(dir, "testing", s.lockConfig)
	c.Assert(err, gc.IsNil)

	err = lock1.Lock("")
	c.Assert(err, gc.IsNil)

	err = lock2.LockWithTimeout(shortWait, "")
	c.Assert(errors.Cause(err), gc.Equals, fslock.ErrTimeout)
}
func (s *fslockSuite) TestIsLocked(c *gc.C) {
	dir := c.MkDir()
	lock1, err := fslock.NewLock(dir, "testing")
	c.Assert(err, gc.IsNil)
	lock2, err := fslock.NewLock(dir, "testing")
	c.Assert(err, gc.IsNil)

	err = lock1.Lock("")
	c.Assert(err, gc.IsNil)

	c.Assert(lock1.IsLocked(), gc.Equals, true)
	c.Assert(lock2.IsLocked(), gc.Equals, true)
}
func acquireEnvironmentLock(dir, operation string) (*fslock.Lock, error) {
	lock, err := fslock.NewLock(dir, lockName, fslock.Defaults())
	if err != nil {
		return nil, errors.Trace(err)
	}
	message := fmt.Sprintf("pid: %d, operation: %s", os.Getpid(), operation)
	err = lock.LockWithTimeout(lockTimeout, message)
	if err == nil {
		return lock, nil
	}
	if errors.Cause(err) != fslock.ErrTimeout {
		return nil, errors.Trace(err)
	}
	logger.Warningf("breaking configstore lock, lock dir: %s", filepath.Join(dir, lockName))
	logger.Warningf(" lock holder message: %s", lock.Message())
	// If we are unable to acquire the lock within the lockTimeout,
	// consider it broken for some reason, and break it.
	err = lock.BreakLock()
	if err != nil {
		return nil, errors.Annotate(err, "unable to break the configstore lock")
	}
	err = lock.LockWithTimeout(lockTimeout, message)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return lock, nil
}
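// A minimal caller sketch (hypothetical; withEnvironmentLock is not part of
// the original code): whoever acquires the lock is responsible for releasing
// it with Unlock once the critical section completes. The BreakLock call
// above is only the recovery path for a holder that crashed.
func withEnvironmentLock(dir string, fn func() error) error {
	lock, err := acquireEnvironmentLock(dir, "example-operation")
	if err != nil {
		return errors.Trace(err)
	}
	defer lock.Unlock()
	return fn()
}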
func (s startUniter) step(c *gc.C, ctx *context) { if s.unitTag == "" { s.unitTag = "unit-u-0" } if ctx.uniter != nil { panic("don't start two uniters!") } if ctx.api == nil { panic("API connection not established") } tag, err := names.ParseUnitTag(s.unitTag) if err != nil { panic(err.Error()) } locksDir := filepath.Join(ctx.dataDir, "locks") lock, err := fslock.NewLock(locksDir, "uniter-hook-execution") c.Assert(err, jc.ErrorIsNil) uniterParams := uniter.UniterParams{ St: ctx.api, UnitTag: tag, LeadershipManager: ctx.leader, DataDir: ctx.dataDir, HookLock: lock, MetricsTimerChooser: uniter.NewTestingMetricsTimerChooser( ctx.collectMetricsTicker.ReturnTimer, ctx.sendMetricsTicker.ReturnTimer, ), UpdateStatusSignal: ctx.updateStatusHookTicker.ReturnTimer, NewOperationExecutor: s.newExecutorFunc, } ctx.uniter = uniter.NewUniter(&uniterParams) uniter.SetUniterObserver(ctx.uniter, ctx) }
func (s startUniter) step(c *gc.C, ctx *context) { if s.unitTag == "" { s.unitTag = "unit-u-0" } if ctx.uniter != nil { panic("don't start two uniters!") } if ctx.api == nil { panic("API connection not established") } tag, err := names.ParseUnitTag(s.unitTag) if err != nil { panic(err.Error()) } locksDir := filepath.Join(ctx.dataDir, "locks") lock, err := fslock.NewLock(locksDir, "uniter-hook-execution") c.Assert(err, jc.ErrorIsNil) uniterParams := uniter.UniterParams{ ctx.api, tag, ctx.leader, ctx.dataDir, lock, uniter.NewTestingMetricsTimerChooser(ctx.metricsTicker.ReturnTimer), ctx.updateStatusHookTicker.ReturnTimer, } ctx.uniter = uniter.NewUniter(&uniterParams) uniter.SetUniterObserver(ctx.uniter, ctx) }
func (s *fslockSuite) TestTomb(c *gc.C) {
	const timeToDie = 200 * time.Millisecond
	die := tomb.Tomb{}

	dir := c.MkDir()
	lock, err := fslock.NewLock(dir, "testing", s.lockConfig)
	c.Assert(err, gc.IsNil)
	// Just use one lock, and try to lock it twice.
	err = lock.Lock("very busy")
	c.Assert(err, gc.IsNil)

	checkTomb := func() error {
		select {
		case <-die.Dying():
			return tomb.ErrDying
		default:
			// no-op to fall through to return.
		}
		return nil
	}

	go func() {
		time.Sleep(timeToDie)
		die.Killf("time to die")
	}()

	err = lock.LockWithFunc("won't happen", checkTomb)
	c.Assert(errors.Cause(err), gc.Equals, tomb.ErrDying)
	msg, err := lock.Message()
	c.Assert(err, gc.IsNil)
	c.Assert(msg, gc.Equals, "very busy")
}
func (s *fslockSuite) TestNewLockWithExistingDir(c *gc.C) {
	dir := c.MkDir()
	err := os.MkdirAll(dir, 0755)
	c.Assert(err, gc.IsNil)
	_, err = fslock.NewLock(dir, "special", s.lockConfig)
	c.Assert(err, gc.IsNil)
}
// ReadInfo implements Storage.ReadInfo.
func (d *diskStore) ReadInfo(envName string) (EnvironInfo, error) {
	// TODO: first try the new format, and if it doesn't exist, read the old format.
	// NOTE: any reading or writing from the directory should be done with a fslock
	// to make sure we have a consistent read or write. Also worth noting, we should
	// use a very short timeout.
	lock, err := fslock.NewLock(d.dir, lockName)
	if err != nil {
		return nil, errors.Trace(err)
	}
	err = lock.LockWithTimeout(lockTimeout, "reading")
	if err != nil {
		return nil, errors.Annotatef(err, "cannot read info")
	}
	defer lock.Unlock()

	info, err := d.readConnectionFile(envName)
	if err != nil {
		if errors.IsNotFound(err) {
			info, err = d.readJENVFile(envName)
		}
	}
	if err != nil {
		return nil, errors.Trace(err)
	}
	info.environmentDir = d.dir
	return info, nil
}
func (s startUniter) step(c *gc.C, ctx *context) { if s.unitTag == "" { s.unitTag = "unit-u-0" } if ctx.uniter != nil { panic("don't start two uniters!") } if ctx.api == nil { panic("API connection not established") } tag, err := names.ParseUnitTag(s.unitTag) if err != nil { panic(err.Error()) } locksDir := filepath.Join(ctx.dataDir, "locks") lock, err := fslock.NewLock(locksDir, "uniter-hook-execution") c.Assert(err, jc.ErrorIsNil) operationExecutor := operation.NewExecutor if s.newExecutorFunc != nil { operationExecutor = s.newExecutorFunc } uniterParams := uniter.UniterParams{ UniterFacade: ctx.api, UnitTag: tag, LeadershipTracker: ctx.leaderTracker, CharmDirLocker: ctx.charmDirLocker, DataDir: ctx.dataDir, MachineLock: lock, UpdateStatusSignal: ctx.updateStatusHookTicker.ReturnTimer, NewOperationExecutor: operationExecutor, Observer: ctx, } ctx.uniter = uniter.NewUniter(&uniterParams) }
func (s *ManifoldSuite) SetUpTest(c *gc.C) {
	s.BaseSuite.SetUpTest(c)
	s.stub = &testing.Stub{}
	s.manifoldConfig = meterstatus.ManifoldConfig{
		AgentName:                "agent-name",
		APICallerName:            "apicaller-name",
		MachineLockName:          "machine-lock-name",
		NewHookRunner:            meterstatus.NewHookRunner,
		NewMeterStatusAPIClient:  msapi.NewClient,
		NewConnectedStatusWorker: meterstatus.NewConnectedStatusWorker,
		NewIsolatedStatusWorker:  meterstatus.NewIsolatedStatusWorker,
	}
	s.manifold = meterstatus.Manifold(s.manifoldConfig)
	s.dataDir = c.MkDir()

	locksDir := c.MkDir()
	lock, err := fslock.NewLock(locksDir, "machine-lock", fslock.Defaults())
	c.Assert(err, jc.ErrorIsNil)

	s.dummyResources = dt.StubResources{
		"agent-name":        dt.StubResource{Output: &dummyAgent{dataDir: s.dataDir}},
		"apicaller-name":    dt.StubResource{Output: &dummyAPICaller{}},
		"machine-lock-name": dt.StubResource{Output: lock},
	}
	s.getResource = dt.StubGetResource(s.dummyResources)
}
func (s *rebootSuite) SetUpTest(c *gc.C) {
	var err error
	template := state.MachineTemplate{
		Series: coretesting.FakeDefaultSeries,
		Jobs:   []state.MachineJob{state.JobHostUnits},
	}
	s.JujuConnSuite.SetUpTest(c)

	s.stateAPI, s.machine = s.OpenAPIAsNewMachine(c)
	s.rebootState, err = s.stateAPI.Reboot()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(s.rebootState, gc.NotNil)

	// Add container.
	s.ct, err = s.State.AddMachineInsideMachine(template, s.machine.Id(), instance.KVM)
	c.Assert(err, jc.ErrorIsNil)
	password, err := utils.RandomPassword()
	c.Assert(err, jc.ErrorIsNil)
	err = s.ct.SetPassword(password)
	c.Assert(err, jc.ErrorIsNil)
	err = s.ct.SetProvisioned("foo", "fake_nonce", nil)
	c.Assert(err, jc.ErrorIsNil)

	// Open api as container.
	ctState := s.OpenAPIAsMachine(c, s.ct.Tag(), password, "fake_nonce")
	s.ctRebootState, err = ctState.Reboot()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(s.ctRebootState, gc.NotNil)

	lock, err := fslock.NewLock(c.MkDir(), "fake", fslock.Defaults())
	c.Assert(err, jc.ErrorIsNil)
	s.lock = lock
}
// loadImage loads an os image from the blobstore,
// downloading and caching it if necessary.
func (h *imagesDownloadHandler) loadImage(st *state.State, envuuid, kind, series, arch string) (
	*imagestorage.Metadata, io.ReadCloser, error,
) {
	// We want to ensure that if an image needs to be downloaded and cached,
	// this only happens once.
	imageIdent := fmt.Sprintf("image-%s-%s-%s-%s", envuuid, kind, series, arch)
	lockDir := filepath.Join(h.dataDir, "locks")
	lock, err := fslock.NewLock(lockDir, imageIdent, fslock.Defaults())
	if err != nil {
		return nil, nil, errors.Trace(err)
	}
	// Check the error from Lock; proceeding without the lock held would
	// defeat the single-download guarantee above.
	if err := lock.Lock("fetch and cache image " + imageIdent); err != nil {
		return nil, nil, errors.Trace(err)
	}
	defer lock.Unlock()
	storage := st.ImageStorage()
	metadata, imageReader, err := storage.Image(kind, series, arch)
	// Not in storage, so go fetch it.
	if errors.IsNotFound(err) {
		if err := h.fetchAndCacheLxcImage(storage, envuuid, series, arch); err != nil {
			return nil, nil, errors.Annotate(err, "error fetching and caching image")
		}
		err = networkOperationWitDefaultRetries(func() error {
			metadata, imageReader, err = storage.Image(string(instance.LXC), series, arch)
			return err
		}, "streaming os image from blobstore")()
	}
	if err != nil {
		return nil, nil, errors.Trace(err)
	}
	return metadata, imageReader, nil
}
func (s *store) lock(operation string) (*fslock.Lock, error) {
	lockName := "controllers.lock"
	lock, err := fslock.NewLock(osenv.JujuXDGDataHome(), lockName, fslock.Defaults())
	if err != nil {
		return nil, errors.Trace(err)
	}
	message := fmt.Sprintf("pid: %d, operation: %s", os.Getpid(), operation)
	err = lock.LockWithTimeout(lockTimeout, message)
	if err == nil {
		return lock, nil
	}
	if errors.Cause(err) != fslock.ErrTimeout {
		return nil, errors.Trace(err)
	}
	logger.Warningf("breaking jujuclient lock : %s", lockName)
	logger.Warningf(" lock holder message: %s", lock.Message())
	// If we are unable to acquire the lock within the lockTimeout,
	// consider it broken for some reason, and break it.
	err = lock.BreakLock()
	if err != nil {
		return nil, errors.Annotatef(err, "unable to break the jujuclient lock %v", lockName)
	}
	err = lock.LockWithTimeout(lockTimeout, message)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return lock, nil
}
func (s *fslockSuite) TestUnlock(c *gc.C) {
	dir := c.MkDir()
	lock, err := fslock.NewLock(dir, "testing", s.lockConfig)
	c.Assert(err, gc.IsNil)

	err = lock.Unlock()
	c.Assert(err, gc.Equals, fslock.ErrLockNotHeld)
}
func (s *fslockSuite) TestLockWithTimeoutUnlocked(c *gc.C) {
	dir := c.MkDir()
	lock, err := fslock.NewLock(dir, "testing", s.lockConfig)
	c.Assert(err, gc.IsNil)

	err = lock.LockWithTimeout(shortWait, "")
	c.Assert(err, gc.IsNil)
}
func newLockedLock(c *gc.C, cfg fslock.LockConfig) (lock *fslock.Lock, lockFile, aliveFile string) {
	dir := c.MkDir()
	lock, err := fslock.NewLock(dir, "testing", cfg)
	c.Assert(err, gc.IsNil)
	assertCanLock(c, lock)
	lockFile = path.Join(dir, "testing", "held")
	return lock, lockFile, dir
}
func (s *fslockSuite) TestNewLockWithExistingFileInPlace(c *gc.C) {
	dir := c.MkDir()
	err := os.MkdirAll(dir, 0755)
	c.Assert(err, gc.IsNil)
	path := path.Join(dir, "locks")

	err = ioutil.WriteFile(path, []byte("foo"), 0644)
	c.Assert(err, gc.IsNil)

	_, err = fslock.NewLock(path, "special", s.lockConfig)
	c.Assert(err, gc.ErrorMatches, utils.MkdirFailErrRegexp)
}
// NewJSONMetricsRecorder creates a new JSON metrics recorder.
// It checks whether the metrics spool directory exists and creates it if it
// does not. It then generates a new metric batch UUID, failing if batch
// files for that UUID already exist.
func NewJSONMetricsRecorder(spoolDir string, charmURL string) (rec *JSONMetricsRecorder, rErr error) {
	lock, err := fslock.NewLock(spoolDir, spoolLockName)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if err := lock.LockWithTimeout(lockTimeout, "initializing recorder"); err != nil {
		return nil, errors.Trace(err)
	}
	defer func() {
		err := lock.Unlock()
		if err != nil && rErr == nil {
			rErr = errors.Trace(err)
			rec = nil
		} else if err != nil {
			rErr = errors.Annotatef(err, "failed to unlock spool directory %q", spoolDir)
		}
	}()

	if err := checkSpoolDir(spoolDir); err != nil {
		return nil, errors.Trace(err)
	}

	mbUUID, err := utils.NewUUID()
	if err != nil {
		return nil, errors.Trace(err)
	}

	metaFile := filepath.Join(spoolDir, fmt.Sprintf("%s.meta", mbUUID.String()))
	dataFile := filepath.Join(spoolDir, mbUUID.String())
	if _, err := os.Stat(metaFile); !os.IsNotExist(err) {
		if err != nil {
			return nil, errors.Annotatef(err, "failed to stat file %s", metaFile)
		}
		return nil, errors.Errorf("file %s already exists", metaFile)
	}
	// Mirror the metaFile check: an existing data file is an error,
	// as is any stat failure other than "not exist".
	if _, err := os.Stat(dataFile); !os.IsNotExist(err) {
		if err != nil {
			return nil, errors.Annotatef(err, "failed to stat file %s", dataFile)
		}
		return nil, errors.Errorf("file %s already exists", dataFile)
	}

	if err := recordMetaData(metaFile, charmURL, mbUUID.String()); err != nil {
		return nil, errors.Trace(err)
	}

	recorder := &JSONMetricsRecorder{
		path: dataFile,
	}
	if err := recorder.open(); err != nil {
		return nil, errors.Trace(err)
	}
	return recorder, nil
}
// This test also happens to test that locks can get created when the parent
// lock directory doesn't exist.
func (s *fslockSuite) TestValidNamesLockDir(c *gc.C) {
	for _, name := range []string{
		"a",
		"longer",
		"longer-with.special-characters",
	} {
		dir := c.MkDir()
		_, err := fslock.NewLock(dir, name, s.lockConfig)
		c.Assert(err, gc.IsNil)
	}
}
// NewJSONMetricsReader creates a new JSON metrics reader for the specified spool directory.
func NewJSONMetricsReader(spoolDir string) (*JSONMetricsReader, error) {
	if _, err := os.Stat(spoolDir); err != nil {
		return nil, errors.Annotatef(err, "failed to open spool directory %q", spoolDir)
	}
	lock, err := fslock.NewLock(spoolDir, spoolLockName)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return &JSONMetricsReader{
		lock: lock,
		dir:  spoolDir,
	}, nil
}
func (s *fslockSuite) TestBreakLock(c *gc.C) {
	dir := c.MkDir()
	lock1, err := fslock.NewLock(dir, "testing", s.lockConfig)
	c.Assert(err, gc.IsNil)
	lock2, err := fslock.NewLock(dir, "testing", s.lockConfig)
	c.Assert(err, gc.IsNil)

	err = lock1.Lock("")
	c.Assert(err, gc.IsNil)

	err = lock2.BreakLock()
	c.Assert(err, gc.IsNil)
	c.Assert(lock2.IsLocked(), gc.Equals, false)

	// Normally locks are broken due to client crashes, not duration.
	err = lock1.Unlock()
	c.Assert(err, gc.Equals, fslock.ErrLockNotHeld)

	// Breaking a non-existent lock isn't an error.
	err = lock2.BreakLock()
	c.Assert(err, gc.IsNil)
}
func (s *fslockSuite) TestLockBlocks(c *gc.C) {
	dir := c.MkDir()
	lock1, err := fslock.NewLock(dir, "testing", s.lockConfig)
	c.Assert(err, gc.IsNil)
	lock2, err := fslock.NewLock(dir, "testing", s.lockConfig)
	c.Assert(err, gc.IsNil)

	acquired := make(chan struct{})
	err = lock1.Lock("")
	c.Assert(err, gc.IsNil)

	go func() {
		lock2.Lock("")
		acquired <- struct{}{}
		close(acquired)
	}()

	// Waiting for something not to happen is inherently hard...
	select {
	case <-acquired:
		c.Fatalf("Unexpected lock acquisition")
	case <-time.After(shortWait):
		// all good
	}

	err = lock1.Unlock()
	c.Assert(err, gc.IsNil)

	select {
	case <-acquired:
		// all good
	case <-time.After(longWait):
		c.Fatalf("Expected lock acquisition")
	}

	c.Assert(lock2.IsLockHeld(), gc.Equals, true)
}
func (s *fslockSuite) TestIsLockHeldBasics(c *gc.C) {
	dir := c.MkDir()
	lock, err := fslock.NewLock(dir, "testing", s.lockConfig)
	c.Assert(err, gc.IsNil)
	c.Assert(lock.IsLockHeld(), gc.Equals, false)

	err = lock.Lock("")
	c.Assert(err, gc.IsNil)
	c.Assert(lock.IsLockHeld(), gc.Equals, true)

	err = lock.Unlock()
	c.Assert(err, gc.IsNil)
	c.Assert(lock.IsLockHeld(), gc.Equals, false)
}
func acquireEnvironmentLock(dir, operation string) (*fslock.Lock, error) { lock, err := fslock.NewLock(dir, lockName) if err != nil { return nil, errors.Trace(err) } message := fmt.Sprintf("pid: %d, operation: %s", os.Getpid(), operation) err = lock.LockWithTimeout(lockTimeout, message) if err != nil { logger.Warningf("configstore lock held, lock dir: %s", filepath.Join(dir, lockName)) logger.Warningf(" lock holder message: %s", lock.Message()) return nil, errors.Trace(err) } return lock, nil }
func acquireEnvironmentLock(operation string) (*fslock.Lock, error) {
	// NOTE: any reading or writing from the directory should be done with a
	// fslock to make sure we have a consistent read or write. Also worth
	// noting, we should use a very short timeout.
	lock, err := fslock.NewLock(osenv.JujuHome(), lockName)
	if err != nil {
		return nil, errors.Trace(err)
	}
	err = lock.LockWithTimeout(lockTimeout, operation)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return lock, nil
}
func AcquireTemplateLock(name, message string) (*fslock.Lock, error) {
	logger.Infof("wait for fslock on %v", name)
	lock, err := fslock.NewLock(TemplateLockDir, name)
	if err != nil {
		logger.Tracef("failed to create fslock for template: %v", err)
		return nil, err
	}
	err = lock.Lock(message)
	if err != nil {
		logger.Tracef("failed to acquire lock for template: %v", err)
		return nil, err
	}
	return lock, nil
}
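// A minimal release sketch (hypothetical; releaseTemplateLock is not part of
// the original code): AcquireTemplateLock blocks until the lock is held, so
// the caller must pair it with Unlock when the template work finishes,
// including on error paths.
func releaseTemplateLock(lock *fslock.Lock) {
	if err := lock.Unlock(); err != nil {
		logger.Errorf("failed to unlock template lock: %v", err)
	}
}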
func (s *ContainerSetupSuite) SetUpTest(c *gc.C) {
	s.CommonProvisionerSuite.SetUpTest(c)
	aptCmdChan := s.HookCommandOutput(&apt.CommandOutput, []byte{}, nil)
	s.aptCmdChan = aptCmdChan

	// Set up provisioner for the state machine.
	s.agentConfig = s.AgentConfigForTag(c, names.NewMachineTag("0"))
	s.p = provisioner.NewEnvironProvisioner(s.provisioner, s.agentConfig)

	// Create a new container initialisation lock.
	s.initLockDir = c.MkDir()
	initLock, err := fslock.NewLock(s.initLockDir, "container-init")
	c.Assert(err, gc.IsNil)
	s.initLock = initLock
}
func (s *fslockSuite) TestMessage(c *gc.C) {
	dir := c.MkDir()
	lock, err := fslock.NewLock(dir, "testing")
	c.Assert(err, gc.IsNil)
	c.Assert(lock.Message(), gc.Equals, "")

	err = lock.Lock("my message")
	c.Assert(err, gc.IsNil)
	c.Assert(lock.Message(), gc.Equals, "my message")

	// Unlocking removes the message.
	err = lock.Unlock()
	c.Assert(err, gc.IsNil)
	c.Assert(lock.Message(), gc.Equals, "")
}