// getTmpROC returns a removeOnClose instance wrapping a temporary
// file provided by the passed store. The actual file name is based on
// a hash of the passed path.
func getTmpROC(s *imagestore.Store, path string) (*removeOnClose, error) {
	h := sha512.New()
	h.Write([]byte(path))
	pathHash := s.HashToKey(h)

	tmp, err := s.TmpNamedFile(pathHash)
	if err != nil {
		return nil, errwrap.Wrap(errors.New("error setting up temporary file"), err)
	}

	// Lock the file to avoid concurrent writes to the temporary file; the
	// lock goes away when the temp file is removed.
	_, err = lock.TryExclusiveLock(tmp.Name(), lock.RegFile)
	if err != nil {
		if err != lock.ErrLocked {
			return nil, errwrap.Wrap(errors.New("failed to lock temporary file"), err)
		}
		log.Printf("another rkt instance is downloading this file, waiting...")
		_, err = lock.ExclusiveLock(tmp.Name(), lock.RegFile)
		if err != nil {
			return nil, errwrap.Wrap(errors.New("failed to lock temporary file"), err)
		}
	}

	return &removeOnClose{File: tmp}, nil
}
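// A minimal usage sketch, not from the rkt sources: "fetchInto" and the
// download step are hypothetical. It only illustrates that the caller gets
// back a locked temporary file and is expected to Close the removeOnClose
// when done, which (as the type name suggests) also removes the file.
func fetchInto(s *imagestore.Store, url string) error {
	tmp, err := getTmpROC(s, url)
	if err != nil {
		return err
	}
	defer tmp.Close()

	// ... download url into tmp.File here ...
	return nil
}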
// Do opens the db, executes the given txfuncs in a single transaction and
// then closes the db.
// To support a multiprocess and multigoroutine model on a single-file
// database like ql, the db has to be exclusively locked, opened, closed and
// unlocked for every transaction. For this reason every db transaction
// should be fast so it does not block other processes/goroutines.
func (db *DB) Do(fns ...txfunc) error {
	l, err := lock.ExclusiveLock(db.dbdir, lock.Dir)
	if err != nil {
		return err
	}
	defer l.Close()

	sqldb, err := sql.Open("ql", filepath.Join(db.dbdir, DbFilename))
	if err != nil {
		return err
	}
	defer sqldb.Close()

	tx, err := sqldb.Begin()
	if err != nil {
		return err
	}
	for _, fn := range fns {
		if err := fn(tx); err != nil {
			tx.Rollback()
			return err
		}
	}
	return tx.Commit()
}
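// A minimal sketch of a caller of Do, assuming it lives in the same package;
// the "aciinfo" table and the query are illustrative only. Because Do takes
// the exclusive directory lock and opens/closes the db around the whole call,
// the txfunc body should stay as small and fast as possible.
func countACIInfos(db *DB) (int, error) {
	var count int
	err := db.Do(func(tx *sql.Tx) error {
		return tx.QueryRow("SELECT count(*) FROM aciinfo").Scan(&count)
	})
	return count, err
}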
// ExclusiveManifestLock gets an exclusive lock on only the pod manifest.
// This is used in the app sandbox - since the pod is already running, we
// won't be able to get an exclusive lock on the pod itself.
func (p *Pod) ExclusiveManifestLock() error {
	if p.manifestLock != nil {
		return p.manifestLock.ExclusiveLock() // This is idempotent
	}

	l, err := lock.ExclusiveLock(common.PodManifestLockPath(p.Path()), lock.RegFile)
	if err != nil {
		return err
	}
	p.manifestLock = l
	return nil
}
// ExclusiveLockManifest gets an exclusive lock on only the pod manifest in
// the app sandbox. Since the pod is already running, we won't be able to get
// an exclusive lock on the pod itself.
func (p *Pod) ExclusiveLockManifest() error {
	if !p.isRunning() {
		return errors.New("pod is not running")
	}
	if p.manifestLock != nil {
		return p.manifestLock.ExclusiveLock() // This is idempotent
	}

	l, err := lock.ExclusiveLock(common.PodManifestLockPath(p.Path()), lock.RegFile)
	if err != nil {
		return err
	}
	p.manifestLock = l
	return nil
}
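// A minimal usage sketch (the helper is hypothetical, not from the rkt
// sources): take the manifest lock before mutating the manifest of a running
// pod, e.g. when adding an app to an app sandbox. The lock is kept on the
// Pod in manifestLock, so releasing it is left to the pod's own cleanup.
func updateManifest(p *Pod, write func() error) error {
	if err := p.ExclusiveLockManifest(); err != nil {
		return err
	}
	return write()
}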
// Open takes an exclusive lock on the db dir and opens the ql database.
func (db *DB) Open() error {
	// take a lock on db dir
	if db.lock != nil {
		panic("cas db lock already gained")
	}
	dl, err := lock.ExclusiveLock(db.dbdir, lock.Dir)
	if err != nil {
		return err
	}
	db.lock = dl

	sqldb, err := sql.Open("ql", filepath.Join(db.dbdir, DbFilename))
	if err != nil {
		dl.Close()
		return err
	}
	db.sqldb = sqldb

	return nil
}
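// A minimal sketch of the session-style access pattern, assuming DB also has
// a Close method that closes db.sqldb and releases db.lock (not shown here).
// Open holds the exclusive lock on the db dir for the whole session, so other
// processes and goroutines are blocked until Close is called.
func withOpenDB(db *DB, f func(*sql.DB) error) error {
	if err := db.Open(); err != nil {
		return err
	}
	defer db.Close()
	return f(db.sqldb)
}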
func TestWalkPods(t *testing.T) {
	tests := [][]*struct {
		uuid     string
		exited   bool
		garbage  bool
		deleting bool
		expected bool

		n_matched int
	}{
		{ // nothing
		},
		{ // single executing pod
			{
				uuid:     "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
				exited:   false,
				garbage:  false,
				deleting: false,
				expected: true,
			},
		},
		{ // single exited pod
			{
				uuid:     "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
				exited:   true,
				garbage:  false,
				deleting: false,
				expected: true,
			},
		},
		{ // single garbage pod
			{
				uuid:     "cccccccc-cccc-cccc-cccc-cccccccccccc",
				exited:   true,
				garbage:  true,
				deleting: false,
				expected: true,
			},
		},
		{ // single deleting pod
			{
				uuid:     "dddddddd-dddd-dddd-dddd-dddddddddddd",
				exited:   true,
				garbage:  true,
				deleting: true,
				expected: true,
			},
		},
		{ // one of each
			{ // executing
				uuid:     "eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee",
				exited:   false,
				garbage:  false,
				deleting: false,
				expected: true,
			},
			{ // exited
				uuid:     "ffffffff-ffff-ffff-ffff-ffffffffffff",
				exited:   true,
				garbage:  false,
				deleting: false,
				expected: true,
			},
			{ // garbage
				uuid:     "f0f0f0f0-f0f0-f0f0-f0f0-f0f0f0f0f0f0",
				exited:   true,
				garbage:  true,
				deleting: false,
				expected: true,
			},
			{ // deleting
				uuid:     "f1f1f1f1-f1f1-f1f1-f1f1-f1f1f1f1f1f1",
				exited:   true,
				garbage:  true,
				deleting: true,
				expected: true,
			},
		},
		// TODO(vc): update to test new prepared/prepare-failed/non-exited-garbage states..
	}

	for _, tt := range tests {
		// start every test with a clean slate
		d, err := ioutil.TempDir("", "")
		if err != nil {
			t.Fatalf("error creating tmpdir: %v", err)
		}
		defer os.RemoveAll(d)

		if err := initPods(d); err != nil {
			t.Fatalf("error initializing pods: %v", err)
		}

		var (
			n_expected int
			n_walked   int
			n_matched  int
			included   IncludeMask
		)

		// create the pod dirs as specified by the test
		for _, ct := range tt {
			var cp string
			if ct.garbage {
				cp = filepath.Join(exitedGarbageDir(d), ct.uuid)
				included |= IncludeExitedGarbageDir
			} else {
				cp = filepath.Join(runDir(d), ct.uuid)
				included |= IncludeRunDir
			}

			if err := os.MkdirAll(cp, 0700); err != nil {
				t.Fatalf("error creating pod directory: %v", err)
			}

			if !ct.exited || ct.deleting {
				// acquire lock to simulate running and deleting pods
				l, err := lock.ExclusiveLock(cp, lock.Dir)
				if err != nil {
					t.Fatalf("error locking pod: %v", err)
				}
				defer l.Close()
			}

			if ct.expected {
				n_expected++
			}
		}

		// match what walk provided against the set in the test
		if err := WalkPods(d, included, func(ch *Pod) {
			n_walked++
			for _, ct := range tt {
				if ch.UUID.String() == ct.uuid &&
					ch.isExitedGarbage == ct.garbage &&
					ch.isExited == ct.exited &&
					ch.isExitedDeleting == ct.deleting {

					ct.n_matched++
					if ct.n_matched > 1 {
						t.Errorf("no pods should match multiple times")
					}
					n_matched++
				}
			}
		}); err != nil {
			t.Fatalf("error walking pods: %v", err)
		}

		if n_expected != n_matched {
			t.Errorf("walked: %d expected: %d matched: %d", n_walked, n_expected, n_matched)
		}

		for _, ct := range tt {
			if ct.expected && ct.n_matched == 0 {
				t.Errorf("pod %q expected but not matched", ct.uuid)
			}

			if !ct.expected && ct.n_matched != 0 {
				t.Errorf("pod %q matched but not expected", ct.uuid)
			}
		}
	}
}
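// The test above simulates running and deleting pods simply by holding an
// exclusive lock on the pod directory. A minimal sketch of the detection
// side, assuming only the lock package used above (the helper itself is
// hypothetical, not how WalkPods is necessarily implemented): a pod directory
// that cannot be exclusively locked is still in use by another process.
func isPodDirInUse(dir string) (bool, error) {
	l, err := lock.TryExclusiveLock(dir, lock.Dir)
	if err == lock.ErrLocked {
		// someone else holds the lock, e.g. a running pod
		return true, nil
	}
	if err != nil {
		return false, err
	}
	l.Close()
	return false, nil
}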