Example #1
func newDBLock(dirPath string) (*dbLock, error) {
	l, err := lock.NewLock(dirPath, lock.Dir)
	if err != nil {
		return nil, err
	}
	return &dbLock{fl: l}, nil
}
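The wrapper above only opens the lock on the database directory. Below is a minimal, hypothetical sketch of how such a wrapper might acquire and release the lock, reusing the FileLock methods (ExclusiveLock, Unlock, Close) that appear in the later examples; the dbLock methods themselves are an assumption and are not part of the snippet above.

// Hypothetical helpers for the dbLock wrapper from Example #1.
// ExclusiveLock, Unlock, and Close are the pkg/lock FileLock methods
// used elsewhere in these examples.
func (dl *dbLock) lock() error {
	return dl.fl.ExclusiveLock()
}

func (dl *dbLock) unlock() error {
	return dl.fl.Unlock()
}

func (dl *dbLock) close() error {
	return dl.fl.Close()
}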
Example #2
func getPodDefaultIP(workDir string) (string, error) {
	// get pod lock
	l, err := lock.NewLock(workDir, lock.Dir)
	if err != nil {
		return "", err
	}

	// get file descriptor for lock
	fd, err := l.Fd()
	if err != nil {
		return "", err
	}

	// use this descriptor to read the pod network configuration
	nets, err := netinfo.LoadAt(fd)
	if err != nil {
		return "", err
	}
	// a kvm-flavored container must have the default vm<->host network in the first position
	if len(nets) == 0 {
		return "", fmt.Errorf("pod has no configured networks")
	}

	for _, net := range nets {
		if net.NetName == "default" || net.NetName == "default-restricted" {
			return net.IP.String(), nil
		}
	}

	return "", fmt.Errorf("pod has no default network!")
}
Example #3
// NewPod creates a new pod directory in the "preparing" state, allocating a unique uuid for it in the process.
// The returned pod is always left in an exclusively locked state (preparing is locked in the prepared directory)
// The pod must be closed using pod.Close()
func NewPod(dataDir string) (*Pod, error) {
	if err := initPods(dataDir); err != nil {
		return nil, err
	}

	p := &Pod{
		dataDir:     dataDir,
		createdByMe: true,
		isEmbryo:    true, // starts as an embryo, then ToPreparing locks, renames, and sets isPreparing
		// rest start false.
	}

	var err error
	p.UUID, err = types.NewUUID(uuid.New())
	if err != nil {
		return nil, errwrap.Wrap(errors.New("error creating UUID"), err)
	}

	err = os.Mkdir(p.embryoPath(), 0750)
	if err != nil {
		return nil, err
	}

	p.FileLock, err = lock.NewLock(p.embryoPath(), lock.Dir)
	if err != nil {
		os.Remove(p.embryoPath())
		return nil, err
	}

	err = p.ToPreparing()
	if err != nil {
		return nil, err
	}

	// At this point we have:
	// /var/lib/rkt/pods/prepare/$uuid << exclusively locked to indicate "preparing"

	return p, nil
}
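A short, hypothetical usage sketch of NewPod as defined above; the only assumptions are that Pod.Close releases the lock, as the doc comment states, and that the caller just wants to report the allocated UUID.

// createPodExample is a hypothetical caller of NewPod from Example #3.
func createPodExample(dataDir string) error {
	p, err := NewPod(dataDir)
	if err != nil {
		return err
	}
	// The pod comes back exclusively locked in the "preparing" state;
	// Close releases the lock once we are done with it.
	defer p.Close()

	fmt.Println("created pod", p.UUID)
	return nil
}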
Example #4
// newPod creates a new pod directory in the "preparing" state, allocating a unique uuid for it in the process.
// The returned pod is always left in an exclusively locked state (preparing is locked in the prepared directory)
// The pod must be closed using pod.Close()
func newPod() (*pod, error) {
	if err := initPods(); err != nil {
		return nil, err
	}

	p := &pod{
		createdByMe: true,
		isEmbryo:    true, // starts as an embryo, then xToPreparing locks, renames, and sets isPreparing
		// rest start false.
	}

	var err error
	p.uuid, err = types.NewUUID(uuid.New())
	if err != nil {
		return nil, fmt.Errorf("error creating UUID: %v", err)
	}

	err = os.Mkdir(p.embryoPath(), 0750)
	if err != nil {
		return nil, err
	}

	p.FileLock, err = lock.NewLock(p.embryoPath(), lock.Dir)
	if err != nil {
		os.Remove(p.embryoPath())
		return nil, err
	}

	err = p.xToPreparing()
	if err != nil {
		return nil, err
	}

	// At this point we have:
	// /var/lib/rkt/pods/prepare/$uuid << exclusively locked to indicate "preparing"

	return p, nil
}
Example #5
func NewStore(baseDir string) (*Store, error) {
	storeDir := filepath.Join(baseDir, "cas")

	s := &Store{
		dir:    storeDir,
		stores: make([]*diskv.Diskv, len(diskvStores)),
	}

	s.imageLockDir = filepath.Join(storeDir, "imagelocks")
	err := os.MkdirAll(s.imageLockDir, defaultPathPerm)
	if err != nil {
		return nil, err
	}

	s.treeStoreLockDir = filepath.Join(storeDir, "treestorelocks")
	err = os.MkdirAll(s.treeStoreLockDir, defaultPathPerm)
	if err != nil {
		return nil, err
	}

	// Take a shared cas lock
	s.storeLock, err = lock.NewLock(storeDir, lock.Dir)
	if err != nil {
		return nil, err
	}

	for i, p := range diskvStores {
		s.stores[i] = diskv.New(diskv.Options{
			BasePath:  filepath.Join(storeDir, p),
			Transform: blockTransform,
		})
	}
	db, err := NewDB(filepath.Join(storeDir, "db"))
	if err != nil {
		return nil, err
	}
	s.db = db

	s.treestore = &TreeStore{path: filepath.Join(storeDir, "tree")}

	needsMigrate := false
	fn := func(tx *sql.Tx) error {
		var err error
		ok, err := dbIsPopulated(tx)
		if err != nil {
			return err
		}
		// populate the db
		if !ok {
			for _, stmt := range dbCreateStmts {
				_, err = tx.Exec(stmt)
				if err != nil {
					return err
				}
			}
			return nil
		}
		// if db is populated check its version
		version, err := getDBVersion(tx)
		if err != nil {
			return err
		}
		if version < dbVersion {
			needsMigrate = true
		}
		if version > dbVersion {
			return fmt.Errorf("Current store db version: %d greater than the current rkt expected version: %d", version, dbVersion)
		}
		return nil
	}
	if err = db.Do(fn); err != nil {
		return nil, err
	}

	// migration is done in another transaction as it must take an exclusive
	// store lock. If, in the meantime, another process has already done the
	// migration, between the previous db version check and the below
	// migration code, the migration will do nothing as it'll start
	// migration from the current version.
	if needsMigrate {
		// Take an exclusive store lock
		err := s.storeLock.ExclusiveLock()
		if err != nil {
			return nil, err
		}
		if err := s.backupDB(); err != nil {
			return nil, err
		}
		fn := func(tx *sql.Tx) error {
			return migrate(tx, dbVersion)
		}
		if err = db.Do(fn); err != nil {
			return nil, err
		}
	}

	return s, nil
}
Example #6
// getPod returns a pod struct representing the given pod.
// The returned lock is always left in an open but unlocked state.
// The pod must be closed using pod.Close()
func getPod(uuid *types.UUID) (*pod, error) {
	if err := initPods(); err != nil {
		return nil, err
	}

	p := &pod{uuid: uuid}

	// we try to open the pod in all possible directories, in the same order the states occur
	l, err := lock.NewLock(p.embryoPath(), lock.Dir)
	if err == nil {
		p.isEmbryo = true
	} else if err == lock.ErrNotExist {
		l, err = lock.NewLock(p.preparePath(), lock.Dir)
		if err == nil {
			// treat as aborted prepare until lock is tested
			p.isAbortedPrepare = true
		} else if err == lock.ErrNotExist {
			l, err = lock.NewLock(p.preparedPath(), lock.Dir)
			if err == nil {
				p.isPrepared = true
			} else if err == lock.ErrNotExist {
				l, err = lock.NewLock(p.runPath(), lock.Dir)
				if err == nil {
					// treat as exited until lock is tested
					p.isExited = true
				} else if err == lock.ErrNotExist {
					l, err = lock.NewLock(p.exitedGarbagePath(), lock.Dir)
					if err == lock.ErrNotExist {
						l, err = lock.NewLock(p.garbagePath(), lock.Dir)
						if err == nil {
							p.isGarbage = true
						} else {
							return nil, fmt.Errorf("pod %q not found", uuid)
						}
					} else if err == nil {
						p.isExitedGarbage = true
						p.isExited = true // ExitedGarbage is _always_ implicitly exited
					}
				}
			}
		}
	}

	if err != nil && err != lock.ErrNotExist {
		return nil, errwrap.Wrap(fmt.Errorf("error opening pod %q", uuid), err)
	}

	if !p.isPrepared && !p.isEmbryo {
		// preparing, run, exitedGarbage, and garbage dirs use exclusive locks to indicate preparing/aborted, running/exited, and deleting/marked
		if err = l.TrySharedLock(); err != nil {
			if err != lock.ErrLocked {
				l.Close()
				return nil, errwrap.Wrap(errors.New("unexpected lock error"), err)
			}
			if p.isExitedGarbage {
				// locked exitedGarbage is also being deleted
				p.isExitedDeleting = true
			} else if p.isExited {
				// locked exited and !exitedGarbage is not exited (default in the run dir)
				p.isExited = false
			} else if p.isAbortedPrepare {
				// locked in preparing is preparing, not aborted (default in the preparing dir)
				p.isAbortedPrepare = false
				p.isPreparing = true
			} else if p.isGarbage {
				// locked in non-exited garbage is deleting
				p.isDeleting = true
			}
			err = nil
		} else {
			l.Unlock()
		}
	}

	p.FileLock = l

	if p.isRunning() {
		cfd, err := p.Fd()
		if err != nil {
			return nil, errwrap.Wrap(fmt.Errorf("error acquiring pod %v dir fd", uuid), err)
		}
		p.nets, err = netinfo.LoadAt(cfd)
		// ENOENT is ok -- assume running with --net=host
		if err != nil && !os.IsNotExist(err) {
			return nil, errwrap.Wrap(fmt.Errorf("error opening pod %v netinfo", uuid), err)
		}
	}

	return p, nil
}
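The nested lookup above relies on one pattern worth isolating: open the directory lock, then use TrySharedLock to tell an exclusively locked (active) pod apart from an idle one. The following is a stripped-down sketch of that pattern, using only the pkg/lock calls already shown in these examples; the helper itself is hypothetical.

// dirIsExclusivelyLocked is a hypothetical helper that reports whether
// another process currently holds an exclusive lock on dir.
func dirIsExclusivelyLocked(dir string) (bool, error) {
	l, err := lock.NewLock(dir, lock.Dir)
	if err != nil {
		return false, err
	}
	defer l.Close()

	switch err := l.TrySharedLock(); err {
	case lock.ErrLocked:
		// someone holds the lock exclusively: the pod is preparing, running, or being deleted
		return true, nil
	case nil:
		// we got a shared lock, so nobody holds it exclusively; release it again
		return false, l.Unlock()
	default:
		return false, err
	}
}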
Example #7
func TestGetPodAndRefreshState(t *testing.T) {
	testCases := []struct {
		paths    []dirFn
		locks    []dirFn
		expected states
	}{
		{
			paths:    []dirFn{embryoDir},
			expected: states{isEmbryo: true},
		},
		{
			paths:    []dirFn{prepareDir},
			locks:    []dirFn{prepareDir},
			expected: states{isPreparing: true},
		},
		{
			paths:    []dirFn{prepareDir},
			expected: states{isAbortedPrepare: true},
		},
		{
			paths:    []dirFn{runDir},
			locks:    []dirFn{runDir},
			expected: states{},
		},
		{
			paths:    []dirFn{runDir},
			expected: states{isExited: true},
		},
		{
			paths:    []dirFn{garbageDir},
			expected: states{isGarbage: true},
		},
		{
			paths:    []dirFn{garbageDir},
			locks:    []dirFn{garbageDir},
			expected: states{isGarbage: true, isDeleting: true},
		},
	}

	uuid, err := types.NewUUID("aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa")
	if err != nil {
		panic(err)
	}

	for i, tcase := range testCases {
		tmpDir, err := ioutil.TempDir("", "")
		if err != nil {
			panic(err)
		}
		defer os.RemoveAll(tmpDir)

		for _, pfn := range tcase.paths {
			podPath := filepath.Join(pfn(tmpDir), uuid.String())
			if err := os.MkdirAll(podPath, 0777); err != nil {
				panic(err)
			}
		}

		for _, lfn := range tcase.locks {
			podPath := filepath.Join(lfn(tmpDir), uuid.String())
			l, err := lock.NewLock(podPath, lock.Dir)
			if err != nil {
				t.Fatalf("error taking lock on directory: %v", err)
			}
			err = l.ExclusiveLock()
			if err != nil {
				t.Fatalf("could not get exclusive lock on directory: %v", err)
			}
			defer l.Unlock()
		}

		p, err := getPod(tmpDir, uuid)
		if err != nil {
			t.Fatalf("%v: unable to get pod: %v", i, err)
		}

		pstate := podToStates(p)
		if !reflect.DeepEqual(tcase.expected, pstate) {
			t.Errorf("%v: expected %+v == %+v after getPod", i, tcase.expected, pstate)
		}

		err = p.refreshState()
		if err != nil {
			t.Errorf("error refreshing state: %v", err)
			continue
		}

		pstate = podToStates(p)
		if !reflect.DeepEqual(tcase.expected, pstate) {
			t.Errorf("%v: expected %+v == %+v after refrshState", i, tcase.expected, pstate)
		}
	}
}
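The test above depends on a few helpers that are not part of this snippet (dirFn, the *Dir path functions, states, and podToStates). Purely as an assumption, to make the example easier to follow, they could look roughly like the sketch below; the actual definitions in the surrounding test file may differ.

// dirFn maps a temporary root directory to one of the pod state directories.
type dirFn func(root string) string

// Hypothetical state directory layout under the temporary root.
func embryoDir(root string) string  { return filepath.Join(root, "pods", "embryo") }
func prepareDir(root string) string { return filepath.Join(root, "pods", "prepare") }
func runDir(root string) string     { return filepath.Join(root, "pods", "run") }
func garbageDir(root string) string { return filepath.Join(root, "pods", "garbage") }

// states mirrors the boolean state flags on the pod that the test compares.
type states struct {
	isEmbryo, isPreparing, isAbortedPrepare, isPrepared bool
	isExited, isExitedGarbage, isExitedDeleting         bool
	isGarbage, isDeleting                               bool
}

// podToStates copies the pod's state flags into a plain struct for comparison.
func podToStates(p *Pod) states {
	return states{
		isEmbryo:         p.isEmbryo,
		isPreparing:      p.isPreparing,
		isAbortedPrepare: p.isAbortedPrepare,
		isPrepared:       p.isPrepared,
		isExited:         p.isExited,
		isExitedGarbage:  p.isExitedGarbage,
		isExitedDeleting: p.isExitedDeleting,
		isGarbage:        p.isGarbage,
		isDeleting:       p.isDeleting,
	}
}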
Example #8
func NewStore(dir string) (*Store, error) {
	// We need to allow the store's setgid bits (if any) to propagate, so
	// disable umask
	um := syscall.Umask(0)
	defer syscall.Umask(um)

	s := &Store{
		dir:    dir,
		stores: make([]*diskv.Diskv, len(diskvStores)),
	}

	s.imageLockDir = filepath.Join(dir, "imagelocks")
	err := os.MkdirAll(s.imageLockDir, defaultPathPerm)
	if err != nil {
		return nil, err
	}

	// Take a shared cas lock
	s.storeLock, err = lock.NewLock(dir, lock.Dir)
	if err != nil {
		return nil, err
	}
	if err := s.storeLock.SharedLock(); err != nil {
		return nil, err
	}

	for i, p := range diskvStores {
		s.stores[i] = diskv.New(diskv.Options{
			PathPerm:  defaultPathPerm,
			FilePerm:  defaultFilePerm,
			BasePath:  filepath.Join(dir, p),
			Transform: blockTransform,
		})
	}
	db, err := db.NewDB(s.dbDir())
	if err != nil {
		return nil, err
	}
	s.db = db

	needsMigrate := false
	needsSizePopulation := false
	fn := func(tx *sql.Tx) error {
		var err error
		ok, err := dbIsPopulated(tx)
		if err != nil {
			return err
		}
		// populate the db
		if !ok {
			for _, stmt := range dbCreateStmts {
				_, err = tx.Exec(stmt)
				if err != nil {
					return err
				}
			}
			return nil
		}
		// if db is populated check its version
		version, err := getDBVersion(tx)
		if err != nil {
			return err
		}
		if version < dbVersion {
			needsMigrate = true
		}
		if version > dbVersion {
			return fmt.Errorf("current store db version: %d (greater than the current rkt expected version: %d)", version, dbVersion)
		}
		if version < 5 {
			needsSizePopulation = true
		}
		return nil
	}
	if err = db.Do(fn); err != nil {
		return nil, err
	}

	// migration is done in another transaction as it must take an exclusive
	// store lock. If, in the meantime, another process has already done the
	// migration, between the previous db version check and the below
	// migration code, the migration will do nothing as it'll start
	// migration from the current version.
	if needsMigrate {
		// Take an exclusive store lock
		err := s.storeLock.ExclusiveLock()
		if err != nil {
			return nil, err
		}
		if err := s.backupDB(); err != nil {
			return nil, err
		}
		fn := func(tx *sql.Tx) error {
			return migrate(tx, dbVersion)
		}
		if err = db.Do(fn); err != nil {
			return nil, err
		}

		if needsSizePopulation {
			if err := s.populateSize(); err != nil {
				return nil, err
			}
		}
	}

	return s, nil
}
Example #9
// getPod returns a pod struct representing the given pod.
// The returned lock is always left in an open but unlocked state.
// The pod must be closed using pod.Close()
func getPod(dataDir string, uuid *types.UUID) (*Pod, error) {
	if err := initPods(dataDir); err != nil {
		return nil, err
	}

	p := &Pod{UUID: uuid, dataDir: dataDir}

	// dirStates is a list of directories -> state that directory existing
	// implies.
	// Its order matches the order states occur.
	dirStates := []struct {
		dir           string
		impliedStates []*bool
	}{
		{dir: p.embryoPath(), impliedStates: []*bool{&p.isEmbryo}},
		// For prepare, assume it's aborted prepare until it gets updated below
		{dir: p.preparePath(), impliedStates: []*bool{&p.isAbortedPrepare}},
		{dir: p.preparedPath(), impliedStates: []*bool{&p.isPrepared}},
		// For run, assume exited until the lock is tested
		{dir: p.runPath(), impliedStates: []*bool{&p.isExited}},
		// Exited garbage implies exited
		{dir: p.exitedGarbagePath(), impliedStates: []*bool{&p.isExitedGarbage, &p.isExited}},
		{dir: p.garbagePath(), impliedStates: []*bool{&p.isGarbage}},
	}

	var l *lock.FileLock
	var err error
	for _, dirState := range dirStates {
		l, err = lock.NewLock(dirState.dir, lock.Dir)
		if err == nil {
			for _, s := range dirState.impliedStates {
				*s = true
			}
			break
		}
		if err == lock.ErrNotExist {
			continue
		}
		// unexpected lock error
		return nil, errwrap.Wrap(fmt.Errorf("error opening pod %q", uuid), err)
	}

	if !p.isPrepared && !p.isEmbryo {
		// preparing, run, exitedGarbage, and garbage dirs use exclusive locks to indicate preparing/aborted, running/exited, and deleting/marked
		if err = l.TrySharedLock(); err != nil {
			if err != lock.ErrLocked {
				l.Close()
				return nil, errwrap.Wrap(errors.New("unexpected lock error"), err)
			}
			if p.isExitedGarbage {
				// locked exitedGarbage is also being deleted
				p.isExitedDeleting = true
			} else if p.isExited {
				// locked exited and !exitedGarbage is not exited (default in the run dir)
				p.isExited = false
			} else if p.isAbortedPrepare {
				// locked in preparing is preparing, not aborted (default in the preparing dir)
				p.isAbortedPrepare = false
				p.isPreparing = true
			} else if p.isGarbage {
				// locked in non-exited garbage is deleting
				p.isDeleting = true
			}
			err = nil
		} else {
			l.Unlock()
		}
	}

	p.FileLock = l

	if p.isRunning() {
		cfd, err := p.Fd()
		if err != nil {
			return nil, errwrap.Wrap(fmt.Errorf("error acquiring pod %v dir fd", uuid), err)
		}
		p.Nets, err = netinfo.LoadAt(cfd)
		// ENOENT is ok -- assume running with --net=host
		if err != nil && !os.IsNotExist(err) {
			return nil, errwrap.Wrap(fmt.Errorf("error opening pod %v netinfo", uuid), err)
		}
	}

	return p, nil
}