// TestListHidesTempDir checks that List never exposes the internal .tmp
// staging area: listing ".tmp" (or anything under or prefixed by it)
// behaves exactly as if the directory did not exist, returning a nil slice.
func (s *filestorageSuite) TestListHidesTempDir(c *gc.C) {
	err := s.writer.Put("test-write", bytes.NewReader(nil), 0)
	c.Assert(err, jc.ErrorIsNil)
	// A normally-written file shows up in an unprefixed listing.
	files, err := storage.List(s.reader, "")
	c.Assert(err, jc.ErrorIsNil)
	c.Check(files, gc.DeepEquals, []string{"test-write"})
	// Listing a nonexistent directory yields a nil slice, not an error.
	files, err = storage.List(s.reader, "no-such-directory")
	c.Assert(err, jc.ErrorIsNil)
	c.Check(files, gc.DeepEquals, []string(nil))
	// We also pretend the .tmp directory doesn't exist. If you call a
	// directory that doesn't exist, we just return an empty list of
	// strings, so we force the same behavior for '.tmp'
	// we poke in a file so it would have something to return
	s.createFile(c, ".tmp/test-file")
	files, err = storage.List(s.reader, ".tmp")
	c.Assert(err, jc.ErrorIsNil)
	c.Check(files, gc.DeepEquals, []string(nil))
	// For consistency, we refuse all other possibilities as well
	s.createFile(c, ".tmp/foo/bar")
	files, err = storage.List(s.reader, ".tmp/foo")
	c.Assert(err, jc.ErrorIsNil)
	c.Check(files, gc.DeepEquals, []string(nil))
	s.createFile(c, ".tmpother/foo")
	files, err = storage.List(s.reader, ".tmpother")
	c.Assert(err, jc.ErrorIsNil)
	c.Check(files, gc.DeepEquals, []string(nil))
}
// RemoveAll is specified in the StorageWriter interface. func (stor *maasStorage) RemoveAll() error { names, err := storage.List(stor, "") if err != nil { return err } // Remove all the objects in parallel so that we incur fewer round-trips. // If we're in danger of having hundreds of objects, // we'll want to change this to limit the number // of concurrent operations. var wg sync.WaitGroup wg.Add(len(names)) errc := make(chan error, len(names)) for _, name := range names { name := name go func() { defer wg.Done() if err := stor.Remove(name); err != nil { errc <- err } }() } wg.Wait() select { case err := <-errc: return fmt.Errorf("cannot delete all provider state: %v", err) default: } return nil }
func (s *filestorageSuite) TestList(c *gc.C) { names := []string{ "a/b/c", "a/bb", "a/c", "aa", "b/c/d", } for _, name := range names { s.createFile(c, name) } type test struct { prefix string expected []string } for i, test := range []test{ {"a", []string{"a/b/c", "a/bb", "a/c", "aa"}}, {"a/b", []string{"a/b/c", "a/bb"}}, {"a/b/c", []string{"a/b/c"}}, {"", names}, } { c.Logf("test %d: prefix=%q", i, test.prefix) files, err := storage.List(s.reader, test.prefix) c.Assert(err, gc.IsNil) c.Assert(files, gc.DeepEquals, test.expected) } }
func (s *filestorageSuite) TestList(c *gc.C) { names := []string{ "a/b/c", "a/bb", "a/c", "aa", "b/c/d", } for _, name := range names { s.createFile(c, name) } type test struct { prefix string expected []string } for i, test := range []test{ {"a", []string{"a/b/c", "a/bb", "a/c", "aa"}}, {"a/b", []string{"a/b/c", "a/bb"}}, {"a/b/c", []string{"a/b/c"}}, {"", names}, } { c.Logf("test %d: prefix=%q", i, test.prefix) files, err := storage.List(s.reader, test.prefix) c.Assert(err, jc.ErrorIsNil) i := len(files) j := len(test.expected) c.Assert(i, gc.Equals, j) for i := range files { c.Assert(files[i], jc.SamePath, test.expected[i]) } } }
func (s *storageSuite) TestListReturnsNoFilesIfNoFilesMatchPrefix(c *gc.C) { stor := NewStorage(s.makeEnviron()) s.fakeStoredFile(stor, "foo") listing, err := storage.List(stor, "bar") c.Assert(err, gc.IsNil) c.Check(listing, gc.DeepEquals, []string{}) }
func (s *storageSuite) TestListOperatesOnFlatNamespace(c *gc.C) { stor := NewStorage(s.makeEnviron()) s.fakeStoredFile(stor, "a/b/c/d") listing, err := storage.List(stor, "a/b") c.Assert(err, gc.IsNil) c.Check(listing, gc.DeepEquals, []string{"a/b/c/d"}) }
func (s *storageSuite) TestList(c *gc.C) { listener, _, _ := startServer(c) defer listener.Close() stor := httpstorage.Client(listener.Addr().String()) names, err := storage.List(stor, "a/b/c") c.Assert(err, gc.IsNil) c.Assert(names, gc.HasLen, 0) }
func (s *storageSuite) TestListReturnsOnlyFilesWithMatchingPrefix(c *gc.C) { stor := NewStorage(s.makeEnviron()) s.fakeStoredFile(stor, "abc") s.fakeStoredFile(stor, "xyz") listing, err := storage.List(stor, "x") c.Assert(err, gc.IsNil) c.Check(listing, gc.DeepEquals, []string{"xyz"}) }
func (s *storageSuite) TestListMatchesPrefixOnly(c *gc.C) { stor := NewStorage(s.makeEnviron()) s.fakeStoredFile(stor, "abc") s.fakeStoredFile(stor, "xabc") listing, err := storage.List(stor, "a") c.Assert(err, gc.IsNil) c.Check(listing, gc.DeepEquals, []string{"abc"}) }
// RemoveTools deletes all tools from the supplied storage. func RemoveTools(c *gc.C, stor storage.Storage) { names, err := storage.List(stor, "tools/releases/juju-") c.Assert(err, gc.IsNil) c.Logf("removing files: %v", names) for _, name := range names { err = stor.Remove(name) c.Check(err, gc.IsNil) } RemoveFakeToolsMetadata(c, stor) }
func checkList(c *gc.C, stor storage.StorageReader, prefix string, names []string) { lnames, err := storage.List(stor, prefix) c.Assert(err, jc.ErrorIsNil) i := len(lnames) j := len(names) c.Assert(i, gc.Equals, j) for i := range lnames { c.Assert(lnames[i], jc.SamePath, names[i]) } }
func checkList(c *gc.C, stor storage.StorageReader, prefix string, names []string) { lnames, err := storage.List(stor, prefix) c.Assert(err, gc.IsNil) // TODO(dfc) gocheck should grow an SliceEquals checker. expected := copyslice(lnames) sort.Strings(expected) actual := copyslice(names) sort.Strings(actual) c.Assert(expected, gc.DeepEquals, actual) }
// RemoveTools deletes all tools from the supplied storage. func RemoveTools(c *gc.C, stor storage.Storage, toolsDir string) { names, err := storage.List(stor, fmt.Sprintf("tools/%s/juju-", toolsDir)) c.Assert(err, jc.ErrorIsNil) c.Logf("removing files: %v", names) for _, name := range names { err = stor.Remove(name) c.Check(err, jc.ErrorIsNil) } RemoveFakeToolsMetadata(c, stor) }
func (s *storageSuite) TestListReturnsAllFilesIfPrefixEmpty(c *gc.C) { stor := NewStorage(s.makeEnviron()) files := []string{"1a", "2b", "3c"} for _, name := range files { s.fakeStoredFile(stor, name) } listing, err := storage.List(stor, "") c.Assert(err, gc.IsNil) c.Check(listing, gc.DeepEquals, files) }
func (s *storageSuite) TestListSortsResults(c *gc.C) { stor := NewStorage(s.makeEnviron()) files := []string{"4d", "1a", "3c", "2b"} for _, name := range files { s.fakeStoredFile(stor, name) } listing, err := storage.List(stor, "") c.Assert(err, gc.IsNil) c.Check(listing, gc.DeepEquals, []string{"1a", "2b", "3c", "4d"}) }
// ReadList returns a List of the tools in store with the given major.minor version.
// If minorVersion = -1, then only majorVersion is considered.
// If store contains no such tools, it returns ErrNoMatches.
func ReadList(stor storage.StorageReader, toolsDir string, majorVersion, minorVersion int) (coretools.List, error) {
	if minorVersion >= 0 {
		logger.Debugf("reading v%d.%d tools", majorVersion, minorVersion)
	} else {
		logger.Debugf("reading v%d.* tools", majorVersion)
	}
	storagePrefix := storagePrefix(toolsDir)
	names, err := storage.List(stor, storagePrefix)
	if err != nil {
		return nil, err
	}
	var list coretools.List
	var foundAnyTools bool
	for _, name := range names {
		// Normalize to forward slashes so the prefix/suffix checks below
		// also work on backslash-separated paths.
		name = filepath.ToSlash(name)
		if !strings.HasPrefix(name, storagePrefix) || !strings.HasSuffix(name, toolSuffix) {
			continue
		}
		var t coretools.Tools
		// The binary version string sits between the storage prefix and
		// the tool suffix in the object name.
		vers := name[len(storagePrefix) : len(name)-len(toolSuffix)]
		if t.Version, err = version.ParseBinary(vers); err != nil {
			// Skip names that don't parse as versions (e.g. stray files).
			logger.Debugf("failed to parse version %q: %v", vers, err)
			continue
		}
		// foundAnyTools distinguishes "no tools at all" (ErrNoTools) from
		// "tools exist but none match" (ErrNoMatches) at the end.
		foundAnyTools = true
		// Major version must match specified value.
		if t.Version.Major != majorVersion {
			continue
		}
		// If specified minor version value supplied, minor version must match.
		if minorVersion >= 0 && t.Version.Minor != minorVersion {
			continue
		}
		logger.Debugf("found %s", vers)
		if t.URL, err = stor.URL(name); err != nil {
			return nil, err
		}
		list = append(list, &t)
		// Older versions of Juju only know about ppc64, so add metadata for that arch.
		if t.Version.Arch == arch.PPC64EL {
			legacyPPC64Tools := t
			legacyPPC64Tools.Version.Arch = arch.LEGACY_PPC64
			list = append(list, &legacyPPC64Tools)
		}
	}
	if len(list) == 0 {
		if foundAnyTools {
			return nil, coretools.ErrNoMatches
		}
		return nil, ErrNoTools
	}
	return list, nil
}
// RemoveAll is specified in the StorageWriter interface.
// It deletes every object concurrently (bounded by maxConcurrentDeletes)
// and then deletes the container itself.
func (s *openstackstorage) RemoveAll() error {
	names, err := storage.List(s, "")
	if err != nil {
		return err
	}
	// Remove all the objects in parallel so as to minimize round-trips.
	// Start with a goroutine feeding all the names that need to be
	// deleted.
	toDelete := make(chan string)
	go func() {
		for _, name := range names {
			toDelete <- name
		}
		close(toDelete)
	}()
	// Now spawn up to N routines to actually issue the requests.
	maxRoutines := len(names)
	if maxConcurrentDeletes < maxRoutines {
		maxRoutines = maxConcurrentDeletes
	}
	var wg sync.WaitGroup
	wg.Add(maxRoutines)
	// Make a channel long enough to buffer all possible errors.
	errc := make(chan error, len(names))
	for i := 0; i < maxRoutines; i++ {
		go func() {
			for name := range toDelete {
				if err := s.Remove(name); err != nil {
					errc <- err
				}
			}
			wg.Done()
		}()
	}
	wg.Wait()
	// Report only the first error, if any; later ones stay buffered.
	select {
	case err := <-errc:
		return fmt.Errorf("cannot delete all provider state: %v", err)
	default:
	}
	s.Lock()
	defer s.Unlock()
	// Even if DeleteContainer fails, it won't harm if we try again - the
	// operation might have succeeded even if we get an error.
	s.madeContainer = false
	err = s.swift.DeleteContainer(s.containerName)
	// NOTE(review): ok presumably indicates a "not found" response, which
	// is treated as success (container already gone) — confirm against
	// maybeNotFound's definition.
	err, ok := maybeNotFound(err)
	if ok {
		return nil
	}
	return err
}
func (*storageSuite) TestListWithNonexistentContainerReturnsNoFiles(c *gc.C) { // If Azure returns a 404 it means the container doesn't exist. In this // case the provider should interpret this as "no files" and return nil. container := "container" response := makeResponse("", http.StatusNotFound) azStorage, transport := makeFakeStorage(container, "account", "") transport.AddExchange(response, nil) names, err := storage.List(azStorage, "prefix") c.Assert(err, gc.IsNil) c.Assert(names, gc.IsNil) }
func (s *storageSuite) TestPathValidity(c *gc.C) { stor, storageDir := s.makeStorage(c) err := os.Mkdir(filepath.Join(storageDir, "a"), 0755) c.Assert(err, gc.IsNil) createFiles(c, storageDir, "a/b") for _, prefix := range []string{"..", "a/../.."} { c.Logf("prefix: %q", prefix) _, err := storage.List(stor, prefix) c.Check(err, gc.ErrorMatches, regexp.QuoteMeta(fmt.Sprintf("%q escapes storage directory", prefix))) } // Paths are always relative, so a leading "/" may as well not be there. names, err := storage.List(stor, "/") c.Assert(err, gc.IsNil) c.Assert(names, gc.DeepEquals, []string{"a/b"}) // Paths will be canonicalised. names, err = storage.List(stor, "a/..") c.Assert(err, gc.IsNil) c.Assert(names, gc.DeepEquals, []string{"a/b"}) }
func (t *LiveTests) TestDestroy(c *gc.C) { s := t.Env.Storage() err := s.Put("foo", strings.NewReader("foo"), 3) c.Assert(err, gc.IsNil) err = s.Put("bar", strings.NewReader("bar"), 3) c.Assert(err, gc.IsNil) // Check that the bucket exists, so we can be sure // we have checked correctly that it's been destroyed. names, err := storage.List(s, "") c.Assert(err, gc.IsNil) c.Assert(len(names) >= 2, gc.Equals, true) t.Destroy(c) for a := ec2.ShortAttempt.Start(); a.Next(); { names, err = storage.List(s, "") if len(names) == 0 { break } } c.Assert(names, gc.HasLen, 0) }
func (s *storageSuite) TestRemoveAllDeletesAllFiles(c *gc.C) { stor := s.makeStorage("get-retrieves-file") const filename1 = "stored-data1" s.fakeStoredFile(stor, filename1) const filename2 = "stored-data2" s.fakeStoredFile(stor, filename2) err := stor.RemoveAll() c.Assert(err, gc.IsNil) listing, err := storage.List(stor, "") c.Assert(err, gc.IsNil) c.Assert(listing, gc.DeepEquals, []string{}) }
func (s *storageSuite) TestWithExclusiveLocks(c *gc.C) { stor, storageDir := s.makeStorage(c) // None of the methods (apart from URL) should be able to do anything // while an exclusive lock is held. s.flock(c, flockExclusive, storageDir) _, err := stor.URL("a") c.Assert(err, gc.IsNil) c.Assert(stor.Put("a", bytes.NewBuffer(nil), 0), gc.NotNil) c.Assert(stor.Remove("a"), gc.NotNil) c.Assert(stor.RemoveAll(), gc.NotNil) _, err = storage.Get(stor, "a") c.Assert(err, gc.NotNil) _, err = storage.List(stor, "") c.Assert(err, gc.NotNil) }
func (s *storageSuite) TestRemoveDeletesFile(c *gc.C) { const filename = "doomed.txt" stor := NewStorage(s.makeEnviron()) s.fakeStoredFile(stor, filename) err := stor.Remove(filename) c.Assert(err, gc.IsNil) _, err = storage.Get(stor, filename) c.Assert(err, jc.Satisfies, errors.IsNotFound) listing, err := storage.List(stor, filename) c.Assert(err, gc.IsNil) c.Assert(listing, gc.DeepEquals, []string{}) }
func (s *storageSuite) TestWithSharedLocks(c *gc.C) { stor, storageDir := s.makeStorage(c) // Get and List should be able to proceed with a shared lock. // All other methods should fail. createFiles(c, storageDir, "a") s.flock(c, flockShared, storageDir) _, err := storage.Get(stor, "a") c.Assert(err, gc.IsNil) _, err = storage.List(stor, "") c.Assert(err, gc.IsNil) c.Assert(stor.Put("a", bytes.NewBuffer(nil), 0), gc.NotNil) c.Assert(stor.Remove("a"), gc.NotNil) c.Assert(stor.RemoveAll(), gc.NotNil) }
func (*storageSuite) TestList(c *gc.C) { container := "container" response := makeResponse(blobListResponse, http.StatusOK) azStorage, transport := makeFakeStorage(container, "account", "") transport.AddExchange(response, nil) prefix := "prefix" names, err := storage.List(azStorage, prefix) c.Assert(err, gc.IsNil) c.Assert(transport.ExchangeCount, gc.Equals, 1) // The prefix has been passed down as a query parameter. c.Check(transport.Exchanges[0].Request.URL.Query()["prefix"], gc.DeepEquals, []string{prefix}) // The container name is used in the requested URL. c.Check(transport.Exchanges[0].Request.URL.String(), gc.Matches, ".*"+container+".*") c.Check(names, gc.DeepEquals, []string{"prefix-1", "prefix-2"}) }
// ReadList returns a List of the tools in store with the given major.minor version.
// If minorVersion = -1, then only majorVersion is considered.
// If store contains no such tools, it returns ErrNoMatches.
func ReadList(stor storage.StorageReader, majorVersion, minorVersion int) (coretools.List, error) {
	if minorVersion >= 0 {
		logger.Debugf("reading v%d.%d tools", majorVersion, minorVersion)
	} else {
		logger.Debugf("reading v%d.* tools", majorVersion)
	}
	names, err := storage.List(stor, toolPrefix)
	if err != nil {
		return nil, err
	}
	var list coretools.List
	var foundAnyTools bool
	for _, name := range names {
		if !strings.HasPrefix(name, toolPrefix) || !strings.HasSuffix(name, toolSuffix) {
			continue
		}
		var t coretools.Tools
		// The binary version string sits between the tool prefix and the
		// tool suffix in the object name.
		vers := name[len(toolPrefix) : len(name)-len(toolSuffix)]
		if t.Version, err = version.ParseBinary(vers); err != nil {
			// Skip names that don't parse as versions (e.g. stray files).
			logger.Debugf("failed to parse version %q: %v", vers, err)
			continue
		}
		// foundAnyTools distinguishes "no tools at all" (ErrNoTools) from
		// "tools exist but none match" (ErrNoMatches) at the end.
		foundAnyTools = true
		// Major version must match specified value.
		if t.Version.Major != majorVersion {
			continue
		}
		// If specified minor version value supplied, minor version must match.
		if minorVersion >= 0 && t.Version.Minor != minorVersion {
			continue
		}
		logger.Debugf("found %s", vers)
		if t.URL, err = stor.URL(name); err != nil {
			return nil, err
		}
		list = append(list, &t)
	}
	if len(list) == 0 {
		if foundAnyTools {
			return nil, coretools.ErrNoMatches
		}
		return nil, ErrNoTools
	}
	return list, nil
}
func checkRemoveAll(c *gc.C, stor storage.Storage) { contents := []byte("File contents.") aFile := "a-file.txt" err := stor.Put(aFile, bytes.NewBuffer(contents), int64(len(contents))) c.Assert(err, gc.IsNil) err = stor.Put("empty-file", bytes.NewBuffer(nil), 0) c.Assert(err, gc.IsNil) err = stor.RemoveAll() c.Assert(err, gc.IsNil) files, err := storage.List(stor, "") c.Assert(err, gc.IsNil) c.Check(files, gc.HasLen, 0) _, err = storage.Get(stor, aFile) c.Assert(err, gc.NotNil) c.Check(err, gc.ErrorMatches, fmt.Sprintf("file %q not found", aFile)) }
func (s *storageSuite) TestClientTLS(c *gc.C) { listener, _, storageDir := startServerTLS(c) defer listener.Close() stor, err := httpstorage.ClientTLS(listener.Addr().String(), coretesting.CACert, testAuthkey) c.Assert(err, gc.IsNil) data := []byte("hello") err = ioutil.WriteFile(filepath.Join(storageDir, "filename"), data, 0644) c.Assert(err, gc.IsNil) names, err := storage.List(stor, "filename") c.Assert(err, gc.IsNil) c.Assert(names, gc.DeepEquals, []string{"filename"}) checkFileHasContents(c, stor, "filename", data) // Put, Remove and RemoveAll should all succeed. checkPutFile(c, stor, "filenamethesecond", data) checkFileHasContents(c, stor, "filenamethesecond", data) c.Assert(stor.Remove("filenamethesecond"), gc.IsNil) c.Assert(stor.RemoveAll(), gc.IsNil) }
func (suite *environSuite) TestDestroy(c *gc.C) { env := suite.makeEnviron() suite.getInstance("test1") suite.testMAASObject.TestServer.OwnedNodes()["test1"] = true // simulate acquire data := makeRandomBytes(10) suite.testMAASObject.TestServer.NewFile("filename", data) stor := env.Storage() err := env.Destroy() c.Check(err, jc.ErrorIsNil) // Instances have been stopped. operations := suite.testMAASObject.TestServer.NodesOperations() c.Check(operations, gc.DeepEquals, []string{"release"}) c.Check(suite.testMAASObject.TestServer.OwnedNodes()["test1"], jc.IsFalse) // Files have been cleaned up. listing, err := envstorage.List(stor, "") c.Assert(err, jc.ErrorIsNil) c.Check(listing, gc.DeepEquals, []string{}) }
func (s *ec2storage) RemoveAll() error { names, err := storage.List(s, "") if err != nil { return err } // Remove all the objects in parallel to minimize round-trips. // If we're in danger of having hundreds of objects, // we'll want to change this to limit the number // of concurrent operations. var wg sync.WaitGroup wg.Add(len(names)) errc := make(chan error, len(names)) for _, name := range names { name := name go func() { if err := s.Remove(name); err != nil { errc <- err } wg.Done() }() } wg.Wait() select { case err := <-errc: return fmt.Errorf("cannot delete all provider state: %v", err) default: } s.Lock() defer s.Unlock() // Even DelBucket fails, it won't harm if we try again - the operation // might have succeeded even if we get an error. s.madeBucket = false err = deleteBucket(s) err = s.bucket.DelBucket() if s3ErrorStatusCode(err) == 404 { return nil } return err }