// ParseClusterFromFile reads a cluster definition from the given file.
func ParseClusterFromFile(path string) (*Cluster, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, maskAny(err)
	}
	// Parse the input
	root, err := hcl.Parse(string(data))
	if err != nil {
		return nil, maskAny(err)
	}
	// Top-level item should be a list
	list, ok := root.Node.(*ast.ObjectList)
	if !ok {
		return nil, errgo.New("error parsing: root should be an object")
	}
	matches := list.Filter("cluster")
	if len(matches.Items) == 0 {
		return nil, errgo.New("'cluster' stanza not found")
	}
	// Parse hcl into Cluster
	cluster := &Cluster{}
	if err := cluster.parse(matches); err != nil {
		return nil, maskAny(err)
	}
	cluster.setDefaults()
	// Validate the cluster
	if err := cluster.validate(); err != nil {
		return nil, maskAny(err)
	}
	return cluster, nil
}
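// Hypothetical usage sketch, not part of the original source: loading a cluster
// definition from an HCL file. The file path is illustrative, and imports are
// omitted as in the surrounding snippets.
func exampleParseCluster() {
	c, err := ParseClusterFromFile("config/clusters/example.hcl")
	if err != nil {
		log.Fatalf("cannot parse cluster: %v", err)
	}
	fmt.Printf("loaded cluster: %+v\n", c)
}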
func pageGallery(w http.ResponseWriter, r *http.Request, p httprouter.Params) *httphelper.HandlerError {
	l := httphelper.NewHandlerLogEntry(r)

	filepath := path.Join(FlagFolderGallery, p.ByName("path"))
	l.Debug("Sending ", filepath)

	stat, err := os.Stat(filepath)
	if err != nil {
		return httphelper.NewHandlerErrorDef(errgo.Notef(err, "can not stat file"))
	}

	if stat.Mode().IsDir() {
		l.Debug("Filetype: Directory")
		return pageFilesDirectory(w, r, p)
	}

	if stat.Mode().IsRegular() {
		l.Debug("Filetype: Regular")
		return pageFilesRegular(w, r, p)
	}

	if !stat.Mode().IsDir() && !stat.Mode().IsRegular() {
		return httphelper.NewHandlerErrorDef(errgo.New("filetype is not a directory and not a regular file. Something is strange."))
	}

	return httphelper.NewHandlerErrorDef(errgo.New("unreachable code reached!"))
}
func ParseTodo(values []string) (Todo, error) {
	if len(values) != 4 {
		return Todo{}, errgo.New("entry with the type todo needs exactly four fields")
	}

	etype, err := ParseEntryType(values[0])
	if err != nil {
		return Todo{}, errgo.Notef(err, "can not parse entry type")
	}
	if etype != EntryTypeTodo {
		return Todo{}, errgo.New("tried to parse a todo but got the entry type " + etype.String())
	}

	timestamp, err := time.Parse(TimeStampFormat, values[1])
	if err != nil {
		return Todo{}, errgo.Notef(err, "can not parse timestamp")
	}

	active, err := strconv.ParseBool(values[2])
	if err != nil {
		return Todo{}, errgo.Notef(err, "can not parse active state")
	}

	return Todo{Active: active, TimeStamp: timestamp, Value: values[3]}, nil
}
// Construct sets the necessary options
func (sca *SourceCurrencyAll) Construct(mc config.ModelConstructor) error {
	if mc.ConfigReader == nil {
		return errgo.New("ConfigReader is required")
	}
	if mc.Scope == nil {
		return errgo.New("Scope is required")
	}
	sca.mc = mc
	return nil
}
func (*errorsSuite) TestCause(c *gc.C) {
	c.Assert(errgo.Cause(someErr), gc.Equals, someErr)

	causeErr := errgo.New("cause error")
	underlyingErr := errgo.New("underlying error")                 //err TestCause#1
	err := errgo.WithCausef(underlyingErr, causeErr, "foo %d", 99) //err TestCause#2
	c.Assert(errgo.Cause(err), gc.Equals, causeErr)
	checkErr(
		c, err,
		underlyingErr,
		"foo 99: underlying error",
		"[{$TestCause#2$: foo 99} {$TestCause#1$: underlying error}]",
		causeErr)

	err = &embed{err.(*errgo.Err)}
	c.Assert(errgo.Cause(err), gc.Equals, causeErr)
}
func convertValues(store store.Store, project data.ProjectName, values [][]string) error {
	for _, value := range values {
		if len(value) < 2 {
			return errgo.New("value length must be at least 2")
		}

		timestamp, err := time.Parse(time.RFC3339Nano, value[0])
		if err != nil {
			return errgo.Notef(err, "can not parse timestamp of value")
		}
		log.Info("Timestamp: ", timestamp)

		switch value[1] {
		case "note":
			log.Debug("Saving note")

			note := data.Note{
				Value:     value[2],
				TimeStamp: timestamp,
			}

			err := store.AddEntry(project, note)
			if err != nil {
				return errgo.Notef(err, "can not save note to store")
			}
		case "todo":
			log.Debug("Saving todo")

			done, err := strconv.ParseBool(value[3])
			if err != nil {
				return errgo.Notef(err, "can not parse bool from value")
			}

			todo := data.Todo{
				Value:     value[2],
				TimeStamp: timestamp,
				Active:    !done,
			}

			err = store.AddEntry(project, todo)
			if err != nil {
				return errgo.Notef(err, "can not save todo to store")
			}
		default:
			return errgo.New("do not know what to do with this type of value: " + value[1])
		}
	}

	return nil
}
func TestErrors(t *testing.T) {
	assert.Equal(t, "Err1\nErr2\nErr3", util.Errors(
		errors.New("Err1"),
		errors.New("Err2"),
		errors.New("Err3"),
	))

	err := util.Errors(
		errgo.New("Err1"),
		errgo.New("Err2"),
		errors.New("Err3"),
	)
	assert.Contains(t, err, "corestoreio/csfw/util/errors_test.go:34\nErr2")
}
// SetRSA reads PEM byte data, decodes it and parses the private key.
// It applies the private and the public key to the AuthManager. The password as
// second argument is only required when the private key is encrypted.
// If the reader implements io.Closer, the resource gets closed. The public key
// will be derived from the private key. Default signing method is RS256.
func SetRSA(privateKey io.Reader, password ...[]byte) OptionFunc {
	if cl, ok := privateKey.(io.Closer); ok {
		defer func() {
			if err := cl.Close(); err != nil { // close file
				log.Error("userjwt.RSAKey.ioCloser", "err", err)
			}
		}()
	}

	prKeyData, errRA := ioutil.ReadAll(privateKey)
	if errRA != nil {
		return func(a *AuthManager) {
			a.lastError = errgo.Mask(errRA)
		}
	}

	var prKeyPEM *pem.Block
	if prKeyPEM, _ = pem.Decode(prKeyData); prKeyPEM == nil {
		return func(a *AuthManager) {
			a.lastError = errgo.New("Private Key from io.Reader not found")
		}
	}

	var rsaPrivateKey *rsa.PrivateKey
	var err error
	if x509.IsEncryptedPEMBlock(prKeyPEM) {
		if len(password) != 1 || len(password[0]) == 0 {
			return func(a *AuthManager) {
				a.lastError = errgo.New("Private Key is encrypted but password was not set")
			}
		}
		var dd []byte
		var errPEM error
		if dd, errPEM = x509.DecryptPEMBlock(prKeyPEM, password[0]); errPEM != nil {
			return func(a *AuthManager) {
				a.lastError = errgo.Newf("Private Key decryption failed: %s", errPEM.Error())
			}
		}
		rsaPrivateKey, err = x509.ParsePKCS1PrivateKey(dd)
	} else {
		rsaPrivateKey, err = x509.ParsePKCS1PrivateKey(prKeyPEM.Bytes)
	}

	return func(a *AuthManager) {
		a.SigningMethod = jwt.SigningMethodRS256
		a.rsapk = rsaPrivateKey
		a.hasKey = true
		a.lastError = errgo.Mask(err)
	}
}
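// Hypothetical usage sketch, not part of the original source: wiring SetRSA into an
// AuthManager. The NewAuthManager constructor name and the key file path are
// assumptions; only SetRSA itself appears above. The opened file is closed by SetRSA
// because *os.File implements io.Closer.
func exampleSetRSA() {
	f, err := os.Open("testdata/rsa_private.pem") // illustrative key file
	if err != nil {
		log.Error("example.SetRSA.Open", "err", err)
		return
	}
	am := NewAuthManager(SetRSA(f, []byte("example password")))
	_ = am
}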
// Validate checks for duplicated configuration paths in all three hierarchy levels.
func (ss SectionSlice) Validate() error {
	if len(ss) == 0 {
		return errgo.New("SectionSlice is empty")
	}
	// @todo try to pick the right strategy between maps and slice depending on the overall size of a full SectionSlice
	var pc = make(utils.StringSlice, ss.TotalFields()) // pc path checker
	i := 0
	for _, s := range ss {
		for _, g := range s.Groups {
			for _, f := range g.Fields {
				arg, err := newArg(Path(s.ID, g.ID, f.ID))
				if err != nil {
					log.Error("config.SectionSlice.Validate.newArg", "err", err, "s", s, "g", g, "f", f)
				}
				p := arg.scopePath()
				if pc.Include(p) {
					return errgo.Newf("Duplicate entry for path %s :: %s", p, ss.ToJSON())
				}
				pc[i] = p
				i++
			}
		}
	}
	return nil
}
func (t *gluonService) Setup(deps service.ServiceDependencies, flags *service.ServiceFlags) error {
	if err := flags.SetupDefaults(deps.Logger); err != nil {
		return maskAny(err)
	}
	if flags.Docker.DockerSubnet == "" {
		return errgo.New("docker-subnet is missing")
	}

	changedFlags, err := flags.Save()
	if err != nil {
		return maskAny(err)
	}

	changedService, err := createService(deps, flags)
	if err != nil {
		return maskAny(err)
	}

	if flags.Force || changedFlags || changedService {
		if err := os.Remove(gluonPath); err != nil {
			if !os.IsNotExist(err) {
				return maskAny(err)
			}
		}
		if err := deps.Systemd.Reload(); err != nil {
			return maskAny(err)
		}
		if err := deps.Systemd.Enable(serviceName); err != nil {
			return maskAny(err)
		}
	}

	return nil
}
func TestCause(t *testing.T) {
	if cause := errgo.Cause(someErr); cause != someErr {
		t.Fatalf("expected %q kind; got %#v", someErr, cause)
	}

	causeErr := errgo.New("cause error")
	underlyingErr := errgo.New("underlying error")                 //err TestCause#1
	err := errgo.WithCausef(underlyingErr, causeErr, "foo %d", 99) //err TestCause#2
	if errgo.Cause(err) != causeErr {
		t.Fatalf("expected %q; got %#v", causeErr, errgo.Cause(err))
	}
	checkErr(
		t, err,
		underlyingErr,
		"foo 99: underlying error",
		"[{$TestCause#2$: foo 99} {$TestCause#1$: underlying error}]",
		causeErr)

	err = &embed{err.(*errgo.Err)}
	if errgo.Cause(err) != causeErr {
		t.Fatalf("expected %q; got %#v", causeErr, errgo.Cause(err))
	}
}
// loadCluster loads a cluster description from the given flags.
func loadCluster(f *fg.Flags) (*cluster.Cluster, error) {
	if f.ClusterPath == "" {
		return nil, maskAny(errgo.New("--cluster missing"))
	}
	clustersPath := os.Getenv("PULCY_CLUSTERS")
	if clustersPath == "" {
		clustersPath = "config/clusters"
	}
	path, err := resolvePath(f.ClusterPath, clustersPath, ".hcl")
	if err != nil {
		return nil, maskAny(err)
	}
	cluster, err := cluster.ParseClusterFromFile(path)
	if err != nil {
		return nil, maskAny(err)
	}
	if f.TunnelOverride != "" {
		cluster.Tunnel = f.TunnelOverride
	}
	if f.Local {
		cluster.Tunnel = "core-01"
		cluster.Stack = "core-01"
	}
	return cluster, nil
}
func TestNewErrorFromErrors(t *testing.T) {
	tests := []struct {
		code      int
		errs      []error
		wantError string
	}{
		{http.StatusBadGateway, nil, http.StatusText(http.StatusBadGateway)},
		{http.StatusTeapot, []error{errors.New("No coffee pot"), errors.New("Not even a milk pot")}, "No coffee pot\nNot even a milk pot"},
		{http.StatusConflict, []error{errgo.New("Now a coffee pot"), errgo.New("Not even close to a milk pot")}, "error_test.go"},
	}
	for _, test := range tests {
		he := ctxhttp.NewErrorFromErrors(test.code, test.errs...)
		assert.Exactly(t, test.code, he.Code)
		assert.Contains(t, he.Error(), test.wantError)
	}
}
// parseJob takes the given input and parses it into a Job.
func parseJob(input []byte, jf *jobFunctions) (*Job, error) {
	// Create a template, add the function map, and parse the text.
	tmpl, err := template.New("job").Funcs(jf.Functions()).Parse(string(input))
	if err != nil {
		return nil, maskAny(err)
	}

	// Run the template to verify the output.
	buffer := &bytes.Buffer{}
	err = tmpl.Execute(buffer, jf.Options())
	if err != nil {
		return nil, maskAny(err)
	}

	// Parse the input
	root, err := hcl.Parse(buffer.String())
	if err != nil {
		return nil, maskAny(err)
	}
	// Top-level item should be a list
	list, ok := root.Node.(*ast.ObjectList)
	if !ok {
		return nil, errgo.New("error parsing: root should be an object")
	}

	// Parse hcl into Job
	job := &Job{}
	matches := list.Filter("job")
	if len(matches.Items) == 0 {
		return nil, maskAny(errgo.WithCausef(nil, ValidationError, "'job' stanza not found"))
	}
	if err := job.parse(matches); err != nil {
		return nil, maskAny(err)
	}

	// Link internal structures
	job.prelink()

	// Set defaults
	job.setDefaults(jf.cluster)

	// Replace variables
	if err := job.replaceVariables(); err != nil {
		return nil, maskAny(err)
	}

	// Sort internal structures and make final links
	job.link()

	// Optimize job for cluster
	job.optimizeFor(jf.cluster)

	// Validate the job
	if err := job.Validate(); err != nil {
		return nil, maskAny(err)
	}

	return job, nil
}
func findSwarmType(tags []types.Tag) (string, error) {
	for _, tag := range tags {
		if tag.Key == "StackType" {
			return tag.Value, nil
		}
	}
	return "", errgo.New("swarm type not found")
}
func TestMatch(t *testing.T) {
	type errTest func(error) bool
	allow := func(ss ...string) []func(error) bool {
		fns := make([]func(error) bool, len(ss))
		for i, s := range ss {
			s := s
			fns[i] = func(err error) bool {
				return err != nil && err.Error() == s
			}
		}
		return fns
	}
	tests := []struct {
		err error
		fns []func(error) bool
		ok  bool
	}{{
		err: errgo.New("foo"),
		fns: allow("foo"),
		ok:  true,
	}, {
		err: errgo.New("foo"),
		fns: allow("bar"),
		ok:  false,
	}, {
		err: errgo.New("foo"),
		fns: allow("bar", "foo"),
		ok:  true,
	}, {
		err: errgo.New("foo"),
		fns: nil,
		ok:  false,
	}, {
		err: nil,
		fns: nil,
		ok:  false,
	}}
	for i, test := range tests {
		ok := errgo.Match(test.err, test.fns...)
		if ok != test.ok {
			t.Fatalf("test %d: expected %v got %v", i, test.ok, ok)
		}
	}
}
func (*errorsSuite) TestMatch(c *gc.C) {
	type errTest func(error) bool
	allow := func(ss ...string) []func(error) bool {
		fns := make([]func(error) bool, len(ss))
		for i, s := range ss {
			s := s
			fns[i] = func(err error) bool {
				return err != nil && err.Error() == s
			}
		}
		return fns
	}
	tests := []struct {
		err error
		fns []func(error) bool
		ok  bool
	}{{
		err: errgo.New("foo"),
		fns: allow("foo"),
		ok:  true,
	}, {
		err: errgo.New("foo"),
		fns: allow("bar"),
		ok:  false,
	}, {
		err: errgo.New("foo"),
		fns: allow("bar", "foo"),
		ok:  true,
	}, {
		err: errgo.New("foo"),
		fns: nil,
		ok:  false,
	}, {
		err: nil,
		fns: nil,
		ok:  false,
	}}
	for i, test := range tests {
		c.Logf("test %d", i)
		c.Assert(errgo.Match(test.err, test.fns...), gc.Equals, test.ok)
	}
}
func ParseEntryType(value string) (EntryType, error) {
	switch value {
	case "note":
		return EntryTypeNote, nil
	case "todo":
		return EntryTypeTodo, nil
	default:
		return EntryTypeUnkown, errgo.New("the entry type " + value + " is not known")
	}
}
func ParseEntry(values []string) (Entry, error) {
	if len(values) < 1 {
		return nil, errgo.New("entry values need at least one field")
	}

	etype, err := ParseEntryType(values[0])
	if err != nil {
		return nil, errgo.Notef(err, "can not parse entry type")
	}

	switch etype {
	case EntryTypeNote:
		return ParseNote(values)
	case EntryTypeTodo:
		return ParseTodo(values)
	default:
		return nil, errgo.New("do not know how to parse this entry type")
	}
}
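// Hypothetical usage sketch, not part of the original source: dispatching a raw
// record through ParseEntry. The example values and the use of TimeStampFormat
// mirror ParseTodo above; the record itself is made up.
func exampleParseEntry() {
	values := []string{"todo", time.Now().Format(TimeStampFormat), "true", "write documentation"}

	entry, err := ParseEntry(values)
	if err != nil {
		log.Error("can not parse entry: ", err)
		return
	}
	log.Debug("parsed entry: ", entry)
}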
func ParseNote(values []string) (Note, error) {
	if len(values) != 3 {
		return Note{}, errgo.New("entry with the type note needs exactly three fields")
	}

	etype, err := ParseEntryType(values[0])
	if err != nil {
		return Note{}, errgo.Notef(err, "can not parse entry type")
	}
	if etype != EntryTypeNote {
		return Note{}, errgo.New("tried to parse a note but got the entry type " + etype.String())
	}

	timestamp, err := time.Parse(TimeStampFormat, values[1])
	if err != nil {
		return Note{}, errgo.Notef(err, "can not parse timestamp")
	}

	return Note{TimeStamp: timestamp, Value: values[2]}, nil
}
// NewS3Client creates a new configured instance of s3Client.
// You can either pass the AWS credentials as arguments to the function or
// pass empty strings. In this case environment variables will be used for
// the credentials. Supported environment variables are AWS_ACCESS_KEY_ID,
// AWS_SECRET_ACCESS_KEY and S3_ENDPOINT.
func NewS3Client(awsAccessKey, awsSecretKey, s3Endpoint, bucket string) (fetchclient.FetchClient, error) {
	if awsAccessKey == "" {
		awsAccessKey = os.Getenv("AWS_ACCESS_KEY_ID")
	}
	if awsSecretKey == "" {
		awsSecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY")
	}
	if s3Endpoint == "" {
		s3Endpoint = os.Getenv("S3_ENDPOINT")
	}

	if awsAccessKey == "" {
		return nil, errgo.New("AWS_ACCESS_KEY_ID not found in environment or flag is empty")
	}
	if awsSecretKey == "" {
		return nil, errgo.New("AWS_SECRET_ACCESS_KEY not found in environment or flag is empty")
	}
	if s3Endpoint == "" {
		return nil, errgo.New("S3_ENDPOINT not found in environment or flag is empty")
	}

	s3 := s3.New(
		aws.Auth{
			AccessKey: awsAccessKey,
			SecretKey: awsSecretKey,
		},
		aws.Region{
			S3Endpoint: "https://" + s3Endpoint,
		},
	)

	s3c := &S3Client{
		bucket: s3.Bucket(bucket),
	}

	return s3c, nil
}
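// Hypothetical usage sketch, not part of the original source: creating the client
// with credentials taken from the environment by passing empty strings, as the doc
// comment above describes. The bucket name is illustrative.
func exampleNewS3Client() {
	client, err := NewS3Client("", "", "", "releases")
	if err != nil {
		log.Fatalf("cannot create S3 client: %v", err)
	}
	_ = client
}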
// loadJob loads a job from the given flags.
func loadJob(f *fg.Flags, cluster cluster.Cluster) (*jobs.Job, error) {
	if f.JobPath == "" {
		return nil, maskAny(errgo.New("--job missing"))
	}
	path, err := resolvePath(f.JobPath, "config", ".hcl")
	if err != nil {
		return nil, maskAny(err)
	}
	job, err := jobs.ParseJobFromFile(path, cluster, f.Options, log, f.VaultConfig, f.GithubLoginData)
	if err != nil {
		return nil, maskAny(err)
	}
	return job, nil
}
// ReInit reloads all websites, groups and stores concurrently from the database. If GOMAXPROCS
// is set to > 1 then in parallel. Returns an error with location or nil. If an error occurs
// then all internal slices will be reset.
func (st *Storage) ReInit(dbrSess dbr.SessionRunner, cbs ...dbr.SelectCb) error {
	st.mu.Lock()
	defer st.mu.Unlock()

	if dbrSess == nil {
		return errgo.New("dbr.SessionRunner is nil")
	}

	errc := make(chan error)
	defer close(errc)
	// not sure about those three goroutines

	go func() {
		for i := range st.websites {
			st.websites[i] = nil // I'm not quite sure if that is needed to clear the pointers
		}
		st.websites = nil
		_, err := st.websites.SQLSelect(dbrSess, cbs...)
		errc <- errgo.Mask(err)
	}()

	go func() {
		for i := range st.groups {
			st.groups[i] = nil // I'm not quite sure if that is needed to clear the pointers
		}
		st.groups = nil
		_, err := st.groups.SQLSelect(dbrSess, cbs...)
		errc <- errgo.Mask(err)
	}()

	go func() {
		for i := range st.stores {
			st.stores[i] = nil // I'm not quite sure if that is needed to clear the pointers
		}
		st.stores = nil
		_, err := st.stores.SQLSelect(dbrSess, cbs...)
		errc <- errgo.Mask(err)
	}()

	for i := 0; i < 3; i++ {
		if err := <-errc; err != nil {
			// in case of error clear all
			st.websites = nil
			st.groups = nil
			st.stores = nil
			return err
		}
	}

	return nil
}
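// Hypothetical usage sketch, not part of the original source: triggering a reload
// after the backend data has changed. Obtaining the dbr.SessionRunner is outside
// the scope of this snippet.
func exampleReInit(st *Storage, dbrSess dbr.SessionRunner) error {
	if err := st.ReInit(dbrSess); err != nil {
		return errgo.Mask(err)
	}
	return nil
}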
func (*errorsSuite) TestDetails(c *gc.C) {
	c.Assert(errgo.Details(nil), gc.Equals, "[]")

	otherErr := fmt.Errorf("other")
	checkErr(c, otherErr, nil, "other", "[{other}]", otherErr)

	err0 := &embed{errgo.New("foo").(*errgo.Err)} //err TestStack#0
	checkErr(c, err0, nil, "foo", "[{$TestStack#0$: foo}]", err0)

	err1 := &embed{errgo.Notef(err0, "bar").(*errgo.Err)} //err TestStack#1
	checkErr(c, err1, err0, "bar: foo", "[{$TestStack#1$: bar} {$TestStack#0$: foo}]", err1)

	err2 := errgo.Mask(err1) //err TestStack#2
	checkErr(c, err2, err1, "bar: foo", "[{$TestStack#2$: } {$TestStack#1$: bar} {$TestStack#0$: foo}]", err2)
}
func (i ClusterInstance) GetVaultAddr(log *logging.Logger) (string, error) {
	const prefix = "VAULT_ADDR="
	log.Debugf("Fetching vault-addr on %s", i)
	env, err := i.runRemoteCommand(log, "sudo cat /etc/pulcy/vault.env", "", false)
	if err != nil {
		return "", maskAny(err)
	}
	for _, line := range strings.Split(env, "\n") {
		line = strings.TrimSpace(line)
		if strings.HasPrefix(line, prefix) {
			return strings.TrimSpace(line[len(prefix):]), nil
		}
	}
	return "", maskAny(errgo.New("VAULT_ADDR not found in /etc/pulcy/vault.env"))
}
func TestLogFatal(t *testing.T) {
	defer func() {
		logFatalln = log.Fatalln
	}()

	var err error
	err = errors.New("Test")
	logFatalln = func(v ...interface{}) {
		assert.Contains(t, v[0].(string), "Error: Test")
	}
	LogFatal(err)

	err = errgo.New("Test")
	LogFatal(err)

	err = nil
	LogFatal(err)
}
func TestDetails(t *testing.T) {
	if details := errgo.Details(nil); details != "[]" {
		t.Fatalf("errgo.Details(nil) got %q want %q", details, "[]")
	}

	otherErr := fmt.Errorf("other")
	checkErr(t, otherErr, nil, "other", "[{other}]", otherErr)

	err0 := &embed{errgo.New("foo").(*errgo.Err)} //err TestStack#0
	checkErr(t, err0, nil, "foo", "[{$TestStack#0$: foo}]", err0)

	err1 := &embed{errgo.Notef(err0, "bar").(*errgo.Err)} //err TestStack#1
	checkErr(t, err1, err0, "bar: foo", "[{$TestStack#1$: bar} {$TestStack#0$: foo}]", err1)

	err2 := errgo.Mask(err1) //err TestStack#2
	checkErr(t, err2, err1, "bar: foo", "[{$TestStack#2$: } {$TestStack#1$: bar} {$TestStack#0$: foo}]", err2)
}
// checkRepoClean ensures there are no uncommitted changes in this repo
// and that the given branch is in sync with origin.
func checkRepoClean(log *log.Logger, branch string) error {
	if st, err := git.Status(log, true); err != nil {
		return maskAny(err)
	} else if st != "" {
		return maskAny(errgo.New("There are uncommitted changes"))
	}
	if err := git.Fetch(log, "origin"); err != nil {
		return maskAny(err)
	}
	if diff, err := git.Diff(log, branch, path.Join("origin", branch)); err != nil {
		return maskAny(err)
	} else if diff != "" {
		return maskAny(errgo.Newf("%s is not in sync with origin", branch))
	}
	return nil
}
func (db *DBFiles) walkPopulateKeys(path string, info os.FileInfo, err error) error {
	if err != nil {
		return errgo.Notef(err, "error is not empty")
	}
	if info == nil {
		return errgo.New("directory info is empty")
	}

	// Skip the git folder
	if info.IsDir() && info.Name() == ".git" {
		return filepath.SkipDir
	}
	if info.IsDir() {
		return nil
	}

	// Remove basedir from path
	relpath, err := filepath.Rel(db.BaseDir, path)
	if err != nil {
		return errgo.Notef(err, "can not get relative path")
	}

	// Get the driver extension
	driverext := filepath.Ext(relpath)

	// Remove the driver extension
	nodriverpath := relpath[0 : len(relpath)-len(driverext)]

	// Split by path separator
	split := strings.Split(nodriverpath, string(os.PathSeparator))

	// Append the new key to db.keys
	db.keysmux.Lock()
	db.keys = append(db.keys, split)
	db.keysmux.Unlock()

	log.Debug("Path: ", path)
	log.Debug("driverext: ", driverext)
	log.Debug("Nodriverpath: ", nodriverpath)
	log.Debug("Split: ", split)

	return nil
}
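// Hypothetical usage sketch, not part of the original source: walkPopulateKeys has
// the signature of a filepath.WalkFunc, so it would typically be handed to
// filepath.Walk over the database base directory. The populateKeys method name is
// an assumption.
func (db *DBFiles) populateKeys() error {
	if err := filepath.Walk(db.BaseDir, db.walkPopulateKeys); err != nil {
		return errgo.Notef(err, "can not walk the base directory")
	}
	return nil
}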
func GetDefaultConfig() (*Config, error) {
	home := os.Getenv("HOME")
	if home == "" {
		return nil, Mask(errgo.New("Cannot find HOME"))
	}

	file, err := ioutil.ReadFile(filepath.Join(home, configFile))
	if err != nil {
		return nil, Mask(err)
	}

	config := &Config{}
	if err := json.Unmarshal(file, config); err != nil {
		return nil, Mask(err)
	}

	return config, nil
}