func (c *Client) BindUnit(instance *ServiceInstance, app bind.App, unit bind.Unit) error {
	log.Debugf("Calling bind of instance %q and %q unit at %q API", instance.Name, unit.GetIp(), instance.ServiceName)
	var resp *http.Response
	params := map[string][]string{
		"app-host":  {app.GetIp()},
		"unit-host": {unit.GetIp()},
	}
	resp, err := c.issueRequest("/resources/"+instance.GetIdentifier()+"/bind", "POST", params)
	if err != nil {
		return log.WrapError(errors.Wrapf(err, `Failed to bind the instance "%s/%s" to the unit %q`, instance.ServiceName, instance.Name, unit.GetIp()))
	}
	defer resp.Body.Close()
	switch resp.StatusCode {
	case http.StatusPreconditionFailed:
		return ErrInstanceNotReady
	case http.StatusNotFound:
		return ErrInstanceNotFoundInAPI
	}
	if resp.StatusCode > 299 {
		err = errors.Wrapf(c.buildErrorMessage(err, resp), `Failed to bind the instance "%s/%s" to the unit %q`, instance.ServiceName, instance.Name, unit.GetIp())
		return log.WrapError(err)
	}
	return nil
}
func (hsm *HTTPStateManager) modify(mp *ManifestPair) error {
	bf := mp.Prior
	af := mp.Post
	u, etag, err := hsm.getManifestEtag(bf)
	if err != nil {
		return errors.Wrapf(err, "modify request")
	}
	// XXX I don't think the URL should be *able* to be different here.
	u, err = hsm.manifestURL(af)
	if err != nil {
		return err
	}
	json := hsm.manifestJSON(af)
	Log.Debug.Printf("Updating manifest at %q", u)
	Log.Debug.Printf("Updating manifest to %s", json)
	prq, err := http.NewRequest("PUT", u, json)
	if err != nil {
		return errors.Wrapf(err, "modify request")
	}
	prq.Header.Add("If-Match", etag)
	prz, err := hsm.httpRequest(prq)
	if err != nil {
		return errors.Wrapf(err, "modify request")
	}
	defer prz.Body.Close()
	if !(prz.StatusCode >= 200 && prz.StatusCode < 300) {
		return errors.Errorf("Update failed: %s / %#v", prz.Status, af)
	}
	return nil
}
// Setup sets up a mount point by either mounting the volume if it is
// configured, or creating the source directory if supplied.
func (m *MountPoint) Setup(mountLabel string, rootUID, rootGID int) (string, error) {
	if m.Volume != nil {
		if m.ID == "" {
			m.ID = stringid.GenerateNonCryptoID()
		}
		path, err := m.Volume.Mount(m.ID)
		return path, errors.Wrapf(err, "error while mounting volume '%s'", m.Source)
	}
	if len(m.Source) == 0 {
		return "", fmt.Errorf("Unable to setup mount point, neither source nor volume defined")
	}
	if m.Type == mounttypes.TypeBind {
		// idtools.MkdirAllNewAs() produces an error if m.Source exists and is a file (not a directory).
		// It also makes sure that if the directory is created, the correct remapped rootUID/rootGID will own it.
		if err := idtools.MkdirAllNewAs(m.Source, 0755, rootUID, rootGID); err != nil {
			if perr, ok := err.(*os.PathError); ok {
				if perr.Err != syscall.ENOTDIR {
					return "", errors.Wrapf(err, "error while creating mount source path '%s'", m.Source)
				}
			}
		}
	}
	if label.RelabelNeeded(m.Mode) {
		if err := label.Relabel(m.Source, mountLabel, label.IsShared(m.Mode)); err != nil {
			return "", errors.Wrapf(err, "error setting label on mount source '%s'", m.Source)
		}
	}
	return m.Source, nil
}
// SaveKey writes the whole entity (including private key!) to a .gpg file.
func (obj *PGP) SaveKey(path string) error {
	f, err := os.Create(path)
	if err != nil {
		return errwrap.Wrapf(err, "can't create file from given path")
	}
	defer f.Close()

	w := bufio.NewWriter(f)

	if err := obj.Entity.SerializePrivate(w, &CONFIG); err != nil {
		return errwrap.Wrapf(err, "can't serialize private key")
	}

	for _, ident := range obj.Entity.Identities {
		for _, sig := range ident.Signatures {
			if err := sig.Serialize(w); err != nil {
				return errwrap.Wrapf(err, "can't serialize signature")
			}
		}
	}

	if err := w.Flush(); err != nil {
		return errwrap.Wrapf(err, "unable to flush writer")
	}
	return nil
}
// StartHost starts a host VM.
func StartHost(api libmachine.API, config MachineConfig) (*host.Host, error) {
	exists, err := api.Exists(constants.MachineName)
	if err != nil {
		return nil, errors.Wrapf(err, "Error checking if host exists: %s", constants.MachineName)
	}
	if !exists {
		return createHost(api, config)
	}

	glog.Infoln("Machine exists!")
	h, err := api.Load(constants.MachineName)
	if err != nil {
		return nil, errors.Wrap(err, "Error loading existing host. Please try running [minikube delete], then run [minikube start] again.")
	}

	s, err := h.Driver.GetState()
	glog.Infoln("Machine state: ", s)
	if err != nil {
		return nil, errors.Wrap(err, "Error getting state for host")
	}

	if s != state.Running {
		if err := h.Driver.Start(); err != nil {
			return nil, errors.Wrapf(err, "Error starting stopped host")
		}
		if err := api.Save(h); err != nil {
			return nil, errors.Wrapf(err, "Error saving started host")
		}
	}

	if err := h.ConfigureAuth(); err != nil {
		return nil, errors.Wrap(err, "Error configuring auth on host")
	}
	return h, nil
}
// Insert implements Inserter for HTTPNameInserter
func (hni *HTTPNameInserter) Insert(sid SourceID, in, etag string, qs []Quality) error {
	url, err := hni.serverURL.Parse("./artifact")
	if err != nil {
		return errors.Wrapf(err, "http insert name: %s for %v", in, sid)
	}
	url.RawQuery = sid.QueryValues().Encode()
	art := &BuildArtifact{Name: in, Type: "docker", Qualities: qs}
	buf := &bytes.Buffer{}
	enc := json.NewEncoder(buf)
	err = enc.Encode(art)
	if err != nil {
		return errors.Wrapf(err, "http insert name %s, encoding %v", in, art)
	}
	req, err := http.NewRequest("PUT", url.String(), buf)
	if err != nil {
		return errors.Wrapf(err, "http insert name %s, building request for %s/%v", in, url, art)
	}
	rz, err := hni.Client.Do(req)
	if err != nil {
		return errors.Wrapf(err, "http insert name %s, sending %v", in, req)
	}
	if rz.StatusCode >= 200 && rz.StatusCode < 300 {
		return nil
	}
	return errors.Errorf("Received %s when attempting %v", rz.Status, req)
}
// New creates a new StructNode.
func (StructNode) New(base NodeBase, c *Codec) (Node, error) {
	// Children need a pointer to this node, so create it first.
	n := &StructNode{
		FileNode: FileNode{
			NodeBase: base,
		},
		Fields:   map[string]reflect.Type{},
		Children: map[string]*Node{},
	}
	for i := 0; i < n.Type.NumField(); i++ {
		field, err := NewFieldInfo(n.Type.Field(i))
		if err != nil {
			return nil, errors.Wrapf(err, "reading field %s.%s", n.Type, n.Type.Field(i).Name)
		}
		if field.Tag.None {
			n.Fields[field.Name] = field.Type
			continue
		}
		if field.Tag.Ignore {
			continue
		}
		childNodeID, err := NewNodeID(n.Type, field.Type, field.Name)
		if err != nil {
			return nil, errors.Wrapf(err, "getting ID for %T.%s", n.Type, field.Name)
		}
		child, err := c.NewNode(n, childNodeID, field)
		if err != nil {
			return nil, errors.Wrapf(err, "analysing %T.%s", n.Type, field.Name)
		}
		if child != nil {
			n.Children[field.Name] = child
		}
	}
	return n, nil
}
// readSystemProcessorPerformanceInformationBuffer reads from a buffer
// containing SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION data. The buffer should
// contain one entry for each CPU.
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724509(v=vs.85).aspx
func readSystemProcessorPerformanceInformationBuffer(b []byte) ([]SystemProcessorPerformanceInformation, error) {
	n := len(b) / sizeofSystemProcessorPerformanceInformation
	r := bytes.NewReader(b)
	rtn := make([]SystemProcessorPerformanceInformation, 0, n)
	for i := 0; i < n; i++ {
		_, err := r.Seek(int64(i*sizeofSystemProcessorPerformanceInformation), io.SeekStart)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to seek to cpuN=%v in buffer", i)
		}

		times := make([]uint64, 3)
		for j := range times {
			err := binary.Read(r, binary.LittleEndian, &times[j])
			if err != nil {
				return nil, errors.Wrapf(err, "failed reading cpu times for cpuN=%v", i)
			}
		}

		idleTime := time.Duration(times[0] * 100)
		kernelTime := time.Duration(times[1] * 100)
		userTime := time.Duration(times[2] * 100)

		rtn = append(rtn, SystemProcessorPerformanceInformation{
			IdleTime:   idleTime,
			KernelTime: kernelTime - idleTime, // Subtract out idle time from kernel time.
			UserTime:   userTime,
		})
	}
	return rtn, nil
}
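// Illustrative only (not part of the original sources): building a fake one-CPU
// buffer for readSystemProcessorPerformanceInformationBuffer. It assumes the
// entry begins with IdleTime, KernelTime and UserTime as little-endian uint64
// values in 100-nanosecond ticks (which is what the *100 conversion above
// implies), and that sizeofSystemProcessorPerformanceInformation covers the
// remaining fields of the struct.
func exampleProcessorPerformanceBuffer() []byte {
	buf := make([]byte, sizeofSystemProcessorPerformanceInformation) // one CPU entry
	binary.LittleEndian.PutUint64(buf[0:8], 9000000)                 // idle: 0.9s in 100ns ticks
	binary.LittleEndian.PutUint64(buf[8:16], 10000000)               // kernel (includes idle): 1.0s
	binary.LittleEndian.PutUint64(buf[16:24], 5000000)               // user: 0.5s
	return buf
}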
// Returns a byte stream which is a download of the given link.
func (u *UpdateClient) FetchUpdate(api ApiRequester, url string) (io.ReadCloser, int64, error) {
	req, err := makeUpdateFetchRequest(url)
	if err != nil {
		return nil, -1, errors.Wrapf(err, "failed to create update fetch request")
	}

	r, err := api.Do(req)
	if err != nil {
		log.Error("Can not fetch update image: ", err)
		return nil, -1, errors.Wrapf(err, "update fetch request failed")
	}

	log.Debugf("Received fetch update response %+v", r)

	if r.StatusCode != http.StatusOK {
		r.Body.Close()
		log.Errorf("Error fetching scheduled update info: code (%d)", r.StatusCode)
		return nil, -1, errors.New("Error receiving scheduled update information.")
	}

	if r.ContentLength < 0 {
		r.Body.Close()
		return nil, -1, errors.New("Will not continue with unknown image size.")
	} else if r.ContentLength < u.minImageSize {
		r.Body.Close()
		log.Errorf("Image smaller than expected. Expected: %d, received: %d", u.minImageSize, r.ContentLength)
		return nil, -1, errors.New("Image size is smaller than expected. Aborting.")
	}

	return r.Body, r.ContentLength, nil
}
// GroomDatabase ensures that the database backing the cache has the correct schema.
func (nc *NameCache) GroomDatabase() error {
	db := nc.DB
	var tgp string
	err := db.QueryRow("select value from _database_metadata_ where name = 'fingerprint';").Scan(&tgp)
	if err != nil || tgp != schemaFingerprint {
		//log.Println(err, tgp, schemaFingerprint)
		err = nil
		repos := captureRepos(db)
		clobber(db)
		for _, cmd := range schema {
			if err := sqlExec(db, cmd); err != nil {
				return errors.Wrapf(err, "groom DB/create: %v", db)
			}
		}
		if _, err := db.Exec("insert into _database_metadata_ (name, value) values"+
			" ('fingerprint', ?),"+
			" ('created', ?);",
			schemaFingerprint, time.Now().UTC().Format(time.UnixDate)); err != nil {
			return errors.Wrapf(err, "groom DB/fp: %v", db)
		}
		for _, r := range repos {
			if err := nc.Warmup(r); err != nil {
				return errors.Wrap(err, "groom DB")
			}
		}
	}
	return errors.Wrap(err, "groom DB")
}
func (nc *NameCache) ensureInDB(sel, ins string, args ...interface{}) (id int64, err error) {
	selN := len(sqlBindingRE.FindAllString(sel, -1))
	insN := len(sqlBindingRE.FindAllString(ins, -1))

	if selN > len(args) {
		return 0, errors.Errorf("only %d args when %d needed for %q", len(args), selN, sel)
	}
	if insN > len(args) {
		return 0, errors.Errorf("only %d args when %d needed for %q", len(args), insN, ins)
	}

	row := nc.DB.QueryRow(sel, args[0:selN]...)
	err = row.Scan(&id)
	if err == nil {
		Log.Vomit.Printf("Found id: %d with %q %v", id, sel, args)
		return
	}
	if errors.Cause(err) != sql.ErrNoRows {
		return 0, errors.Wrapf(err, "getting id with %q %v", sel, args[0:selN])
	}

	nr, err := nc.DB.Exec(ins, args[0:insN]...)
	if err != nil {
		return 0, errors.Wrapf(err, "inserting new value: %q %v", ins, args[0:insN])
	}
	id, err = nr.LastInsertId()
	Log.Vomit.Printf("Made (?err: %v) id: %d with %q", err, id, ins)
	return id, errors.Wrapf(err, "getting id of new value: %q %v", ins, args[0:insN])
}
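// Illustrative only (not part of the original corpus): a hypothetical caller of
// ensureInDB showing the intended select-or-insert pattern. The table and
// column names are invented for the example; it assumes sqlBindingRE matches
// the "?" placeholders, so both statements consume the single bound argument.
func (nc *NameCache) ensureRepoID(repoName string) (int64, error) {
	return nc.ensureInDB(
		"select repo_id from repos where name = ?;", // tried first; hit returns the existing id
		"insert into repos (name) values (?);",      // executed only on sql.ErrNoRows
		repoName,
	)
}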
func evalSetDiff(t *Topdown, expr *ast.Expr, iter Iterator) (err error) {
	ops := expr.Terms.([]*ast.Term)

	op1, err := ResolveRefs(ops[1].Value, t)
	if err != nil {
		return errors.Wrapf(err, "set_diff")
	}
	s1, ok := op1.(*ast.Set)
	if !ok {
		return &Error{
			Code:    TypeErr,
			Message: fmt.Sprintf("set_diff: first input argument must be set not %T", ops[1].Value),
		}
	}

	op2, err := ResolveRefs(ops[2].Value, t)
	if err != nil {
		return errors.Wrapf(err, "set_diff")
	}
	s2, ok := op2.(*ast.Set)
	if !ok {
		return &Error{
			Code:    TypeErr,
			Message: fmt.Sprintf("set_diff: second input argument must be set not %T", ops[2].Value),
		}
	}

	s3 := s1.Diff(s2)
	undo, err := evalEqUnify(t, s3, ops[3].Value, nil, iter)
	t.Unbind(undo)
	return err
}
// list traverses the directory passed in, listing to out.
// It returns the subdirectories found so the caller can recurse into them.
func (f *Fs) list(out fs.ListOpts, remote string, dirpath string, level int) (subdirs []listArgs) {
	fd, err := os.Open(dirpath)
	if err != nil {
		out.SetError(errors.Wrapf(err, "failed to open directory %q", dirpath))
		return nil
	}
	defer func() {
		err := fd.Close()
		if err != nil {
			out.SetError(errors.Wrapf(err, "failed to close directory %q", dirpath))
		}
	}()

	for {
		fis, err := fd.Readdir(1024)
		if err == io.EOF && len(fis) == 0 {
			break
		}
		if err != nil {
			out.SetError(errors.Wrapf(err, "failed to read directory %q", dirpath))
			return nil
		}

		for _, fi := range fis {
			name := fi.Name()
			newRemote := path.Join(remote, name)
			newPath := filepath.Join(dirpath, name)
			if fi.IsDir() {
				// Ignore directories which are symlinks. These are junction points under Windows which
				// are kind of a souped-up symlink. Unix doesn't have directories which are symlinks.
				if (fi.Mode()&os.ModeSymlink) == 0 && out.IncludeDirectory(newRemote) {
					dir := &fs.Dir{
						Name:  f.cleanRemote(newRemote),
						When:  fi.ModTime(),
						Bytes: 0,
						Count: 0,
					}
					if out.AddDir(dir) {
						return nil
					}
					if level > 0 && f.dev == readDevice(fi) {
						subdirs = append(subdirs, listArgs{remote: newRemote, dirpath: newPath, level: level - 1})
					}
				}
			} else {
				fso, err := f.newObjectWithInfo(newRemote, fi)
				if err != nil {
					out.SetError(err)
					return nil
				}
				if fso.Storable() && out.Add(fso) {
					return nil
				}
			}
		}
	}
	return subdirs
}
// resolveLocalPaths is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.resolveLocalPaths.
// It resolves all relative paths in the config object with respect to the stanza's LocationOfOrigin.
// This cannot be done directly inside of LoadFromFile because doing so there would make it impossible
// to load a file without modification of its contents.
func resolveLocalPaths(config *clientcmdConfig) error {
	for _, cluster := range config.Clusters {
		if len(cluster.LocationOfOrigin) == 0 {
			continue
		}
		base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin))
		if err != nil {
			return errors.Wrapf(err, "Could not determine the absolute path of config file %s", cluster.LocationOfOrigin)
		}

		if err := resolvePaths(getClusterFileReferences(cluster), base); err != nil {
			return err
		}
	}
	for _, authInfo := range config.AuthInfos {
		if len(authInfo.LocationOfOrigin) == 0 {
			continue
		}
		base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin))
		if err != nil {
			return errors.Wrapf(err, "Could not determine the absolute path of config file %s", authInfo.LocationOfOrigin)
		}

		if err := resolvePaths(getAuthInfoFileReferences(authInfo), base); err != nil {
			return err
		}
	}
	return nil
}
func (c *httpClient) decodeResponse(resp *http.Response, response interface{}, successfulCodes []int) error {
	dec := json.NewDecoder(resp.Body)

	successful := false
	for _, code := range successfulCodes {
		if code == resp.StatusCode {
			successful = true
			break
		}
	}

	// Unsuccessful response code, decode status result.
	if !successful {
		var status Status
		err := dec.Decode(&status)
		if err != nil {
			return errors.Wrapf(err, "failed to understand k8s server response: Code: %d", resp.StatusCode)
		}
		return status
	}

	if response != nil {
		// Decode response body into provided response object.
		if err := dec.Decode(response); err != nil {
			return errors.Wrapf(err, "failed to decode k8s server response into %T: Code: %d", response, resp.StatusCode)
		}
	}
	return nil
}
// NewManager returns a new plugin manager.
func NewManager(config ManagerConfig) (*Manager, error) {
	if config.RegistryService != nil {
		config.RegistryService = pluginRegistryService{config.RegistryService}
	}
	manager := &Manager{
		config: config,
	}
	if err := os.MkdirAll(manager.config.Root, 0700); err != nil {
		return nil, errors.Wrapf(err, "failed to mkdir %v", manager.config.Root)
	}
	if err := os.MkdirAll(manager.config.ExecRoot, 0700); err != nil {
		return nil, errors.Wrapf(err, "failed to mkdir %v", manager.config.ExecRoot)
	}
	if err := os.MkdirAll(manager.tmpDir(), 0700); err != nil {
		return nil, errors.Wrapf(err, "failed to mkdir %v", manager.tmpDir())
	}

	var err error
	manager.containerdClient, err = config.Executor.Client(manager) // todo: move to another struct
	if err != nil {
		return nil, errors.Wrap(err, "failed to create containerd client")
	}
	manager.blobStore, err = newBasicBlobStore(filepath.Join(manager.config.Root, "storage/blobs"))
	if err != nil {
		return nil, err
	}

	manager.cMap = make(map[*v2.Plugin]*controller)
	if err := manager.reload(); err != nil {
		return nil, errors.Wrap(err, "failed to restore plugins")
	}
	return manager, nil
}
func (s *SAMLAuthScheme) Parse(xml string) (*saml.Response, error) {
	if xml == "" {
		return nil, ErrMissingFormValueError
	}
	var response *saml.Response
	var err error
	if !s.BaseConfig.DeflatEncodedResponse {
		response, err = saml.ParseEncodedResponse(xml)
	} else {
		response, err = saml.ParseCompressedEncodedResponse(xml)
	}
	if err != nil || response == nil {
		return nil, errors.Wrapf(err, "unable to parse identity provider data: %s", xml)
	}
	sp, err := s.createSP()
	if err != nil {
		return nil, errors.Wrap(err, "unable to create service provider object")
	}
	if response.IsEncrypted() {
		if err = response.Decrypt(sp.PrivateKeyPath); err != nil {
			respData, _ := response.String()
			return nil, errors.Wrapf(err, "unable to decrypt identity provider data: %s", respData)
		}
	}
	resp, _ := response.String()
	log.Debugf("Data received from identity provider decoded: %s", resp)
	return response, nil
}
func (u *AuthClient) Request(api ApiRequester, server string, dataSrc AuthDataMessenger) ([]byte, error) {
	req, err := makeAuthRequest(server, dataSrc)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to build authorization request")
	}

	log.Debugf("making authorization request to server %s with req: %s", server, req)
	rsp, err := api.Do(req)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to execute authorization request")
	}
	defer rsp.Body.Close()

	log.Debugf("got response: %v", rsp)

	switch rsp.StatusCode {
	case http.StatusUnauthorized:
		return nil, AuthErrorUnauthorized
	case http.StatusOK:
		log.Debugf("receive response data")
		data, err := ioutil.ReadAll(rsp.Body)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to receive authorization response data")
		}
		log.Debugf("received response data %v", data)
		return data, nil
	default:
		return nil, errors.Errorf("unexpected authorization status %v", rsp.StatusCode)
	}
}
func newHttpsClient(conf Config) (*http.Client, error) {
	client := newHttpClient()

	trustedcerts, err := loadServerTrust(conf)
	if err != nil {
		return nil, errors.Wrapf(err, "cannot initialize server trust")
	}

	clientcerts, err := loadClientCert(conf)
	if err != nil {
		return nil, errors.Wrapf(err, "cannot load client certificate")
	}

	if conf.NoVerify {
		log.Warnf("certificate verification skipped...")
	}
	tlsc := tls.Config{
		RootCAs:            trustedcerts,
		InsecureSkipVerify: conf.NoVerify,
	}
	transport := http.Transport{
		TLSClientConfig: &tlsc,
	}

	if clientcerts != nil {
		transport.TLSClientConfig.Certificates = []tls.Certificate{*clientcerts}
	}

	client.Transport = &transport
	return client, nil
}
func (av *Writer) write(updates []parser.UpdateData) error {
	av.availableUpdates = updates

	// Write a temporary header (we need to know its size before storing it in the tar archive).
	if err := av.WriteHeader(); err != nil {
		return err
	}

	// Archive info.
	info := av.getInfo()
	ia := archiver.NewMetadataArchiver(&info, "info")
	if err := ia.Archive(av.aArchiver); err != nil {
		return errors.Wrapf(err, "writer: error archiving info")
	}
	// Archive header.
	ha := archiver.NewFileArchiver(av.hTmpFile.Name(), "header.tar.gz")
	if err := ha.Archive(av.aArchiver); err != nil {
		return errors.Wrapf(err, "writer: error archiving header")
	}
	// Archive data.
	if err := av.WriteData(); err != nil {
		return err
	}
	// Everything has been written to a temporary file; close the archive and move it into place.
	if err := av.aArchiver.Close(); err != nil {
		return errors.New("writer: error closing archive")
	}
	// Prevent closing the archiver twice.
	av.aArchiver = nil

	if err := av.aTmpFile.Close(); err != nil {
		return errors.New("writer: error closing archive temporary file")
	}
	return os.Rename(av.aTmpFile.Name(), av.aName)
}
func updateHostnameProperty(object dbus.BusObject, expectedValue, property, setterName string, apply bool) (checkOK bool, err error) {
	propertyObject, err := object.GetProperty("org.freedesktop.hostname1." + property)
	if err != nil {
		return false, errwrap.Wrapf(err, "failed to get org.freedesktop.hostname1.%s", property)
	}
	if propertyObject.Value() == nil {
		return false, errwrap.Errorf("Unexpected nil value received when reading property %s", property)
	}

	propertyValue, ok := propertyObject.Value().(string)
	if !ok {
		return false, fmt.Errorf("Received unexpected type as %s value, expected string got '%T'", property, propertyValue)
	}

	// If the expected value and the actual value match, the check passes.
	if propertyValue == expectedValue {
		return true, nil
	}

	// Nothing more to do if we're not applying changes.
	if !apply {
		return false, nil
	}

	// Attempt to apply the change.
	log.Printf("Changing %s: %s => %s", property, propertyValue, expectedValue)
	if err := object.Call("org.freedesktop.hostname1."+setterName, 0, expectedValue, false).Err; err != nil {
		return false, errwrap.Wrapf(err, "failed to call org.freedesktop.hostname1.%s", setterName)
	}

	// The change has now been applied.
	return false, nil
}
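// Illustrative only (not part of the original corpus): checking and optionally
// applying the static hostname through updateHostnameProperty, assuming an
// already-connected system bus. The object path, property and setter names
// follow the systemd-hostnamed convention used above.
func exampleSetStaticHostname(conn *dbus.Conn, want string, apply bool) (bool, error) {
	obj := conn.Object("org.freedesktop.hostname1", "/org/freedesktop/hostname1")
	return updateHostnameProperty(obj, want, "StaticHostname", "SetStaticHostname", apply)
}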
// Get obtains the next sequence number. In case of errors (read, write, parse,
// etc.) the returned value is 0 and the error is returned.
func (fs *FileSeqnum) Get() (uint64, error) {
	d, err := fs.store.ReadAll(fs.name)
	if err != nil && !os.IsNotExist(err) {
		return 0, errors.Wrapf(err, "seqnum data read failed")
	}

	newval := SeqnumStartVal

	if !os.IsNotExist(err) {
		v, err := strconv.ParseUint(string(d), 10, 64)
		if err != nil {
			return 0, errors.Wrapf(err, "seqnum data parse failed")
		}

		// Check for overflow.
		if math.MaxUint64 == v {
			newval = SeqnumStartVal
		} else {
			newval = v + 1
		}
	}

	err = fs.store.WriteAll(fs.name, []byte(strconv.FormatUint(newval, 10)))
	if err != nil {
		return 0, errors.Wrapf(err, "seqnum data write failed")
	}

	return newval, nil
}
// WriteConfig encodes the configuration and writes it to the given file.
// If the file exists, its contents will be overwritten.
func WriteConfig(config *api.Config, filename string) error {
	if config == nil {
		return errors.Errorf("could not write to '%s': config can't be nil", filename)
	}

	// Encode config to YAML.
	data, err := runtime.Encode(latest.Codec, config)
	if err != nil {
		return errors.Errorf("could not write to '%s': failed to encode config: %v", filename, err)
	}

	// Create the parent dir if it doesn't exist.
	dir := filepath.Dir(filename)
	if _, err := os.Stat(dir); os.IsNotExist(err) {
		if err = os.MkdirAll(dir, 0755); err != nil {
			return errors.Wrapf(err, "Error creating directory: %s", dir)
		}
	}

	// Write with restricted permissions.
	if err := ioutil.WriteFile(filename, data, 0600); err != nil {
		return errors.Wrapf(err, "Error writing file %s", filename)
	}

	return nil
}
// walkMap walks the data MapStr to arrive at the value specified by the key.
// The key is expressed in dot-notation (eg. x.y.z). When the key is found then
// the given mapStrOperation is invoked.
func walkMap(key string, data MapStr, op mapStrOperation) (interface{}, error) {
	var err error
	keyParts := strings.Split(key, ".")

	// Walk maps until reaching a leaf object.
	m := data
	for i, k := range keyParts[0 : len(keyParts)-1] {
		v, exists := m[k]
		if !exists {
			if op.CreateMissingKeys {
				newMap := MapStr{}
				m[k] = newMap
				m = newMap
				continue
			}
			return nil, errors.Wrapf(ErrKeyNotFound, "key=%v", strings.Join(keyParts[0:i+1], "."))
		}

		m, err = toMapStr(v)
		if err != nil {
			return nil, errors.Wrapf(err, "key=%v", strings.Join(keyParts[0:i+1], "."))
		}
	}

	// Execute the mapStrOperation on the leaf object.
	v, err := op.Do(keyParts[len(keyParts)-1], m)
	if err != nil {
		return nil, errors.Wrapf(err, "key=%v", key)
	}

	return v, nil
}
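// Illustrative only (not part of the original corpus): a hypothetical "put"
// operation driven through walkMap. It assumes mapStrOperation is a struct
// with the CreateMissingKeys flag and a Do callback, which is what the usage
// in walkMap above suggests; if the real type differs, adapt accordingly.
func examplePut(data MapStr, key string, value interface{}) (interface{}, error) {
	op := mapStrOperation{
		CreateMissingKeys: true,
		Do: func(leafKey string, m MapStr) (interface{}, error) {
			old := m[leafKey]
			m[leafKey] = value
			return old, nil // return the previous value, if any
		},
	}
	// For key "x.y.z" this creates data["x"]["y"] as needed and sets ["z"].
	return walkMap(key, data, op)
}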
// Init runs some startup code for this resource.
func (obj *PkgRes) Init() error {
	obj.BaseRes.kind = "Pkg"
	if err := obj.BaseRes.Init(); err != nil { // call base init, b/c we're overriding
		return err
	}

	bus := packagekit.NewBus()
	if bus == nil {
		return fmt.Errorf("Can't connect to PackageKit bus.")
	}
	defer bus.Close()

	result, err := obj.pkgMappingHelper(bus)
	if err != nil {
		return errwrap.Wrapf(err, "The pkgMappingHelper failed")
	}

	data, ok := result[obj.Name] // lookup single package (init does just one)
	// package doesn't exist, this is an error!
	if !ok || !data.Found {
		return fmt.Errorf("Can't find package named '%s'.", obj.Name)
	}

	packageIDs := []string{data.PackageID} // just one for now
	filesMap, err := bus.GetFilesByPackageID(packageIDs)
	if err != nil {
		return errwrap.Wrapf(err, "Can't run GetFilesByPackageID")
	}
	if files, ok := filesMap[data.PackageID]; ok {
		obj.fileList = util.DirifyFileList(files, false)
	}
	return nil
}
func getHandle(fn string) (*handle, error) {
	f, err := os.OpenFile(fn, O_PATH, 0)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to open %v with O_PATH", fn)
	}

	var stat syscall.Stat_t
	if err := syscall.Fstat(int(f.Fd()), &stat); err != nil {
		f.Close()
		return nil, errors.Wrapf(err, "failed to stat handle %v", f.Fd())
	}

	h := &handle{
		f:    f,
		name: fn,
		dev:  stat.Dev,
		ino:  stat.Ino,
	}

	// Check /proc just in case.
	if _, err := os.Stat(h.procPath()); err != nil {
		f.Close()
		return nil, errors.Wrapf(err, "couldn't stat %v", h.procPath())
	}

	return h, nil
}
func (n *Node) handleAddressChange(ctx context.Context, member *membership.Member, reconnectAddr string) error {
	newConn, err := n.ConnectToMember(reconnectAddr, 0)
	if err != nil {
		return errors.Wrapf(err, "could not connect to member ID %x at observed address %s", member.RaftID, reconnectAddr)
	}

	healthCtx, cancelHealth := context.WithTimeout(ctx, time.Duration(n.Config.ElectionTick)*n.opts.TickInterval)
	defer cancelHealth()

	if err := newConn.HealthCheck(healthCtx); err != nil {
		return errors.Wrapf(err, "%x failed health check at observed address %s", member.RaftID, reconnectAddr)
	}

	if err := n.cluster.ReplaceMemberConnection(member.RaftID, member, newConn, reconnectAddr, false); err != nil {
		newConn.Conn.Close()
		return errors.Wrap(err, "failed to replace connection to raft member")
	}

	// If we're the leader, write the address change to raft.
	updateCtx, cancelUpdate := context.WithTimeout(ctx, time.Duration(n.Config.ElectionTick)*n.opts.TickInterval)
	defer cancelUpdate()
	if err := n.updateMember(updateCtx, reconnectAddr, member.RaftID, member.NodeID); err != nil {
		return errors.Wrap(err, "failed to update member address in raft")
	}

	return nil
}
// QueueStmtForAbortion registers a statement whose transaction will be aborted.
//
// stmt needs to be the statement, literally as the parser will convert it back
// to a string.
// abortCount specifies how many times a txn running this statement will be
// aborted.
// willBeRetriedIbid should be set if the statement will be retried by the test
// (as an identical statement). This allows the TxnAborter to assert, on
// Close(), that the statement has been retried the intended number of times by
// the end of the test (besides asserting that an error was injected the right
// number of times). So, the Aborter can be used to check that the retry
// machinery has done its job. The Aborter will consider the statement to have
// been retried correctly if the statement has been executed at least once after
// the Aborter is done injecting errors because of it. So normally we'd expect
// this statement to be executed RestartCount + 1 times, but we allow it to be
// retried more times because the statement's txn might also be retried because
// of other statements.
//
// Calling QueueStmtForAbortion repeatedly with the same stmt is allowed, and
// each call checks that the previous one was satisfied.
func (ta *TxnAborter) QueueStmtForAbortion(
	stmt string, abortCount int, willBeRetriedIbid bool,
) error {
	ta.mu.Lock()
	defer ta.mu.Unlock()
	if ri, ok := ta.mu.stmtsToAbort[stmt]; ok {
		// If we're overwriting a statement that was already queued, verify it
		// first.
		if err := ri.Verify(); err != nil {
			return errors.Wrapf(err, `statement "%s" error`, stmt)
		}
	}
	// Extract the "key" - the value of the first col, which will be trampled on.
	switch matches := valuesRE.FindStringSubmatch(stmt); len(matches) {
	case 0, 1:
		return errors.Errorf(`bad statement "%s": key col not found`, stmt)
	default:
		key, err := strconv.Atoi(matches[1])
		if err != nil {
			return errors.Wrapf(err, `bad statement "%s"`, stmt)
		}
		ta.mu.stmtsToAbort[stmt] = &restartInfo{
			key:            key,
			abortCount:     abortCount,
			satisfied:      false,
			checkSatisfied: willBeRetriedIbid,
		}
		return nil
	}
}
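// Illustrative only (not part of the original corpus): a hypothetical helper
// showing the call shape described in the doc comment above. The statement,
// abort count, and aborter setup are invented for the example.
func exampleQueueStmt(ta *TxnAborter) error {
	const stmt = "INSERT INTO t.test(k, v) VALUES (1, 'boulanger')"
	// Abort the txn running this statement twice; the test will then retry the
	// identical statement, so ask the aborter to verify that on Close().
	return ta.QueueStmtForAbortion(stmt, 2, true /* willBeRetriedIbid */)
}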
// ReadTargets reads targets into map entries.
func (n *MapNode) ReadTargets(c ReadContext, val Val) error {
	list := c.List()
	for _, keyStr := range list {
		keyVal := reflect.New(n.KeyType).Elem()
		if err := n.UnmarshalKey(keyStr, keyVal); err != nil {
			return errors.Wrapf(err, "unmarshaling key")
		}
		elem := *n.ElemNode
		elemContext := c.Push(keyStr)
		elemVal := elem.NewKeyedVal(keyVal)
		err := elem.Read(elemContext, elemVal)
		// Set key field.
		if n.Field != nil && n.Field.KeyField != "" {
			n.Field.SetKeyFunc.Call([]reflect.Value{elemVal.Ptr, elemVal.Key})
		}
		if err != nil {
			return errors.Wrapf(err, "reading child %s", keyStr)
		}
		// TODO: Don't calculate these values every time.
		if reflect.DeepEqual(elemVal.Ptr.Elem().Interface(), reflect.New(elemVal.Ptr.Type().Elem()).Elem().Interface()) {
			nv := reflect.New(elemVal.Ptr.Type()).Elem()
			val.Ptr.Elem().SetMapIndex(elemVal.Key, nv)
		} else {
			val.Ptr.Elem().SetMapIndex(elemVal.Key, elemVal.Final())
		}
	}
	return nil
}
func (f *FileArchiver) Archive(tw *tar.Writer) error {
	info, err := os.Stat(f.path)
	if err != nil {
		return err
	}
	hdr, err := tar.FileInfoHeader(info, "")
	if err != nil {
		return errors.Wrapf(err, "arch: invalid file info header")
	}
	fd, err := os.Open(f.path)
	if err != nil {
		return errors.Wrapf(err, "arch: can not open file")
	}
	defer fd.Close()

	hdr.Name = f.archivePath
	if err = tw.WriteHeader(hdr); err != nil {
		return errors.Wrapf(err, "arch: error writing header")
	}
	_, err = io.Copy(tw, fd)
	if err != nil {
		return errors.Wrapf(err, "arch: error writing archive data")
	}
	return nil
}
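// Illustrative only (not part of the original corpus): writing a single file
// into a tar stream with FileArchiver. NewFileArchiver(path, archivePath) is
// assumed to be the constructor used elsewhere in this collection (see the
// Writer.write function above); the archive path here is invented.
func exampleArchiveFile(dst io.Writer, srcPath string) error {
	tw := tar.NewWriter(dst)
	fa := NewFileArchiver(srcPath, "data/payload.bin") // name of the entry inside the tarball
	if err := fa.Archive(tw); err != nil {
		return err
	}
	return tw.Close()
}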