// IssueAndSaveNewCertificates generates a new key-pair, signs it with the local root-ca, and returns a
// tls certificate
func (rca *RootCA) IssueAndSaveNewCertificates(paths CertPaths, cn, ou, org string) (*tls.Certificate, error) {
	csr, key, err := GenerateAndWriteNewKey(paths)
	if err != nil {
		return nil, errors.Wrap(err, "error when generating new node certs")
	}

	if !rca.CanSign() {
		return nil, ErrNoValidSigner
	}

	// Obtain a signed Certificate
	certChain, err := rca.ParseValidateAndSignCSR(csr, cn, ou, org)
	if err != nil {
		return nil, errors.Wrap(err, "failed to sign node certificate")
	}

	// Ensure directory exists
	err = os.MkdirAll(filepath.Dir(paths.Cert), 0755)
	if err != nil {
		return nil, err
	}

	// Write the chain to disk
	if err := ioutils.AtomicWriteFile(paths.Cert, certChain, 0644); err != nil {
		return nil, err
	}

	// Create a valid TLSKeyPair out of the PEM encoded private key and certificate
	tlsKeyPair, err := tls.X509KeyPair(certChain, key)
	if err != nil {
		return nil, err
	}

	return &tlsKeyPair, nil
}
func (n *Node) handleAddressChange(ctx context.Context, member *membership.Member, reconnectAddr string) error {
	newConn, err := n.ConnectToMember(reconnectAddr, 0)
	if err != nil {
		return errors.Wrapf(err, "could not connect to member ID %x at observed address %s", member.RaftID, reconnectAddr)
	}

	healthCtx, cancelHealth := context.WithTimeout(ctx, time.Duration(n.Config.ElectionTick)*n.opts.TickInterval)
	defer cancelHealth()

	if err := newConn.HealthCheck(healthCtx); err != nil {
		return errors.Wrapf(err, "%x failed health check at observed address %s", member.RaftID, reconnectAddr)
	}

	if err := n.cluster.ReplaceMemberConnection(member.RaftID, member, newConn, reconnectAddr, false); err != nil {
		newConn.Conn.Close()
		return errors.Wrap(err, "failed to replace connection to raft member")
	}

	// If we're the leader, write the address change to raft
	updateCtx, cancelUpdate := context.WithTimeout(ctx, time.Duration(n.Config.ElectionTick)*n.opts.TickInterval)
	defer cancelUpdate()
	if err := n.updateMember(updateCtx, reconnectAddr, member.RaftID, member.NodeID); err != nil {
		return errors.Wrap(err, "failed to update member address in raft")
	}

	return nil
}
func (s *Site) editSubmitPlayerViewHandler(w http.ResponseWriter, req *http.Request) {
	id := mux.Vars(req)["id"]
	pid := strToUint(id)
	log.Printf("edit player ID: %s", id)

	defer func() {
		if r := recover(); r != nil {
			s.commonEditPlayerViewHandler(w, req, fmt.Sprintf("%v", r))
		}
	}()

	if err := req.ParseForm(); err != nil {
		panic(errors.Wrap(err, "error processing form data"))
	}

	// expecting name to be a 1 element list, aliases a list with many aliases
	aliases := req.Form["aliases"]
	if len(aliases) == 0 {
		panic(errors.New("no aliases selected"))
	}
	log.Printf("claimed aliases: %s", aliases)

	err := s.p.UpdatePlayerAliases(pid, aliases)
	if err != nil {
		panic(errors.Wrap(err, "error updating player"))
	}

	// redirect to player view
	http.Redirect(w, req, s.playerViewURL(pid), http.StatusFound)
}
func migrateSnapshots(legacySnapDir, snapDir string) error {
	// use temporary snapshot directory so initialization appears atomic
	tmpdirpath := filepath.Clean(snapDir) + ".tmp"
	if fileutil.Exist(tmpdirpath) {
		if err := os.RemoveAll(tmpdirpath); err != nil {
			return errors.Wrap(err, "could not remove temporary snapshot directory")
		}
	}
	if err := fileutil.CreateDirAll(tmpdirpath); err != nil {
		return errors.Wrap(err, "could not create temporary snapshot directory")
	}

	snapshotNames, err := fileutil.ReadDir(legacySnapDir)
	if err != nil {
		return errors.Wrapf(err, "could not list snapshot directory %s", legacySnapDir)
	}

	for _, fname := range snapshotNames {
		err := os.Link(filepath.Join(legacySnapDir, fname), filepath.Join(tmpdirpath, fname))
		if err != nil {
			return errors.Wrap(err, "error linking snapshot file")
		}
	}

	if err := os.Rename(tmpdirpath, snapDir); err != nil {
		return err
	}

	return nil
}
func getDirtyExitFailedJobsFromRedis(redisClient *redis.Client) ([]redisWorker, error) {
	failedJobsLength, err := redisClient.LLen(resqueFailedQueueKey).Result()
	if err != nil {
		return []redisWorker{}, errors.Wrap(err, "Failed to get failed resque jobs length")
	}

	failedJobs, err := redisClient.LRange(resqueFailedQueueKey, 0, failedJobsLength).Result()
	if err != nil {
		return []redisWorker{}, errors.Wrap(err, "Failed to get failed resque jobs")
	}

	var failedResqueJobs []redisWorker
	for _, failedJob := range failedJobs {
		var job failedResqueJob
		if err := json.Unmarshal([]byte(failedJob), &job); err != nil {
			continue
		}
		if job.Exception == "Resque::DirtyExit" {
			failedResqueJobs = append(failedResqueJobs, redisWorker{failedJob, failedJob})
		}
	}

	return failedResqueJobs, nil
}
func UpdateCluster(h sshAble, d drivers.Driver, config KubernetesConfig) error {
	client, err := sshutil.NewSSHClient(d)
	if err != nil {
		return errors.Wrap(err, "Error creating new ssh client")
	}

	// transfer localkube from cache/asset to vm
	if localkubeURIWasSpecified(config) {
		lCacher := localkubeCacher{config}
		if err = lCacher.updateLocalkubeFromURI(client); err != nil {
			return errors.Wrap(err, "Error updating localkube from uri")
		}
	} else {
		if err = updateLocalkubeFromAsset(client); err != nil {
			return errors.Wrap(err, "Error updating localkube from asset")
		}
	}

	fileAssets := []CopyableFile{}
	addMinikubeAddonsDirToAssets(&fileAssets)

	// merge files to copy
	var copyableFiles []CopyableFile
	copyableFiles = append(copyableFiles, memoryAssets...)
	copyableFiles = append(copyableFiles, fileAssets...)

	// transfer files to vm
	for _, copyableFile := range copyableFiles {
		if err := sshutil.Transfer(copyableFile, copyableFile.GetLength(), copyableFile.GetTargetDir(),
			copyableFile.GetTargetName(), copyableFile.GetPermissions(), client); err != nil {
			return err
		}
	}
	return nil
}
// StartHost starts a host VM.
func StartHost(api libmachine.API, config MachineConfig) (*host.Host, error) {
	exists, err := api.Exists(constants.MachineName)
	if err != nil {
		return nil, errors.Wrapf(err, "Error checking if host exists: %s", constants.MachineName)
	}
	if !exists {
		return createHost(api, config)
	}

	glog.Infoln("Machine exists!")
	h, err := api.Load(constants.MachineName)
	if err != nil {
		return nil, errors.Wrap(err, "Error loading existing host. Please try running [minikube delete], then run [minikube start] again.")
	}

	s, err := h.Driver.GetState()
	glog.Infoln("Machine state: ", s)
	if err != nil {
		return nil, errors.Wrap(err, "Error getting state for host")
	}

	if s != state.Running {
		if err := h.Driver.Start(); err != nil {
			return nil, errors.Wrap(err, "Error starting stopped host")
		}
		if err := api.Save(h); err != nil {
			return nil, errors.Wrap(err, "Error saving started host")
		}
	}

	if err := h.ConfigureAuth(); err != nil {
		return nil, errors.Wrap(err, "Error configuring auth on host")
	}
	return h, nil
}
func requestToken(schemeData map[string]string) (string, error) {
	maxRetries := samlRequestTimeout(schemeData) - 7
	time.Sleep(5 * time.Second)

	id := samlRequestId(schemeData)
	v := url.Values{}
	v.Set("request_id", id)

	for count := 0; count <= maxRetries; count += 2 {
		u, err := GetURL("/auth/login")
		if err != nil {
			return "", errors.Wrap(err, "Error in GetURL")
		}
		resp, err := tsuruNet.Dial5Full300Client.Post(u, "application/x-www-form-urlencoded", strings.NewReader(v.Encode()))
		if err != nil {
			return "", errors.Wrap(err, "Error during login post")
		}
		defer resp.Body.Close()
		result, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return "", errors.Wrap(err, "Error reading body")
		}

		if strings.TrimSpace(string(result)) == saml.ErrRequestWaitingForCredentials.Message {
			if count < maxRetries {
				time.Sleep(2 * time.Second)
			}
			continue
		}

		data := make(map[string]interface{})
		if err = json.Unmarshal(result, &data); err != nil {
			return "", errors.Errorf("API response: %s", result)
		}
		return data["token"].(string), nil
	}

	// finish when timeout
	return "", saml.ErrRequestWaitingForCredentials
}
func (c *Card) fetchArbitraryCard() error {
	u, err := CurrentUser()
	if err != nil {
		return errors.Wrap(err, "No user logged in")
	}
	if c.db == nil {
		db, err := u.DB()
		if err != nil {
			return errors.Wrap(err, "Error connecting to User DB")
		}
		c.db = db
	}

	doc := make(map[string][]*fb.Card)
	query := map[string]interface{}{
		"selector": map[string]string{"type": "card"},
		"limit":    1,
	}
	if err := c.db.Find(query, &doc); err != nil {
		return err
	}
	if len(doc["docs"]) == 0 {
		return errors.New("No cards available")
	}
	c.Card = doc["docs"][0]
	return nil
}
func (s *DatabaseStore) GetJob(inst flux.InstanceID, id JobID) (Job, error) {
	var (
		queue       string
		method      string
		paramsBytes []byte
		scheduledAt time.Time
		priority    int
		key         string
		submittedAt time.Time
		claimedAt   nullTime
		heartbeatAt nullTime
		finishedAt  nullTime
		logStr      string
		status      string
		done        sql.NullBool
		success     sql.NullBool
	)
	if err := s.conn.QueryRow(`
		SELECT queue, method, params, scheduled_at, priority, key, submitted_at, claimed_at, heartbeat_at, finished_at, log, status, done, success
		  FROM jobs
		 WHERE id = $1 AND instance_id = $2
	`, string(id), string(inst)).Scan(
		&queue, &method, &paramsBytes, &scheduledAt, &priority, &key,
		&submittedAt, &claimedAt, &heartbeatAt, &finishedAt, &logStr, &status, &done, &success,
	); err == sql.ErrNoRows {
		return Job{}, ErrNoSuchJob
	} else if err != nil {
		return Job{}, errors.Wrap(err, "error getting job")
	}

	params, err := s.scanParams(method, paramsBytes)
	if err != nil {
		return Job{}, errors.Wrap(err, "unmarshaling params")
	}

	var log []string
	if err := json.NewDecoder(strings.NewReader(logStr)).Decode(&log); err != nil {
		return Job{}, errors.Wrap(err, "unmarshaling log")
	}

	return Job{
		Instance:    inst,
		ID:          id,
		Queue:       queue,
		Method:      method,
		Params:      params,
		ScheduledAt: scheduledAt,
		Priority:    priority,
		Key:         key,
		Submitted:   submittedAt,
		Claimed:     claimedAt.Time,
		Heartbeat:   heartbeatAt.Time,
		Finished:    finishedAt.Time,
		Log:         log,
		Status:      status,
		Done:        done.Bool,
		Success:     success.Bool,
	}, nil
}
// AddStockcollections adds the given related objects to the existing relationships
// of the contact, optionally inserting them as new records.
// Appends related to o.R.Stockcollections.
// Sets related.R.Contact appropriately.
func (o *Contact) AddStockcollections(exec boil.Executor, insert bool, related ...*Stockcollection) error {
	var err error
	for _, rel := range related {
		rel.ContactID.Int = o.ContactID
		rel.ContactID.Valid = true
		if insert {
			if err = rel.Insert(exec); err != nil {
				return errors.Wrap(err, "failed to insert into foreign table")
			}
		} else {
			if err = rel.Update(exec, "contact_id"); err != nil {
				return errors.Wrap(err, "failed to update foreign table")
			}
		}
	}

	if o.R == nil {
		o.R = &contactR{
			Stockcollections: related,
		}
	} else {
		o.R.Stockcollections = append(o.R.Stockcollections, related...)
	}

	for _, rel := range related {
		if rel.R == nil {
			rel.R = &stockcollectionR{
				Contact: o,
			}
		} else {
			rel.R.Contact = o
		}
	}
	return nil
}
func (pkg *smPackage) writeTargetState(client connection.Client) error {
	sess, err := client.NewSession()
	if err != nil {
		return err
	}
	defer sess.Close()

	stdin, err := sess.StdinPipe()
	if err != nil {
		return errors.Wrap(err, "failed to receive stdin pipe")
	}

	tstamp := time.Now().UTC().Format("20060102T150405")
	filename := fmt.Sprintf("/var/lib/smutje/%s.%s.log", pkg.ID, tstamp)
	cmd := fmt.Sprintf(`bash -c "cat - > %[1]s && ln -sf %[1]s /var/lib/smutje/%[2]s.log"`, filename, pkg.ID)
	if err := sess.Start(cmd); err != nil {
		return err
	}

	if _, err := io.WriteString(stdin, strings.Join(pkg.state, "\n")+"\n"); err != nil {
		return errors.Wrap(err, "failed to send script to target")
	}
	stdin.Close()

	return sess.Wait()
}
// UnmarshalJSON unmarshals a list of conditions from json.
func (cs Conditions) UnmarshalJSON(data []byte) error {
	if cs == nil {
		return errors.New("Can not be nil")
	}

	var jcs map[string]jsonCondition
	var dc Condition

	if err := json.Unmarshal(data, &jcs); err != nil {
		return errors.Wrap(err, "")
	}

	for k, jc := range jcs {
		for name, c := range ConditionFactories {
			if name == jc.Type {
				dc = c()

				if len(jc.Options) == 0 {
					cs[k] = dc
					break
				}

				if err := json.Unmarshal(jc.Options, dc); err != nil {
					return errors.Wrap(err, "")
				}

				cs[k] = dc
				break
			}
		}
	}

	return nil
}
// Update does PATCH workitem
func (c *WorkitemController) Update(ctx *app.UpdateWorkitemContext) error {
	return application.Transactional(c.db, func(appl application.Application) error {
		if ctx.Payload == nil || ctx.Payload.Data == nil || ctx.Payload.Data.ID == nil {
			return jsonapi.JSONErrorResponse(ctx, errors.NewBadParameterError("missing data.ID element in request", nil))
		}
		wi, err := appl.WorkItems().Load(ctx, *ctx.Payload.Data.ID)
		if err != nil {
			return jsonapi.JSONErrorResponse(ctx, errs.Wrap(err, fmt.Sprintf("Failed to load work item with id %v", *ctx.Payload.Data.ID)))
		}
		// Type changes of WI are not allowed, which is why we overwrite the
		// type with the old one after the WI has been converted.
		oldType := wi.Type
		err = ConvertJSONAPIToWorkItem(appl, *ctx.Payload.Data, wi)
		if err != nil {
			return jsonapi.JSONErrorResponse(ctx, err)
		}
		wi.Type = oldType
		wi, err = appl.WorkItems().Save(ctx, *wi)
		if err != nil {
			return jsonapi.JSONErrorResponse(ctx, errs.Wrap(err, "Error updating work item"))
		}
		wi2 := ConvertWorkItem(ctx.RequestData, wi)
		resp := &app.WorkItem2Single{
			Data: wi2,
			Links: &app.WorkItemLinks{
				Self: buildAbsoluteURL(ctx.RequestData),
			},
		}
		return ctx.OK(resp)
	})
}
// createSockets initializes the sockets for the socket group based on values from zmq.
func createSockets() (*zmq.Context, SocketGroup, error) {
	context, err := zmq.NewContext()
	if err != nil {
		return context, SocketGroup{}, errors.Wrap(err, "Could not create zmq Context")
	}

	var sg SocketGroup

	sg.ShellSocket, err = context.NewSocket(zmq.ROUTER)
	if err != nil {
		return context, sg, errors.Wrap(err, "Could not get Shell Socket")
	}

	sg.ControlSocket, err = context.NewSocket(zmq.ROUTER)
	if err != nil {
		return context, sg, errors.Wrap(err, "Could not get Control Socket")
	}

	sg.StdinSocket, err = context.NewSocket(zmq.ROUTER)
	if err != nil {
		return context, sg, errors.Wrap(err, "Could not get Stdin Socket")
	}

	sg.IOPubSocket, err = context.NewSocket(zmq.PUB)
	if err != nil {
		return context, sg, errors.Wrap(err, "Could not get IOPub Socket")
	}

	return context, sg, nil
}
// makePlots creates and saves the first of our plots showing the raw input data.
func makePlots(xys plotter.XYs) error {
	// Create a new plot.
	p, err := plot.New()
	if err != nil {
		return errors.Wrap(err, "Could not create plot object")
	}

	// Label the new plot.
	p.Title.Text = "Daily Counts of Go Repos Created"
	p.X.Label.Text = "Days from Jan. 1, 2013"
	p.Y.Label.Text = "Count"

	// Add the prepared points to the plot.
	if err = plotutil.AddLinePoints(p, "Counts", xys); err != nil {
		return errors.Wrap(err, "Could not add lines to plot")
	}

	// Save the plot to a PNG file.
	if err := p.Save(7*vg.Inch, 4*vg.Inch, "countseries.png"); err != nil {
		return errors.Wrap(err, "Could not output plot")
	}

	return nil
}
// IssueAndSaveNewCertificates generates a new key-pair, signs it with the local root-ca, and returns a
// tls certificate
func (rca *RootCA) IssueAndSaveNewCertificates(kw KeyWriter, cn, ou, org string) (*tls.Certificate, error) {
	csr, key, err := GenerateNewCSR()
	if err != nil {
		return nil, errors.Wrap(err, "error when generating new node certs")
	}

	if !rca.CanSign() {
		return nil, ErrNoValidSigner
	}

	// Obtain a signed Certificate
	certChain, err := rca.ParseValidateAndSignCSR(csr, cn, ou, org)
	if err != nil {
		return nil, errors.Wrap(err, "failed to sign node certificate")
	}

	// Create a valid TLSKeyPair out of the PEM encoded private key and certificate
	tlsKeyPair, err := tls.X509KeyPair(certChain, key)
	if err != nil {
		return nil, err
	}

	if err := kw.Write(certChain, key, nil); err != nil {
		return nil, err
	}

	return &tlsKeyPair, nil
}
func (c *Conn) sendLoop() {
	defer c.wg.Done()

	sendc := c.Send
	for {
		select {
		case msg := <-sendc:
			log.Printf("ws-conn-%04d: send-> %# v", c.ID, pretty.Formatter(msg))
			err := websocket.JSON.Send(c.WSConn, msg)
			if err != nil {
				if !isUseOfClosed(err) {
					log.Printf("ws-conn-%04d: send error: %v", c.ID, err)
					c.errors.Push(errors.Wrap(err, "send error"))
				}
				c.Close()
				sendc = nil // stop sending messages after an error
			}
		case <-c.shutdown:
			err := c.WSConn.Close()
			if err != nil && !isUseOfClosed(err) {
				log.Printf("ws-conn-%04d: close error: %v", c.ID, err)
				c.errors.Push(errors.Wrap(err, "close error"))
			}
			log.Printf("ws-conn-%04d: closed", c.ID)
			return
		case <-c.Done:
			c.Close()
		}
	}
}
func (m *MachineConfig) CacheMinikubeISOFromURL() error {
	// store the minikube-iso inside the .minikube dir
	response, err := http.Get(m.MinikubeISO)
	if err != nil {
		return errors.Wrapf(err, "Error getting minikube iso at %s via http", m.MinikubeISO)
	}
	defer response.Body.Close()

	isoData, err := ioutil.ReadAll(response.Body)
	if err != nil {
		return errors.Wrap(err, "Error reading minikubeISO url response")
	}

	// Validate the ISO if it was the default URL, before writing it to disk.
	if m.MinikubeISO == constants.DefaultIsoUrl {
		if !isIsoChecksumValid(&isoData, constants.DefaultIsoShaUrl) {
			return errors.New("Error validating ISO checksum.")
		}
	}

	if response.StatusCode != http.StatusOK {
		return errors.Errorf("Received %d response from %s while trying to download minikube.iso", response.StatusCode, m.MinikubeISO)
	}

	out, err := os.Create(m.GetISOCacheFilepath())
	if err != nil {
		return errors.Wrap(err, "Error creating minikube iso cache filepath")
	}
	defer out.Close()

	if _, err = out.Write(isoData); err != nil {
		return errors.Wrap(err, "Error writing iso data to file")
	}
	return nil
}
// Mount mounts the remote at mountpoint.
//
// If noModTime is set then it
func Mount(f fs.Fs, mountpoint string) error {
	if debugFUSE {
		fuse.Debug = func(msg interface{}) {
			fs.Debug("fuse", "%v", msg)
		}
	}

	// Set permissions
	dirPerms = 0777 &^ os.FileMode(umask)
	filePerms = 0666 &^ os.FileMode(umask)

	// Show stats if the user has specifically requested them
	if cmd.ShowStats() {
		stopStats := cmd.StartStats()
		defer close(stopStats)
	}

	// Mount it
	errChan, err := mount(f, mountpoint)
	if err != nil {
		return errors.Wrap(err, "failed to mount FUSE fs")
	}

	// Wait for umount
	err = <-errChan
	if err != nil {
		return errors.Wrap(err, "failed to umount FUSE fs")
	}

	return nil
}
func migrateWALs(legacyWALDir, walDir string) error {
	// keep temporary wal directory so WAL initialization appears atomic
	tmpdirpath := filepath.Clean(walDir) + ".tmp"
	if fileutil.Exist(tmpdirpath) {
		if err := os.RemoveAll(tmpdirpath); err != nil {
			return errors.Wrap(err, "could not remove temporary WAL directory")
		}
	}
	if err := fileutil.CreateDirAll(tmpdirpath); err != nil {
		return errors.Wrap(err, "could not create temporary WAL directory")
	}

	walNames, err := fileutil.ReadDir(legacyWALDir)
	if err != nil {
		return errors.Wrapf(err, "could not list WAL directory %s", legacyWALDir)
	}

	for _, fname := range walNames {
		_, err := copyFile(filepath.Join(legacyWALDir, fname), filepath.Join(tmpdirpath, fname), 0600)
		if err != nil {
			return errors.Wrap(err, "error copying WAL file")
		}
	}

	if err := os.Rename(tmpdirpath, walDir); err != nil {
		return err
	}

	return nil
}
func extractRegexp(r **regexp.Regexp, m map[string]interface{}, s string) error {
	v, ok := m[s]
	if !ok {
		return nil
	}

	val, ok := v.(string)
	if !ok {
		return errors.Wrap(
			errInvalidType("string", v),
			"failed to extract regular expression",
		)
	}

	rx, err := regexp.Compile(val)
	if err != nil {
		return errors.Wrap(
			errors.Wrapf(
				err,
				"failed to compile regular expression: %s",
				strconv.Quote(val),
			),
			"failed to extract regular expression",
		)
	}

	*r = rx
	return nil
}
// Load loads the ByteCode for the template specified by `key`, which, for this
// ByteCodeLoader, is the path to the template we want.
// If a cached vm.ByteCode struct is found, it is loaded and its last modified
// time is compared against that of the template file. If the template is
// newer, it is recompiled. Otherwise the cached version is used, saving us the
// time to parse and compile the template.
func (l *CachedByteCodeLoader) Load(key string) (bc *vm.ByteCode, err error) {
	defer func() {
		if bc != nil && err == nil && l.ShouldDumpByteCode() {
			fmt.Fprintf(os.Stderr, "%s\n", bc.String())
		}
	}()

	var source TemplateSource
	if l.CacheLevel > CacheNone {
		var entity *CacheEntity
		for _, cache := range l.Caches {
			entity, err = cache.Get(key)
			if err == nil {
				break
			}
		}

		if err == nil {
			if l.CacheLevel == CacheNoVerify {
				return entity.ByteCode, nil
			}

			t, err := entity.Source.LastModified()
			if err != nil {
				return nil, errors.Wrap(err, "failed to get last-modified from source")
			}

			if t.Before(entity.ByteCode.GeneratedOn) {
				return entity.ByteCode, nil
			}

			// ByteCode validation failed, but we can still re-use source
			source = entity.Source
		}
	}

	if source == nil {
		source, err = l.Fetcher.FetchTemplate(key)
		if err != nil {
			return nil, errors.Wrap(err, "failed to fetch template")
		}
	}

	rdr, err := source.Reader()
	if err != nil {
		return nil, errors.Wrap(err, "failed to get the reader")
	}

	bc, err = l.LoadReader(key, rdr)
	if err != nil {
		return nil, errors.Wrap(err, "failed to read byte code")
	}

	entity := &CacheEntity{bc, source}
	for _, cache := range l.Caches {
		cache.Set(key, entity)
	}

	return bc, nil
}
// Extract takes either a list of `map[string]interface{}` or
// a single `map[string]interface{}` to initialize this list
// of schemas
func (l *SchemaList) Extract(v interface{}) error {
	switch val := v.(type) {
	case []interface{}:
		*l = make([]*Schema, len(val))
		var s *Schema
		for i, d := range val {
			m, ok := d.(map[string]interface{})
			if !ok {
				return errors.Wrap(
					errInvalidType("map[string]interface{}", d),
					"failed to extract schema list",
				)
			}
			if err := extractSingleSchema(&s, m); err != nil {
				return errors.Wrap(err, "failed to extract schema list")
			}
			(*l)[i] = s
		}
		return nil
	case map[string]interface{}:
		var s *Schema
		if err := extractSingleSchema(&s, val); err != nil {
			return errors.Wrap(err, "failed to extract schema list")
		}
		*l = []*Schema{s}
		return nil
	default:
		return errors.Wrap(
			errInvalidType("[]*Schema or *Schema", v),
			"failed to extract schema list",
		)
	}
}
func removeDeadRedisWorker(redisClient *redis.Client, redisWorker redisWorker) error {
	bytes, err := redisClient.Get(fmt.Sprintf("%s:%s", resqueWorkerKey, redisWorker.info)).Bytes()
	if err != nil && err != redis.Nil {
		return errors.Wrap(err, fmt.Sprintf("Error getting %s from redis", redisWorker))
	}
	if err != nil && err == redis.Nil {
		// Redis key not present so the issue probably corrected itself
		return nil
	}

	if err := retryDeadWorker(redisClient, bytes); err != nil {
		return errors.Wrap(err, fmt.Sprintf("Unable to retry %s", redisWorker))
	}

	redisClient.Pipelined(func(pipe *redis.Pipeline) error {
		pipe.SRem(resqueWorkersKey, redisWorker.info)
		pipe.Del(fmt.Sprintf("%s:%s", resqueWorkerKey, redisWorker.info))
		pipe.Del(fmt.Sprintf("%s:%s:started", resqueWorkerKey, redisWorker.info))
		pipe.Del(fmt.Sprintf("%s:%s:shutdown", resqueWorkerKey, redisWorker.info))
		pipe.Del(fmt.Sprintf("%s:%s", statProcessedKey, redisWorker.info))
		pipe.Del(fmt.Sprintf("%s:%s", statFailedKey, redisWorker.info))
		return nil
	})

	return nil
}
func extractItems(res **ItemSpec, m map[string]interface{}, name string) error {
	v, ok := m[name]
	if !ok {
		return nil
	}

	if pdebug.Enabled {
		pdebug.Printf("Found array element '%s'", name)
	}

	tupleMode := false
	switch v.(type) {
	case []interface{}:
		tupleMode = true
	case map[string]interface{}:
	default:
		return errors.Wrap(
			errInvalidType("[]interface{} or map[string]interface{}", v),
			"failed to extract items",
		)
	}

	items := ItemSpec{}
	items.TupleMode = tupleMode

	var err error
	if err = items.Schemas.extractIfPresent(m, name); err != nil {
		return errors.Wrap(err, "failed to extract schema for items")
	}
	*res = &items
	return nil
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
	client, _, err := oauthutil.NewClient(name, oauthConfig)
	if err != nil {
		return nil, errors.Wrap(err, "failed to configure Hubic")
	}

	f := &Fs{
		client: client,
	}

	// Make the swift Connection
	c := &swiftLib.Connection{
		Auth:           newAuth(f),
		UserAgent:      fs.UserAgent,
		ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
		Timeout:        10 * fs.Config.Timeout,        // Use the timeouts in the transport
		Transport:      fs.Config.Transport(),
	}
	err = c.Authenticate()
	if err != nil {
		return nil, errors.Wrap(err, "error authenticating swift connection")
	}

	// Make inner swift Fs from the connection
	swiftFs, err := swift.NewFsWithConnection(name, root, c)
	if err != nil {
		return nil, err
	}
	f.Fs = swiftFs
	return f, nil
}
// PrepareSockets sets up the ZMQ sockets through which the kernel will communicate.
func PrepareSockets(connInfo ConnectionInfo) (SocketGroup, error) {
	// Initialize the Socket Group.
	context, sg, err := createSockets()
	if err != nil {
		return sg, errors.Wrap(err, "Could not initialize context and Socket Group")
	}

	// Bind the sockets.
	address := fmt.Sprintf("%v://%v:%%v", connInfo.Transport, connInfo.IP)
	sg.ShellSocket.Bind(fmt.Sprintf(address, connInfo.ShellPort))
	sg.ControlSocket.Bind(fmt.Sprintf(address, connInfo.ControlPort))
	sg.StdinSocket.Bind(fmt.Sprintf(address, connInfo.StdinPort))
	sg.IOPubSocket.Bind(fmt.Sprintf(address, connInfo.IOPubPort))

	// Message signing key
	sg.Key = []byte(connInfo.Key)

	// Start the heartbeat device
	HBSocket, err := context.NewSocket(zmq.REP)
	if err != nil {
		return sg, errors.Wrap(err, "Could not get the Heartbeat device socket")
	}
	HBSocket.Bind(fmt.Sprintf(address, connInfo.HBPort))
	go zmq.Device(zmq.FORWARDER, HBSocket, HBSocket)

	return sg, nil
}
// Hash returns the requested hash of a file as a lowercase hex string
func (o *Object) Hash(r fs.HashType) (string, error) {
	// Check that the underlying file hasn't changed
	oldtime := o.info.ModTime()
	oldsize := o.info.Size()
	err := o.lstat()
	if err != nil {
		return "", errors.Wrap(err, "hash: failed to stat")
	}

	if !o.info.ModTime().Equal(oldtime) || oldsize != o.info.Size() {
		o.hashes = nil
	}

	if o.hashes == nil {
		o.hashes = make(map[fs.HashType]string)
		in, err := os.Open(o.path)
		if err != nil {
			return "", errors.Wrap(err, "hash: failed to open")
		}
		o.hashes, err = fs.HashStream(in)
		closeErr := in.Close()
		if err != nil {
			return "", errors.Wrap(err, "hash: failed to read")
		}
		if closeErr != nil {
			return "", errors.Wrap(closeErr, "hash: failed to close")
		}
	}
	return o.hashes[r], nil
}
func initScanArgs(args []string) (startKey, endKey roachpb.Key, _ error) {
	if len(args) >= 1 {
		unquoted, err := unquoteArg(args[0], false)
		if err != nil {
			return nil, nil, errors.Wrap(err, "invalid start key")
		}
		startKey = roachpb.Key(unquoted)
	} else {
		// Start with the first key after the system key range.
		startKey = keys.UserDataSpan.Key
	}

	if len(args) >= 2 {
		unquoted, err := unquoteArg(args[1], false)
		if err != nil {
			return nil, nil, errors.Wrap(err, "invalid end key")
		}
		endKey = roachpb.Key(unquoted)
	} else {
		// Exclude table data keys by default. The user can explicitly request them
		// by passing \xff\xff for the end key.
		endKey = keys.UserDataSpan.EndKey
	}

	if bytes.Compare(startKey, endKey) >= 0 {
		return nil, nil, errors.New("start key must be smaller than end key")
	}
	return startKey, endKey, nil
}