func (p rootDevicePartitioner) GetDeviceSizeInBytes(devicePath string) (uint64, error) { p.logger.Debug(p.logTag, "Getting size of disk remaining after first partition") stdout, _, _, err := p.cmdRunner.RunCommand("parted", "-m", devicePath, "unit", "B", "print") if err != nil { return 0, bosherr.WrapErrorf(err, "Getting remaining size of `%s'", devicePath) } allLines := strings.Split(stdout, "\n") if len(allLines) < 3 { return 0, bosherr.Errorf("Getting remaining size of `%s'", devicePath) } partitionInfoLines := allLines[1:3] deviceInfo := strings.Split(partitionInfoLines[0], ":") deviceFullSizeInBytes, err := strconv.ParseUint(strings.TrimRight(deviceInfo[1], "B"), 10, 64) if err != nil { return 0, bosherr.WrapErrorf(err, "Getting remaining size of `%s'", devicePath) } firstPartitionInfo := strings.Split(partitionInfoLines[1], ":") firstPartitionEndInBytes, err := strconv.ParseUint(strings.TrimRight(firstPartitionInfo[2], "B"), 10, 64) if err != nil { return 0, bosherr.WrapErrorf(err, "Getting remaining size of `%s'", devicePath) } remainingSizeInBytes := deviceFullSizeInBytes - firstPartitionEndInBytes - 1 return remainingSizeInBytes, nil }
func (fs *osFileSystem) Chown(path, username string) error { fs.logger.Debug(fs.logTag, "Chown %s to user %s", path, username) uid, err := fs.runCommand(fmt.Sprintf("id -u %s", username)) if err != nil { return bosherr.WrapErrorf(err, "Getting user id for '%s'", username) } uidAsInt, err := strconv.Atoi(uid) if err != nil { return bosherr.WrapError(err, "Converting UID to integer") } gid, err := fs.runCommand(fmt.Sprintf("id -g %s", username)) if err != nil { return bosherr.WrapErrorf(err, "Getting group id for '%s'", username) } gidAsInt, err := strconv.Atoi(gid) if err != nil { return bosherr.WrapError(err, "Converting GID to integer") } err = os.Chown(path, uidAsInt, gidAsInt) if err != nil { return bosherr.WrapError(err, "Doing Chown") } return nil }
func (fs *osFileSystem) Symlink(oldPath, newPath string) error { fs.logger.Debug(fs.logTag, "Symlinking oldPath %s with newPath %s", oldPath, newPath) actualOldPath, err := filepath.EvalSymlinks(oldPath) if err != nil { return bosherr.WrapErrorf(err, "Evaluating symlinks for %s", oldPath) } existingTargetedPath, err := filepath.EvalSymlinks(newPath) if err == nil { if existingTargetedPath == actualOldPath { return nil } err = os.Remove(newPath) if err != nil { return bosherr.WrapErrorf(err, "Failed to delete symlimk at %s", newPath) } } containingDir := filepath.Dir(newPath) if !fs.FileExists(containingDir) { fs.MkdirAll(containingDir, os.FileMode(0700)) } return os.Symlink(oldPath, newPath) }
func (fs *osFileSystem) ConvergeFileContents(path string, content []byte) (bool, error) { if fs.filesAreIdentical(content, path) { fs.logger.Debug(fs.logTag, "Skipping writing %s because contents are identical", path) return false, nil } fs.logger.Debug(fs.logTag, "File %s will be overwritten", path) err := fs.MkdirAll(filepath.Dir(path), os.ModePerm) if err != nil { return true, bosherr.WrapErrorf(err, "Making dir for file %s", path) } file, err := os.Create(path) if err != nil { return true, bosherr.WrapErrorf(err, "Creating file %s", path) } defer file.Close() _, err = file.Write(content) if err != nil { return true, bosherr.WrapErrorf(err, "Writing content to file %s", path) } return true, nil }
func (net centosNetManager) writeDHCPConfiguration(dnsServers []string, dhcpInterfaceConfigurations []DHCPInterfaceConfiguration) (bool, error) { buffer := bytes.NewBuffer([]byte{}) t := template.Must(template.New("dhcp-config").Parse(centosDHCPConfigTemplate)) // Keep DNS servers in the order specified by the network // because they are added by a *single* DHCP's prepend command dnsServersList := strings.Join(dnsServers, ", ") err := t.Execute(buffer, dnsServersList) if err != nil { return false, bosherr.WrapError(err, "Generating config from template") } dhclientConfigFile := "/etc/dhcp/dhclient.conf" changed, err := net.fs.ConvergeFileContents(dhclientConfigFile, buffer.Bytes()) if err != nil { return changed, bosherr.WrapErrorf(err, "Writing to %s", dhclientConfigFile) } for i := range dhcpInterfaceConfigurations { name := dhcpInterfaceConfigurations[i].Name interfaceDhclientConfigFile := filepath.Join("/etc/dhcp/", "dhclient-"+name+".conf") err = net.fs.Symlink(dhclientConfigFile, interfaceDhclientConfigFile) if err != nil { return changed, bosherr.WrapErrorf(err, "Symlinking '%s' to '%s'", interfaceDhclientConfigFile, dhclientConfigFile) } } return changed, nil }
func (p linux) setupRunDir(sysDir string) error { runDir := filepath.Join(sysDir, "run") runDirIsMounted, err := p.IsMountPoint(runDir) if err != nil { return bosherr.WrapErrorf(err, "Checking for mount point %s", runDir) } if !runDirIsMounted { err = p.fs.MkdirAll(runDir, runDirPermissions) if err != nil { return bosherr.WrapErrorf(err, "Making %s dir", runDir) } err = p.diskManager.GetMounter().Mount("tmpfs", runDir, "-t", "tmpfs", "-o", "size=1m") if err != nil { return bosherr.WrapErrorf(err, "Mounting tmpfs to %s", runDir) } _, _, _, err = p.cmdRunner.RunCommand("chown", "root:vcap", runDir) if err != nil { return bosherr.WrapErrorf(err, "chown %s", runDir) } } return nil }
func (p linux) SetupDataDir() error { dataDir := p.dirProvider.DataDir() sysDataDir := filepath.Join(dataDir, "sys") logDir := filepath.Join(sysDataDir, "log") err := p.fs.MkdirAll(logDir, logDirPermissions) if err != nil { return bosherr.WrapErrorf(err, "Making %s dir", logDir) } _, _, _, err = p.cmdRunner.RunCommand("chown", "root:vcap", sysDataDir) if err != nil { return bosherr.WrapErrorf(err, "chown %s", sysDataDir) } _, _, _, err = p.cmdRunner.RunCommand("chown", "root:vcap", logDir) if err != nil { return bosherr.WrapErrorf(err, "chown %s", logDir) } err = p.setupRunDir(sysDataDir) if err != nil { return err } sysDir := filepath.Join(filepath.Dir(dataDir), "sys") err = p.fs.Symlink(sysDataDir, sysDir) if err != nil { return bosherr.WrapErrorf(err, "Symlinking '%s' to '%s'", sysDir, sysDataDir) } return nil }
func (p sfdiskPartitioner) diskMatchesPartitions(devicePath string, partitionsToMatch []Partition) (result bool) { existingPartitions, err := p.getPartitions(devicePath) if err != nil { err = bosherr.WrapErrorf(err, "Getting partitions for %s", devicePath) return } if len(existingPartitions) < len(partitionsToMatch) { return } remainingDiskSpace, err := p.GetDeviceSizeInBytes(devicePath) if err != nil { err = bosherr.WrapErrorf(err, "Getting device size for %s", devicePath) return } for index, partitionToMatch := range partitionsToMatch { if index == len(partitionsToMatch)-1 { partitionToMatch.SizeInBytes = remainingDiskSpace } existingPartition := existingPartitions[index] switch { case existingPartition.Type != partitionToMatch.Type: return case !withinDelta(existingPartition.SizeInBytes, partitionToMatch.SizeInBytes, p.convertFromMbToBytes(20)): return } remainingDiskSpace = remainingDiskSpace - partitionToMatch.SizeInBytes } return true }
// Partition creates the requested partitions on the root device after the
// existing first (root) partition, aligning partition starts to 1MiB
// boundaries. It is a no-op when the partitions after the first already
// match, and refuses to act when unexpected extra partitions are present.
func (p rootDevicePartitioner) Partition(devicePath string, partitions []Partition) error {
	existingPartitions, deviceFullSizeInBytes, err := p.getPartitions(devicePath)
	if err != nil {
		return bosherr.WrapErrorf(err, "Getting existing partitions of `%s'", devicePath)
	}
	p.logger.Debug(p.logTag, "Current partitions: %#v", existingPartitions)

	if len(existingPartitions) == 0 {
		return bosherr.Errorf("Missing first partition on `%s'", devicePath)
	}

	// Partitions beyond the first one are compared to the requested layout.
	if p.partitionsMatch(existingPartitions[1:], partitions) {
		p.logger.Info(p.logTag, "Partitions already match, skipping partitioning")
		return nil
	}

	// Extra partitions exist but do not match: refuse to repartition rather
	// than risk destroying data on them.
	if len(existingPartitions) > 1 {
		p.logger.Error(p.logTag,
			"Failed to create ephemeral partitions on root device `%s'. Expected 1 partition, found %d: %s",
			devicePath,
			len(existingPartitions),
			existingPartitions,
		)
		return bosherr.Errorf("Found %d unexpected partitions on `%s'", len(existingPartitions)-1, devicePath)
	}

	// To support optimal reads on HDDs and optimal erasure on SSD: use 1MiB partition alignments.
	alignmentInBytes := uint64(1048576)

	// First new partition starts at the next aligned byte after the root
	// partition's end.
	partitionStart := p.roundUp(existingPartitions[0].EndInBytes+1, alignmentInBytes)

	for index, partition := range partitions {
		partitionEnd := partitionStart + partition.SizeInBytes - 1
		// Clamp any partition that would run past the end of the device.
		if partitionEnd >= deviceFullSizeInBytes {
			partitionEnd = deviceFullSizeInBytes - 1
			// NOTE(review): the logged size is partitionEnd-partitionStart,
			// one byte less than the inclusive partition size — confirm intended.
			p.logger.Info(p.logTag, "Partition %d would be larger than remaining space. Reducing size to %dB", index, partitionEnd-partitionStart)
		}
		p.logger.Info(p.logTag, "Creating partition %d with start %dB and end %dB", index, partitionStart, partitionEnd)

		_, _, _, err := p.cmdRunner.RunCommand(
			"parted",
			"-s",
			devicePath,
			"unit",
			"B",
			"mkpart",
			"primary",
			fmt.Sprintf("%d", partitionStart),
			fmt.Sprintf("%d", partitionEnd),
		)
		if err != nil {
			return bosherr.WrapErrorf(err, "Partitioning disk `%s'", devicePath)
		}

		// Next partition begins at the following aligned byte.
		partitionStart = p.roundUp(partitionEnd+1, alignmentInBytes)
	}

	return nil
}
// dispatchAsynchronousAction starts the given action as a background task and
// immediately returns a value response carrying the task ID and state so the
// caller can poll for completion. Persistent actions additionally record
// task info so they can be resumed after an agent restart.
func (dispatcher concreteActionDispatcher) dispatchAsynchronousAction(
	action boshaction.Action,
	req boshhandler.Request,
) boshhandler.Response {
	dispatcher.logger.Info(actionDispatcherLogTag, "Running async action %s", req.Method)

	var task boshtask.Task
	var err error

	// runTask executes the action's work; cancelTask forwards cancellation
	// requests to the action itself.
	runTask := func() (interface{}, error) {
		return dispatcher.actionRunner.Run(action, req.GetPayload())
	}
	cancelTask := func(_ boshtask.Task) error {
		return action.Cancel()
	}

	// Certain long-running tasks (e.g. configure_networks) must be resumed
	// after agent restart so that API consumers do not need to know
	// if agent is restarted midway through the task.
	if action.IsPersistent() {
		dispatcher.logger.Info(actionDispatcherLogTag, "Running persistent action %s", req.Method)
		task, err = dispatcher.taskService.CreateTask(runTask, cancelTask, dispatcher.removeInfo)
		if err != nil {
			err = bosherr.WrapErrorf(err, "Create Task Failed %s", req.Method)
			dispatcher.logger.Error(actionDispatcherLogTag, err.Error())
			return boshhandler.NewExceptionResponse(err)
		}

		// Record enough information to re-create the task after a restart.
		taskInfo := boshtask.Info{
			TaskID:  task.ID,
			Method:  req.Method,
			Payload: req.GetPayload(),
		}

		err = dispatcher.taskManager.AddInfo(taskInfo)
		if err != nil {
			err = bosherr.WrapErrorf(err, "Action Failed %s", req.Method)
			dispatcher.logger.Error(actionDispatcherLogTag, err.Error())
			return boshhandler.NewExceptionResponse(err)
		}
	} else {
		task, err = dispatcher.taskService.CreateTask(runTask, cancelTask, nil)
		if err != nil {
			err = bosherr.WrapErrorf(err, "Create Task Failed %s", req.Method)
			dispatcher.logger.Error(actionDispatcherLogTag, err.Error())
			return boshhandler.NewExceptionResponse(err)
		}
	}

	// NOTE(review): StartTask's result is not captured — confirm it cannot
	// fail, or that failures surface via the task's state.
	dispatcher.taskService.StartTask(task)

	return boshhandler.NewValueResponse(boshtask.StateValue{
		AgentTaskID: task.ID,
		State:       task.State,
	})
}
func (m linuxMounter) Remount(fromMountPoint, toMountPoint string, mountOptions ...string) error { partitionPath, found, err := m.findDeviceMatchingMountPoint(fromMountPoint) if err != nil || !found { return bosherr.WrapErrorf(err, "Error finding device for mount point %s", fromMountPoint) } _, err = m.Unmount(fromMountPoint) if err != nil { return bosherr.WrapErrorf(err, "Unmounting %s", fromMountPoint) } return m.Mount(partitionPath, toMountPoint, mountOptions...) }
func (p linux) changeTmpDirPermissions(path string) error { _, _, _, err := p.cmdRunner.RunCommand("chown", "root:vcap", path) if err != nil { return bosherr.WrapErrorf(err, "chown %s", path) } _, _, _, err = p.cmdRunner.RunCommand("chmod", "0770", path) if err != nil { return bosherr.WrapErrorf(err, "chmod %s", path) } return nil }
// TerminateNicely can be called multiple times simultaneously from different goroutines
//
// It sends SIGTERM to the process group, waits up to killGracePeriod for the
// group to exit, escalates to SIGKILL if it does not, and finally polls for
// up to ~10s for the group to disappear.
func (p *execProcess) TerminateNicely(killGracePeriod time.Duration) error {
	// Make sure process is being waited on for process state reaping to occur
	// as to avoid forcibly killing the process after killGracePeriod
	if p.waitCh == nil {
		panic("TerminateNicely() must be called after Wait()")
	}

	err := p.signalGroup(syscall.SIGTERM)
	if err != nil {
		return bosherr.WrapErrorf(err, "Sending SIGTERM to process group %d", p.pid)
	}

	terminatedCh := make(chan struct{})
	stopCheckingTerminatedCh := make(chan struct{})

	// Poll every 500ms for the group's disappearance; close terminatedCh
	// once it is gone, or abandon polling when told to stop.
	go func() {
		for p.groupExists() {
			select {
			case <-time.After(500 * time.Millisecond):
				// nothing to do
			case <-stopCheckingTerminatedCh:
				return
			}
		}
		close(terminatedCh)
	}()

	select {
	case <-terminatedCh:
		// nothing to do
	case <-time.After(killGracePeriod):
		// Grace period expired: stop the poller and force-kill the group.
		close(stopCheckingTerminatedCh)
		err = p.signalGroup(syscall.SIGKILL)
		if err != nil {
			return bosherr.WrapErrorf(err, "Sending SIGKILL to process group %d", p.pid)
		}
	}

	// It takes some time for the process to disappear
	for i := 0; i < 20; i++ {
		if !p.groupExists() {
			return nil
		}
		time.Sleep(500 * time.Millisecond)
	}

	return bosherr.Errorf("Failed to kill process after grace timeout (PID %d)", p.pid)
}
func (fs *osFileSystem) listDirContents(dirPath string) ([]os.FileInfo, error) { directory, err := os.Open(dirPath) if err != nil { return nil, bosherr.WrapErrorf(err, "Openning dir '%s' for reading", dirPath) } defer directory.Close() files, err := directory.Readdir(-1) if err != nil { return nil, bosherr.WrapErrorf(err, "Reading dir '%s' contents", dirPath) } return files, nil }
func (r ipResolver) GetPrimaryIPv4(interfaceName string) (*gonet.IPNet, error) { addrs, err := r.ifaceToAddrsFunc(interfaceName) if err != nil { return nil, bosherr.WrapErrorf(err, "Looking up addresses for interface '%s'", interfaceName) } if len(addrs) == 0 { return nil, bosherr.Errorf("No addresses found for interface '%s'", interfaceName) } for _, addr := range addrs { ip, ok := addr.(*gonet.IPNet) if !ok { continue } // ignore ipv6 if ip.IP.To4() == nil { continue } return ip, nil } return nil, bosherr.Errorf("Failed to find primary IPv4 address for interface '%s'", interfaceName) }
// Start registers handlerFunc, connects to NATS, and subscribes to this
// agent's subject ("agent.<agent-id>"), dispatching each incoming message to
// every registered handler func.
func (h *natsHandler) Start(handlerFunc boshhandler.Func) error {
	h.RegisterAdditionalFunc(handlerFunc)

	connProvider, err := h.getConnectionInfo()
	if err != nil {
		return bosherr.WrapError(err, "Getting connection info")
	}

	err = h.client.Connect(connProvider)
	if err != nil {
		return bosherr.WrapError(err, "Connecting")
	}

	settings := h.settingsService.GetSettings()

	subject := fmt.Sprintf("agent.%s", settings.AgentID)

	h.logger.Info(h.logTag, "Subscribing to %s", subject)

	_, err = h.client.Subscribe(subject, func(natsMsg *yagnats.Message) {
		// h.handlerFuncs is read at message-delivery time, so funcs
		// registered after Start are also invoked.
		// NOTE(review): this read is not visibly synchronized with
		// RegisterAdditionalFunc — confirm thread-safety elsewhere.
		for _, handlerFunc := range h.handlerFuncs {
			h.handleNatsMsg(natsMsg, handlerFunc)
		}
	})
	if err != nil {
		return bosherr.WrapErrorf(err, "Subscribing to %s", subject)
	}

	return nil
}
// partitionEphemeralDisk splits the ephemeral disk at realPath into a swap
// partition followed by a linux (data) partition, returning the two
// partition device paths.
func (p linux) partitionEphemeralDisk(realPath string) (string, string, error) {
	p.logger.Info(logTag, "Creating swap & ephemeral partitions on ephemeral disk...")
	p.logger.Debug(logTag, "Getting device size of `%s'", realPath)

	diskSizeInBytes, err := p.diskManager.GetPartitioner().GetDeviceSizeInBytes(realPath)
	if err != nil {
		return "", "", bosherr.WrapError(err, "Getting device size")
	}

	p.logger.Debug(logTag, "Calculating ephemeral disk partition sizes of `%s' with total disk size %dB", realPath, diskSizeInBytes)
	swapSizeInBytes, linuxSizeInBytes, err := p.calculateEphemeralDiskPartitionSizes(diskSizeInBytes)
	if err != nil {
		return "", "", bosherr.WrapError(err, "Calculating partition sizes")
	}

	// Swap goes first, data (linux) partition second.
	partitions := []boshdisk.Partition{
		{SizeInBytes: swapSizeInBytes, Type: boshdisk.PartitionTypeSwap},
		{SizeInBytes: linuxSizeInBytes, Type: boshdisk.PartitionTypeLinux},
	}

	p.logger.Info(logTag, "Partitioning ephemeral disk `%s' with %s", realPath, partitions)
	err = p.diskManager.GetPartitioner().Partition(realPath, partitions)
	if err != nil {
		return "", "", bosherr.WrapErrorf(err, "Partitioning ephemeral disk `%s'", realPath)
	}

	// Partition paths are formed by appending the partition number.
	// NOTE(review): this assumes /dev/sdX-style naming; devices such as NVMe
	// (/dev/nvme0n1p1) use a "p" separator — confirm which devices are supported.
	swapPartitionPath := realPath + "1"
	dataPartitionPath := realPath + "2"
	return swapPartitionPath, dataPartitionPath, nil
}
func (r defaultNetworkResolver) GetDefaultNetwork() (boshsettings.Network, error) { network := boshsettings.Network{} routes, err := r.routesSearcher.SearchRoutes() if err != nil { return network, bosherr.WrapError(err, "Searching routes") } if len(routes) == 0 { return network, bosherr.Error("No routes found") } for _, route := range routes { if !route.IsDefault() { continue } ip, err := r.ipResolver.GetPrimaryIPv4(route.InterfaceName) if err != nil { return network, bosherr.WrapErrorf(err, "Getting primary IPv4 for interface '%s'", route.InterfaceName) } return boshsettings.Network{ IP: ip.IP.String(), Netmask: gonet.IP(ip.Mask).String(), Gateway: route.Gateway, }, nil } return network, bosherr.Error("Failed to find default route") }
func (net centosNetManager) writeIfcfgFile(name string, t *template.Template, config interface{}) (bool, error) { buffer := bytes.NewBuffer([]byte{}) err := t.Execute(buffer, config) if err != nil { return false, bosherr.WrapErrorf(err, "Generating '%s' config from template", name) } filePath := ifcfgFilePath(name) changed, err := net.fs.ConvergeFileContents(filePath, buffer.Bytes()) if err != nil { return false, bosherr.WrapErrorf(err, "Writing config to '%s'", filePath) } return changed, nil }
func (cdrom LinuxCdrom) Eject() (err error) { _, stderr, _, err := cdrom.runner.RunCommand("eject", "-v", cdrom.devicePath) if err != nil { err = bosherr.WrapErrorf(err, "Ejecting CDROM: %s", stderr) } return }
func (cdrom LinuxCdrom) Unmount() (err error) { _, stderr, _, err := cdrom.runner.RunCommand("umount", cdrom.devicePath) if err != nil { err = bosherr.WrapErrorf(err, "Unmounting CDROM: %s", stderr) } return }
// Apply transitions the VM from the current apply spec to the desired one:
// it removes all supervised jobs, applies desired jobs and packages, prunes
// anything not needed by either spec, configures jobs under the supervisor,
// reloads the supervisor, and finally sets up logrotate.
func (a *concreteApplier) Apply(currentApplySpec, desiredApplySpec as.ApplySpec) error {
	err := a.jobSupervisor.RemoveAllJobs()
	if err != nil {
		return bosherr.WrapError(err, "Removing all jobs")
	}

	jobs := desiredApplySpec.Jobs()
	for _, job := range jobs {
		err = a.jobApplier.Apply(job)
		if err != nil {
			return bosherr.WrapErrorf(err, "Applying job %s", job.Name)
		}
	}

	// Retain jobs from BOTH specs so nothing in use during the transition is
	// deleted prematurely.
	err = a.jobApplier.KeepOnly(append(currentApplySpec.Jobs(), desiredApplySpec.Jobs()...))
	if err != nil {
		return bosherr.WrapError(err, "Keeping only needed jobs")
	}

	for _, pkg := range desiredApplySpec.Packages() {
		err = a.packageApplier.Apply(pkg)
		if err != nil {
			return bosherr.WrapErrorf(err, "Applying package %s", pkg.Name)
		}
	}

	// Same both-specs retention rule for packages as for jobs above.
	err = a.packageApplier.KeepOnly(append(currentApplySpec.Packages(), desiredApplySpec.Packages()...))
	if err != nil {
		return bosherr.WrapError(err, "Keeping only needed packages")
	}

	// Configure jobs in reverse spec order while passing an ascending index
	// (last job gets index 0).
	for i := 0; i < len(jobs); i++ {
		job := jobs[len(jobs)-1-i]
		err = a.jobApplier.Configure(job, i)
		if err != nil {
			return bosherr.WrapErrorf(err, "Configuring job %s", job.Name)
		}
	}

	// Reload so the supervisor picks up the new job configuration.
	err = a.jobSupervisor.Reload()
	if err != nil {
		return bosherr.WrapError(err, "Reloading jobSupervisor")
	}

	return a.setUpLogrotate(desiredApplySpec)
}
// New returns a new logger (that writes to the specified file) & the open file handle // All log levels >= the specified level are written to the specified file. // User is responsible for closing the returned file handle, unless an error is returned. func New(level boshlog.LogLevel, filePath string, fileMode os.FileMode, fs boshsys.FileSystem) (boshlog.Logger, boshsys.File, error) { file, err := fs.OpenFile(filePath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, fileMode) if err != nil { return nil, file, bosherr.WrapErrorf(err, "Failed to open log file '%s'", filePath) } return boshlog.NewWriterLogger(level, file, file), file, nil }
// applyPackages keeps job specific packages directory up-to-date with installed packages. // (e.g. /var/vcap/jobs/job-a/packages/pkg-a has symlinks to /var/vcap/packages/pkg-a) func (s *renderedJobApplier) applyPackages(job models.Job) error { packageApplier := s.packageApplierProvider.JobSpecific(job.Name) for _, pkg := range job.Packages { err := packageApplier.Apply(pkg) if err != nil { return bosherr.WrapErrorf(err, "Applying package %s for job %s", pkg.Name, job.Name) } } err := packageApplier.KeepOnly(job.Packages) if err != nil { return bosherr.WrapErrorf(err, "Keeping only needed packages for job %s", job.Name) } return nil }
func (p linux) GetHostPublicKey() (string, error) { hostPublicKeyPath := "/etc/ssh/ssh_host_rsa_key.pub" hostPublicKey, err := p.fs.ReadFileString(hostPublicKeyPath) if err != nil { return "", bosherr.WrapErrorf(err, "Unable to read host public key file: %s", hostPublicKeyPath) } return hostPublicKey, nil }
func (b externalBlobstore) run(method, src, dst string) (err error) { _, _, _, err = b.runner.RunCommand(b.executable(), "-c", b.configFilePath, method, src, dst) if err != nil { return bosherr.WrapErrorf(err, "Shelling out to %s cli", b.executable()) } return nil }
func NetworkInterfaceToAddrsFunc(interfaceName string) ([]gonet.Addr, error) { iface, err := gonet.InterfaceByName(interfaceName) if err != nil { return []gonet.Addr{}, bosherr.WrapErrorf(err, "Searching for '%s' interface", interfaceName) } return iface.Addrs() }
func (a *concreteApplier) Prepare(desiredApplySpec as.ApplySpec) error { for _, job := range desiredApplySpec.Jobs() { err := a.jobApplier.Prepare(job) if err != nil { return bosherr.WrapErrorf(err, "Preparing job %s", job.Name) } } for _, pkg := range desiredApplySpec.Packages() { err := a.packageApplier.Prepare(pkg) if err != nil { return bosherr.WrapErrorf(err, "Preparing package %s", pkg.Name) } } return nil }
func (c concreteCompiler) fetchAndUncompress(pkg Package, targetDir string) error { // Do not verify integrity of the download via SHA1 // because Director might have stored non-matching SHA1. // This will be fixed in future by explicitly asking to verify SHA1 // instead of doing that by default like all other downloads. // (Ruby agent mistakenly never checked SHA1.) depFilePath, err := c.blobstore.Get(pkg.BlobstoreID, "") if err != nil { return bosherr.WrapErrorf(err, "Fetching package blob %s", pkg.BlobstoreID) } err = c.atomicDecompress(depFilePath, targetDir) if err != nil { return bosherr.WrapErrorf(err, "Uncompressing package %s", pkg.Name) } return nil }
func (fs *osFileSystem) ReadFile(path string) (content []byte, err error) { fs.logger.Debug(fs.logTag, "Reading file %s", path) file, err := os.Open(path) if err != nil { err = bosherr.WrapErrorf(err, "Opening file %s", path) return } defer file.Close() bytes, err := ioutil.ReadAll(file) if err != nil { err = bosherr.WrapErrorf(err, "Reading file content %s", path) return } content = bytes fs.logger.DebugWithDetails(fs.logTag, "Read content", content) return }