func (plugin *gcePersistentDiskPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.Builder, error) {
	if plugin.legacyMode {
		// Legacy mode instances can be cleaned up but not created anew.
		return nil, fmt.Errorf("legacy mode: can not create new instances")
	}

	var gce *api.GCEPersistentDiskVolumeSource
	if spec.VolumeSource.GCEPersistentDisk != nil {
		gce = spec.VolumeSource.GCEPersistentDisk
	} else {
		gce = spec.PersistentVolumeSource.GCEPersistentDisk
	}

	pdName := gce.PDName
	fsType := gce.FSType
	partition := ""
	if gce.Partition != 0 {
		partition = strconv.Itoa(gce.Partition)
	}
	readOnly := gce.ReadOnly

	return &gcePersistentDisk{
		podUID:      podUID,
		volName:     spec.Name,
		pdName:      pdName,
		fsType:      fsType,
		partition:   partition,
		readOnly:    readOnly,
		manager:     manager,
		mounter:     mounter,
		diskMounter: &gceSafeFormatAndMount{mounter, exec.New()},
		plugin:      plugin,
		legacyMode:  false,
	}, nil
}
func (plugin *awsElasticBlockStorePlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.Builder, error) {
	var ebs *api.AWSElasticBlockStoreVolumeSource
	if spec.VolumeSource.AWSElasticBlockStore != nil {
		ebs = spec.VolumeSource.AWSElasticBlockStore
	} else {
		ebs = spec.PersistentVolumeSource.AWSElasticBlockStore
	}

	volumeID := ebs.VolumeID
	fsType := ebs.FSType
	partition := ""
	if ebs.Partition != 0 {
		partition = strconv.Itoa(ebs.Partition)
	}
	readOnly := ebs.ReadOnly

	return &awsElasticBlockStore{
		podUID:      podUID,
		volName:     spec.Name,
		volumeID:    volumeID,
		fsType:      fsType,
		partition:   partition,
		readOnly:    readOnly,
		manager:     manager,
		mounter:     mounter,
		diskMounter: &awsSafeFormatAndMount{mounter, exec.New()},
		plugin:      plugin,
	}, nil
}
func (plugin *awsElasticBlockStorePlugin) newCleanerInternal(volName string, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.Cleaner, error) {
	return &awsElasticBlockStore{
		podUID:      podUID,
		volName:     volName,
		manager:     manager,
		mounter:     mounter,
		diskMounter: &awsSafeFormatAndMount{mounter, exec.New()},
		plugin:      plugin,
	}, nil
}
func (plugin *execNetworkPlugin) Init(host network.Host) error {
	err := plugin.validate()
	if err != nil {
		return err
	}
	plugin.host = host

	// Call the init script.
	out, err := utilexec.New().Command(plugin.getExecutable(), initCmd).CombinedOutput()
	glog.V(5).Infof("Init 'exec' network plugin output: %s, %v", string(out), err)
	return err
}
func (plugin *glusterfsPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions, mounter mount.Interface) (volume.Builder, error) {
	source := plugin.getGlusterVolumeSource(spec)
	epName := source.EndpointsName
	ns := pod.Namespace
	ep, err := plugin.host.GetQingClient().Endpoints(ns).Get(epName)
	if err != nil {
		glog.Errorf("Glusterfs: failed to get endpoints %s: %v", epName, err)
		return nil, err
	}
	glog.V(1).Infof("Glusterfs: endpoints %v", ep)
	return plugin.newBuilderInternal(spec, ep, pod, mounter, exec.New())
}
// doNsenterMount nsenters the host's mount namespace and performs the
// requested mount.
func doNsenterMount(source, target, fstype string, options []string) error {
	glog.V(5).Infof("nsenter Mounting %s %s %s %v", source, target, fstype, options)
	args := makeNsenterArgs(source, target, fstype, options)

	glog.V(5).Infof("Mount command: %v %v", nsenterPath, args)
	exec := exec.New()
	outputBytes, err := exec.Command(nsenterPath, args...).CombinedOutput()
	if len(outputBytes) != 0 {
		glog.V(5).Infof("Output from mount command: %v", string(outputBytes))
	}

	return err
}
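// NOTE: makeNsenterArgs is referenced above but not included in this section.
// The sketch below is a hypothetical reconstruction, assuming the same
// --mount=/rootfs/proc/1/ns/mnt convention used by Unmount and IsMountPoint
// below; the real helper may assemble its arguments differently. It assumes
// the "strings" package is imported.
func makeNsenterArgs(source, target, fstype string, options []string) []string {
	// Enter the host's mount namespace via PID 1, then run mount(8) there.
	args := []string{
		"--mount=/rootfs/proc/1/ns/mnt",
		"/usr/bin/mount",
		"-t", fstype,
	}
	// mount(8) takes comma-separated options after a single -o flag.
	if len(options) > 0 {
		args = append(args, "-o", strings.Join(options, ","))
	}
	return append(args, source, target)
}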
func (plugin *gcePersistentDiskPlugin) newCleanerInternal(volName string, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.Cleaner, error) {
	return &gcePersistentDisk{
		podUID:      podUID,
		volName:     volName,
		manager:     manager,
		mounter:     mounter,
		diskMounter: &gceSafeFormatAndMount{mounter, exec.New()},
		plugin:      plugin,
		legacyMode:  plugin.legacyMode,
	}, nil
}
// Unmount runs umount(8) in the host's mount namespace.
func (*NsenterMounter) Unmount(target string) error {
	args := []string{
		"--mount=/rootfs/proc/1/ns/mnt",
		"/usr/bin/umount",
		target,
	}

	glog.V(5).Infof("Unmount command: %v %v", nsenterPath, args)
	exec := exec.New()
	outputBytes, err := exec.Command(nsenterPath, args...).CombinedOutput()
	if len(outputBytes) != 0 {
		glog.V(5).Infof("Output from umount command: %v", string(outputBytes))
	}

	return err
}
func (plugin *gitRepoPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions, mounter mount.Interface) (volume.Builder, error) {
	if plugin.legacyMode {
		// Legacy mode instances can be cleaned up but not created anew.
		return nil, fmt.Errorf("legacy mode: can not create new instances")
	}
	return &gitRepo{
		pod:        *pod,
		volName:    spec.Name,
		source:     spec.VolumeSource.GitRepo.Repository,
		revision:   spec.VolumeSource.GitRepo.Revision,
		exec:       exec.New(),
		plugin:     plugin,
		legacyMode: false,
		opts:       opts,
		mounter:    mounter,
	}, nil
}
// IsMountPoint determines whether a path is a mountpoint by calling findmnt
// in the host's root mount namespace.
func (*NsenterMounter) IsMountPoint(file string) (bool, error) {
	file, err := filepath.Abs(file)
	if err != nil {
		return false, err
	}

	args := []string{"--mount=/rootfs/proc/1/ns/mnt", "/usr/bin/findmnt", "-o", "target", "--noheadings", "--target", file}
	glog.V(5).Infof("findmnt command: %v %v", nsenterPath, args)

	exec := exec.New()
	out, err := exec.Command(nsenterPath, args...).CombinedOutput()
	if err != nil {
		// If findmnt didn't run, just claim it's not a mount point.
		return false, nil
	}

	strOut := strings.TrimSuffix(string(out), "\n")
	glog.V(5).Infof("IsMountPoint findmnt output: %v", strOut)

	// findmnt prints the mountpoint that contains the target; the path is a
	// mountpoint only if findmnt resolves it to itself.
	return strOut == file, nil
}
// ProbeVolumePlugins is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
	return []volume.VolumePlugin{&ISCSIPlugin{nil, exec.New()}}
}
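// A host binary would typically collect the probed plugins and hand them to
// a plugin manager, roughly as sketched below. The VolumePluginMgr type and
// the InitPlugins signature are assumptions about the surrounding plugin
// framework, not confirmed by this section:
//
//	mgr := volume.VolumePluginMgr{}
//	if err := mgr.InitPlugins(ProbeVolumePlugins(), host); err != nil {
//		glog.Errorf("failed to initialize volume plugins: %v", err)
//	}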
func (plugin *execNetworkPlugin) TearDownPod(namespace string, name string, id qingletTypes.DockerID) error {
	out, err := utilexec.New().Command(plugin.getExecutable(), tearDownCmd, namespace, name, string(id)).CombinedOutput()
	glog.V(5).Infof("TearDownPod 'exec' network plugin output: %s, %v", string(out), err)
	return err
}
// Run runs the specified ProxyServer. This should never exit.
func (s *ProxyServer) Run(_ []string) error {
	// TODO(vmarmol): Use container config for this.
	if err := util.ApplyOomScoreAdj(0, s.OOMScoreAdj); err != nil {
		glog.V(2).Info(err)
	}

	// Run in its own container.
	if err := util.RunInResourceContainer(s.ResourceContainer); err != nil {
		glog.Warningf("Failed to start in resource-only container %q: %v", s.ResourceContainer, err)
	} else {
		glog.V(2).Infof("Running in resource-only container %q", s.ResourceContainer)
	}

	serviceConfig := config.NewServiceConfig()
	endpointsConfig := config.NewEndpointsConfig()

	protocol := iptables.ProtocolIpv4
	if net.IP(s.BindAddress).To4() == nil {
		protocol = iptables.ProtocolIpv6
	}
	loadBalancer := proxy.NewLoadBalancerRR()
	proxier, err := proxy.NewProxier(loadBalancer, net.IP(s.BindAddress), iptables.New(exec.New(), protocol), s.PortRange)
	if err != nil {
		glog.Fatalf("Unable to create proxier: %v", err)
	}

	// Wire proxier to handle changes to services.
	serviceConfig.RegisterHandler(proxier)
	// And wire loadBalancer to handle changes to endpoints to services.
	endpointsConfig.RegisterHandler(loadBalancer)

	// Note: RegisterHandler() calls need to happen before creation of Sources because sources
	// only notify on changes, and the initial update (on process start) may be lost if no handlers
	// are registered yet.

	// Define the API config source.
	if s.Qingconfig == "" && s.Master == "" {
		glog.Warningf("Neither --qingconfig nor --master was specified. Using default API client. This might not work.")
	}

	// This creates a client, first loading any specified qingconfig
	// file, and then overriding the Master flag, if non-empty.
	qingconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Qingconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
	if err != nil {
		return err
	}

	client, err := client.New(qingconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	config.NewSourceAPI(
		client.Services(api.NamespaceAll),
		client.Endpoints(api.NamespaceAll),
		30*time.Second,
		serviceConfig.Channel("api"),
		endpointsConfig.Channel("api"),
	)

	if s.HealthzPort > 0 {
		go util.Forever(func() {
			err := http.ListenAndServe(s.HealthzBindAddress.String()+":"+strconv.Itoa(s.HealthzPort), nil)
			if err != nil {
				glog.Errorf("Starting health server failed: %v", err)
			}
		}, 5*time.Second)
	}

	// Just loop forever for now...
	proxier.SyncLoop()
	return nil
}