Code example #1
File: fc.go Project: qinguoan/vulcan
func (plugin *fcPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Builder, error) {
	// fc volumes used directly in a pod have a ReadOnly flag set by the pod author.
	// fc volumes used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
	var readOnly bool
	var fc *api.FCVolumeSource
	if spec.Volume != nil && spec.Volume.FC != nil {
		fc = spec.Volume.FC
		readOnly = fc.ReadOnly
	} else {
		fc = spec.PersistentVolume.Spec.FC
		readOnly = spec.ReadOnly
	}

	if fc.Lun == nil {
		return nil, fmt.Errorf("empty lun")
	}

	lun := strconv.Itoa(*fc.Lun)

	return &fcDiskBuilder{
		fcDisk: &fcDisk{
			podUID:  podUID,
			volName: spec.Name(),
			wwns:    fc.TargetWWNs,
			lun:     lun,
			manager: manager,
			mounter: &mount.SafeFormatAndMount{mounter, exec.New()},
			io:      &osIOHandler{},
			plugin:  plugin},
		fsType:   fc.FSType,
		readOnly: readOnly,
	}, nil
}
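The fc.Lun field is an optional pointer in the API type, so the builder nil-checks it before dereferencing and formatting it. A minimal, self-contained sketch of that optional-pointer pattern (the type and names here are hypothetical stand-ins):

package main

import (
	"fmt"
	"strconv"
)

// fcSource stands in for the API type; a nil Lun means "not set".
type fcSource struct {
	Lun *int
}

// lunString mirrors the nil-check-then-format step in the builder above.
func lunString(src *fcSource) (string, error) {
	if src.Lun == nil {
		return "", fmt.Errorf("empty lun")
	}
	return strconv.Itoa(*src.Lun), nil
}

func main() {
	lun := 3
	s, err := lunString(&fcSource{Lun: &lun})
	fmt.Println(s, err) // 3 <nil>
}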
Code example #2
File: proxier.go Project: qinguoan/vulcan
// ShouldUseIptablesProxier returns true if we should use the iptables Proxier
// instead of the "classic" userspace Proxier.  This is determined by checking
// the iptables version and for the existence of kernel features. It may return
// an error if it fails to get the iptables version, in which case it will
// also return false.
func ShouldUseIptablesProxier() (bool, error) {
	exec := utilexec.New()
	minVersion, err := semver.NewVersion(iptablesMinVersion)
	if err != nil {
		return false, err
	}
	// returns "X.X.X", err
	versionString, err := utiliptables.GetIptablesVersionString(exec)
	if err != nil {
		return false, err
	}
	version, err := semver.NewVersion(versionString)
	if err != nil {
		return false, err
	}
	if version.LessThan(*minVersion) {
		return false, nil
	}

	// Check for the required sysctls.  We don't care about the value, just
	// that it exists.  If this Proxier is chosen, we'll initialize it as we
	// need.
	_, err = utilsysctl.GetSysctl(sysctlRouteLocalnet)
	if err != nil {
		return false, err
	}

	return true, nil
}
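The gate above delegates parsing to a semver library. As a dependency-free illustration, this sketch parses plain "X.Y.Z" strings and compares them numerically; unlike real semver it deliberately ignores pre-release and build suffixes:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseVersion splits "X.Y.Z" into a comparable numeric triple.
func parseVersion(s string) ([3]int, error) {
	var v [3]int
	parts := strings.SplitN(strings.TrimSpace(s), ".", 3)
	if len(parts) != 3 {
		return v, fmt.Errorf("malformed version %q", s)
	}
	for i, p := range parts {
		n, err := strconv.Atoi(p)
		if err != nil {
			return v, err
		}
		v[i] = n
	}
	return v, nil
}

// lessThan compares triples lexicographically, most significant part first.
func lessThan(a, b [3]int) bool {
	for i := 0; i < 3; i++ {
		if a[i] != b[i] {
			return a[i] < b[i]
		}
	}
	return false
}

func main() {
	min, _ := parseVersion("1.4.11")
	got, _ := parseVersion("1.4.7")
	fmt.Println(lessThan(got, min)) // true: this iptables would be too old
}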
Code example #3
File: gce_util.go Project: qinguoan/vulcan
// Calls "udevadm trigger --action=change" on the specified drive.
// drivePath must be the block device path to trigger on, in the format "/dev/sd*", or a symlink to it.
// This is a workaround for Issue #7972. Once the underlying issue has been resolved, this may be removed.
func udevadmChangeToDrive(drivePath string) error {
	glog.V(5).Infof("udevadmChangeToDrive: drive=%q", drivePath)

	// Evaluate symlink, if any
	drive, err := filepath.EvalSymlinks(drivePath)
	if err != nil {
		return fmt.Errorf("udevadmChangeToDrive: filepath.EvalSymlinks(%q) failed with %v.", drivePath, err)
	}
	glog.V(5).Infof("udevadmChangeToDrive: symlink path is %q", drive)

	// Check to make sure input is "/dev/sd*"
	if !strings.Contains(drive, diskSDPath) {
		return fmt.Errorf("udevadmChangeToDrive: expected input in the form \"%s\" but drive is %q.", diskSDPattern, drive)
	}

	// Call "udevadm trigger --action=change --property-match=DEVNAME=/dev/sd..."
	_, err = exec.New().Command(
		"udevadm",
		"trigger",
		"--action=change",
		fmt.Sprintf("--property-match=DEVNAME=%s", drive)).CombinedOutput()
	if err != nil {
		return fmt.Errorf("udevadmChangeToDrive: udevadm trigger failed for drive %q with %v.", drive, err)
	}
	return nil
}
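Outside of Kubernetes' exec wrapper, the same invocation reduces to a few lines with the standard library's os/exec. A sketch, assuming udevadm is on PATH:

package main

import (
	"fmt"
	"os/exec"
)

// triggerChange asks udev to re-run its rules for one block device.
func triggerChange(device string) error {
	out, err := exec.Command("udevadm", "trigger", "--action=change",
		fmt.Sprintf("--property-match=DEVNAME=%s", device)).CombinedOutput()
	if err != nil {
		return fmt.Errorf("udevadm trigger failed for %q: %v (output: %q)", device, err, out)
	}
	return nil
}

func main() {
	fmt.Println(triggerChange("/dev/sdb"))
}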
Code example #4
File: exec.go Project: qinguoan/vulcan
func (plugin *execNetworkPlugin) Status(namespace string, name string, id kubeletTypes.DockerID) (*network.PodNetworkStatus, error) {
	out, err := utilexec.New().Command(plugin.getExecutable(), statusCmd, namespace, name, string(id)).CombinedOutput()
	glog.V(5).Infof("Status 'exec' network plugin output: %s, %v", string(out), err)
	if err != nil {
		return nil, err
	}
	if string(out) == "" {
		return nil, nil
	}
	findVersion := struct {
		unversioned.TypeMeta `json:",inline"`
	}{}
	err = json.Unmarshal(out, &findVersion)
	if err != nil {
		return nil, err
	}

	// check kind and version
	if findVersion.Kind != "" && findVersion.Kind != "PodNetworkStatus" {
		errStr := fmt.Sprintf("Invalid 'kind' returned in network status for pod '%s'. Valid value is 'PodNetworkStatus', got '%s'.", name, findVersion.Kind)
		return nil, errors.New(errStr)
	}
	switch findVersion.APIVersion {
	case "":
		fallthrough
	case "v1beta1":
		networkStatus := &network.PodNetworkStatus{}
		err = json.Unmarshal(out, networkStatus)
		return networkStatus, err
	}
	errStr := fmt.Sprintf("Unknown version '%s' in network status for pod '%s'.", findVersion.APIVersion, name)
	return nil, errors.New(errStr)
}
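Status decodes the plugin's output in two passes: first only the type metadata, to validate kind and apiVersion, then the full payload once the version is known. A self-contained sketch of that pattern with simplified, hypothetical types:

package main

import (
	"encoding/json"
	"fmt"
)

// typeMeta carries just enough to identify the payload.
type typeMeta struct {
	Kind       string `json:"kind"`
	APIVersion string `json:"apiVersion"`
}

// podNetworkStatus is a stand-in for the real status type.
type podNetworkStatus struct {
	IP string `json:"ip"`
}

func decodeStatus(raw []byte) (*podNetworkStatus, error) {
	// Pass 1: peek at the type metadata only.
	var tm typeMeta
	if err := json.Unmarshal(raw, &tm); err != nil {
		return nil, err
	}
	if tm.Kind != "" && tm.Kind != "PodNetworkStatus" {
		return nil, fmt.Errorf("invalid kind %q", tm.Kind)
	}
	// Pass 2: decode the full object.
	status := &podNetworkStatus{}
	err := json.Unmarshal(raw, status)
	return status, err
}

func main() {
	st, err := decodeStatus([]byte(`{"kind":"PodNetworkStatus","apiVersion":"v1beta1","ip":"10.1.2.3"}`))
	fmt.Println(st.IP, err) // 10.1.2.3 <nil>
}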
Code example #5
File: rbd.go Project: qinguoan/vulcan
func (plugin *rbdPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, secret string) (volume.Builder, error) {
	source, readOnly := plugin.getRBDVolumeSource(spec)
	pool := source.RBDPool
	if pool == "" {
		pool = "rbd"
	}
	id := source.RadosUser
	if id == "" {
		id = "admin"
	}
	keyring := source.Keyring
	if keyring == "" {
		keyring = "/etc/ceph/keyring"
	}

	return &rbdBuilder{
		rbd: &rbd{
			podUID:   podUID,
			volName:  spec.Name(),
			Image:    source.RBDImage,
			Pool:     pool,
			ReadOnly: readOnly,
			manager:  manager,
			mounter:  &mount.SafeFormatAndMount{mounter, exec.New()},
			plugin:   plugin,
		},
		Mon:     source.CephMonitors,
		Id:      id,
		Keyring: keyring,
		Secret:  secret,
		fsType:  source.FSType,
	}, nil
}
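The three if-empty blocks are the usual Go idiom for defaulting optional string fields; a hypothetical helper that factors out the repetition (pool := withDefault(source.RBDPool, "rbd"), and so on):

package main

import "fmt"

// withDefault returns def when val is empty.
func withDefault(val, def string) string {
	if val == "" {
		return def
	}
	return val
}

func main() {
	fmt.Println(withDefault("", "rbd"))       // rbd
	fmt.Println(withDefault("mypool", "rbd")) // mypool
}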
Code example #6
File: gce_pd.go Project: qinguoan/vulcan
func (plugin *gcePersistentDiskPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.Builder, error) {
	// GCEPDs used directly in a pod have a ReadOnly flag set by the pod author.
	// GCEPDs used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
	var readOnly bool

	var gce *api.GCEPersistentDiskVolumeSource
	if spec.Volume != nil && spec.Volume.GCEPersistentDisk != nil {
		gce = spec.Volume.GCEPersistentDisk
		readOnly = gce.ReadOnly
	} else {
		gce = spec.PersistentVolume.Spec.GCEPersistentDisk
		readOnly = spec.ReadOnly
	}

	pdName := gce.PDName
	fsType := gce.FSType
	partition := ""
	if gce.Partition != 0 {
		partition = strconv.Itoa(gce.Partition)
	}

	return &gcePersistentDiskBuilder{
		gcePersistentDisk: &gcePersistentDisk{
			podUID:    podUID,
			volName:   spec.Name(),
			pdName:    pdName,
			partition: partition,
			mounter:   mounter,
			manager:   manager,
			plugin:    plugin,
		},
		fsType:      fsType,
		readOnly:    readOnly,
		diskMounter: &mount.SafeFormatAndMount{mounter, exec.New()}}, nil
}
Code example #7
File: aws_ebs.go Project: qinguoan/vulcan
func (plugin *awsElasticBlockStorePlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager ebsManager, mounter mount.Interface) (volume.Builder, error) {
	// EBSs used directly in a pod have a ReadOnly flag set by the pod author.
	// EBSs used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
	var readOnly bool
	var ebs *api.AWSElasticBlockStoreVolumeSource
	if spec.Volume != nil && spec.Volume.AWSElasticBlockStore != nil {
		ebs = spec.Volume.AWSElasticBlockStore
		readOnly = ebs.ReadOnly
	} else {
		ebs = spec.PersistentVolume.Spec.AWSElasticBlockStore
		readOnly = spec.ReadOnly
	}

	volumeID := ebs.VolumeID
	fsType := ebs.FSType
	partition := ""
	if ebs.Partition != 0 {
		partition = strconv.Itoa(ebs.Partition)
	}

	return &awsElasticBlockStoreBuilder{
		awsElasticBlockStore: &awsElasticBlockStore{
			podUID:   podUID,
			volName:  spec.Name(),
			volumeID: volumeID,
			manager:  manager,
			mounter:  mounter,
			plugin:   plugin,
		},
		fsType:      fsType,
		partition:   partition,
		readOnly:    readOnly,
		diskMounter: &mount.SafeFormatAndMount{plugin.host.GetMounter(), exec.New()}}, nil
}
Code example #8
File: cinder.go Project: qinguoan/vulcan
func (plugin *cinderPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.Builder, error) {
	var cinder *api.CinderVolumeSource
	if spec.Volume != nil && spec.Volume.Cinder != nil {
		cinder = spec.Volume.Cinder
	} else {
		cinder = spec.PersistentVolume.Spec.Cinder
	}

	pdName := cinder.VolumeID
	fsType := cinder.FSType
	readOnly := cinder.ReadOnly

	return &cinderVolumeBuilder{
		cinderVolume: &cinderVolume{
			podUID:  podUID,
			volName: spec.Name(),
			pdName:  pdName,
			mounter: mounter,
			manager: manager,
			plugin:  plugin,
		},
		fsType:             fsType,
		readOnly:           readOnly,
		blockDeviceMounter: &cinderSafeFormatAndMount{mounter, exec.New()}}, nil
}
Code example #9
File: nsenter_mount.go Project: qinguoan/vulcan
// IsLikelyNotMountPoint determines whether a path is a mountpoint by calling findmnt
// in the host's root mount namespace.
func (n *NsenterMounter) IsLikelyNotMountPoint(file string) (bool, error) {
	file, err := filepath.Abs(file)
	if err != nil {
		return true, err
	}

	args := []string{"--mount=/rootfs/proc/1/ns/mnt", "--", n.absHostPath("findmnt"), "-o", "target", "--noheadings", "--target", file}
	glog.V(5).Infof("findmnt command: %v %v", nsenterPath, args)

	exec := exec.New()
	out, err := exec.Command(nsenterPath, args...).CombinedOutput()
	if err != nil {
		// Assuming the command itself is correct, an error here most likely
		// means that the directory does not exist.
		return true, os.ErrNotExist
	}
	strOut := strings.TrimSuffix(string(out), "\n")

	glog.V(5).Infof("IsLikelyNotMountPoint findmnt output: %v", strOut)
	if strOut == file {
		return false, nil
	}

	return true, nil
}
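Stripped of the nsenter indirection, the check boils down to: resolve the path, ask findmnt for the mount target covering it, and compare the two. A sketch using os/exec directly, assuming findmnt is on PATH:

package main

import (
	"fmt"
	"os/exec"
	"path/filepath"
	"strings"
)

// isLikelyNotMountPoint reports whether p does NOT appear to be a mount
// point: findmnt --target prints the nearest enclosing mount target, which
// equals the path itself only for a mount point.
func isLikelyNotMountPoint(p string) (bool, error) {
	p, err := filepath.Abs(p)
	if err != nil {
		return true, err
	}
	out, err := exec.Command("findmnt", "-o", "target", "--noheadings", "--target", p).CombinedOutput()
	if err != nil {
		return true, err
	}
	return strings.TrimSuffix(string(out), "\n") != p, nil
}

func main() {
	notMnt, err := isLikelyNotMountPoint("/")
	fmt.Println(notMnt, err) // "/" is always a mount point: false <nil>
}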
Code example #10
File: linux.go Project: qinguoan/vulcan
func NewTCShaper(iface string) BandwidthShaper {
	shaper := &tcShaper{
		e:     exec.New(),
		iface: iface,
	}
	return shaper
}
Code example #11
File: rbd_util.go Project: qinguoan/vulcan
// search /sys/bus for rbd device that matches given pool and image
func getDevFromImageAndPool(pool, image string) (string, bool) {
	// /sys/bus/rbd/devices/X/name and /sys/bus/rbd/devices/X/pool
	sys_path := "/sys/bus/rbd/devices"
	if dirs, err := ioutil.ReadDir(sys_path); err == nil {
		for _, f := range dirs {
			// pool and name format:
			// see rbd_pool_show() and rbd_name_show() at
			// https://github.com/torvalds/linux/blob/master/drivers/block/rbd.c
			name := f.Name()
			// first match pool, then match name
			po := path.Join(sys_path, name, "pool")
			img := path.Join(sys_path, name, "name")
			exe := exec.New()
			out, err := exe.Command("cat", po, img).CombinedOutput()
			if err != nil {
				continue
			}
			matched, err := regexp.MatchString("^"+pool+"\n"+image+"\n$", string(out))
			if err != nil || !matched {
				continue
			}
			// found a match, check if device exists
			devicePath := "/dev/rbd" + name
			if _, err := os.Lstat(devicePath); err == nil {
				return devicePath, true
			}
		}
	}
	return "", false
}
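Since pool and name are ordinary sysfs files, the same scan can read them directly instead of exec'ing cat and matching the concatenated output with an unescaped regexp (which would misbehave if pool or image contained regex metacharacters). A sketch assuming the same sysfs layout, where each file's value is newline-terminated:

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path"
)

// devFromImageAndPool scans /sys/bus/rbd/devices, reading each device's
// pool and name files and comparing them as plain strings.
func devFromImageAndPool(pool, image string) (string, bool) {
	sysPath := "/sys/bus/rbd/devices"
	dirs, err := ioutil.ReadDir(sysPath)
	if err != nil {
		return "", false
	}
	for _, f := range dirs {
		name := f.Name()
		po, err1 := ioutil.ReadFile(path.Join(sysPath, name, "pool"))
		img, err2 := ioutil.ReadFile(path.Join(sysPath, name, "name"))
		if err1 != nil || err2 != nil {
			continue
		}
		if string(po) != pool+"\n" || string(img) != image+"\n" {
			continue
		}
		// found a match, check if the device node exists
		devicePath := "/dev/rbd" + name
		if _, err := os.Lstat(devicePath); err == nil {
			return devicePath, true
		}
	}
	return "", false
}

func main() {
	fmt.Println(devFromImageAndPool("rbd", "myimage"))
}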
Code example #12
File: exec.go Project: qinguoan/vulcan
func (plugin *execNetworkPlugin) Init(host network.Host) error {
	err := plugin.validate()
	if err != nil {
		return err
	}
	plugin.host = host
	// call the init script
	out, err := utilexec.New().Command(plugin.getExecutable(), initCmd).CombinedOutput()
	glog.V(5).Infof("Init 'exec' network plugin output: %s, %v", string(out), err)
	return err
}
Code example #13
File: glusterfs.go Project: qinguoan/vulcan
func (plugin *glusterfsPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Builder, error) {
	source, _ := plugin.getGlusterVolumeSource(spec)
	ep_name := source.EndpointsName
	ns := pod.Namespace
	ep, err := plugin.host.GetKubeClient().Endpoints(ns).Get(ep_name)
	if err != nil {
		glog.Errorf("Glusterfs: failed to get endpoints %s[%v]", ep_name, err)
		return nil, err
	}
	glog.V(1).Infof("Glusterfs: endpoints %v", ep)
	return plugin.newBuilderInternal(spec, ep, pod, plugin.host.GetMounter(), exec.New())
}
Code example #14
File: cinder_util.go Project: qinguoan/vulcan
func probeAttachedVolume() error {
	executor := exec.New()
	args := []string{"trigger"}
	cmd := executor.Command("/usr/bin/udevadm", args...)
	_, err := cmd.CombinedOutput()
	if err != nil {
		glog.Errorf("error running udevadm trigger %v\n", err)
		return err
	}
	glog.V(4).Infof("Successfully probed all attachments")
	return nil
}
Code example #15
File: nsenter_mount.go Project: qinguoan/vulcan
// doNsenterMount nsenters the host's mount namespace and performs the
// requested mount.
func doNsenterMount(source, target, fstype string, options []string) error {
	glog.V(5).Infof("nsenter Mounting %s %s %s %v", source, target, fstype, options)
	args := makeNsenterArgs(source, target, fstype, options)

	glog.V(5).Infof("Mount command: %v %v", nsenterPath, args)
	exec := exec.New()
	outputBytes, err := exec.Command(nsenterPath, args...).CombinedOutput()
	if len(outputBytes) != 0 {
		glog.V(5).Infof("Output from mount command: %v", string(outputBytes))
	}

	return err
}
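The nsenter-based helpers here and in examples #9, #17, and #18 all share one mechanism: enter PID 1's mount namespace, exposed to the container at /rootfs/proc/1/ns/mnt, and run a host binary there. A generic sketch of that wrapper; the nsenter path and the /rootfs prefix are assumptions carried over from these examples:

package main

import (
	"fmt"
	"os/exec"
)

// runInHostMountNS executes cmd with args inside the host's mount
// namespace rather than the container's.
func runInHostMountNS(cmd string, args ...string) ([]byte, error) {
	full := append([]string{"--mount=/rootfs/proc/1/ns/mnt", "--", cmd}, args...)
	return exec.Command("/usr/bin/nsenter", full...).CombinedOutput()
}

func main() {
	out, err := runInHostMountNS("/usr/bin/findmnt", "--noheadings", "--target", "/")
	fmt.Printf("%s %v\n", out, err)
}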
Code example #16
File: git_repo.go Project: qinguoan/vulcan
func (plugin *gitRepoPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Builder, error) {
	return &gitRepoVolumeBuilder{
		gitRepoVolume: &gitRepoVolume{
			volName: spec.Name(),
			podUID:  pod.UID,
			plugin:  plugin,
		},
		pod:      *pod,
		source:   spec.Volume.GitRepo.Repository,
		revision: spec.Volume.GitRepo.Revision,
		exec:     exec.New(),
		opts:     opts,
	}, nil
}
Code example #17
File: nsenter_mount.go Project: qinguoan/vulcan
// Unmount runs umount(8) in the host's mount namespace.
func (*NsenterMounter) Unmount(target string) error {
	args := []string{
		"--mount=/rootfs/proc/1/ns/mnt",
		"/usr/bin/umount",
		target,
	}

	glog.V(5).Infof("Unmount command: %v %v", nsenterPath, args)
	exec := exec.New()
	outputBytes, err := exec.Command(nsenterPath, args...).CombinedOutput()
	if len(outputBytes) != 0 {
		glog.V(5).Infof("Output from mount command: %v", string(outputBytes))
	}

	return err
}
Code example #18
File: nsenter_mount.go Project: qinguoan/vulcan
// IsMountPoint determines whether a path is a mountpoint by calling findmnt
// in the host's root mount namespace.
func (*NsenterMounter) IsMountPoint(file string) (bool, error) {
	file, err := filepath.Abs(file)
	if err != nil {
		return false, err
	}

	args := []string{"--mount=/rootfs/proc/1/ns/mnt", "/usr/bin/findmnt", "-o", "target", "--noheadings", "--target", file}
	glog.V(5).Infof("findmnt command: %v %v", nsenterPath, args)

	exec := exec.New()
	out, err := exec.Command(nsenterPath, args...).CombinedOutput()
	if err != nil {
		// If findmnt didn't run, just claim it's not a mount point.
		return false, nil
	}
	strOut := strings.TrimSuffix(string(out), "\n")

	glog.V(5).Infof("IsMountPoint findmnt output: %v", strOut)
	if strOut == file {
		return true, nil
	}

	return false, nil
}
Code example #19
File: rbd.go Project: qinguoan/vulcan
// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
	return []volume.VolumePlugin{&rbdPlugin{nil, exec.New()}}
}
Code example #20
File: exec.go Project: qinguoan/vulcan
func (plugin *execNetworkPlugin) TearDownPod(namespace string, name string, id kubeletTypes.DockerID) error {
	out, err := utilexec.New().Command(plugin.getExecutable(), tearDownCmd, namespace, name, string(id)).CombinedOutput()
	glog.V(5).Infof("TearDownPod 'exec' network plugin output: %s, %v", string(out), err)
	return err
}
Code example #21
File: hairpin.go Project: qinguoan/vulcan
func SetUpContainer(containerPid int, containerInterfaceName string) error {
	e := exec.New()
	return setUpContainerInternal(e, containerPid, containerInterfaceName)
}
Code example #22
File: server.go Project: qinguoan/vulcan
// NewProxyServerDefault creates a new ProxyServer object with default parameters.
func NewProxyServerDefault(config *ProxyServerConfig) (*ProxyServer, error) {
	protocol := utiliptables.ProtocolIpv4
	if config.BindAddress.To4() == nil {
		protocol = utiliptables.ProtocolIpv6
	}

	// We omit creating pretty much everything if we run in cleanup mode
	if config.CleanupAndExit {
		execer := exec.New()
		dbus := utildbus.New()
		IptInterface := utiliptables.New(execer, dbus, protocol)
		return &ProxyServer{
			Config:       config,
			IptInterface: IptInterface,
		}, nil
	}

	// TODO(vmarmol): Use container config for this.
	var oomAdjuster *oom.OomAdjuster
	if config.OOMScoreAdj != 0 {
		oomAdjuster = oom.NewOomAdjuster()
		if err := oomAdjuster.ApplyOomScoreAdj(0, config.OOMScoreAdj); err != nil {
			glog.V(2).Info(err)
		}
	}

	if config.ResourceContainer != "" {
		// Run in its own container.
		if err := util.RunInResourceContainer(config.ResourceContainer); err != nil {
			glog.Warningf("Failed to start in resource-only container %q: %v", config.ResourceContainer, err)
		} else {
			glog.V(2).Infof("Running in resource-only container %q", config.ResourceContainer)
		}
	}

	// Create a Kube Client
	// define api config source
	if config.Kubeconfig == "" && config.Master == "" {
		glog.Warningf("Neither --kubeconfig nor --master was specified.  Using default API client.  This might not work.")
	}
	// This creates a client, first loading any specified kubeconfig
	// file, and then overriding the Master flag, if non-empty.
	kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: config.Kubeconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: config.Master}}).ClientConfig()
	if err != nil {
		return nil, err
	}
	client, err := kubeclient.New(kubeconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	// Create event recorder
	hostname := nodeutil.GetHostname(config.HostnameOverride)
	eventBroadcaster := record.NewBroadcaster()
	recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "kube-proxy", Host: hostname})
	eventBroadcaster.StartRecordingToSink(client.Events(""))

	// Create an iptables utility.
	execer := exec.New()
	dbus := utildbus.New()
	iptInterface := utiliptables.New(execer, dbus, protocol)

	var proxier proxy.ProxyProvider
	var endpointsHandler proxyconfig.EndpointsConfigHandler

	useIptablesProxy := false
	if mayTryIptablesProxy(config.ProxyMode, client.Nodes(), hostname) {
		var err error
		// guaranteed false on error, error only necessary for debugging
		useIptablesProxy, err = iptables.ShouldUseIptablesProxier()
		if err != nil {
			glog.Errorf("Can't determine whether to use iptables proxy, using userspace proxier: %v", err)
		}
	}

	if useIptablesProxy {
		glog.V(2).Info("Using iptables Proxier.")
		execer := exec.New()
		proxierIptables, err := iptables.NewProxier(iptInterface, execer, config.SyncPeriod, config.MasqueradeAll)
		if err != nil {
			glog.Fatalf("Unable to create proxier: %v", err)
		}
		proxier = proxierIptables
		endpointsHandler = proxierIptables
		// No turning back. Remove artifacts that might still exist from the userspace Proxier.
		glog.V(2).Info("Tearing down userspace rules. Errors here are acceptable.")
		userspace.CleanupLeftovers(iptInterface)
	} else {
		glog.V(2).Info("Using userspace Proxier.")
		// This is a proxy.LoadBalancer which NewProxier needs but has methods we don't need for
		// our config.EndpointsConfigHandler.
		loadBalancer := userspace.NewLoadBalancerRR()
		// set EndpointsConfigHandler to our loadBalancer
		endpointsHandler = loadBalancer

		proxierUserspace, err := userspace.NewProxier(loadBalancer, config.BindAddress, iptInterface, config.PortRange, config.SyncPeriod, config.UDPIdleTimeout)
		if err != nil {
			glog.Fatalf("Unable to create proxier: %v", err)
		}
		proxier = proxierUserspace
		// Remove artifacts from the pure-iptables Proxier.
		glog.V(2).Info("Tearing down pure-iptables proxy rules. Errors here are acceptable.")
		iptables.CleanupLeftovers(iptInterface)
	}
	iptInterface.AddReloadFunc(proxier.Sync)

	// Create configs (i.e. Watches for Services and Endpoints)
	// Note: RegisterHandler() calls need to happen before creation of Sources because sources
	// only notify on changes, and the initial update (on process start) may be lost if no handlers
	// are registered yet.
	serviceConfig := proxyconfig.NewServiceConfig()
	serviceConfig.RegisterHandler(proxier)

	endpointsConfig := proxyconfig.NewEndpointsConfig()
	endpointsConfig.RegisterHandler(endpointsHandler)

	proxyconfig.NewSourceAPI(
		client,
		30*time.Second,
		serviceConfig.Channel("api"),
		endpointsConfig.Channel("api"),
	)

	config.nodeRef = &api.ObjectReference{
		Kind:      "Node",
		Name:      hostname,
		UID:       types.UID(hostname),
		Namespace: "",
	}

	return NewProxyServer(config, client, endpointsConfig, endpointsHandler, iptInterface, oomAdjuster, proxier, recorder, serviceConfig)
}
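One subtlety above: the oomAdjuster assignment inside the if block must use = rather than :=, since := would declare a fresh variable scoped to the block and leave the outer oomAdjuster passed to NewProxyServer nil. A minimal illustration of the shadowing pitfall:

package main

import "fmt"

func main() {
	var p *int
	if true {
		v := 42
		p = &v // with p := &v here, the outer p would remain nil
	}
	fmt.Println(p == nil) // false
}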