Example #1
func deleteLocalSubnetRoute(device, localSubnetCIDR string) {
	backoff := utilwait.Backoff{
		Duration: 100 * time.Millisecond,
		Factor:   1.25,
		Steps:    6,
	}
	err := utilwait.ExponentialBackoff(backoff, func() (bool, error) {
		itx := ipcmd.NewTransaction(kexec.New(), device)
		routes, err := itx.GetRoutes()
		if err != nil {
			return false, fmt.Errorf("could not get routes: %v", err)
		}
		for _, route := range routes {
			if strings.Contains(route, localSubnetCIDR) {
				itx.DeleteRoute(localSubnetCIDR)
				err = itx.EndTransaction()
				if err != nil {
					return false, fmt.Errorf("could not delete route: %v", err)
				}
				return true, nil
			}
		}
		return false, nil
	})

	if err != nil {
		glog.Errorf("Error removing %s route from dev %s: %v; if the route appears later it will not be deleted.", localSubnetCIDR, device, err)
	}
}
Example #2
// Called by higher layers to create the plugin SDN node instance
func NewNodePlugin(pluginName string, osClient *osclient.Client, kClient *kclient.Client, hostname string, selfIP string, iptablesSyncPeriod time.Duration, mtu uint32) (*OsdnNode, error) {
	if !osapi.IsOpenShiftNetworkPlugin(pluginName) {
		return nil, nil
	}

	log.Infof("Initializing SDN node of type %q with configured hostname %q (IP %q), iptables sync period %q", pluginName, hostname, selfIP, iptablesSyncPeriod.String())
	if hostname == "" {
		output, err := kexec.New().Command("uname", "-n").CombinedOutput()
		if err != nil {
			return nil, err
		}
		hostname = strings.TrimSpace(string(output))
		log.Infof("Resolved hostname to %q", hostname)
	}
	if selfIP == "" {
		var err error
		selfIP, err = netutils.GetNodeIP(hostname)
		if err != nil {
			log.V(5).Infof("Failed to determine node address from hostname %s; using default interface (%v)", hostname, err)
			var defaultIP net.IP
			defaultIP, err = kubeutilnet.ChooseHostInterface()
			if err != nil {
				return nil, err
			}
			selfIP = defaultIP.String()
			log.Infof("Resolved IP address to %q", selfIP)
		}
	}

	ovsif, err := ovs.New(kexec.New(), BR)
	if err != nil {
		return nil, err
	}

	plugin := &OsdnNode{
		multitenant:        osapi.IsOpenShiftMultitenantNetworkPlugin(pluginName),
		kClient:            kClient,
		osClient:           osClient,
		ovs:                ovsif,
		localIP:            selfIP,
		hostName:           hostname,
		vnids:              newNodeVNIDMap(),
		podNetworkReady:    make(chan struct{}),
		kubeletInitReady:   make(chan struct{}),
		iptablesSyncPeriod: iptablesSyncPeriod,
		mtu:                mtu,
		egressPolicies:     make(map[uint32][]*osapi.EgressNetworkPolicy),
	}

	if err := plugin.dockerPreCNICleanup(); err != nil {
		return nil, err
	}

	return plugin, nil
}
Example #3
func NewPlugin() network.NetworkPlugin {
	protocol := utiliptables.ProtocolIpv4
	execer := utilexec.New()
	dbus := utildbus.New()
	iptInterface := utiliptables.New(execer, dbus, protocol)

	return &kubenetNetworkPlugin{
		podIPs:      make(map[kubecontainer.ContainerID]string),
		hostPortMap: make(map[hostport]closeable),
		MTU:         1460, //TODO: don't hardcode this
		execer:      utilexec.New(),
		iptables:    iptInterface,
	}
}
Example #4
func NewPlugin(networkPluginDir string) network.NetworkPlugin {
	protocol := utiliptables.ProtocolIpv4
	execer := utilexec.New()
	dbus := utildbus.New()
	iptInterface := utiliptables.New(execer, dbus, protocol)
	return &kubenetNetworkPlugin{
		podIPs:            make(map[kubecontainer.ContainerID]string),
		MTU:               1460, //TODO: don't hardcode this
		execer:            utilexec.New(),
		iptables:          iptInterface,
		vendorDir:         networkPluginDir,
		hostportHandler:   hostport.NewHostportHandler(),
		nonMasqueradeCIDR: "10.0.0.0/8",
	}
}
Example #5
// WaitForAttach runs on the node to detect if the volume (referenced by LUN) is attached. If attached, the device path is returned
func (attacher *azureDiskAttacher) WaitForAttach(spec *volume.Spec, lunStr string, timeout time.Duration) (string, error) {
	volumeSource, err := getVolumeSource(spec)
	if err != nil {
		return "", err
	}

	if len(lunStr) == 0 {
		return "", fmt.Errorf("WaitForAttach failed for Azure disk %q: lun is empty.", volumeSource.DiskName)
	}

	lun, err := strconv.Atoi(lunStr)
	if err != nil {
		return "", fmt.Errorf("WaitForAttach: wrong lun %q, err: %v", lunStr, err)
	}
	scsiHostRescan(&osIOHandler{})
	exe := exec.New()
	devicePath := ""

	err = wait.Poll(checkSleepDuration, timeout, func() (bool, error) {
		glog.V(4).Infof("Checking Azure disk %q(lun %s) is attached.", volumeSource.DiskName, lunStr)
		if devicePath, err = findDiskByLun(lun, &osIOHandler{}, exe); err == nil {
			glog.V(4).Infof("Successfully found attached Azure disk %q(lun %s, device path %s).", volumeSource.DiskName, lunStr, devicePath)
			return true, nil
		} else {
			//Log error, if any, and continue checking periodically
			glog.V(4).Infof("Error Stat Azure disk (%q) is attached: %v", volumeSource.DiskName, err)
			return false, nil
		}
	})
	return devicePath, err
}
Example #6
func (plugin *awsElasticBlockStorePlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager ebsManager, mounter mount.Interface) (volume.Builder, error) {
	// EBSs used directly in a pod have a ReadOnly flag set by the pod author.
	// EBSs used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
	var readOnly bool
	var ebs *api.AWSElasticBlockStoreVolumeSource
	if spec.Volume != nil && spec.Volume.AWSElasticBlockStore != nil {
		ebs = spec.Volume.AWSElasticBlockStore
		readOnly = ebs.ReadOnly
	} else {
		ebs = spec.PersistentVolume.Spec.AWSElasticBlockStore
		readOnly = spec.ReadOnly
	}

	volumeID := ebs.VolumeID
	fsType := ebs.FSType
	partition := ""
	if ebs.Partition != 0 {
		partition = strconv.Itoa(ebs.Partition)
	}

	return &awsElasticBlockStoreBuilder{
		awsElasticBlockStore: &awsElasticBlockStore{
			podUID:   podUID,
			volName:  spec.Name(),
			volumeID: volumeID,
			manager:  manager,
			mounter:  mounter,
			plugin:   plugin,
		},
		fsType:      fsType,
		partition:   partition,
		readOnly:    readOnly,
		diskMounter: &mount.SafeFormatAndMount{plugin.host.GetMounter(), exec.New()}}, nil
}
Example #7
func (plugin *rbdPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, secret string) (volume.Builder, error) {
	source, readOnly := plugin.getRBDVolumeSource(spec)
	pool := source.RBDPool
	if pool == "" {
		pool = "rbd"
	}
	id := source.RadosUser
	if id == "" {
		id = "admin"
	}
	keyring := source.Keyring
	if keyring == "" {
		keyring = "/etc/ceph/keyring"
	}

	return &rbdBuilder{
		rbd: &rbd{
			podUID:   podUID,
			volName:  spec.Name(),
			Image:    source.RBDImage,
			Pool:     pool,
			ReadOnly: readOnly,
			manager:  manager,
			mounter:  &mount.SafeFormatAndMount{mounter, exec.New()},
			plugin:   plugin,
		},
		Mon:     source.CephMonitors,
		Id:      id,
		Keyring: keyring,
		Secret:  secret,
		fsType:  source.FSType,
	}, nil
}
Example #8
File: fc.go Project: CodeJuan/kubernetes
func (plugin *fcPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Mounter, error) {
	// fc volumes used directly in a pod have a ReadOnly flag set by the pod author.
	// fc volumes used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
	fc, readOnly, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}

	if fc.Lun == nil {
		return nil, fmt.Errorf("empty lun")
	}

	lun := strconv.Itoa(int(*fc.Lun))

	return &fcDiskMounter{
		fcDisk: &fcDisk{
			podUID:  podUID,
			volName: spec.Name(),
			wwns:    fc.TargetWWNs,
			lun:     lun,
			manager: manager,
			io:      &osIOHandler{},
			plugin:  plugin},
		fsType:   fc.FSType,
		readOnly: readOnly,
		mounter:  &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()},
	}, nil
}
Example #9
func (plugin *iscsiPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Mounter, error) {
	// iscsi volumes used directly in a pod have a ReadOnly flag set by the pod author.
	// iscsi volumes used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
	iscsi, readOnly, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}

	lun := strconv.Itoa(int(iscsi.Lun))
	portal := portalMounter(iscsi.TargetPortal)

	iface := iscsi.ISCSIInterface

	return &iscsiDiskMounter{
		iscsiDisk: &iscsiDisk{
			podUID:  podUID,
			volName: spec.Name(),
			portal:  portal,
			iqn:     iscsi.IQN,
			lun:     lun,
			iface:   iface,
			manager: manager,
			plugin:  plugin},
		fsType:     iscsi.FSType,
		readOnly:   readOnly,
		mounter:    &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()},
		deviceUtil: ioutil.NewDeviceHandler(ioutil.NewIOHandler()),
	}, nil
}
Example #10
File: fc.go Project: jojimt/contrib
func (plugin *fcPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Builder, error) {
	// fc volumes used directly in a pod have a ReadOnly flag set by the pod author.
	// fc volumes used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
	var readOnly bool
	var fc *api.FCVolumeSource
	if spec.Volume != nil && spec.Volume.FC != nil {
		fc = spec.Volume.FC
		readOnly = fc.ReadOnly
	} else {
		fc = spec.PersistentVolume.Spec.FC
		readOnly = spec.ReadOnly
	}

	if fc.Lun == nil {
		return nil, fmt.Errorf("empty lun")
	}

	lun := strconv.Itoa(*fc.Lun)

	return &fcDiskBuilder{
		fcDisk: &fcDisk{
			podUID:  podUID,
			volName: spec.Name(),
			wwns:    fc.TargetWWNs,
			lun:     lun,
			manager: manager,
			io:      &osIOHandler{},
			plugin:  plugin},
		fsType:   fc.FSType,
		readOnly: readOnly,
		mounter:  &mount.SafeFormatAndMount{mounter, exec.New()},
	}, nil
}
Example #11
File: common.go Project: rrati/origin
// Called by plugin factory functions to initialize the generic plugin instance
func (oc *OvsController) BaseInit(registry *Registry, flowController FlowController, pluginHooks PluginHooks, hostname string, selfIP string) error {
	if hostname == "" {
		output, err := kexec.New().Command("uname", "-n").CombinedOutput()
		if err != nil {
			return err
		}
		hostname = strings.TrimSpace(string(output))
	}

	if selfIP == "" {
		var err error
		selfIP, err = netutils.GetNodeIP(hostname)
		if err != nil {
			return err
		}
	}
	log.Infof("Self IP: %s.", selfIP)

	oc.pluginHooks = pluginHooks
	oc.Registry = registry
	oc.flowController = flowController
	oc.localIP = selfIP
	oc.hostName = hostname
	oc.VNIDMap = make(map[string]uint)
	oc.sig = make(chan struct{})
	oc.podNetworkReady = make(chan struct{})
	oc.adminNamespaces = make([]string, 0)
	oc.services = make(map[string]api.Service)

	return nil
}
Example #12
func (plugin *gcePersistentDiskPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.Builder, error) {
	// GCEPDs used directly in a pod have a ReadOnly flag set by the pod author.
	// GCEPDs used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
	var readOnly bool

	var gce *api.GCEPersistentDiskVolumeSource
	if spec.Volume != nil && spec.Volume.GCEPersistentDisk != nil {
		gce = spec.Volume.GCEPersistentDisk
		readOnly = gce.ReadOnly
	} else {
		gce = spec.PersistentVolume.Spec.GCEPersistentDisk
		readOnly = spec.ReadOnly
	}

	pdName := gce.PDName
	fsType := gce.FSType
	partition := ""
	if gce.Partition != 0 {
		partition = strconv.Itoa(gce.Partition)
	}

	return &gcePersistentDiskBuilder{
		gcePersistentDisk: &gcePersistentDisk{
			podUID:    podUID,
			volName:   spec.Name(),
			pdName:    pdName,
			partition: partition,
			mounter:   mounter,
			manager:   manager,
			plugin:    plugin,
		},
		fsType:      fsType,
		readOnly:    readOnly,
		diskMounter: &mount.SafeFormatAndMount{mounter, exec.New()}}, nil
}
Example #13
// IsLikelyNotMountPoint determines whether a path is a mountpoint by calling findmnt
// in the host's root mount namespace.
func (n *NsenterMounter) IsLikelyNotMountPoint(file string) (bool, error) {
	file, err := filepath.Abs(file)
	if err != nil {
		return true, err
	}

	args := []string{"--mount=/rootfs/proc/1/ns/mnt", "--", n.absHostPath("findmnt"), "-o", "target", "--noheadings", "--target", file}
	glog.V(5).Infof("findmnt command: %v %v", nsenterPath, args)

	exec := exec.New()
	out, err := exec.Command(nsenterPath, args...).CombinedOutput()
	if err != nil {
		glog.Errorf("Failed to nsenter mount, return file doesn't exist: %v", err)
		// If the command itself is correct, then an error here most likely
		// means that the directory does not exist.
		return true, os.ErrNotExist
	}
	strOut := strings.TrimSuffix(string(out), "\n")

	glog.V(5).Infof("IsLikelyNotMountPoint findmnt output: %v", strOut)
	if strOut == file {
		return false, nil
	}

	return true, nil
}
Example #14
// checkPodToPodConnection verifies connection from fromPod to toPod.
// Connection check from toPod to fromPod will be done by the node of toPod.
func (d CheckPodNetwork) checkPodToPodConnection(fromPod, toPod *kapi.Pod) {
	if len(fromPod.Status.ContainerStatuses) == 0 {
		err := fmt.Errorf("ContainerID not found for pod %q", util.PrintPod(fromPod))
		d.res.Error("DPodNet1006", err, err.Error())
		return
	}

	success := util.ExpectedConnectionStatus(fromPod.Namespace, toPod.Namespace, d.vnidMap)

	kexecer := kexec.New()
	containerID := kcontainer.ParseContainerID(fromPod.Status.ContainerStatuses[0].ContainerID).ID
	pid, err := kexecer.Command("docker", "inspect", "-f", "{{.State.Pid}}", containerID).CombinedOutput()
	if err != nil {
		d.res.Error("DPodNet1007", err, fmt.Sprintf("Fetching pid for pod %q, container %q failed. Error: %s", util.PrintPod(fromPod), containerID, err))
		return
	}

	out, err := kexecer.Command("nsenter", "-n", "-t", strings.Trim(fmt.Sprintf("%s", pid), "\n"), "--", "ping", "-c1", "-W2", toPod.Status.PodIP).CombinedOutput()
	if success && err != nil {
		d.res.Error("DPodNet1008", err, fmt.Sprintf("Connectivity from pod %q to pod %q failed. Error: %s, Out: %s", util.PrintPod(fromPod), util.PrintPod(toPod), err, string(out)))
	} else if !success && err == nil {
		msg := fmt.Sprintf("Unexpected connectivity from pod %q to pod %q.", util.PrintPod(fromPod), util.PrintPod(toPod))
		d.res.Error("DPodNet1009", fmt.Errorf("%s", msg), msg)
	}
}
Example #15
func deleteLocalSubnetRoute(device, localSubnetCIDR string) {
	const (
		timeInterval = 100 * time.Millisecond
		maxIntervals = 20
	)

	for i := 0; i < maxIntervals; i++ {
		itx := ipcmd.NewTransaction(kexec.New(), device)
		routes, err := itx.GetRoutes()
		if err != nil {
			glog.Errorf("Could not get routes for dev %s: %v", device, err)
			return
		}
		for _, route := range routes {
			if strings.Contains(route, localSubnetCIDR) {
				itx.DeleteRoute(localSubnetCIDR)
				err = itx.EndTransaction()
				if err != nil {
					glog.Errorf("Could not delete subnet route %s from dev %s: %v", localSubnetCIDR, device, err)
				}
				return
			}
		}

		time.Sleep(timeInterval)
	}

	glog.Errorf("Timed out looking for %s route for dev %s; if it appears later it will not be deleted.", localSubnetCIDR, device)
}
Example #16
func (plugin *quobytePlugin) CanSupport(spec *volume.Spec) bool {
	if (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.Quobyte == nil) ||
		(spec.Volume != nil && spec.Volume.Quobyte == nil) {
		return false
	}

	// If Quobyte is already mounted we don't need to check if the binary is installed
	if mounter, err := plugin.newMounterInternal(spec, nil, plugin.host.GetMounter()); err == nil {
		qm, _ := mounter.(*quobyteMounter)
		pluginDir := plugin.host.GetPluginDir(strings.EscapeQualifiedNameForDisk(quobytePluginName))
		if mounted, err := qm.pluginDirIsMounted(pluginDir); mounted && err == nil {
			glog.V(4).Infof("quobyte: can support")
			return true
		}
	} else {
		glog.V(4).Infof("quobyte: Error: %v", err)
	}

	if out, err := exec.New().Command("ls", "/sbin/mount.quobyte").CombinedOutput(); err == nil {
		glog.V(4).Infof("quobyte: can support: %s", string(out))
		return true
	}

	return false
}
Example #17
// search /sys/bus for rbd device that matches given pool and image
func getDevFromImageAndPool(pool, image string) (string, bool) {
	// /sys/bus/rbd/devices/X/name and /sys/bus/rbd/devices/X/pool
	sys_path := "/sys/bus/rbd/devices"
	if dirs, err := ioutil.ReadDir(sys_path); err == nil {
		for _, f := range dirs {
			// pool and name format:
			// see rbd_pool_show() and rbd_name_show() at
			// https://github.com/torvalds/linux/blob/master/drivers/block/rbd.c
			name := f.Name()
			// first match pool, then match name
			po := path.Join(sys_path, name, "pool")
			img := path.Join(sys_path, name, "name")
			exe := exec.New()
			out, err := exe.Command("cat", po, img).CombinedOutput()
			if err != nil {
				continue
			}
			matched, err := regexp.MatchString("^"+pool+"\n"+image+"\n$", string(out))
			if err != nil || !matched {
				continue
			}
			// found a match, check if device exists
			devicePath := "/dev/rbd" + name
			if _, err := os.Lstat(devicePath); err == nil {
				return devicePath, true
			}
		}
	}
	return "", false
}
Example #18
func (d CheckServiceNetwork) checkPodToServiceConnection(fromPod *kapi.Pod, toService *kapi.Service) {
	if len(fromPod.Status.ContainerStatuses) == 0 {
		d.res.Error("DSvcNet1008", nil, fmt.Sprintf("ContainerID not found for pod %q", util.PrintPod(fromPod)))
		return
	}

	success := util.ExpectedConnectionStatus(fromPod.Namespace, toService.Namespace, d.vnidMap)

	kexecer := kexec.New()
	containerID := kcontainer.ParseContainerID(fromPod.Status.ContainerStatuses[0].ContainerID).ID
	pid, err := kexecer.Command("docker", "inspect", "-f", "{{.State.Pid}}", containerID).CombinedOutput()
	if err != nil {
		d.res.Error("DSvcNet1009", err, fmt.Sprintf("Fetching pid for pod %q failed. Error: %s", util.PrintPod(fromPod), err))
		return
	}

	// In bash, redirecting to /dev/tcp/HOST/PORT or /dev/udp/HOST/PORT opens a connection
	// to that HOST:PORT. Use this to test connectivity to the service; we can't use ping
	// like in the pod connectivity check because only connections to the correct port
	// get redirected by the iptables rules.
	srvConCmd := fmt.Sprintf("echo -n '' > /dev/%s/%s/%d", strings.ToLower(string(toService.Spec.Ports[0].Protocol)), toService.Spec.ClusterIP, toService.Spec.Ports[0].Port)
	out, err := kexecer.Command("nsenter", "-n", "-t", strings.Trim(fmt.Sprintf("%s", pid), "\n"), "--", "timeout", "1", "bash", "-c", srvConCmd).CombinedOutput()
	if success && err != nil {
		d.res.Error("DSvcNet1010", err, fmt.Sprintf("Connectivity from pod %q to service %q failed. Error: %s, Out: %s", util.PrintPod(fromPod), printService(toService), err, string(out)))
	} else if !success && err == nil {
		msg := fmt.Sprintf("Unexpected connectivity from pod %q to service %q.", util.PrintPod(fromPod), printService(toService))
		d.res.Error("DSvcNet1011", fmt.Errorf("%s", msg), msg)
	}
}
Example #19
func (plugin *iscsiPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Builder, error) {
	// iscsi volumes used directly in a pod have a ReadOnly flag set by the pod author.
	// iscsi volumes used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
	var readOnly bool
	var iscsi *api.ISCSIVolumeSource
	if spec.Volume != nil && spec.Volume.ISCSI != nil {
		iscsi = spec.Volume.ISCSI
		readOnly = iscsi.ReadOnly
	} else {
		iscsi = spec.PersistentVolume.Spec.ISCSI
		readOnly = spec.ReadOnly
	}

	lun := strconv.Itoa(iscsi.Lun)
	portal := portalBuilder(iscsi.TargetPortal)

	iface := iscsi.ISCSIInterface

	return &iscsiDiskBuilder{
		iscsiDisk: &iscsiDisk{
			podUID:  podUID,
			volName: spec.Name(),
			portal:  portal,
			iqn:     iscsi.IQN,
			lun:     lun,
			iface:   iface,
			manager: manager,
			plugin:  plugin},
		fsType:   iscsi.FSType,
		readOnly: readOnly,
		mounter:  &mount.SafeFormatAndMount{mounter, exec.New()},
	}, nil
}
Example #20
// MountDevice mounts device to global mount point.
func (attacher *vsphereVMDKAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
	mounter := attacher.host.GetMounter()
	notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
	if err != nil {
		if os.IsNotExist(err) {
			if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
				glog.Errorf("Failed to create directory at %#v. err: %s", deviceMountPath, err)
				return err
			}
			notMnt = true
		} else {
			return err
		}
	}

	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		return err
	}

	options := []string{}

	if notMnt {
		diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()}
		err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, options)
		if err != nil {
			os.Remove(deviceMountPath)
			return err
		}
		glog.V(4).Infof("formatting spec %v devicePath %v deviceMountPath %v fs %v with options %+v", spec.Name(), devicePath, deviceMountPath, volumeSource.FSType, options)
	}
	return nil
}
Example #21
func newNodeIPTables(clusterNetworkCIDR string, syncPeriod time.Duration) *NodeIPTables {
	return &NodeIPTables{
		ipt:                iptables.New(kexec.New(), utildbus.New(), iptables.ProtocolIpv4),
		clusterNetworkCIDR: clusterNetworkCIDR,
		syncPeriod:         syncPeriod,
	}
}
Example #22
func NewTCShaper(iface string) BandwidthShaper {
	shaper := &tcShaper{
		e:     exec.New(),
		iface: iface,
	}
	return shaper
}
Example #23
func (oc *OvsController) StartNode(mtu uint) error {
	// Assume we are working with IPv4
	clusterNetworkCIDR, err := oc.Registry.GetClusterNetworkCIDR()
	if err != nil {
		log.Errorf("Failed to obtain ClusterNetwork: %v", err)
		return err
	}

	ipt := iptables.New(kexec.New(), utildbus.New(), iptables.ProtocolIpv4)
	if err := SetupIptables(ipt, clusterNetworkCIDR); err != nil {
		return fmt.Errorf("Failed to set up iptables: %v", err)
	}

	ipt.AddReloadFunc(func() {
		err := SetupIptables(ipt, clusterNetworkCIDR)
		if err != nil {
			log.Errorf("Error reloading iptables: %v\n", err)
		}
	})

	if err := oc.pluginHooks.PluginStartNode(mtu); err != nil {
		return fmt.Errorf("Failed to start plugin: %v", err)
	}

	oc.markPodNetworkReady()

	return nil
}
Example #24
File: common.go Project: kcbabo/origin
func SetupIptables(fw *firewalld.Interface, clusterNetworkCIDR string) error {
	if fw.IsRunning() {
		rules := []FirewallRule{
			{firewalld.IPv4, "nat", "POSTROUTING", 0, []string{"-s", clusterNetworkCIDR, "!", "-d", clusterNetworkCIDR, "-j", "MASQUERADE"}},
			{firewalld.IPv4, "filter", "INPUT", 0, []string{"-p", "udp", "-m", "multiport", "--dports", "4789", "-m", "comment", "--comment", "001 vxlan incoming", "-j", "ACCEPT"}},
			{firewalld.IPv4, "filter", "INPUT", 0, []string{"-i", "tun0", "-m", "comment", "--comment", "traffic from docker for internet", "-j", "ACCEPT"}},
			{firewalld.IPv4, "filter", "FORWARD", 0, []string{"-d", clusterNetworkCIDR, "-j", "ACCEPT"}},
			{firewalld.IPv4, "filter", "FORWARD", 0, []string{"-s", clusterNetworkCIDR, "-j", "ACCEPT"}},
		}

		for _, rule := range rules {
			err := fw.EnsureRule(rule.ipv, rule.table, rule.chain, rule.priority, rule.args)
			if err != nil {
				return err
			}
		}
	} else {
		dbus := utildbus.New()
		ipt := iptables.New(kexec.New(), dbus, iptables.ProtocolIpv4)

		_, err := ipt.EnsureRule(iptables.Append, iptables.TableNAT, iptables.ChainPostrouting, "-s", clusterNetworkCIDR, "!", "-d", clusterNetworkCIDR, "-j", "MASQUERADE")
		if err != nil {
			return err
		}
	}

	return nil
}
Example #25
func (plugin *execNetworkPlugin) Status(namespace string, name string, id kubeletTypes.DockerID) (*network.PodNetworkStatus, error) {
	out, err := utilexec.New().Command(plugin.getExecutable(), statusCmd, namespace, name, string(id)).CombinedOutput()
	glog.V(5).Infof("Status 'exec' network plugin output: %s, %v", string(out), err)
	if err != nil {
		return nil, err
	}
	if string(out) == "" {
		return nil, nil
	}
	findVersion := struct {
		api.TypeMeta `json:",inline"`
	}{}
	err = json.Unmarshal(out, &findVersion)
	if err != nil {
		return nil, err
	}

	// check kind and version
	if findVersion.Kind != "" && findVersion.Kind != "PodNetworkStatus" {
		errStr := fmt.Sprintf("Invalid 'kind' returned in network status for pod '%s'. Valid value is 'PodNetworkStatus', got '%s'.", name, findVersion.Kind)
		return nil, errors.New(errStr)
	}
	switch findVersion.APIVersion {
	case "":
		fallthrough
	case "v1beta1":
		networkStatus := &network.PodNetworkStatus{}
		err = json.Unmarshal(out, networkStatus)
		return networkStatus, err
	}
	errStr := fmt.Sprintf("Unknown version '%s' in network status for pod '%s'.", findVersion.APIVersion, name)
	return nil, errors.New(errStr)
}
Example #26
// ShouldUseIptablesProxier returns true if we should use the iptables Proxier
// instead of the "classic" userspace Proxier.  This is determined by checking
// the iptables version and for the existence of kernel features. It may return
// an error if it fails to determine the iptables version, in which case it
// will also return false.
func ShouldUseIptablesProxier() (bool, error) {
	exec := utilexec.New()
	minVersion, err := semver.NewVersion(iptablesMinVersion)
	if err != nil {
		return false, err
	}
	// returns "X.X.X", err
	versionString, err := utiliptables.GetIptablesVersionString(exec)
	if err != nil {
		return false, err
	}
	version, err := semver.NewVersion(versionString)
	if err != nil {
		return false, err
	}
	if version.LessThan(*minVersion) {
		return false, nil
	}

	// Check for the required sysctls.  We don't care about the value, just
	// that it exists.  If this Proxier is chosen, we'll initialize it as we
	// need.
	_, err = getSysctl(sysctlRouteLocalnet)
	if err != nil {
		return false, err
	}

	return true, nil
}
Example #27
// MountDevice runs mount command on the node to mount the volume
func (attacher *azureDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
	mounter := attacher.host.GetMounter()
	notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
	if err != nil {
		if os.IsNotExist(err) {
			if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
				return err
			}
			notMnt = true
		} else {
			return err
		}
	}

	volumeSource, err := getVolumeSource(spec)
	if err != nil {
		return err
	}

	options := []string{}
	if spec.ReadOnly {
		options = append(options, "ro")
	}
	if notMnt {
		diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()}
		err = diskMounter.FormatAndMount(devicePath, deviceMountPath, *volumeSource.FSType, options)
		if err != nil {
			os.Remove(deviceMountPath)
			return err
		}
	}
	return nil
}
Example #28
func (plugin *cinderPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.Builder, error) {
	var cinder *api.CinderVolumeSource
	if spec.Volume != nil && spec.Volume.Cinder != nil {
		cinder = spec.Volume.Cinder
	} else {
		cinder = spec.PersistentVolume.Spec.Cinder
	}

	pdName := cinder.VolumeID
	fsType := cinder.FSType
	readOnly := cinder.ReadOnly

	return &cinderVolumeBuilder{
		cinderVolume: &cinderVolume{
			podUID:  podUID,
			volName: spec.Name(),
			pdName:  pdName,
			mounter: mounter,
			manager: manager,
			plugin:  plugin,
		},
		fsType:             fsType,
		readOnly:           readOnly,
		blockDeviceMounter: &cinderSafeFormatAndMount{mounter, exec.New()}}, nil
}
Example #29
func (attacher *gcePersistentDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
	// Only mount the PD globally once.
	mounter := attacher.host.GetMounter()
	notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
	if err != nil {
		if os.IsNotExist(err) {
			if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
				return err
			}
			notMnt = true
		} else {
			return err
		}
	}

	volumeSource, readOnly, err := getVolumeSource(spec)
	if err != nil {
		return err
	}

	options := []string{}
	if readOnly {
		options = append(options, "ro")
	}
	if notMnt {
		diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()}
		err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, options)
		if err != nil {
			os.Remove(deviceMountPath)
			return err
		}
		glog.V(4).Infof("formatting spec %v devicePath %v deviceMountPath %v fs %v with options %+v", spec.Name(), devicePath, deviceMountPath, volumeSource.FSType, options)
	}
	return nil
}
Example #30
// Calls "udevadm trigger --action=change" on the specified drive.
// drivePath must be the block device path to trigger on, in the format "/dev/sd*", or a symlink to it.
// This is a workaround for Issue #7972. Once the underlying issue has been resolved, this may be removed.
func udevadmChangeToDrive(drivePath string) error {
	glog.V(5).Infof("udevadmChangeToDrive: drive=%q", drivePath)

	// Evaluate symlink, if any
	drive, err := filepath.EvalSymlinks(drivePath)
	if err != nil {
		return fmt.Errorf("udevadmChangeToDrive: filepath.EvalSymlinks(%q) failed with %v.", drivePath, err)
	}
	glog.V(5).Infof("udevadmChangeToDrive: symlink path is %q", drive)

	// Check to make sure input is "/dev/sd*"
	if !strings.Contains(drive, diskSDPath) {
		return fmt.Errorf("udevadmChangeToDrive: expected input in the form \"%s\" but drive is %q.", diskSDPattern, drive)
	}

	// Call "udevadm trigger --action=change --property-match=DEVNAME=/dev/sd..."
	_, err = exec.New().Command(
		"udevadm",
		"trigger",
		"--action=change",
		fmt.Sprintf("--property-match=DEVNAME=%s", drive)).CombinedOutput()
	if err != nil {
		return fmt.Errorf("udevadmChangeToDrive: udevadm trigger failed for drive %q with %v.", drive, err)
	}
	return nil
}