Code Example #1
File: glusterfs.go  Project: MarWestermann/gofabric8
func (c *glusterfsCleaner) cleanup(dir string) error {
	mountpoint, err := c.mounter.IsMountPoint(dir)
	if err != nil {
		glog.Errorf("Glusterfs: Error checking IsMountPoint: %v", err)
		return err
	}
	if !mountpoint {
		return os.RemoveAll(dir)
	}

	if err := c.mounter.Unmount(dir); err != nil {
		glog.Errorf("Glusterfs: Unmounting failed: %v", err)
		return err
	}
	mountpoint, mntErr := c.mounter.IsMountPoint(dir)
	if mntErr != nil {
		glog.Errorf("Glusterfs: IsMountpoint check failed: %v", mntErr)
		return mntErr
	}
	if !mountpoint {
		if err := os.RemoveAll(dir); err != nil {
			return err
		}
	}

	return nil
}
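The cleanup above needs only two operations from its mounter, IsMountPoint and Unmount, which makes the path easy to exercise in tests. A minimal sketch of a fake mounter under that assumption; the Mounter interface and fakeMounter type below are hypothetical stand-ins, not the real pkg/util/mount API.

package mounttest

import "errors"

// Mounter is the subset of mount behavior the cleanup path relies on.
type Mounter interface {
	IsMountPoint(dir string) (bool, error)
	Unmount(dir string) error
}

// fakeMounter treats the directories in `mounted` as mount points until
// Unmount is called on them.
type fakeMounter struct {
	mounted map[string]bool
}

func (f *fakeMounter) IsMountPoint(dir string) (bool, error) {
	return f.mounted[dir], nil
}

func (f *fakeMounter) Unmount(dir string) error {
	if !f.mounted[dir] {
		return errors.New("not mounted: " + dir)
	}
	delete(f.mounted, dir)
	return nil
}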
Code Example #2
File: master.go  Project: gogogocheng/kubernetes
func (m *Master) getServersToValidate(c *Config) map[string]apiserver.Server {
	serversToValidate := map[string]apiserver.Server{
		"controller-manager": {Addr: "127.0.0.1", Port: ports.ControllerManagerPort, Path: "/healthz"},
		"scheduler":          {Addr: "127.0.0.1", Port: ports.SchedulerPort, Path: "/healthz"},
	}
	for ix, machine := range c.DatabaseStorage.Backends() {
		etcdUrl, err := url.Parse(machine)
		if err != nil {
			glog.Errorf("Failed to parse etcd url for validation: %v", err)
			continue
		}
		var port int
		var addr string
		if strings.Contains(etcdUrl.Host, ":") {
			var portString string
			addr, portString, err = net.SplitHostPort(etcdUrl.Host)
			if err != nil {
				glog.Errorf("Failed to split host/port: %s (%v)", etcdUrl.Host, err)
				continue
			}
			port, _ = strconv.Atoi(portString)
		} else {
			addr = etcdUrl.Host
			port = 4001
		}
		serversToValidate[fmt.Sprintf("etcd-%d", ix)] = apiserver.Server{Addr: addr, Port: port, Path: "/health", Validate: etcdstorage.EtcdHealthCheck}
	}
	return serversToValidate
}
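The map built above carries only addresses, ports, and health-check paths. A hedged sketch of probing such a map over plain HTTP; the server struct and the port numbers here are illustrative stand-ins, not the real apiserver.Server type or the ports package constants.

package main

import (
	"fmt"
	"net/http"
)

type server struct {
	Addr string
	Port int
	Path string
}

func main() {
	serversToValidate := map[string]server{
		"controller-manager": {Addr: "127.0.0.1", Port: 10252, Path: "/healthz"},
		"scheduler":          {Addr: "127.0.0.1", Port: 10251, Path: "/healthz"},
	}
	for name, s := range serversToValidate {
		url := fmt.Sprintf("http://%s:%d%s", s.Addr, s.Port, s.Path)
		resp, err := http.Get(url)
		if err != nil {
			fmt.Printf("%s: unreachable: %v\n", name, err)
			continue
		}
		resp.Body.Close()
		fmt.Printf("%s: %s\n", name, resp.Status)
	}
}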
Code Example #3
File: attacher.go  Project: kubernetes/kubernetes
// Attach checks with the GCE cloud provider if the specified volume is already
// attached to the node with the specified Name.
// If the volume is already attached, it succeeds (returns a nil error).
// If it is not, Attach issues a call to the GCE cloud provider to attach it.
// Callers are responsible for retrying on failure.
// Callers are responsible for thread safety between concurrent attach and
// detach operations.
func (attacher *gcePersistentDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
	volumeSource, readOnly, err := getVolumeSource(spec)
	if err != nil {
		return "", err
	}

	pdName := volumeSource.PDName

	attached, err := attacher.gceDisks.DiskIsAttached(pdName, nodeName)
	if err != nil {
		// Log error and continue with attach
		glog.Errorf(
			"Error checking if PD (%q) is already attached to current node (%q). Will continue and try attach anyway. err=%v",
			pdName, nodeName, err)
	}

	if err == nil && attached {
		// Volume is already attached to node.
		glog.Infof("Attach operation is successful. PD %q is already attached to node %q.", pdName, nodeName)
	} else {
		if err := attacher.gceDisks.AttachDisk(pdName, nodeName, readOnly); err != nil {
			glog.Errorf("Error attaching PD %q to node %q: %+v", pdName, nodeName, err)
			return "", err
		}
	}

	return path.Join(diskByIdPath, diskGooglePrefix+pdName), nil
}
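The doc comment above makes retrying the caller's responsibility. A minimal caller-side retry sketch with exponential backoff, assuming a hypothetical attach function; this is not how the real attach/detach controller schedules its retries.

package retry

import (
	"fmt"
	"time"
)

// attachWithRetry calls attach up to `attempts` times, doubling the delay
// after each failure, and returns the device path of the first success.
func attachWithRetry(attach func() (string, error), attempts int, backoff time.Duration) (string, error) {
	var lastErr error
	for i := 0; i < attempts; i++ {
		devicePath, err := attach()
		if err == nil {
			return devicePath, nil
		}
		lastErr = err
		time.Sleep(backoff)
		backoff *= 2
	}
	return "", fmt.Errorf("attach failed after %d attempts: %v", attempts, lastErr)
}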
Code Example #4
File: nfs.go  Project: gabrielweyer/kubernetes
func (c *nfsCleaner) TearDownAt(dir string) error {
	mountpoint, err := c.mounter.IsMountPoint(dir)
	if err != nil {
		glog.Errorf("Error checking IsMountPoint: %v", err)
		return err
	}
	if !mountpoint {
		return os.Remove(dir)
	}

	if err := c.mounter.Unmount(dir); err != nil {
		glog.Errorf("Unmounting failed: %v", err)
		return err
	}
	mountpoint, mntErr := c.mounter.IsMountPoint(dir)
	if mntErr != nil {
		glog.Errorf("IsMountpoint check failed: %v", mntErr)
		return mntErr
	}
	if !mountpoint {
		if err := os.Remove(dir); err != nil {
			return err
		}
	}

	return nil
}
Code Example #5
File: controller.go  Project: richm/origin
func deleteLocalSubnetRoute(device, localSubnetCIDR string) {
	const (
		timeInterval = 100 * time.Millisecond
		maxIntervals = 20
	)

	for i := 0; i < maxIntervals; i++ {
		itx := ipcmd.NewTransaction(device)
		routes, err := itx.GetRoutes()
		if err != nil {
			glog.Errorf("Could not get routes for dev %s: %v", device, err)
			return
		}
		for _, route := range routes {
			if strings.Contains(route, localSubnetCIDR) {
				itx.DeleteRoute(localSubnetCIDR)
				err = itx.EndTransaction()
				if err != nil {
					glog.Errorf("Could not delete subnet route %s from dev %s: %v", localSubnetCIDR, device, err)
				}
				return
			}
		}

		time.Sleep(timeInterval)
	}

	glog.Errorf("Timed out looking for %s route for dev %s; if it appears later it will not be deleted.", localSubnetCIDR, device)
}
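The loop above is a poll-until-done pattern: a fixed interval, a bounded number of attempts, and an error once the budget is exhausted. A generic sketch of that pattern using only the standard library; pollUntil is a hypothetical helper, not something defined by the project above.

package poll

import (
	"fmt"
	"time"
)

// pollUntil calls condition every interval, at most maxIntervals times, and
// stops early when the condition reports done or returns an error.
func pollUntil(interval time.Duration, maxIntervals int, condition func() (bool, error)) error {
	for i := 0; i < maxIntervals; i++ {
		done, err := condition()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		time.Sleep(interval)
	}
	return fmt.Errorf("timed out after %d attempts", maxIntervals)
}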
Code Example #6
File: controller_base.go  Project: roackb2/kubernetes
// updateClaim is a callback from framework.Controller watching PersistentVolumeClaim
// events.
func (ctrl *PersistentVolumeController) updateClaim(oldObj, newObj interface{}) {
	// Store the new claim version in the cache and do not process it if this is
	// an old version.
	new, err := storeObjectUpdate(ctrl.claims, newObj, "claim")
	if err != nil {
		glog.Errorf("%v", err)
	}
	if !new {
		return
	}

	newClaim, ok := newObj.(*api.PersistentVolumeClaim)
	if !ok {
		glog.Errorf("Expected PersistentVolumeClaim but updateClaim received %+v", newObj)
		return
	}
	if err := ctrl.syncClaim(newClaim); err != nil {
		if errors.IsConflict(err) {
			// Version conflict error happens quite often and the controller
			// recovers from it easily.
			glog.V(3).Infof("PersistentVolumeController could not update claim %q: %+v", claimToClaimKey(newClaim), err)
		} else {
			glog.Errorf("PersistentVolumeController could not update claim %q: %+v", claimToClaimKey(newClaim), err)
		}
	}
}
Code Example #7
File: controller_base.go  Project: roackb2/kubernetes
// initializeCaches fills all controller caches with initial data from etcd in
// order to have the caches already filled when the first addClaim/addVolume
// callbacks run and perform the initial synchronization of the controller.
func (ctrl *PersistentVolumeController) initializeCaches(volumeSource, claimSource cache.ListerWatcher) {
	volumeListObj, err := volumeSource.List(api.ListOptions{})
	if err != nil {
		glog.Errorf("PersistentVolumeController can't initialize caches: %v", err)
		return
	}
	volumeList, ok := volumeListObj.(*api.List)
	if !ok {
		glog.Errorf("PersistentVolumeController can't initialize caches, expected list of volumes, got: %+v", volumeListObj)
		return
	}
	for _, volume := range volumeList.Items {
		// Ignore template volumes from kubernetes 1.2
		deleted := ctrl.upgradeVolumeFrom1_2(volume.(*api.PersistentVolume))
		if !deleted {
			storeObjectUpdate(ctrl.volumes.store, volume, "volume")
		}
	}

	claimListObj, err := claimSource.List(api.ListOptions{})
	if err != nil {
		glog.Errorf("PersistentVolumeController can't initialize caches: %v", err)
		return
	}
	claimList, ok := claimListObj.(*api.List)
	if !ok {
		glog.Errorf("PersistentVolumeController can't initialize caches, expected list of claims, got: %+v", claimListObj)
		return
	}
	for _, claim := range claimList.Items {
		storeObjectUpdate(ctrl.claims, claim, "claim")
	}
	glog.V(4).Infof("controller initialized")
}
Code Example #8
File: driver.go  Project: nlamirault/heapster
// Stores events into the backend.
func (sink *influxdbSink) StoreEvents(events []kube_api.Event) error {
	dataPoints := []*influxdb.Series{}
	if len(events) == 0 {
		return nil
	}
	if !sink.c.avoidColumns {
		dataPoint, err := sink.storeEventsColumns(events)
		if err != nil {
			glog.Errorf("failed to parse events: %v", err)
			return err
		}
		dataPoints = append(dataPoints, dataPoint)
	} else {
		for _, event := range events {
			dataPoint, err := sink.storeEventNoColumns(event)
			if err != nil {
				glog.Errorf("failed to parse events: %v", err)
				return err
			}
			dataPoints = append(dataPoints, dataPoint)
		}
	}
	err := sink.client.WriteSeriesWithTimePrecision(dataPoints, influxdb.Millisecond)
	if err != nil {
		glog.Errorf("failed to write events to influxDB - %s", err)
		sink.recordWriteFailure()
	} else {
		glog.V(1).Info("Successfully flushed events to influxDB")
	}
	return err
}
Code Example #9
// attach exposes a volume on the host.
func (u *flexVolumeUtil) attach(f *flexVolumeMounter) (string, error) {
	execPath := f.execPath

	var options string
	if f.options != nil {
		out, err := json.Marshal(f.options)
		if err != nil {
			glog.Errorf("Failed to marshal plugin options, error: %s", err.Error())
			return "", err
		}
		if len(out) != 0 {
			options = string(out)
		} else {
			options = ""
		}
	}

	cmd := f.runner.Command(execPath, attachCmd, options)
	output, err := cmd.CombinedOutput()
	if err != nil {
		glog.Errorf("Failed to attach volume %s, output: %s, error: %s", f.volName, output, err.Error())
		_, err := handleCmdResponse(attachCmd, output)
		return "", err
	}

	status, err := handleCmdResponse(attachCmd, output)
	if err != nil {
		return "", err
	}

	glog.Infof("Successfully attached volume %s on device: %s", f.volName, status.Device)

	return status.Device, nil
}
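The plugin options above are JSON-encoded into a single command-line argument for the driver executable. A small illustration of that encoding; the option keys are hypothetical examples, since the real keys depend on the flexvolume driver in use.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical example options; real keys are driver-specific.
	options := map[string]string{
		"fsType":    "ext4",
		"readwrite": "rw",
	}
	out, err := json.Marshal(options)
	if err != nil {
		fmt.Println("marshal error:", err)
		return
	}
	// The driver would be invoked roughly as: <execPath> attach '<json>'
	fmt.Printf("driver argv: [attach %s]\n", out)
}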
Code Example #10
// When a pod is deleted, enqueue the replica set that manages the pod and update its expectations.
// obj could be an *api.Pod, or a DeletedFinalStateUnknown marker item.
func (rsc *ReplicaSetController) deletePod(obj interface{}) {
	pod, ok := obj.(*api.Pod)

	// When a delete is dropped, the relist will notice a pod in the store not
	// in the list, leading to the insertion of a tombstone object which contains
	// the deleted key/value. Note that this value might be stale. If the pod
	// changed labels the new ReplicaSet will not be woken up till the periodic resync.
	if !ok {
		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
		if !ok {
			glog.Errorf("Couldn't get object from tombstone %+v, could take up to %v before a replica set recreates a replica", obj, controller.ExpectationsTimeout)
			return
		}
		pod, ok = tombstone.Obj.(*api.Pod)
		if !ok {
			glog.Errorf("Tombstone contained object that is not a pod %+v, could take up to %v before replica set recreates a replica", obj, controller.ExpectationsTimeout)
			return
		}
	}
	if rs := rsc.getPodReplicaSet(pod); rs != nil {
		rsKey, err := controller.KeyFunc(rs)
		if err != nil {
			glog.Errorf("Couldn't get key for ReplicaSet %#v: %v", rs, err)
			return
		}
		rsc.expectations.DeletionObserved(rsKey)
		rsc.enqueueReplicaSet(rs)
	}
}
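The same tombstone unwrapping appears again in the DisruptionController, JobController, and ReplicationManager handlers below. A hedged sketch of factoring it into a helper; the Pod and DeletedFinalStateUnknown types here are simplified stand-ins to keep the sketch self-contained, not the real api and cache types.

package controllerutil

import "fmt"

// Simplified stand-ins for api.Pod and cache.DeletedFinalStateUnknown.
type Pod struct{ Name string }

type DeletedFinalStateUnknown struct {
	Key string
	Obj interface{}
}

// podFromDeleteEvent unwraps either a *Pod or a tombstone wrapping a *Pod,
// mirroring the inline checks in the deletePod handlers.
func podFromDeleteEvent(obj interface{}) (*Pod, error) {
	if pod, ok := obj.(*Pod); ok {
		return pod, nil
	}
	tombstone, ok := obj.(DeletedFinalStateUnknown)
	if !ok {
		return nil, fmt.Errorf("couldn't get object from tombstone %+v", obj)
	}
	pod, ok := tombstone.Obj.(*Pod)
	if !ok {
		return nil, fmt.Errorf("tombstone contained object that is not a pod %+v", tombstone.Obj)
	}
	return pod, nil
}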
Code Example #11
func (dc *DisruptionController) deletePod(obj interface{}) {
	pod, ok := obj.(*api.Pod)
	// When a delete is dropped, the relist will notice a pod in the store not
	// in the list, leading to the insertion of a tombstone object which contains
	// the deleted key/value. Note that this value might be stale. If the pod
	// changed labels the new ReplicaSet will not be woken up till the periodic
	// resync.
	if !ok {
		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
		if !ok {
			glog.Errorf("Couldn't get object from tombstone %+v", obj)
			return
		}
		pod, ok = tombstone.Obj.(*api.Pod)
		if !ok {
			glog.Errorf("Tombstone contained object that is not a pod %+v", obj)
			return
		}
	}
	glog.V(4).Infof("deletePod called on pod %q", pod.Name)
	pdb := dc.getPdbForPod(pod)
	if pdb == nil {
		glog.V(4).Infof("No matching pdb for pod %q", pod.Name)
		return
	}
	glog.V(4).Infof("deletePod %q -> PDB %q", pod.Name, pdb.Name)
	dc.enqueuePdb(pdb)
}
Code Example #12
File: volume.go  Project: MrXiaoZ/bfs
// NewVolume creates a new Volume and initializes it.
func NewVolume(id int32, bfile, ifile string) (v *Volume, err error) {
	v = &Volume{}
	v.Id = id
	if v.block, err = NewSuperBlock(bfile); err != nil {
		log.Errorf("init super block: \"%s\" error(%v)", bfile, err)
		return
	}
	if v.indexer, err = NewIndexer(ifile, 102400); err != nil {
		log.Errorf("init indexer: %s error(%v)", ifile, err)
		goto failed
	}
	v.needles = make(map[int64]NeedleCache)
	if err = v.init(); err != nil {
		goto failed
	}
	v.signal = make(chan uint32, volumeDelChNum)
	v.compressKeys = []int64{}
	go v.del()
	return
failed:
	v.block.Close()
	if v.indexer != nil {
		v.indexer.Close()
	}
	return
}
Code Example #13
File: main.go  Project: xiaoma20082008/bfs
func main() {
	var (
		config *Config
		zk     *Zookeeper
		p      *Pitchfork
		err    error
	)
	flag.Parse()
	defer log.Flush()
	log.Infof("bfs pitchfork start")
	if config, err = NewConfig(configFile); err != nil {
		log.Errorf("NewConfig(\"%s\") error(%v)", configFile, err)
		return
	}
	log.Infof("init zookeeper...")
	if zk, err = NewZookeeper(config.ZookeeperAddrs, config.ZookeeperTimeout, config.ZookeeperPitchforkRoot, config.ZookeeperStoreRoot,
		config.ZookeeperVolumeRoot); err != nil {
		log.Errorf("NewZookeeper() failed, Quit now")
		return
	}
	log.Infof("register pitchfork...")
	if p, err = NewPitchfork(zk, config); err != nil {
		log.Errorf("pitchfork NewPitchfork() failed, Quit now")
		return
	}
	log.Infof("starts probe stores...")
	go p.Probe()
	StartSignal()
	return
}
Code Example #14
File: controller.go  Project: ruizeng/kubernetes
// When a pod is deleted, enqueue the job that manages the pod and update its expectations.
// obj could be an *api.Pod, or a DeletedFinalStateUnknown marker item.
func (jm *JobController) deletePod(obj interface{}) {
	pod, ok := obj.(*api.Pod)

	// When a delete is dropped, the relist will notice a pod in the store not
	// in the list, leading to the insertion of a tombstone object which contains
	// the deleted key/value. Note that this value might be stale. If the pod
	// changed labels the new job will not be woken up till the periodic resync.
	if !ok {
		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
		if !ok {
			glog.Errorf("Couldn't get object from tombstone %+v, could take up to %v before a job recreates a pod", obj, controller.ExpectationsTimeout)
			return
		}
		pod, ok = tombstone.Obj.(*api.Pod)
		if !ok {
			glog.Errorf("Tombstone contained object that is not a pod %+v, could take up to %v before job recreates a pod", obj, controller.ExpectationsTimeout)
			return
		}
	}
	if job := jm.getPodJob(pod); job != nil {
		jobKey, err := controller.KeyFunc(job)
		if err != nil {
			glog.Errorf("Couldn't get key for job %#v: %v", job, err)
			return
		}
		jm.expectations.DeletionObserved(jobKey)
		jm.enqueueController(job)
	}
}
Code Example #15
File: master.go  Project: gogogocheng/kubernetes
func (m *Master) generateSSHKey(user, privateKeyfile, publicKeyfile string) error {
	private, public, err := util.GenerateKey(2048)
	if err != nil {
		return err
	}
	// If private keyfile already exists, we must have only made it halfway
	// through last time, so delete it.
	exists, err := util.FileExists(privateKeyfile)
	if err != nil {
		glog.Errorf("Error detecting if private key exists: %v", err)
	} else if exists {
		glog.Infof("Private key exists, but public key does not")
		if err := os.Remove(privateKeyfile); err != nil {
			glog.Errorf("Failed to remove stale private key: %v", err)
		}
	}
	if err := ioutil.WriteFile(privateKeyfile, util.EncodePrivateKey(private), 0600); err != nil {
		return err
	}
	publicKeyBytes, err := util.EncodePublicKey(public)
	if err != nil {
		return err
	}
	if err := ioutil.WriteFile(publicKeyfile+".tmp", publicKeyBytes, 0600); err != nil {
		return err
	}
	return os.Rename(publicKeyfile+".tmp", publicKeyfile)
}
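The public key above is written to a ".tmp" path and then renamed into place, so the final file never appears partially written. A sketch of that write-then-rename step as a standalone helper; writeFileAtomic is hypothetical, not a helper from the project above.

package fileutil

import (
	"io/ioutil"
	"os"
)

// writeFileAtomic writes data to path by first writing path+".tmp" and then
// renaming the temp file into place (rename is atomic on the same filesystem).
func writeFileAtomic(path string, data []byte, perm os.FileMode) error {
	tmp := path + ".tmp"
	if err := ioutil.WriteFile(tmp, data, perm); err != nil {
		return err
	}
	return os.Rename(tmp, path)
}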
Code Example #16
File: pod_workers.go  Project: alena1108/kubernetes
func (p *podWorkers) managePodLoop(podUpdates <-chan workUpdate) {
	var minRuntimeCacheTime time.Time
	for newWork := range podUpdates {
		func() {
			defer p.checkForUpdates(newWork.pod.UID, newWork.updateCompleteFn)
			// We would like to have the state of the containers from at least
			// the moment when we finished the previous processing of that pod.
			if err := p.runtimeCache.ForceUpdateIfOlder(minRuntimeCacheTime); err != nil {
				glog.Errorf("Error updating the container runtime cache: %v", err)
				return
			}
			pods, err := p.runtimeCache.GetPods()
			if err != nil {
				glog.Errorf("Error getting pods while syncing pod: %v", err)
				return
			}

			err = p.syncPodFn(newWork.pod, newWork.mirrorPod,
				kubecontainer.Pods(pods).FindPodByID(newWork.pod.UID), newWork.updateType)
			if err != nil {
				glog.Errorf("Error syncing pod %s, skipping: %v", newWork.pod.UID, err)
				p.recorder.Eventf(newWork.pod, "FailedSync", "Error syncing pod, skipping: %v", err)
				return
			}
			minRuntimeCacheTime = time.Now()

			newWork.updateCompleteFn()
		}()
	}
}
Code Example #17
File: proxy.go  Project: figoxu/myshard
func (s *Server) onConn(c net.Conn) {
	defer s.wg.Done()

	h := newHandler(s)
	conn, err := server.NewConn(c, s.user, s.password, h)
	if err != nil {
		log.Errorf("new connection error %s", err.Error())
		c.Close()
		return
	}

	h.conn = conn

	for {
		select {
		case <-s.quit:
			// Proxy quit, close the connection
			conn.Close()
			return
		default:
			break
		}

		err = conn.HandleCommand()
		if err != nil {
			log.Errorf("handle command error %s", err.Error())
			return
		}
	}
}
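The select at the top of the loop above is a non-blocking check of the quit channel: the default case falls through immediately, and the break only exits the select, not the for loop. A standalone sketch of the same pattern, assuming a quit channel that is closed on shutdown.

package main

import "fmt"

// shouldQuit reports whether the quit channel is closed (or has a value
// pending) without blocking.
func shouldQuit(quit <-chan struct{}) bool {
	select {
	case <-quit:
		return true
	default:
		return false
	}
}

func main() {
	quit := make(chan struct{})
	fmt.Println(shouldQuit(quit)) // false: nothing is ready yet
	close(quit)
	fmt.Println(shouldQuit(quit)) // true: a closed channel is always ready
}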
Code Example #18
// mount mounts the volume on the host.
func (u *flexVolumeUtil) mount(f *flexVolumeMounter, mntDevice, dir string) error {
	execPath := f.execPath

	var options string
	if f.options != nil {
		out, err := json.Marshal(f.options)
		if err != nil {
			glog.Errorf("Failed to marshal plugin options, error: %s", err.Error())
			return err
		}
		if len(out) != 0 {
			options = string(out)
		} else {
			options = ""
		}
	}

	// Execute the provider command.
	cmd := f.runner.Command(execPath, mountCmd, dir, mntDevice, options)
	output, err := cmd.CombinedOutput()
	if err != nil {
		glog.Errorf("Failed to mount volume %s, output: %s, error: %s", f.volName, output, err.Error())
		_, err := handleCmdResponse(mountCmd, output)
		return err
	}

	_, err = handleCmdResponse(mountCmd, output)
	if err != nil {
		return err
	}

	glog.Infof("Successfully mounted volume %s on dir: %s", f.volName, dir)
	return nil
}
Code Example #19
// addVolume is a callback from cache.Controller watching PersistentVolume
// events.
func (ctrl *PersistentVolumeController) addVolume(obj interface{}) {
	pv, ok := obj.(*v1.PersistentVolume)
	if !ok {
		glog.Errorf("expected PersistentVolume but handler received %#v", obj)
		return
	}

	if ctrl.upgradeVolumeFrom1_2(pv) {
		// volume deleted
		return
	}

	// Store the new volume version in the cache and do not process it if this
	// is an old version.
	new, err := ctrl.storeVolumeUpdate(pv)
	if err != nil {
		glog.Errorf("%v", err)
	}
	if !new {
		return
	}

	if err := ctrl.syncVolume(pv); err != nil {
		if errors.IsConflict(err) {
			// Version conflict error happens quite often and the controller
			// recovers from it easily.
			glog.V(3).Infof("PersistentVolumeController could not add volume %q: %+v", pv.Name, err)
		} else {
			glog.Errorf("PersistentVolumeController could not add volume %q: %+v", pv.Name, err)
		}
	}
}
Code Example #20
// When a pod is deleted, enqueue the controller that manages the pod and update its expectations.
// obj could be an *api.Pod, or a DeletedFinalStateUnknown marker item.
func (rm *ReplicationManager) deletePod(obj interface{}) {
	pod, ok := obj.(*api.Pod)

	// When a delete is dropped, the relist will notice a pod in the store not
	// in the list, leading to the insertion of a tombstone object which contains
	// the deleted key/value. Note that this value might be stale. If the pod
	// changed labels the new rc will not be woken up till the periodic resync.
	if !ok {
		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
		if !ok {
			glog.Errorf("Couldn't get object from tombstone %#v", obj)
			return
		}
		pod, ok = tombstone.Obj.(*api.Pod)
		if !ok {
			glog.Errorf("Tombstone contained object that is not a pod %#v", obj)
			return
		}
	}
	glog.V(4).Infof("Pod %s/%s deleted through %v, timestamp %+v, labels %+v.", pod.Namespace, pod.Name, utilruntime.GetCaller(), pod.DeletionTimestamp, pod.Labels)
	if rc := rm.getPodController(pod); rc != nil {
		rcKey, err := controller.KeyFunc(rc)
		if err != nil {
			glog.Errorf("Couldn't get key for replication controller %#v: %v", rc, err)
			return
		}
		rm.expectations.DeletionObserved(rcKey, controller.PodKey(pod))
		rm.enqueueController(rc)
	}
}
Code Example #21
File: controller_base.go  Project: roackb2/kubernetes
// updateVolume is a callback from framework.Controller watching PersistentVolume
// events.
func (ctrl *PersistentVolumeController) updateVolume(oldObj, newObj interface{}) {
	newVolume, ok := newObj.(*api.PersistentVolume)
	if !ok {
		glog.Errorf("Expected PersistentVolume but handler received %+v", newObj)
		return
	}

	if ctrl.upgradeVolumeFrom1_2(newVolume) {
		// volume deleted
		return
	}

	// Store the new volume version in the cache and do not process it if this
	// is an old version.
	new, err := storeObjectUpdate(ctrl.volumes.store, newObj, "volume")
	if err != nil {
		glog.Errorf("%v", err)
	}
	if !new {
		return
	}

	if err := ctrl.syncVolume(newVolume); err != nil {
		if errors.IsConflict(err) {
			// Version conflict error happens quite often and the controller
			// recovers from it easily.
			glog.V(3).Infof("PersistentVolumeController could not update volume %q: %+v", newVolume.Name, err)
		} else {
			glog.Errorf("PersistentVolumeController could not update volume %q: %+v", newVolume.Name, err)
		}
	}
}
Code Example #22
File: http_admin.go  Project: zhangjinde/bfs
func (h httpCompactVolumeHandler) ServeHTTP(wr http.ResponseWriter, r *http.Request) {
	var (
		err error
		vid int64
		res = map[string]interface{}{"ret": RetOK}
	)
	if r.Method != "POST" {
		http.Error(wr, "method not allowed", http.StatusMethodNotAllowed)
		return
	}
	defer HttpPostWriter(r, wr, time.Now(), res)
	if vid, err = strconv.ParseInt(r.FormValue("vid"), 10, 32); err != nil {
		log.Errorf("strconv.ParseInt(\"%s\") error(%v)", r.FormValue("vid"),
			err)
		res["ret"] = RetParamErr
		return
	}
	// Long-running processing; do not block here. Status can be checked via the info/stat API.
	go func() {
		if err = h.s.CompactVolume(int32(vid)); err != nil {
			log.Errorf("s.CompactVolume() error(%v)", err)
		}
	}()
	res["ret"] = RetOK
	return
}
Code Example #23
File: controller_base.go  Project: roackb2/kubernetes
// upgradeVolumeFrom1_2 updates PV from Kubernetes 1.2 to 1.3 and newer. In 1.2,
// we used template PersistentVolume instances for dynamic provisioning. In 1.3
// and later, these template (and not provisioned) instances must be removed to
// make the controller provision a new PV.
// It returns true if the volume was deleted.
// TODO: remove this function when upgrade from 1.2 becomes unsupported.
func (ctrl *PersistentVolumeController) upgradeVolumeFrom1_2(volume *api.PersistentVolume) bool {
	annValue, found := volume.Annotations[pvProvisioningRequiredAnnotationKey]
	if !found {
		// The volume is not template
		return false
	}
	if annValue == pvProvisioningCompletedAnnotationValue {
		// The volume is already fully provisioned. The new controller will
		// ignore this annotation and it will obey its ReclaimPolicy, which is
		// likely to delete the volume when appropriate claim is deleted.
		return false
	}
	glog.V(2).Infof("deleting unprovisioned template volume %q from Kubernetes 1.2.", volume.Name)
	err := ctrl.kubeClient.Core().PersistentVolumes().Delete(volume.Name, nil)
	if err != nil {
		glog.Errorf("cannot delete unprovisioned template volume %q: %v", volume.Name, err)
	}
	// Remove from local cache
	err = ctrl.volumes.store.Delete(volume)
	if err != nil {
		glog.Errorf("cannot remove volume %q from local cache: %v", volume.Name, err)
	}

	return true
}
Code Example #24
// A non-nil return signals that event processing should stop.
func (agent *ActionAgent) dispatchAction(actionPath, data string) error {
	agent.actionMutex.Lock()
	defer agent.actionMutex.Unlock()

	log.Infof("action dispatch %v", actionPath)
	actionNode, err := actionnode.ActionNodeFromJson(data, actionPath)
	if err != nil {
		log.Errorf("action decode failed: %v %v", actionPath, err)
		return nil
	}

	cmd := []string{
		agent.vtActionBinFile,
		"-action", actionNode.Action,
		"-action-node", actionPath,
		"-action-guid", actionNode.ActionGuid,
	}
	cmd = append(cmd, logutil.GetSubprocessFlags()...)
	cmd = append(cmd, topo.GetSubprocessFlags()...)
	cmd = append(cmd, dbconfigs.GetSubprocessFlags()...)
	cmd = append(cmd, mysqlctl.GetSubprocessFlags()...)
	log.Infof("action launch %v", cmd)
	vtActionCmd := exec.Command(cmd[0], cmd[1:]...)

	stdOut, vtActionErr := vtActionCmd.CombinedOutput()
	if vtActionErr != nil {
		log.Errorf("agent action failed: %v %v\n%s", actionPath, vtActionErr, stdOut)
		// If the action failed, preserve single execution path semantics.
		return vtActionErr
	}

	log.Infof("Agent action completed %v %s", actionPath, stdOut)
	agent.afterAction(actionPath, actionNode.Action == actionnode.TABLET_ACTION_APPLY_SCHEMA)
	return nil
}
Code Example #25
File: jwt.go  Project: TheRemoteLab/collector
// Provide implements DockerConfigProvider
func (j *jwtProvider) Provide() credentialprovider.DockerConfig {
	cfg := credentialprovider.DockerConfig{}

	ts := j.config.TokenSource(oauth2.NoContext)
	token, err := ts.Token()
	if err != nil {
		glog.Errorf("while exchanging json key %s for access token %v", *j.path, err)
		return cfg
	}
	if !token.Valid() {
		glog.Errorf("Got back invalid token: %v", token)
		return cfg
	}

	entry := credentialprovider.DockerConfigEntry{
		Username: "******",
		Password: token.AccessToken,
		Email:    j.config.Email,
	}

	// Add our entry for each of the supported container registry URLs
	for _, k := range containerRegistryUrls {
		cfg[k] = entry
	}
	return cfg
}
Code Example #26
File: pitchfork.go  Project: wtmmac/bfs
// watchStores gets all the store nodes and sets up the watcher in zookeeper.
func (p *Pitchfork) watchStores() (res []*meta.Store, ev <-chan zk.Event, err error) {
	var (
		rack, store   string
		racks, stores []string
		data          []byte
		storeMeta     *meta.Store
	)
	if racks, ev, err = p.zk.WatchRacks(); err != nil {
		log.Errorf("zk.WatchRacks() error(%v)", err)
		return
	}
	for _, rack = range racks {
		if stores, err = p.zk.Stores(rack); err != nil {
			return
		}
		for _, store = range stores {
			if data, err = p.zk.Store(rack, store); err != nil {
				return
			}
			storeMeta = new(meta.Store)
			if err = json.Unmarshal(data, storeMeta); err != nil {
				log.Errorf("json.Unmarshal() error(%v)", err)
				return
			}
			res = append(res, storeMeta)
		}
	}
	sort.Sort(meta.StoreList(res))
	return
}
Code Example #27
func (rm *ReplicationManager) watchControllers() {
	watching, err := rm.watchMaker()
	if err != nil {
		glog.Errorf("Unexpected failure to watch: %v", err)
		time.Sleep(5 * time.Second)
		return
	}

	for {
		select {
		case <-rm.syncTime:
			rm.synchronize()
		case event, open := <-watching.ResultChan():
			if !open {
				// watchChannel has been closed, or something else went
				// wrong with our etcd watch call. Let the util.Forever()
				// that called us call us again.
				return
			}
			glog.Infof("Got watch: %#v", event)
			if rc, ok := event.Object.(*api.ReplicationController); !ok {
				glog.Errorf("unexpected object: %#v", event.Object)
			} else {
				rm.syncHandler(*rc)
			}
		}
	}
}
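The comment inside the loop above relies on a caller that re-invokes watchControllers in a loop ("Let the util.Forever() that called us call us again"). A minimal sketch of such a wrapper; forever here is a stand-in written with the standard library, not the real util.Forever.

package waitutil

import "time"

// forever calls f repeatedly, sleeping for period between invocations, so a
// watch loop that returns (for example on a closed watch channel) is restarted.
func forever(f func(), period time.Duration) {
	for {
		f()
		time.Sleep(period)
	}
}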
Code Example #28
File: server.go  Project: asiainfoLDP/datafactory
// resolveTCPAddrs is a convenience wrapper for net.ResolveTCPAddr.
// resolveTCPAddrs returns a new set of url.URLs, in which all DNS hostnames
// are resolved.
func resolveTCPAddrs(urls [][]url.URL) ([][]url.URL, error) {
	newurls := make([][]url.URL, 0)
	for _, us := range urls {
		nus := make([]url.URL, len(us))
		for i, u := range us {
			nu, err := url.Parse(u.String())
			if err != nil {
				return nil, err
			}
			nus[i] = *nu
		}
		for i, u := range nus {
			host, _, err := net.SplitHostPort(u.Host)
			if err != nil {
				glog.Errorf("could not parse url %s during tcp resolving", u.Host)
				return nil, err
			}
			if host == "localhost" {
				continue
			}
			if net.ParseIP(host) != nil {
				continue
			}
			tcpAddr, err := net.ResolveTCPAddr("tcp", u.Host)
			if err != nil {
				glog.Errorf("could not resolve host %s", u.Host)
				return nil, err
			}
			glog.V(4).Infof("resolving %s to %s", u.Host, tcpAddr.String())
			nus[i].Host = tcpAddr.String()
		}
		newurls = append(newurls, nus)
	}
	return newurls, nil
}
Code Example #29
File: attacher.go  Project: kubernetes/kubernetes
func (attacher *gcePersistentDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
	volumesAttachedCheck := make(map[*volume.Spec]bool)
	volumePdNameMap := make(map[string]*volume.Spec)
	pdNameList := []string{}
	for _, spec := range specs {
		volumeSource, _, err := getVolumeSource(spec)
		// If an error occurred, skip this volume and move to the next one
		if err != nil {
			glog.Errorf("Error getting volume (%q) source: %v", spec.Name(), err)
			continue
		}
		pdNameList = append(pdNameList, volumeSource.PDName)
		volumesAttachedCheck[spec] = true
		volumePdNameMap[volumeSource.PDName] = spec
	}
	attachedResult, err := attacher.gceDisks.DisksAreAttached(pdNameList, nodeName)
	if err != nil {
		// Log error and continue with attach
		glog.Errorf(
			"Error checking if PDs (%v) are already attached to current node (%q). err=%v",
			pdNameList, nodeName, err)
		return volumesAttachedCheck, err
	}

	for pdName, attached := range attachedResult {
		if !attached {
			spec := volumePdNameMap[pdName]
			volumesAttachedCheck[spec] = false
			glog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached", pdName, spec.Name())
		}
	}
	return volumesAttachedCheck, nil
}
Code Example #30
File: master.go  Project: asim/kubernetes
func makeMinionRegistry(c *Config) minion.Registry {
	var minionRegistry minion.Registry
	if c.Cloud != nil && len(c.MinionRegexp) > 0 {
		var err error
		minionRegistry, err = minion.NewCloudRegistry(c.Cloud, c.MinionRegexp)
		if err != nil {
			glog.Errorf("Failed to initialize cloud minion registry, reverting to static registry (%#v)", err)
		}
	}
	if minionRegistry == nil {
		minionRegistry = minion.NewRegistry(c.Minions)
	}
	if c.HealthCheckMinions {
		minionRegistry = minion.NewHealthyRegistry(minionRegistry, &http.Client{})
	}
	if c.MinionCacheTTL > 0 {
		cachingMinionRegistry, err := minion.NewCachingRegistry(minionRegistry, c.MinionCacheTTL)
		if err != nil {
			glog.Errorf("Failed to initialize caching layer, ignoring cache.")
		} else {
			minionRegistry = cachingMinionRegistry
		}
	}
	return minionRegistry
}