Example #1
func (d *AgentServices) Fetch(clients *ClientSet, waitIndex uint64) (interface{}, *ResponseMetadata, error) {

	if waitIndex != 0 {
		log.Debugf("%s pretending to long-poll", d.Name())
		log.Debugf("sleeping %v...", sleeptime)
		time.Sleep(sleeptime)
	}

	consul, err := clients.Consul()
	if err != nil {
		return nil, nil, fmt.Errorf("agent services: error getting client: %s", err)
	}

	agent := consul.Agent()
	agentServices, err := agent.Services()
	if err != nil {
		return nil, nil, fmt.Errorf("agent services: error fetching: %s", err)
	}

	ts := time.Now().Unix()
	rm := &ResponseMetadata{
		LastIndex: uint64(ts),
		// LastContact: time.Duration(ts),
	}

	return agentServices, rm, nil
}
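
The Fetch signature above (shared by the DockerContainers example below) suggests a dependency contract roughly like the following. This is a minimal sketch inferred only from the examples on this page; the project's actual definitions of Dependency and ResponseMetadata may differ.

type ResponseMetadata struct {
	LastIndex uint64
	// LastContact time.Duration
}

// Dependency is assumed to be the interface that AgentServices and
// DockerContainers implement: a Fetch that may block, plus a Name used in logs.
type Dependency interface {
	Fetch(clients *ClientSet, waitIndex uint64) (interface{}, *ResponseMetadata, error)
	Name() string
}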
Example #2
File: view.go Project: ahjdzx/dis
func (v *View) poll(viewCh chan<- *View, errCh chan<- error) {

	for {
		doneCh, fetchErrCh, quitCh := make(chan struct{}, 1), make(chan error, 1), make(chan struct{}, 1)
		go v.fetch(doneCh, fetchErrCh, quitCh)

		select {
		case <-doneCh:
			log.Debugf("view %s received data", v.Dependency.Name())
			select {
			case <-v.stopCh:
			case viewCh <- v:
			}
		case err := <-fetchErrCh:
			log.Debugf("view %s %s", v.Dependency.Name(), err)

			// Push the error back up to the watcher
			select {
			case <-v.stopCh:
			case errCh <- err:
			}

			// Sleep and retry
			log.Debugf("view %s errored, retrying in %s", v.Dependency.Name(), defaultRetry)
			time.Sleep(defaultRetry)
			continue
		case <-v.stopCh:
			quitCh <- struct{}{}
			log.Debugf("view %s stopping poll (received on view stopCh)", v.Dependency.Name())
			return
		}
	}
}
Example #3
// Stop halts this watcher and any currently polling views immediately. If a
// view was in the middle of a poll, no data will be returned.
func (w *Watch) Stop() {
	w.Lock()
	defer w.Unlock()

	log.Debugf("watcher stopping all views")

	for _, view := range w.depViewMap {
		log.Debugf("watcher stopping %s\n", view.Dependency.Name())
		view.stop()
	}

	// Reset the map to have no views
	w.depViewMap = make(map[string]*View)
}
Example #4
// Remove removes the given dependency from the list and stops the
// associated View. If a View for the given dependency does not exist, this
// function will return false. If the View does exist, this function will return
// true upon successful deletion.
func (w *Watch) Remove(d Dependency) bool {
	w.Lock()
	defer w.Unlock()

	log.Debugf("watcher removing %s", d.Name())

	if view, ok := w.depViewMap[d.Name()]; ok {
		log.Debugf("watcher actually removing %s", d.Name())
		view.stop()
		delete(w.depViewMap, d.Name())
		return true
	}

	log.Warnf("watcher %s did not exist, skipping", d.Name())
	return false
}
Example #5
File: view.go Project: ahjdzx/dis
func (v *View) fetch(doneCh chan<- struct{}, errCh chan<- error, quitCh <-chan struct{}) {
	for {
		select {
		case <-quitCh:
			return
		default:
			{
				log.Debugf("%s fetching ", v.Dependency.Name())
				waitIndex := v.LastIndex
				data, rm, err := v.Dependency.Fetch(v.Clients, waitIndex)
				if err != nil {
					errCh <- err
					return
				}

				if rm == nil {
					errCh <- fmt.Errorf("internal error: response metadata is nil")
					return
				}

				if rm.LastIndex == v.LastIndex {
					log.Debugf("view %s no new data (index was the same)", v.Dependency.Name())
					continue
				}

				if rm.LastIndex < v.LastIndex {
					log.Debugf("view %s had a lower index, resetting", v.Dependency.Name())
					v.LastIndex = 0
					continue
				}

				v.LastIndex = rm.LastIndex

				if v.ReceivedData && reflect.DeepEqual(data, v.Data) {
					log.Debugf("view %s no new data (contents were the same)", v.Dependency.Name())
					continue
				}

				v.Data = data
				v.ReceivedData = true
				close(doneCh)
				return
			}
		}
	}
}
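
The poll and fetch loops above rely on a handful of View fields. A minimal sketch of the struct they imply, inferred from usage here rather than taken from view.go:

type View struct {
	Dependency Dependency // what to fetch, and the name used in log messages
	Clients    *ClientSet // API clients passed through to Fetch

	Data         interface{} // last payload returned by Fetch
	ReceivedData bool        // set once at least one payload has been stored
	LastIndex    uint64      // last index seen, passed back as waitIndex

	stopCh chan struct{} // signalled by stop() so poll and fetch can exit
}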
Example #6
func Run() {
	Watcher.Add(&DockerContainers{})
	for {
		select {
		case data := <-Watcher.DataCh:
			{
				view = data

				log.Debugf("name: %s, data: %v", data.Dependency.Name(), data.Data)
				switch data.Dependency.Name() {
				case "docker_containers":
					{
						containers := data.Data.([]*Container)
						localComps := LocalComponents()
						log.Infoln("localComps = ", localComps)
						for _, comp := range localComps {
							switch comp.Name {
							case "dis-collectd":
								{
									err := rewriteConfigForCollectd(comp, containers)
									if err != nil {
										log.Errorln(err)
										continue
									}
									out, err := utils.ExecCommand(false, comp.DeployFilepath, "reload")
									if err != nil {
										log.Errorf("Execute command '%s reload' error: %s", comp.DeployFilepath, err)
										continue
									}
									log.Debugln(out)
								}
							case "logstash-forwarder":
								{
									err := rewriteConfigForLogstashForwarder(comp, containers)
									if err != nil {
										log.Errorln(err)
										continue
									}
									out, err := utils.ExecCommand(false, comp.DeployFilepath, "restart")
									if err != nil {
										log.Errorf("Execute command '%s restart' error: %s", comp.DeployFilepath, err.Error())
										continue
									}
									log.Debugln(out)
								}
							}
						}
					}
				}
			}
		case err := <-Watcher.ErrCh:
			log.Errorln(err)
		}
	}
}
Example #7
func (d *DockerContainers) Fetch(clients *ClientSet, waitIndex uint64) (interface{}, *ResponseMetadata, error) {

	if waitIndex != 0 {
		log.Debugf("%s pretending to long-poll", d.Name())
		log.Debugf("sleeping %v...", sleeptime)
		time.Sleep(sleeptime)
	}

	docker, err := clients.Docker()
	if err != nil {
		return nil, nil, fmt.Errorf("docker containers: error getting client: %s", err)
	}

	opts := dockerapi.ListContainersOptions{}

	apiContainers, err := docker.ListContainers(opts)
	if err != nil {
		return nil, nil, fmt.Errorf("docker containers: error list containers: %s", err)
	}
	containers := make([]*Container, len(apiContainers))
	for i, ctn := range apiContainers {
		containers[i] = &Container{
			ID:      ctn.ID,
			Image:   ctn.Image,
			Created: ctn.Created,
			Names:   ctn.Names,
		}
	}

	ts := time.Now().Unix()
	rm := &ResponseMetadata{
		LastIndex: uint64(ts),
		// LastContact: time.Duration(ts),
	}

	return containers, rm, nil

}
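
The Container assembled here (and type-asserted in the Run and heartbeat examples) appears to be a thin projection of the Docker API container listing. A sketch of the assumed field set:

type Container struct {
	ID      string   // container ID as reported by the Docker API
	Image   string   // image the container was created from
	Created int64    // creation time as a Unix timestamp
	Names   []string // names assigned to the container
}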
Example #8
func (w *Watch) Add(d Dependency) (bool, error) {
	w.Lock()
	defer w.Unlock()

	log.Debugf("%s is adding.", d.Name())

	if _, ok := w.depViewMap[d.Name()]; ok {
		log.Warnf("watcher %s already exists, skipping", d.Name())
		return false, nil
	}

	v, err := NewView(d)
	if err != nil {
		return false, err
	}
	v.Clients = w.clientSet

	log.Debugf("watcher %s starting", d.Name())

	w.depViewMap[d.Name()] = v
	go v.poll(w.DataCh, w.ErrCh)

	return true, nil
}
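
Taken together, Stop, Remove, and Add imply a Watch shaped roughly like this. The embedded mutex and the channel element types are assumptions based on how the methods use them (w.Lock/w.Unlock, the poll signature, and the reads from DataCh in Run):

type Watch struct {
	sync.Mutex // guards depViewMap

	DataCh chan *View // views publish themselves here when new data arrives
	ErrCh  chan error // fetch errors are pushed back up here

	clientSet  *ClientSet       // shared clients handed to every new view
	depViewMap map[string]*View // active views keyed by Dependency.Name()
}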
Example #9
File: main.go Project: ahjdzx/dis
func main() {
	flag.Parse()

	if *versionFlag {
		fmt.Println(VERSION)
		os.Exit(0)
	}

	if *logstashFlag {
		log.ChangeToLogstashFormater(APP_NAME)
	}

	log.SetLogFile(LOG_FILE)

	err := config.LoadConfig(*cfgFileFlag)
	if err != nil {
		log.Fatalln(err)
	}

	pid := os.Getpid()
	// Save the pid into the pid file.
	err = utils.WriteFile(PID_FILE, []byte(strconv.Itoa(pid)), 0644)
	if err != nil {
		log.Fatalln(err)
	}
	defer os.Remove(PID_FILE)

	go api.Start()

	// Catch termination signals. The channel must be buffered so signal.Notify
	// never drops a signal delivered before the receive below.
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM, os.Kill)
	// Block until a signal is received.
	s := <-sigCh

	log.Debugf("Caught Signal: %s, shutting down...", s)
}
Example #10
// Update or create
func heartbeat() {
	log.Debugln("heartbeat......")
	log.Debugln("stackDir is ", stackDir)

	localComps := LocalComponents()

	hbtRequest, err := buildHeartbeatRequest()
	if err != nil {
		log.Errorln(err)
		return
	}
	hbtRequest.RemoteComponents = localComps

	log.Infoln("request: ", hbtRequest)
	// http request to server heartbeat url.
	if config.AppConfig().Server == "" {
		log.Errorln("configuration server is blank.")
		return
	}
	heartbeatURL := fmt.Sprintf("http://%s/heartbeat", config.AppConfig().Server)
	hbtResponse, err := requestHeartbeat(heartbeatURL, hbtRequest)
	if err != nil {
		log.Errorln(err)
		return
	}
	log.Infoln("response: ", hbtResponse)

	if hbtResponse != nil && hbtResponse.Components != nil {
		log.Infoln("response comps : ", hbtResponse.Components)
		for _, newComp := range hbtResponse.Components {
			newComp.InitAttrs(stackDir)
			log.Debugf("newComp: %s", newComp)

			// 1. Create the version directory if it does not exist yet: stack_dir/comp_name/version_number/
			if !utils.IsExist(newComp.VersionDir) {
				_, err := utils.ExecCommand(false, "mkdir", "-p", newComp.VersionDir)
				if err != nil {
					log.Errorln(err)
					continue
				}
			}

			// 2. Download new version component.
			downURL := fmt.Sprintf("http://%s/%s", config.AppConfig().Server, newComp.TarballFilename)
			err = downloadFromURL(newComp.TarballFilepath, downURL)
			if err != nil {
				log.Errorln(err)
				continue
			}
			// 3. Uncompress the new component file.
			output, err := utils.ExecCommand(false, "tar", "-zxf", newComp.TarballFilepath, "-C", newComp.VersionDir)
			if err != nil {
				log.Errorln(err)
				continue
			}
			log.Debugf("Untar file: %s , output: %s", newComp.TarballFilepath, output)

			if newComp.Cmd == "start" {
				if view != nil {
					containers := view.Data.([]*Container)
					switch newComp.Name {
					case "dis-collectd":
						{
							err := rewriteConfigForCollectd(newComp, containers)
							if err != nil {
								log.Errorln("rewriteConfigForCollectd error: ", err)
								continue
							}
						}
					case "logstash-forwarder":
						{
							err := rewriteConfigForLogstashForwarder(newComp, containers)
							if err != nil {
								log.Errorln("rewriteConfigForLogstashForwarder error: ", err)
								continue
							}
						}
					}
				}

				log.Debugln("new component DeployFilepath = ", newComp.DeployFilepath)
				output, err = utils.ExecCommand(false, newComp.DeployFilepath, "start")
				if err != nil {
					log.Errorln(err)
					continue
				}
				log.Debugf("Execute command: %s %s, output: %s\n", newComp.DeployFilepath, newComp.Cmd, output)

				// If the component directory already existed, shut down the old version of the component.
				if utils.IsExist(path.Join(newComp.Directory, ".version")) {
					oldVersion_b, err := utils.ReadFile(path.Join(newComp.Directory, ".version"))
					if err != nil {
						log.Errorln(err)
						continue
					}
					oldDeployFilePath := path.Join(newComp.Directory, strings.TrimSpace(string(oldVersion_b)), "deploy")
					_, err = utils.ExecCommand(false, oldDeployFilePath, "stop")
					if err != nil {
						log.Errorln(err)
						continue
					}
				}

				// Write the version number to .version in the parent directory.
				log.Debugln("write version number file,  ", newComp.Directory)
				err = utils.WriteFile(path.Join(newComp.Directory, ".version"), []byte(newComp.Version), 0644)
				if err != nil {
					log.Errorln(err)
					continue
				}

				// Register this component to consul.
				err = common.ConsulRegister(newComp.Name, newComp.DeployFilepath)
				if err != nil {
					log.Errorln(err)
					continue
				}
			} else {
				log.Debugln("new component DeployFilepath = ", newComp.DeployFilepath)
				output, err = utils.ExecCommand(false, newComp.DeployFilepath, newComp.Cmd)
				if err != nil {
					log.Errorln(err)
					continue
				}
				log.Debugln("Execute command: %s %s, output: %s\n", newComp.DeployFilepath, newComp.Cmd, output)
			}
		}
	}
}
Example #11
func transfer(sshConfig *sshlib.SSHConfig) (succeedFiles, failedFiles []string, err error) {
	client, err := sshlib.NewSSHClient(sshConfig)
	if err != nil {
		return
	}
	defer client.Close()

	scp := sshlib.NewScp(client)

	localTarballDir := config.AppConfig().LocalTarballDir
	for _, comp := range config.AppConfig().Components {
		comp.InitAttrs(comp.TarballDir)

		_, err := sshlib.RunCommand(client, "ls "+comp.VersionDir)
		if err != nil { // comp.VersionDir does not exist on the remote host.
			// Create a new directory to hold the tarball file.
			err = sshlib.Mkdir(client, comp.VersionDir)
			if err != nil {
				log.Errorf("Mkdir %s error: %s", comp.VersionDir, err.Error())
				failedFiles = append(failedFiles, comp.TarballFilename)
				continue
			}
		}

		localTarballPath := path.Join(localTarballDir, comp.TarballFilename)
		remoteFilePath := comp.TarballFilepath

		_, err = sshlib.RunCommand(client, "ls "+remoteFilePath)
		if err != nil { // remoteFilePath does not exist.
			err = scp.PushFile(localTarballPath, remoteFilePath)
			if err != nil {
				log.Errorf("Push file: %s --> %s, error: %s", localTarballPath, remoteFilePath, err.Error())
				failedFiles = append(failedFiles, comp.TarballFilename)
				continue
			}
			log.Debug("push file ", localTarbalPath, " successfully.")
			succeedFiles = append(succeedFiles, comp.TarballFilename)

			// Unpack the tarball or zip file.
			_, err = sshlib.RunCommand(client, "tar -zxf "+remoteFilePath+" -C "+comp.VersionDir)
			if err != nil {
				log.Errorf("Unpack file %s error: %s", remoteFilePath, err.Error())
				continue
			}

			// If the component name is AGENT_NAME, execute the cmd option over SSH.
			if comp.Name == model.AGENT_NAME {
				var output string
				switch comp.Cmd {
				case "start":
					startCmd := fmt.Sprintf("cd %s && ./deploy start", comp.VersionDir)
					output, err = sshlib.RunCommand(client, startCmd)
					if err != nil {
						log.Errorf("Command: %s, Error: %s", comp.Cmd, err.Error())
						continue
					}
					if output != "" {
						log.Debugf("Command: %s, Output: %s", comp.Cmd, string(output))
					}
				case "status": // TODO
					statusCmd := fmt.Sprintf("cd %s && ./deploy status", comp.VersionDir)
					output, err = sshlib.RunCommand(client, statusCmd)
					if err != nil {
						log.Errorf("Command: %s, Error: %s", comp.Cmd, err.Error())
						continue
					}
					if output != "" {
						log.Debugf("Command: %s, Output: %s", comp.Cmd, string(output))
					}
				}

			}
		}

	}

	return
}