Example #1
func processImage(tempDir string, inFile string, args *ProcessArgs) (string, error) {
	outFile := filepath.Join(tempDir, "out")
	cmdArgs, outFileWithFormat := args.CommandArgs(inFile, outFile)

	grohl.Log(grohl.Data{
		"processor": "imagick",
		"args":      cmdArgs,
	})

	executable := "convert"
	cmd := exec.Command(executable, cmdArgs...)
	var outErr bytes.Buffer
	cmd.Stdout, cmd.Stderr = &outErr, &outErr
	err := runWithTimeout(cmd, normalTimeout)
	if err != nil {
		grohl.Log(grohl.Data{
			"processor": "imagick",
			"step":      "convert",
			"failure":   err,
			"args":      cmdArgs,
			"output":    string(outErr.Bytes()),
		})
	}

	return outFileWithFormat, err
}
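The runWithTimeout helper used here (and in several later examples) is not part of the listing. A minimal sketch matching the signature used in this example (command plus timeout, returning only an error), assuming it only needs to start the command and kill the process once the timeout elapses; it uses os/exec, time and fmt, and the real implementation may differ:

// runWithTimeout runs cmd and waits for it to finish, killing the process if
// it is still running after timeout. Hypothetical sketch, not from the source.
func runWithTimeout(cmd *exec.Cmd, timeout time.Duration) error {
	if err := cmd.Start(); err != nil {
		return err
	}
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()
	select {
	case err := <-done:
		return err
	case <-time.After(timeout):
		cmd.Process.Kill()
		return fmt.Errorf("command timed out after %s", timeout)
	}
}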
Example #2
func (r Html) Apply(req *r.Request, resp *r.Response) {
	splits := strings.Split(req.URL.String(), "/")

	processArgs := models.NewProcessArgs(splits[1:])

	processor := &models.IMagick{}

	resp.Out.Header().Set("Cache-Control", "public, max-age=864000")

	err := processor.Process(resp.Out, req.Request, processArgs)
	if err != nil {
		grohl.Log(grohl.Data{
			"error": err.Error(),
			"parts": splits,
		})
		panic("processing failed")
		return
	}

	grohl.Log(grohl.Data{
		"action":  "process",
		"args":    processArgs,
		"headers": req.Header,
	})
}
Example #3
func postProcessImage(tempDir string, inFile string, args *ProcessArgs) (string, error) {
	// If "mp4" was originally requested but earlier processing changed the
	// format to "gif", convert the result back to an mp4 video.
	grohl.Log(grohl.Data{"args": args})
	if args.RequestFormat == "mp4" && args.Format == "gif" {
		outFile := filepath.Join(tempDir, "video.mp4")
		cmdArgs := []string{"-f", "gif", "-i", inFile, outFile}

		grohl.Log(grohl.Data{
			"processor": "ffmpeg",
			"args":      cmdArgs,
		})

		cmd := exec.Command("ffmpeg", cmdArgs...)
		var outErr bytes.Buffer
		cmd.Stdout, cmd.Stderr = &outErr, &outErr
		err := runWithTimeout(cmd, normalTimeout)
		if err != nil {
			grohl.Log(grohl.Data{
				"processor": "ffmpeg",
				"step":      "post-process-mp4",
				"failure":   err,
				"args":      cmdArgs,
				"output":    string(outErr.Bytes()),
			})
		}

		return outFile, err
	}

	return inFile, nil
}
Example #4
// TODO: Pass through requests without an account subdomain
func (c *ImagesController) Get(w http.ResponseWriter, r *http.Request) {
	subdomain := strings.Split(r.Host, ".")[0]
	models.CreateImageRequestForSubdomain(subdomain, r.RequestURI)

	vars := mux.Vars(r)

	url := "http" + vars["path"]
	args := strings.Split(vars["args"], "/")
	processArgs := models.NewProcessArgs(args, url)

	processor := &models.IMagick{}

	w.Header().Set("Cache-Control", "public, max-age=864000")

	err := processor.Process(w, r, processArgs)
	if err != nil {
		grohl.Log(grohl.Data{
			"error": err.Error(),
			"parts": args,
			"url":   url,
		})
		panic("processing failed")
	}

	grohl.Log(grohl.Data{
		"action":  "process",
		"args":    processArgs,
		"headers": r.Header,
	})
}
Example #5
func syncScheduler(scheduler sampiSyncClient, events <-chan host.Event) {
	for event := range events {
		if event.Event != "stop" {
			continue
		}
		grohl.Log(grohl.Data{"fn": "scheduler_event", "at": "remove_job", "job.id": event.JobID})
		if err := scheduler.RemoveJobs([]string{event.JobID}); err != nil {
			grohl.Log(grohl.Data{"fn": "scheduler_event", "at": "remove_job", "status": "error", "err": err, "job.id": event.JobID})
		}
	}
}
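The sampiSyncClient type accepted by syncScheduler is not shown in the listing. Judging only from the single call above, a minimal interface would look like this (inferred, not copied from the original package):

// sampiSyncClient is the minimal interface syncScheduler needs; inferred from
// the RemoveJobs call above.
type sampiSyncClient interface {
	RemoveJobs(jobIDs []string) error
}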
Example #6
func TrackReadReceipt(dbmap *gorp.DbMap, account *Account, key, reader string) error {
	aid, err := FindArticleIdByKey(dbmap, account.Id, key)
	if err != nil {
		return err
	}

	if aid == 0 {
		aid, err = UpsertArticle(dbmap, account.Id, key)
		if err != nil {
			return err
		}
	}

	rid, err := InsertReader(dbmap, account.Id, reader)
	if err != nil {
		return err
	}

	_, err = UpsertReadReceipt(dbmap, aid, rid)
	if err != nil {
		return err
	}

	grohl.Log(grohl.Data{
		"account": account.Id,
		"reader":  reader,
		"article": key,
		"track":   "read",
	})

	return nil
}
Example #7
func downloadRemote(tempDir string, _ string, args *ProcessArgs) (string, error) {
	url := args.Url
	inFile := filepath.Join(tempDir, "in")

	grohl.Log(grohl.Data{
		"processor": "imagick",
		"download":  url,
		"local":     inFile,
	})

	out, err := os.Create(inFile)
	if err != nil {
		return inFile, err
	}
	defer out.Close()

	resp, err := http.Get(url)
	if err != nil {
		return inFile, err
	}
	defer resp.Body.Close()

	_, err = io.Copy(out, resp.Body)

	return inFile, err
}
Example #8
// readPump pumps messages from the websocket connection to the hub.
func (c *wsconn) readPump() {
	defer func() {
		hub.unregister <- c
		c.ws.Close()
	}()

	c.ws.SetReadLimit(maxMessageSize)
	c.ws.SetReadDeadline(time.Now().Add(pongWait))
	c.ws.SetPongHandler(func(string) error { c.ws.SetReadDeadline(time.Now().Add(pongWait)); return nil })
	for {
		_, message, err := c.ws.ReadMessage()
		if err != nil {
			break
		}

		var j map[string]interface{}
		if err := json.Unmarshal(message, &j); err != nil {
			grohl.Log(grohl.Data{
				"ws":    "json parse error",
				"error": err.Error(),
			})
		} else {
			c.processMessage(j)
		}
	}
}
Example #9
// Process a remote asset url with ImageMagick's convert using the args supplied
// and write the response to w
func (p *IMagick) Process(w http.ResponseWriter, r *http.Request, args *ProcessArgs) (err error) {
	executable := "convert"

	tempDir, err := ioutil.TempDir("", "_firesize")
	if err != nil {
		return
	}
	// defer os.RemoveAll(tempDir)
	inFile := filepath.Join(tempDir, "in")
	outFile := filepath.Join(tempDir, "out")

	grohl.Log(grohl.Data{
		"processor": "imagick",
		"download":  args.Url,
		"local":     inFile,
	})

	if err = downloadRemote(args.Url, inFile); err != nil {
		return
	}

	cmdArgs, outFileWithFormat := args.CommandArgs(inFile, outFile)

	grohl.Log(grohl.Data{
		"processor": "imagick",
		"args":      cmdArgs,
	})

	cmd := exec.Command(executable, cmdArgs...)
	outErr, err := runWithTimeout(cmd, 60*time.Second)
	if err != nil {
		grohl.Log(grohl.Data{
			"processor": "imagick",
			"failure":   err,
			"args":      cmdArgs,
			"output":    string(outErr),
		})
	}
	http.ServeFile(w, r, outFileWithFormat)
	return
}
Example #10
func isAnimatedGif(inFile string) bool {
	// identify -format %n updates-product-click.gif # => 105
	cmd := exec.Command("identify", "-format", "%n", inFile)
	var stdout, stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	err := runWithTimeout(cmd, 10*time.Second)
	if err != nil {
		output := string(stderr.Bytes())
		grohl.Log(grohl.Data{
			"processor": "imagick",
			"step":      "identify",
			"failure":   err,
			"output":    output,
		})
	} else {
		output := string(stdout.Bytes())
		output = strings.TrimSpace(output)
		numFrames, err := strconv.Atoi(output)
		if err != nil {
			grohl.Log(grohl.Data{
				"processor": "imagick",
				"step":      "identify",
				"failure":   err,
				"output":    output,
				"message":   "non numeric identify output",
			})
		} else {
			grohl.Log(grohl.Data{
				"processor":  "imagick",
				"step":       "identify",
				"num-frames": numFrames,
			})
			return numFrames > 1
		}
	}
	// if anything goes wrong, assume the image is not animated
	return false
}
Example #11
func ReqLogger() martini.Handler {
	return func(res http.ResponseWriter, req *http.Request, c martini.Context, log *log.Logger) {
		start := time.Now().UTC()
		rw := res.(martini.ResponseWriter)
		c.Next()

		grohl.Log(grohl.Data{
			"method":   req.Method,
			"path":     req.URL.Path,
			"status":   rw.Status(),
			"duration": time.Since(start).Seconds(),
		})
	}
}
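Since ReqLogger returns a plain martini.Handler, wiring it into an application is a one-liner. A hypothetical setup (the route and handler are placeholders; martini.Classic already injects the *log.Logger the handler asks for and wraps the response writer in a martini.ResponseWriter):

func main() {
	m := martini.Classic()
	m.Use(ReqLogger()) // log method, path, status and duration of every request
	m.Get("/ping", func() string { return "pong" })
	m.Run()
}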
Example #12
func PostArticles(client *gokiq.ClientConfig, req *http.Request, w http.ResponseWriter, account *Account) (string, int) {
	decoder := json.NewDecoder(req.Body)
	var p ArticleParams
	err := decoder.Decode(&p)
	if err != nil {
		panic(err)
	}

	cid, err := UpsertArticle(dbmap, account.Id, p.Key)
	if _, ok := err.(*pq.Error); ok {
		if !strings.Contains(err.Error(), `duplicate key value violates unique constraint "articles_key_key"`) {
			panic(err)
		}
	}

	grohl.Log(grohl.Data{
		"account":  account.Id,
		"register": p.Key,
		"readers":  p.Recipients,
	})

	rids, err := AddArticleReaders(dbmap, account.Id, cid, p.Recipients)
	for _, callback := range p.Callbacks {
		at := time.Unix(callback.At, 0)

		if callback.Recipients != nil {
			rids, err = AddArticleReaders(dbmap, account.Id, cid, callback.Recipients)
			if err != nil {
				panic(err)
			}
		}
		ScheduleCallbacks(client, rids, at, callback.Url)
	}

	ci, err := FindArticleWithReadReceipts(dbmap, cid)

	body, err := json.Marshal(map[string]interface{}{
		"article": ci,
	})
	if err != nil {
		panic(err)
	}

	go func() {
		hub.broadcast <- &Broadcast{rids, body}
	}()

	w.Header().Set("Content-Type", "application/json")
	return string(body), http.StatusCreated
}
Example #13
func (a *Account) SendNewAccountEmail(client *gokiq.ClientConfig) error {
	err := client.QueueJob(&NewAccountEmailJob{
		AccountId: a.Id,
	})
	if err != nil {
		return err
	}

	grohl.Log(grohl.Data{
		"queue":   "NewAccountEmailJob",
		"account": a.Id,
	})

	return nil
}
Example #14
func (c *wsconn) processMessage(m map[string]interface{}) {
	fmt.Println("msg", m)
	switch {
	case m["subscribe"] != "":
		reader, err := FindReaderByAccountIdDistinctId(c.aid, m["subscribe"].(string))
		if err != nil {
			grohl.Log(grohl.Data{
				"ws":    "FindReader",
				"error": err.Error(),
			})
			return
		}
		hub.subscribe <- &Channel{Conn: c, Id: reader.Id}
		fmt.Println("subscribe", reader.Id)
	}
}
Example #15
func ScheduleCallbacks(client *gokiq.ClientConfig, readerIds []int64, at time.Time, url string) error {
	conn := client.RedisPool.Get()
	defer conn.Close()

	config := gokiq.JobConfig{
		At: at,
	}

	// Don't queue up job within 6 seconds of an existing job
	minScore := at.Add(-3 * time.Second).Unix()
	maxScore := at.Add(3 * time.Second).Unix()

	jobs, err := redis.Strings(conn.Do("ZRANGEBYSCORE", client.RedisNamespace+":schedule", minScore, maxScore))
	if err != nil {
		return err
	}

	userJobs := make(map[int64]int64)
	for _, job := range jobs {
		var entry UserCallbackJobEntry

		if err = json.Unmarshal([]byte(job), &entry); err != nil {
			return err
		}

		userJobs[entry.Args.ReaderId] += 1
	}

	for _, rid := range readerIds {
		if userJobs[rid] > 0 {
			continue
		}

		err = client.QueueJobConfig(&UserCallbackJob{
			Url:      url,
			ReaderId: rid,
		}, config)

		if err != nil {
			return err
		}

		grohl.Log(grohl.Data{
			"schedule_callback": at,
			"url":               url,
			"reader":            rid,
		})
	}

	return nil
}
Example #16
func coalesceAnimatedGif(tempDir string, inFile string) (string, error) {
	outFile := filepath.Join(tempDir, "temp")

	// convert do.gif -coalesce temporary.gif
	cmd := exec.Command("convert", inFile, "-coalesce", outFile)
	var outErr bytes.Buffer
	cmd.Stdout, cmd.Stderr = &outErr, &outErr

	err := runWithTimeout(cmd, 60*time.Second)
	if err != nil {
		grohl.Log(grohl.Data{
			"processor": "imagick",
			"step":      "coalesce",
			"failure":   err,
			"output":    string(outErr.Bytes()),
		})
	}

	return outFile, err
}
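coalesceAnimatedGif is normally only worth running when isAnimatedGif (Example #10) reports more than one frame. A hypothetical wrapper combining the two, reusing the (tempDir, inFile) step shape of the other helpers; the name preProcessImage is illustrative, not taken from the source:

// preProcessImage coalesces the frames of animated GIFs so later resize
// operations act on full frames; other inputs pass through untouched.
func preProcessImage(tempDir string, inFile string) (string, error) {
	if !isAnimatedGif(inFile) {
		return inFile, nil
	}
	return coalesceAnimatedGif(tempDir, inFile)
}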
Example #17
func (j *UserCallbackJob) Perform() error {
	articles, err := UnreadArticles(dbmap, j.ReaderId)
	if err != nil {
		return err
	}

	distinctId, err := dbmap.SelectStr("select distinct_id from readers where id = $1;", j.ReaderId)
	if err != nil {
		return err
	}

	keys := make([]string, 0)
	for _, a := range articles {
		keys = append(keys, a.Key)
	}

	callback := UserCallback{
		User:    distinctId,
		Pending: keys,
	}

	var buf bytes.Buffer
	enc := json.NewEncoder(&buf)
	err = enc.Encode(&callback)
	if err != nil {
		panic(err)
	}

	resp, err := http.Post(j.Url, "application/json", &buf)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	grohl.Log(grohl.Data{
		"callback": j.Url,
		"reader":   j.ReaderId,
		"expected": keys,
		"status":   resp.Status,
	})

	return nil
}
Example #18
// Reads chunks from available file readers, putting together ready 'chunks'
// that can be sent to clients.
func (s *Supervisor) populateReadyChunks() {
	logger := grohl.NewContext(grohl.Data{"ns": "Supervisor", "fn": "populateReadyChunks"})

	backoff := &ExponentialBackoff{Minimum: 50 * time.Millisecond, Maximum: 5000 * time.Millisecond}
	for {
		available, locked := s.readerPool.Counts()
		GlobalStatistics.UpdateFileReaderPoolStatistics(available, locked)

		currentChunk := &readyChunk{
			Chunk:         make([]*FileData, 0),
			LockedReaders: make([]*FileReader, 0),
		}

		for len(currentChunk.Chunk) < s.SpoolSize {
			if reader := s.readerPool.LockNext(); reader != nil {
				select {
				case <-s.stopRequest:
					return
				case chunk := <-reader.C:
					if chunk != nil {
						currentChunk.Chunk = append(currentChunk.Chunk, chunk...)
						currentChunk.LockedReaders = append(currentChunk.LockedReaders, reader)

						if len(chunk) > 0 {
							if hwm := chunk[len(chunk)-1].HighWaterMark; hwm != nil {
								GlobalStatistics.SetFilePosition(hwm.FilePath, hwm.Position)
							}
						}
					} else {
						// The reader hit EOF or another error. Remove it and it'll get
						// picked up by populateReaderPool again if it still needs to be
						// read.
						logger.Log(grohl.Data{"status": "EOF", "file": reader.FilePath()})

						s.readerPool.Remove(reader)
						GlobalStatistics.DeleteFileStatistics(reader.FilePath())
					}
				default:
					// The reader didn't have anything queued up for us. Unlock the
					// reader and move on.
					s.readerPool.Unlock(reader)
				}
			} else {
				// If there are no more readers, send the chunk ASAP so we can get
				// the next chunk in line
				logger.Log(grohl.Data{"msg": "no readers available", "resolution": "sending current chunk"})
				break
			}
		}

		if len(currentChunk.Chunk) > 0 {
			select {
			case <-s.stopRequest:
				return
			case s.readyChunks <- currentChunk:
				backoff.Reset()
			}
		} else {
			select {
			case <-s.stopRequest:
				return
			case <-time.After(backoff.Next()):
				grohl.Log(grohl.Data{"msg": "no lines available to send", "resolution": "backing off"})
			}
		}
	}
}
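ExponentialBackoff is used above but not defined in the listing. A minimal sketch, assuming Next doubles the delay from Minimum up to Maximum and Reset drops it back to the start; the field and method names come from the calls above, the internals are guessed:

// ExponentialBackoff yields an increasing delay between retries.
type ExponentialBackoff struct {
	Minimum time.Duration
	Maximum time.Duration
	current time.Duration
}

// Next returns the next delay, doubling it on each call up to Maximum.
func (b *ExponentialBackoff) Next() time.Duration {
	if b.current < b.Minimum {
		b.current = b.Minimum
	} else {
		b.current *= 2
		if b.current > b.Maximum {
			b.current = b.Maximum
		}
	}
	return b.current
}

// Reset puts the backoff back at the start, so the next delay is Minimum.
func (b *ExponentialBackoff) Reset() {
	b.current = 0
}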
Example #19
func main() {
	hostname, _ := os.Hostname()
	externalAddr := flag.String("external", "", "external IP of host")
	bindAddr := flag.String("bind", "", "bind containers to this IP")
	configFile := flag.String("config", "", "configuration file")
	manifestFile := flag.String("manifest", "/etc/flynn-host.json", "manifest file")
	hostID := flag.String("id", hostname, "host id")
	force := flag.Bool("force", false, "kill all containers booted by flynn-host before starting")
	attributes := make(AttributeFlag)
	flag.Var(&attributes, "attribute", "key=value pair to add as an attribute")
	flag.Parse()
	grohl.AddContext("app", "lorne")
	grohl.Log(grohl.Data{"at": "start"})
	g := grohl.NewContext(grohl.Data{"fn": "main"})

	dockerc, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}

	if *force {
		if err := killExistingContainers(dockerc); err != nil {
			os.Exit(1)
		}
	}

	state := NewState()
	ports := make(chan int)

	go allocatePorts(ports, 55000, 65535)
	go serveHTTP(&Host{state: state, docker: dockerc}, &attachHandler{state: state, docker: dockerc})
	go streamEvents(dockerc, state)

	processor := &jobProcessor{
		externalAddr: *externalAddr,
		bindAddr:     *bindAddr,
		docker:       dockerc,
		state:        state,
		discoverd:    os.Getenv("DISCOVERD"),
	}

	runner := &manifestRunner{
		env:        parseEnviron(),
		externalIP: *externalAddr,
		ports:      ports,
		processor:  processor,
		docker:     dockerc,
	}

	var disc *discoverd.Client
	if *manifestFile != "" {
		var r io.Reader
		var f *os.File
		if *manifestFile == "-" {
			r = os.Stdin
		} else {
			f, err = os.Open(*manifestFile)
			if err != nil {
				log.Fatal(err)
			}
			r = f
		}
		services, err := runner.runManifest(r)
		if err != nil {
			log.Fatal(err)
		}
		if f != nil {
			f.Close()
		}

		if d, ok := services["discoverd"]; ok {
			processor.discoverd = fmt.Sprintf("%s:%d", d.InternalIP, d.TCPPorts[0])
			err = Attempts.Run(func() (err error) {
				disc, err = discoverd.NewClientWithAddr(processor.discoverd)
				return
			})
			if err != nil {
				log.Fatal(err)
			}
		}
	}

	if processor.discoverd == "" && *externalAddr != "" {
		processor.discoverd = *externalAddr + ":1111"
	}
	// HACK: use env as global for discoverd connection in sampic
	os.Setenv("DISCOVERD", processor.discoverd)
	if disc == nil {
		disc, err = discoverd.NewClientWithAddr(processor.discoverd)
		if err != nil {
			log.Fatal(err)
		}
	}
	sampiStandby, err := disc.RegisterAndStandby("flynn-host", *externalAddr+":1113", map[string]string{"id": *hostID})
	if err != nil {
		log.Fatal(err)
	}

	// Check if we are the leader so that we can use the cluster functions directly
	sampiCluster := sampi.NewCluster(sampi.NewState())
	select {
	case <-sampiStandby:
		g.Log(grohl.Data{"at": "sampi_leader"})
		rpc.Register(sampiCluster)
	case <-time.After(5 * time.Millisecond):
		go func() {
			<-sampiStandby
			g.Log(grohl.Data{"at": "sampi_leader"})
			rpc.Register(sampiCluster)
		}()
	}
	cluster, err := cluster.NewClientWithSelf(*hostID, NewLocalClient(*hostID, sampiCluster))
	if err != nil {
		log.Fatal(err)
	}

	g.Log(grohl.Data{"at": "sampi_connected"})

	events := make(chan host.Event)
	state.AddListener("all", events)
	go syncScheduler(cluster, events)

	h := &host.Host{}
	if *configFile != "" {
		h, err = openConfig(*configFile)
		if err != nil {
			log.Fatal(err)
		}
	}
	if h.Attributes == nil {
		h.Attributes = make(map[string]string)
	}
	for k, v := range attributes {
		h.Attributes[k] = v
	}
	h.ID = *hostID

	for {
		newLeader := cluster.NewLeaderSignal()

		h.Jobs = state.ClusterJobs()
		jobs := make(chan *host.Job)
		hostErr := cluster.RegisterHost(h, jobs)
		g.Log(grohl.Data{"at": "host_registered"})
		processor.Process(ports, jobs)
		g.Log(grohl.Data{"at": "sampi_disconnected", "err": *hostErr})

		<-newLeader
	}
}
Example #20
func main() {
	hostname, _ := os.Hostname()
	externalAddr := flag.String("external", "", "external IP of host")
	configFile := flag.String("config", "", "configuration file")
	manifestFile := flag.String("manifest", "", "manifest file")
	hostID := flag.String("id", hostname, "host id")
	attributes := make(AttributeFlag)
	flag.Var(&attributes, "attribute", "key=value pair to add as an attribute")
	flag.Parse()
	grohl.AddContext("app", "lorne")
	grohl.Log(grohl.Data{"at": "start"})
	g := grohl.NewContext(grohl.Data{"fn": "main"})

	dockerc, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}

	state := NewState()
	ports := make(chan int)

	go allocatePorts(ports, 55000, 65535)
	go serveHTTP(&Host{state: state, docker: dockerc}, &attachHandler{state: state, docker: dockerc})
	go streamEvents(dockerc, state)

	processor := &jobProcessor{
		externalAddr: *externalAddr,
		docker:       dockerc,
		state:        state,
		discoverd:    os.Getenv("DISCOVERD"),
	}

	runner := &manifestRunner{
		env:        parseEnviron(),
		externalIP: *externalAddr,
		ports:      ports,
		processor:  processor,
		docker:     dockerc,
	}

	var disc *discoverd.Client
	if *manifestFile != "" {
		f, err := os.Open(*manifestFile)
		if err != nil {
			log.Fatal(err)
		}
		services, err := runner.runManifest(f)
		if err != nil {
			log.Fatal(err)
		}
		f.Close()

		if d, ok := services["discoverd"]; ok {
			processor.discoverd = fmt.Sprintf("%s:%d", d.InternalIP, d.TCPPorts[0])
			err = Attempts.Run(func() (err error) {
				disc, err = discoverd.NewClientUsingAddress(processor.discoverd)
				return
			})
			if err != nil {
				log.Fatal(err)
			}
		}
	}

	if processor.discoverd == "" && *externalAddr != "" {
		processor.discoverd = *externalAddr + ":1111"
	}
	// HACK: use env as global for discoverd connection in sampic
	os.Setenv("DISCOVERD", processor.discoverd)
	if disc == nil {
		disc, err = discoverd.NewClientUsingAddress(processor.discoverd)
		if err != nil {
			log.Fatal(err)
		}
	}
	sampiStandby, err := disc.RegisterAndStandby("flynn-host", *externalAddr+":1113", map[string]string{"id": *hostID})
	if err != nil {
		log.Fatal(err)
	}
	go func() {
		<-sampiStandby
		rpc.Register(sampi.NewCluster(sampi.NewState()))
	}()

	cluster, err := client.New()
	if err != nil {
		log.Fatal(err)
	}
	g.Log(grohl.Data{"at": "sampi_connected"})

	events := make(chan host.Event)
	state.AddListener("all", events)
	go syncScheduler(cluster, events)

	var h *host.Host
	if *configFile != "" {
		h, err = openConfig(*configFile)
		if err != nil {
			log.Fatal(err)
		}
	} else {
		h = &host.Host{Resources: make(map[string]host.ResourceValue)}
	}
	if _, ok := h.Resources["memory"]; !ok {
		h.Resources["memory"] = host.ResourceValue{Value: 1024}
	}
	h.ID = *hostID
	h.Jobs = state.ClusterJobs()

	if h.Attributes == nil {
		h.Attributes = make(map[string]string)
	}

	for k, v := range attributes {
		h.Attributes[k] = v
	}

	jobs := make(chan *host.Job)
	hostErr := cluster.ConnectHost(h, jobs)
	g.Log(grohl.Data{"at": "host_registered"})
	processor.Process(ports, jobs)
	log.Fatal(*hostErr)
}