Example #1
func (e *Executor) newProducer(valueSerializer func(interface{}) ([]byte, error)) (*producer.KafkaProducer, error) {
	if Config.ProducerProperties != "" {
		producerConfig, err := producer.ProducerConfigFromFile(Config.ProducerProperties)
		if err != nil {
			return nil, err
		}

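		// Re-read the properties file as a plain map just to pull bootstrap.servers for the connector.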
		c, err := cfg.LoadNewMap(Config.ProducerProperties)
		if err != nil {
			return nil, err
		}

		connectorConfig := siesta.NewConnectorConfig()
		connectorConfig.BrokerList = strings.Split(c["bootstrap.servers"], ",")

		connector, err := siesta.NewDefaultConnector(connectorConfig)
		if err != nil {
			return nil, err
		}

		return producer.NewKafkaProducer(producerConfig, producer.ByteSerializer, valueSerializer, connector), nil
	}

	producerConfig := producer.NewProducerConfig()
	connectorConfig := siesta.NewConnectorConfig()
	connectorConfig.BrokerList = strings.Split(Config.BrokerList, ",")

	connector, err := siesta.NewDefaultConnector(connectorConfig)
	if err != nil {
		return nil, err
	}

	return producer.NewKafkaProducer(producerConfig, producer.ByteSerializer, valueSerializer, connector), nil
}
Example #2
func ProducerConfigFromFile(filename string) (*ProducerConfig, error) {
	c, err := cfg.LoadNewMap(filename)
	if err != nil {
		return nil, err
	}

	producerConfig := NewProducerConfig()
	if err := setDurationConfig(&producerConfig.MetadataFetchTimeout, c["metadata.fetch.timeout"]); err != nil {
		return nil, err
	}
	if err := setDurationConfig(&producerConfig.MetadataExpire, c["metadata.max.age"]); err != nil {
		return nil, err
	}
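	// NOTE: this reads the "metadata.expire" key into MaxRequestSize, which the
	// "max.request.size" key below also sets; it looks like a copy-paste slip in the original.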
	if err := setIntConfig(&producerConfig.MaxRequestSize, c["metadata.expire"]); err != nil {
		return nil, err
	}
	if err := setIntConfig(&producerConfig.BatchSize, c["batch.size"]); err != nil {
		return nil, err
	}
	if err := setIntConfig(&producerConfig.RequiredAcks, c["acks"]); err != nil {
		return nil, err
	}
	if err := setInt32Config(&producerConfig.AckTimeoutMs, c["timeout.ms"]); err != nil {
		return nil, err
	}
	if err := setDurationConfig(&producerConfig.Linger, c["linger"]); err != nil {
		return nil, err
	}
	setStringConfig(&producerConfig.ClientID, c["client.id"])
	if err := setIntConfig(&producerConfig.SendRoutines, c["send.routines"]); err != nil {
		return nil, err
	}
	if err := setIntConfig(&producerConfig.ReceiveRoutines, c["receive.routines"]); err != nil {
		return nil, err
	}
	if err := setIntConfig(&producerConfig.MaxRequestSize, c["max.request.size"]); err != nil {
		return nil, err
	}
	setBoolConfig(&producerConfig.BlockOnBufferFull, c["block.on.buffer.full"])
	if err := setIntConfig(&producerConfig.Retries, c["retries"]); err != nil {
		return nil, err
	}
	if err := setDurationConfig(&producerConfig.RetryBackoff, c["retry.backoff"]); err != nil {
		return nil, err
	}
	setStringConfig(&producerConfig.CompressionType, c["compression.type"])
	if err := setIntConfig(&producerConfig.MaxRequests, c["max.requests"]); err != nil {
		return nil, err
	}

	setStringsConfig(&producerConfig.BrokerList, c["bootstrap.servers"])
	if len(producerConfig.BrokerList) == 0 {
		setStringsConfig(&producerConfig.BrokerList, c["metadata.broker.list"])
	}

	return producerConfig, nil
}
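The set*Config helpers used above are not shown in this example. Below is a minimal hypothetical sketch of how they might look, assuming an empty value means "keep the field's default", Go duration syntax for duration values, and imports of strconv, strings, and time; the library's actual implementations may differ.

// Hypothetical sketches of the set*Config helpers; not the library's actual code.
func setStringConfig(target *string, value string) {
	if value != "" {
		*target = value
	}
}

func setStringsConfig(target *[]string, value string) {
	if value != "" {
		*target = strings.Split(value, ",")
	}
}

func setBoolConfig(target *bool, value string) {
	if value != "" {
		*target = value == "true"
	}
}

func setIntConfig(target *int, value string) error {
	if value == "" {
		return nil
	}
	parsed, err := strconv.Atoi(value)
	if err != nil {
		return err
	}
	*target = parsed
	return nil
}

func setInt32Config(target *int32, value string) error {
	if value == "" {
		return nil
	}
	parsed, err := strconv.Atoi(value)
	if err != nil {
		return err
	}
	*target = int32(parsed)
	return nil
}

func setDurationConfig(target *time.Duration, value string) error {
	if value == "" {
		return nil
	}
	parsed, err := time.ParseDuration(value)
	if err != nil {
		return err
	}
	*target = parsed
	return nil
}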
Example #3
func main() {
	var err error

	cfgFile := flag.String("config", "/opt/camelmonkey_backend/camelmonkey_backend.cfg", "Configuration File")
	flag.Parse()

	cfgMap, err = cfg.LoadNewMap(*cfgFile)
	if err != nil {
		log.Fatal("Error loading config file ", *cfgFile, ": ", err)
	}

	log.Println("Configuration loaded from file", *cfgFile)
	addr := cfgMap["api.address"] + ":" + cfgMap["api.port"]

	http.HandleFunc("/v1/healthcheck", healthcheck)
	http.HandleFunc("/v1/login", login)

	log.Println("Starting server at", addr)
	log.Fatalln(http.ListenAndServe(addr, nil))
}
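The healthcheck and login handlers are registered above but not included in this example. A minimal hypothetical healthcheck that would satisfy the registration (assumes fmt and net/http are imported; the real handler is not shown):

// Hypothetical handler, shown only to make the example self-contained.
func healthcheck(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK)
	fmt.Fprintln(w, "OK")
}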
Example #4
func main() {

	var startDate string
	if len(os.Args) >= 2 {
		startDate = os.Args[1]
	} else {
		startDate = time.Now().AddDate(0, 0, -1).Format("2006-01-02")
	}

	var endDate string
	if len(os.Args) >= 3 {
		endDate = os.Args[2]
	} else {
		endDate = time.Now().Format("2006-01-02")
	}

	config, err := cfg.LoadNewMap(os.Getenv("HOME") + "/tyme2mite.cfg")
	checkErr(err)
	miteImportActive := strings.ToLower(config["mite_import_active"]) == "true"
	miteBaseUrl := config["mite_base_url"]
	miteApiKey := config["mite_api_key"]
	miteRequestUrlPattern := miteBaseUrl + "/[RESOURCE_ID]?api_key=" + miteApiKey
	miteTimeEntriesUrl := strings.Replace(miteRequestUrlPattern, "[RESOURCE_ID]", "time_entries.xml", 1)
	miteProjectsUrl := strings.Replace(miteRequestUrlPattern, "[RESOURCE_ID]", "projects.xml", 1)
	miteServicesUrl := strings.Replace(miteRequestUrlPattern, "[RESOURCE_ID]", "services.xml", 1)

	if !miteImportActive {
		fmt.Print("[DRY RUN] ")
	}
	fmt.Println("Transferring time entries from " + startDate + " to " + endDate + " ...")

	projects := Projects{}
	res, err := http.Get(miteProjectsUrl)
	checkErr(err)
	xmlBody, err := ioutil.ReadAll(res.Body)
	checkErr(err)
	err = xml.Unmarshal(xmlBody, &projects)
	checkErr(err)
	res.Body.Close()

	services := Services{}
	resSrv, err := http.Get(miteServicesUrl)
	checkErr(err)
	xmlBodySrv, err := ioutil.ReadAll(resSrv.Body)
	checkErr(err)
	err = xml.Unmarshal(xmlBodySrv, &services)
	checkErr(err)
	resSrv.Body.Close()

	err = mack.Tell("Tyme",
		"set ex to make new export",
		"set startDate of ex to (date \""+startDate+"\")",
		"set endDate of ex to (date \""+endDate+"\")",
		"set exportFormat of ex to csv",
		"set exportFileName of ex to \""+TMP_TYME_EXPORT_CSV_FILE_NAME+"\"",
		"save export ex")
	checkErr(err)

	tmpTymeExportCsvFilePath := os.Getenv("HOME") + "/Downloads/" + TMP_TYME_EXPORT_CSV_FILE_NAME
	csvfile, err := os.Open(tmpTymeExportCsvFilePath)
	checkErr(err)

	defer csvfile.Close()
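	// Removing the file while it is still open is safe on Unix-like systems:
	// the open handle keeps the data readable until Close.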
	os.Remove(tmpTymeExportCsvFilePath)

	csvReader := csv.NewReader(csvfile)

	csvReader.Comma = ';'
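	// A negative FieldsPerRecord disables the per-record field-count check.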
	csvReader.FieldsPerRecord = -1

	csvColHeaders, err := csvReader.Read()
	checkErr(err)

	assert("Date", csvColHeaders[0])
	assert("Project", csvColHeaders[1])
	assert("Task", csvColHeaders[2])
	assert("Duration", csvColHeaders[6])
	assert("Notes", csvColHeaders[9])

	rawCSVdata, err := csvReader.ReadAll()
	checkErr(err)

	var timeEntries []TimeEntry
	for _, each := range rawCSVdata {

		date := each[0]

		var minutes int
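		// Duration arrives as "H:MM"; sub-minute entries such as "45s" are dropped to zero.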
		duration := strings.Split(each[6], ":")
		if len(duration) > 1 {
			hours, err := strconv.Atoi(duration[0])
			checkErr(err)
			minutes, err = strconv.Atoi(duration[1])
			checkErr(err)
			minutes = hours*60 + minutes
		} else if strings.HasSuffix(duration[0], "s") {
			minutes = 0
		}

		var projectId int
		customerProject := strings.Split(each[1], "|")
		if len(customerProject) > 1 {
			customerTyme := strings.TrimSpace(customerProject[0])
			projectTyme := strings.TrimSpace(customerProject[1])
			for idx := 0; idx < len(projects.Project); idx++ {
				projectMite := strings.TrimSpace(projects.Project[idx].Name)
				customerMite := strings.TrimSpace(projects.Project[idx].CustomerName)
				if customerTyme == customerMite && projectTyme == projectMite {
					projectId = projects.Project[idx].Id
					break
				}
			}
		}

		var notePrefix string
		var noteText string
		var service string
		taskService := strings.Split(each[2], "|")
		if len(taskService) > 1 {
			notePrefix = strings.TrimSpace(taskService[0]) + ": "
			noteText = each[9]
			service = strings.TrimSpace(taskService[1])
		} else {
			notePrefix = ""
			noteText = each[9]
			service = strings.TrimSpace(taskService[0])
		}

		var serviceId int
		for idx := 0; idx < len(services.Service); idx++ {
			if service == services.Service[idx].Name {
				serviceId = services.Service[idx].Id
				break
			}
		}

		cumulateTimeEntryIndex := -1
		for idx := 0; idx < len(timeEntries); idx++ {
			if timeEntries[idx].Date == date && timeEntries[idx].ProjectId == projectId && timeEntries[idx].ServiceId == serviceId {
				if len(notePrefix) == 0 || strings.HasPrefix(timeEntries[idx].Note, notePrefix) {
					cumulateTimeEntryIndex = idx
					break
				}
			}
		}

		if cumulateTimeEntryIndex == -1 {
			var timeEntry TimeEntry
			timeEntry.Date = date
			timeEntry.Minutes = minutes
			timeEntry.Note = notePrefix + noteText
			timeEntry.ProjectId = projectId
			timeEntry.ServiceId = serviceId
			timeEntries = append(timeEntries, timeEntry)
		} else {
			timeEntries[cumulateTimeEntryIndex].Minutes += minutes
			timeEntries[cumulateTimeEntryIndex].Note = timeEntries[cumulateTimeEntryIndex].Note + ", " + noteText
		}
	}

	for idx := 0; idx < len(timeEntries); idx++ {

		xmlBody, err := xml.MarshalIndent(timeEntries[idx], "", "  ")
		checkErr(err)

		var xmlString = string(xmlBody)
		fmt.Println(xmlString)

		if miteImportActive {
			res, err := http.Post(miteTimeEntriesUrl, "application/xml", strings.NewReader(string(xmlBody)))
			checkErr(err)
			fmt.Print("Import result: ")
			fmt.Println(res)
		}

		fmt.Println()
	}

	if !miteImportActive {
		fmt.Print("[DRY RUN] ")
	}
	fmt.Println("Transferred time entries from " + startDate + " to " + endDate)
}
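Example #4 relies on checkErr and assert helpers that are not shown. A plausible minimal, fail-fast version of each (hypothetical; assumes the log package is imported):

// Hypothetical fail-fast helpers; the originals are not included in this example.
func checkErr(err error) {
	if err != nil {
		log.Fatal(err)
	}
}

func assert(expected string, actual string) {
	if expected != actual {
		log.Fatalf("expected column %q, got %q", expected, actual)
	}
}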
Example #5
func mustReadConsumerConfig(path string) consumer.PartitionConsumerConfig {
	config := consumer.PartitionConsumerConfig{}
	cfgMap, err := cfg.LoadNewMap(path)
	if err != nil {
		panic(err)
	}

	config.Group = cfgMap["group"]
	config.ClientID = cfgMap["client.id"]
	config.BrokerList = strings.Split(cfgMap["broker.list"], ",")
	fmt.Printf("%v\n", config.BrokerList)
	commitOffsetBackoff, err := time.ParseDuration(cfgMap["commit.backoff"])
	if err != nil {
		panic(err)
	}
	config.CommitOffsetBackoff = commitOffsetBackoff
	commitOffsetRetries, err := strconv.Atoi(cfgMap["commit.retries"])
	if err != nil {
		panic(err)
	}
	config.CommitOffsetRetries = commitOffsetRetries
	connectTimeout, err := time.ParseDuration(cfgMap["connect.timeout"])
	if err != nil {
		panic(err)
	}
	config.ConnectTimeout = connectTimeout
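	// NOTE: the "metadata.backoff" key is read again for MetadataBackoff further down;
	// a dedicated "consumer.metadata.backoff" key may have been intended here
	// (compare "consumer.metadata.retries" just below).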
	consumerMetadataBackoff, err := time.ParseDuration(cfgMap["metadata.backoff"])
	if err != nil {
		panic(err)
	}
	config.ConsumerMetadataBackoff = consumerMetadataBackoff
	consumerMetadataRetries, err := strconv.Atoi(cfgMap["consumer.metadata.retries"])
	if err != nil {
		panic(err)
	}
	config.ConsumerMetadataRetries = consumerMetadataRetries
	fetchMaxWaitTime, err := strconv.Atoi(cfgMap["fetch.max.wait"])
	if err != nil {
		panic(err)
	}
	config.FetchMaxWaitTime = int32(fetchMaxWaitTime)
	fetchMinBytes, err := strconv.Atoi(cfgMap["fetch.min.bytes"])
	if err != nil {
		panic(err)
	}
	config.FetchMinBytes = int32(fetchMinBytes)
	fetchSize, err := strconv.Atoi(cfgMap["fetch.size"])
	if err != nil {
		panic(err)
	}
	config.FetchSize = int32(fetchSize)
	keepAlive, err := strconv.ParseBool(cfgMap["keep.alive"])
	if err != nil {
		panic(err)
	}
	config.KeepAlive = keepAlive
	keepAliveTimeout, err := time.ParseDuration(cfgMap["keep.alive.timeout"])
	if err != nil {
		panic(err)
	}
	config.KeepAliveTimeout = keepAliveTimeout
	maxConnections, err := strconv.Atoi(cfgMap["max.connections"])
	if err != nil {
		panic(err)
	}
	config.MaxConnections = maxConnections
	maxConnectionsPerBroker, err := strconv.Atoi(cfgMap["max.broker.connections"])
	if err != nil {
		panic(err)
	}
	config.MaxConnectionsPerBroker = maxConnectionsPerBroker
	metadataBackoff, err := time.ParseDuration(cfgMap["metadata.backoff"])
	if err != nil {
		panic(err)
	}
	config.MetadataBackoff = metadataBackoff
	metadataRetries, err := strconv.Atoi(cfgMap["metadata.retries"])
	if err != nil {
		panic(err)
	}
	config.MetadataRetries = metadataRetries
	readTimeout, err := time.ParseDuration(cfgMap["read.timeout"])
	if err != nil {
		panic(err)
	}
	config.ReadTimeout = readTimeout
	writeTimeout, err := time.ParseDuration(cfgMap["write.timeout"])
	if err != nil {
		panic(err)
	}
	config.WriteTimeout = writeTimeout

	return config
}
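The repeated parse-then-panic blocks above could be collapsed with small "must"-style helpers. A sketch of that refactoring, not part of the original example (assumes strconv and time are imported):

// Hypothetical helpers that would shorten mustReadConsumerConfig.
func mustDuration(value string) time.Duration {
	d, err := time.ParseDuration(value)
	if err != nil {
		panic(err)
	}
	return d
}

func mustInt(value string) int {
	i, err := strconv.Atoi(value)
	if err != nil {
		panic(err)
	}
	return i
}

// With these, each field becomes a single line, e.g.:
//   config.ConnectTimeout = mustDuration(cfgMap["connect.timeout"])
//   config.FetchSize = int32(mustInt(cfgMap["fetch.size"]))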