// askForConfirmation uses Scanln to parse user input. A user must type in "yes" or "no" and // then press enter. It has fuzzy matching, so "y", "Y", "yes", "YES", and "Yes" all count as // confirmations. If the input is not recognized, it will ask again. The function does not return // until it gets a valid response from the user. Typically, you should use fmt to print out a question // before calling askForConfirmation. E.g. fmt.Println("WARNING: Are you sure? (yes/no)") func askForConfirmation(message string) bool { fmt.Printf(message) var response string _, err := fmt.Scanln(&response) if err != nil { logger.Fatal("Cannot read from stdin: %s", err.Error()) } yesResponses := []string{"y", "Y", "yes", "Yes", "YES"} noResponses := []string{"n", "N", "no", "No", "NO"} if containsString(yesResponses, response) { return true } else if containsString(noResponses, response) { return false } else { return askForConfirmation(message) } }
func getESClient(esURL string) (*elastic.Client, error) { esClient, err := elastic.NewClient( elastic.SetURL(esURL), elastic.SetSniff(false), elastic.SetErrorLog(logger.DefaultLogger.Handlers[0].(*logger.DefaultHandler).ErrorLogger), elastic.SetInfoLog(logger.DefaultLogger.Handlers[0].(*logger.DefaultHandler).DebugLogger), elastic.SetTraceLog(logger.DefaultLogger.Handlers[0].(*logger.DefaultHandler).DebugLogger), ) if err != nil { return esClient, err } esVersion, err := esClient.ElasticsearchVersion(esURL) if err != nil { logger.Fatal("Error getting ES version: %+v", err.Error()) } logger.Info("Connected in Elasticsearch <%s>, version %s", esURL, esVersion) return esClient, err }
func main() { flag.Parse() machineFile := flag.Arg(0) if machineFile == "" { logger.Fatal("You need to pass a machine definition file, type: %s <machine.yml>\n", os.Args[0]) } machineContent, err := ioutil.ReadFile(machineFile) if err != nil { logger.Fatal("Error open machine file: %s", err.Error()) } var machineConfig machine.Machine err = yaml.Unmarshal(machineContent, &machineConfig) if err != nil { logger.Fatal("Error reading machine file: %s", err.Error()) } auth, err := AwsAuth() if err != nil { logger.Fatal("Error reading aws credentials: %s", err.Error()) } if machineConfig.Instance.AvailableZone == "" { if machineConfig.Instance.DefaultAvailableZone == "" { logger.Fatal("Cannot create machine, instance.availablezone is missing") } else { machineConfig.Instance.AvailableZone = machineConfig.Instance.DefaultAvailableZone } } err = machine.Get(&machineConfig, auth) if err != nil { logger.Fatal("Error getting machine: %s", err.Error()) } }
// main reindexes (copies) all documents of one Elasticsearch index into a new
// index, optionally on a different host and with a new mapping. If the source
// name is an alias, the alias is re-pointed to the new index at the end.
func main() {
	flag.StringVar(&fromHost, "from-host", defaultElasticSearch, "elastic search host to get data from")
	flag.StringVar(&fromIndex, "index", "", "name of index to reindex/copy")
	flag.StringVar(&toHost, "to-host", defaultElasticSearch, "elastic search host to get data from")
	flag.StringVar(&toIndex, "new-index", "", "name of new-index")
	flag.StringVar(&newMapping, "new-mapping", "", "path to new mapping file of new-index")
	flag.IntVar(&bulkSize, "bulk-size", 500, "amount of data to get in each request")
	flag.BoolVar(&forceYes, "force-yes", false, "even if destination alias already exists continue reindexing")
	flag.BoolVar(&forceNo, "force-no", false, "if destination alias already exists abort reindexing")
	flag.Parse()
	if fromIndex == "" {
		logger.Error("The `-index` parameters are required to reindex/copy")
		flag.Usage()
		os.Exit(1)
	}
	// Read new mapping file
	if newMapping != "" {
		mappingBytes, err := ioutil.ReadFile(newMapping)
		if err != nil {
			logger.Fatal("Error reading mapping file: %+v", err.Error())
		}
		mappingContent = string(mappingBytes)
		logger.Debug("New mapping of %s:", newMapping)
		logger.Debug(mappingContent)
	}
	// Set default toIndex: source name plus the last segment of a fresh UUID,
	// e.g. "myindex-1a2b3c4d5e6f".
	if toIndex == "" {
		toIndex = fromIndex + "-" + uuid.New()[24:]
	}
	// Connect to clients
	fromClient, err := getESClient(fromHost)
	if err != nil {
		logger.Fatal("Error connecting to `%s`: %+v", fromHost, err.Error())
	}
	toClient, err := getESClient(toHost)
	if err != nil {
		logger.Fatal("Error connecting to `%s`: %+v", toHost, err.Error())
	}
	// Verify if fromIndex exists
	exists, err := fromClient.IndexExists(fromIndex).Do()
	if err != nil {
		logger.Fatal("Error verifying if index <%s> exists: %+v", fromIndex, err.Error())
	}
	if !exists {
		logger.Fatal("The index <%s> doesn't exists, we need a valid index or alias", fromIndex)
	}
	// Verify if toIndex already exists
	exists, err = toClient.IndexExists(toIndex).Do()
	if err != nil {
		logger.Fatal("Error verifying if index <%s> exists: %+v", toIndex, err.Error())
	}
	// If toIndex don't exists we need create it
	if !exists {
		indexService := toClient.CreateIndex(toIndex)
		// If -new-mapping was not provided use original mapping
		if newMapping == "" {
			mapping, err := fromClient.GetMapping().Index(fromIndex).Do()
			if err != nil {
				logger.Fatal("Error getting mapping of index <%s>", fromIndex)
			}
			// The response is keyed by index name; unwrap the first (only)
			// entry to get the mapping body itself.
			for _, v := range mapping {
				mapping = v.(map[string]interface{})
				break
			}
			// Copy the source index settings into the creation body as well.
			resp, err := fromClient.IndexGetSettings().Index(fromIndex).Do()
			if err != nil {
				logger.Fatal("Error getting settings of index <%s>", fromIndex)
			}
			for _, v := range resp {
				mapping["settings"] = v.Settings
				break
			}
			indexService.BodyJson(mapping)
		} else {
			indexService.BodyString(mappingContent)
		}
		createNewIndex, err := indexService.Do()
		if err != nil {
			logger.Fatal("Error creating new index <%s>: %+v", toIndex, err.Error())
		}
		if !createNewIndex.Acknowledged {
			logger.Fatal("Was not possible create new index <%s>", toIndex)
		}
		logger.Info("New index <%s> was created!", toIndex)
	} else if forceNo {
		logger.Fatal("Index <%s> already exists in destination server", toIndex)
	} else if !forceYes {
		// Destination exists and neither -force-yes nor -force-no was given:
		// ask the operator interactively before reusing the existing index.
		if !askForConfirmation(fmt.Sprintf("Index <%s> already exists, do you want index all documents without change the current mapping? (yes/no) ", toIndex)) {
			os.Exit(0)
		}
	}
	// Reindex fromIndex to toIndex
	reindexer := fromClient.Reindex(fromIndex, toIndex)
	reindexer.TargetClient(toClient)
	reindexer.Progress(showReindexProgress)
	if bulkSize > 0 {
		reindexer.BulkSize(bulkSize)
	}
	logger.Info("Starting reindexing...")
	reindexStart = time.Now()
	resp, err := reindexer.Do()
	if err != nil {
		logger.Fatal("Error trying reindexing: %+v", err.Error())
	}
	logger.Info("Reindexed was completed in <%s>, %d documents successed and %d failed", time.Since(reindexStart), resp.Success, resp.Failed)
	if len(resp.Errors) > 0 {
		logger.Warn("We get errors in some documents...")
		for _, respItem := range resp.Errors {
			logger.Error("Index[%s] Type[%s] Id[%s]: %s", respItem.Index, respItem.Type, respItem.Id, respItem.Error)
		}
	}
	// If index is a alias, update its reference
	// NOTE(review): aliases are looked up on the destination client; when
	// from-host and to-host differ, an alias that only exists on the source
	// host will not be re-pointed — confirm this is intended.
	aliasesService := toClient.Aliases()
	aliases, err := aliasesService.Do()
	if err != nil {
		logger.Fatal("Error getting aliases: %+v", err.Error())
	}
	indices := aliases.IndicesByAlias(fromIndex)
	if len(indices) > 0 {
		aliasService := toClient.Alias()
		for _, index := range indices {
			aliasService.Remove(index, fromIndex)
		}
		_, err = aliasService.Add(toIndex, fromIndex).Do()
		if err != nil {
			logger.Fatal("Error updating alias <%s>: %+v", fromIndex, err.Error())
		}
		logger.Info("Alias <%s>: %+v was removed and now point to: <%s>", fromIndex, indices, toIndex)
	}
}
func main() { flag.StringVar(&host, "host", defaultElasticSearch, "elastic search host to get data from") flag.StringVar(&index, "index", "", "name of index to export") flag.StringVar(&indexType, "type", "", "name of type inside of <index> to export [optional]") flag.StringVar(&fieldlist, "fieldlist", "", "list of fields to export") flag.StringVar(&output, "output", "", "name of file to output") flag.Parse() if host == "" || index == "" || indexType == "" || fieldlist == "" || output == "" { logger.Error("Missing some parameters") flag.Usage() os.Exit(1) } // Connect to client client, err := getESClient(host) if err != nil { logger.Fatal("Error connecting to `%s`: %+v", host, err.Error()) } // Verify if index exists exists, err := client.IndexExists(index).Do() if err != nil { logger.Fatal("Error verifying if index <%s> exists: %+v", index, err.Error()) } if !exists { logger.Fatal("The index <%s> doesn't exists, we need a valid index or alias", index) } // Verify if type exists exists, err = client.TypeExists().Index(index).Type(indexType).Do() if err != nil { logger.Fatal("Error verifying if type <%s/%s> exists: %+v", index, indexType, err.Error()) } if !exists { logger.Fatal("The type <%s/%s> doesn't exists, we need a valid type", index, indexType) } fields = strings.Split(fieldlist, ",") if len(fields) == 0 || fields[0] == "" { logger.Fatal("Fields informed is invalid") } file, err := os.Create(output) if err != nil { logger.Fatal("Cannot create output file[%s]: %s", output, err.Error()) } csvWriter := csv.NewWriter(file) csvWriter.Comma = ';' // Export index/type to output exporter := NewExporter(client, index).Size(10).BulkSize(1000) if indexType != "" { exporter = exporter.Type(indexType) } exporter.Fields(fields...) 
exporter.Writer(csvWriter) exporter.Progress(showExportProgress) // Implement HERE your search quey query := elastic.NewMatchAllQuery() exporter.Query(query) logger.Info("Starting exporting to <%s>...", output) exportStart = time.Now() resp, err := exporter.Do() if err != nil { logger.Fatal("Error trying exporting: %+v", err.Error()) } logger.Info("Exported was completed in <%s>, %d documents successed and %d failed", time.Since(exportStart), resp.Success, resp.Failed) if len(resp.Errors) > 0 { logger.Warn("We get errors in some documents...") for _, respItem := range resp.Errors { logger.Error("Index[%s] Type[%s] Id[%s]: %s", respItem.Index, respItem.Type, respItem.Id, respItem.Error) } } }
func main() { flag.Parse() clusterFile := flag.Arg(0) if clusterFile == "" { logger.Fatal("You need to pass the cluster file, type: %s <cluster-file.yml>\n", os.Args[0]) } clusterContent, err := ioutil.ReadFile(clusterFile) if err != nil { logger.Fatal("Error open cluster file: %s", err.Error()) } var clusters Clusters err = yaml.Unmarshal(clusterContent, &clusters) if err != nil { logger.Fatal("Error reading cluster file: %s", err.Error()) } if clusters.Default.AvailableZone == "" { if clusters.Default.DefaultAvailableZone != "" { clusters.Default.AvailableZone = clusters.Default.DefaultAvailableZone } } // First verify if I can open all machine files machines := make([]Cluster, len(clusters.Clusters)) for key := range clusters.Clusters { clusterConfig := &clusters.Clusters[key] machineContent, err := ioutil.ReadFile(clusterConfig.Machine) if err != nil { logger.Fatal("Error open machine file: %s", err.Error()) } var machineConfig machine.Machine err = yaml.Unmarshal(machineContent, &machineConfig) if err != nil { logger.Fatal("Error reading machine file: %s", err.Error()) } // Verify if cloud-config file exists if machineConfig.Instance.CloudConfig != "" { _, err := os.Stat(machineConfig.Instance.CloudConfig) if err != nil { logger.Fatal("Error reading cloud-config: %s", err.Error()) } } // Set default values of cluster to machine if machineConfig.Instance.ImageID == "" { machineConfig.Instance.ImageID = clusters.Default.ImageID } if machineConfig.Instance.Region == "" { machineConfig.Instance.Region = clusters.Default.Region } if machineConfig.Instance.KeyName == "" { machineConfig.Instance.KeyName = clusters.Default.KeyName } if len(machineConfig.Instance.SecurityGroups) == 0 { machineConfig.Instance.SecurityGroups = clusters.Default.SecurityGroups } if machineConfig.Instance.SubnetID == "" { machineConfig.Instance.SubnetID = clusters.Default.SubnetID } if machineConfig.Instance.AvailableZone == "" { if machineConfig.Instance.DefaultAvailableZone != "" { 
machineConfig.Instance.AvailableZone = machineConfig.Instance.DefaultAvailableZone } else { machineConfig.Instance.AvailableZone = clusters.Default.AvailableZone } } for _, tag := range clusters.Default.Tags { addTag := true for _, instanceTag := range machineConfig.Instance.Tags { if strings.EqualFold(instanceTag.Key, tag.Key) { addTag = false } } if addTag { machineConfig.Instance.Tags = append(machineConfig.Instance.Tags, tag) } addTag = true for k, volume := range machineConfig.Volumes { for _, volumeTag := range volume.Tags { if strings.EqualFold(volumeTag.Key, tag.Key) { addTag = false } } if addTag { machineConfig.Volumes[k].Tags = append(machineConfig.Volumes[k].Tags, tag) } } } machines[key] = Cluster{Machine: machineConfig, Nodes: clusterConfig.Nodes} } auth, err := AwsAuth() if err != nil { logger.Fatal("Error reading aws credentials: %s", err.Error()) } machine.SetLogger(ioutil.Discard, "", 0) for key, clusterConfig := range machines { fmt.Printf("================ Running machines of %d. 
cluster ================\n", key+1) for i := 1; i <= clusterConfig.Nodes; i++ { machineConfig := clusterConfig.Machine machineConfig.Volumes = make([]volume.Volume, len(machineConfig.Volumes)) // append machine number to name of instance machineConfig.Instance.Name += fmt.Sprintf("-%d", i) // append machine number to name of volume for key := range clusterConfig.Machine.Volumes { volumeRef := &clusterConfig.Machine.Volumes[key] volumeConfig := *volumeRef volumeConfig.Name += fmt.Sprintf("-%d", i) machineConfig.Volumes[key] = volumeConfig } fmt.Printf("Running machine: %s\n", machineConfig.Instance.Name) err = machine.Get(&machineConfig, auth) if err != nil { logger.Fatal("Error getting machine: %s", err.Error()) } fmt.Printf("Machine Id <%s>, IP Address <%s>\n", machineConfig.Instance.ID, machineConfig.Instance.PrivateIPAddress) if i < clusterConfig.Nodes { fmt.Println("----------------------------------") } } } fmt.Println("================================================================") }