func main() {

	// specify number of threads that can be used
	numCpu := runtime.NumCPU()
	runtime.GOMAXPROCS(numCpu)

	// parse command line arguments
	var config Config
	parseCommandLineArgs(&config)

	// sanity check the input
	if config.FilePath == "" || config.SavePath == "" {
		fmt.Println("Empty path specified. Usage: ./goConvert -i <interface> -f <file path> -s <save path>")
		return
	}

	if config.Iface == "" {
		fmt.Println("No interface specified. Usage: ./goConvert -i <interface> -f <file path> -s <save path>")
		return
	}

	// get number of lines to read in the specified file
	cmd := exec.Command("wc", "-l", config.FilePath)
	out, cmderr := cmd.Output()
	if cmderr != nil {
		fmt.Println("Could not execute line count on file", config.FilePath)
		return
	}

	nlString := strings.Split(string(out), " ")
	nl_in_file, _ := strconv.ParseInt(nlString[0], 10, 32)
	if int(nl_in_file) < config.NumLines && nl_in_file > 0 {
		config.NumLines = int(nl_in_file)
	}

	fmt.Printf("Converting %d rows in file %s\n", config.NumLines, config.FilePath)

	// create channels to pass to the storage writer
	dataChan := make(chan goDB.DBData)
	doneChan := make(chan bool)

	// init goprobe log
	goDB.InitDBLog()

	// open file
	var (
		file                  *os.File
		err                   error
		br, bs, pr, ps        []byte
		dip, sip              []byte
		dport, l7proto, proto []byte
	)

	if file, err = os.Open(config.FilePath); err != nil {
		fmt.Println("File open error: " + err.Error())
		return
	}

	// spawn database writer
	writer := goDB.NewDBStorageWrite(config.SavePath)
	writer.WriteFlowsToDatabase(int64(0), dataChan, doneChan)

	fmt.Print("Progress: 0% |")

	go func() {
		// scan file line by line
		scanner := bufio.NewScanner(file)

		var lines_read int
		var active_block_stamp int64
		var perc_done, prev_perc int
		var prev_block_stamp int64

		for scanner.Scan() {
			if lines_read == config.NumLines {
				break
			}

			// print progress and periodically release memory
			perc_done = int(float64(lines_read) / float64(config.NumLines) * 100)
			if perc_done != prev_perc {
				if perc_done%50 == 0 {
					fmt.Print(" 50% ")
					runtime.GC()
					debug.FreeOSMemory()
				} else if perc_done%10 == 0 {
					fmt.Printf("|")
					runtime.GC()
					debug.FreeOSMemory()
				} else if perc_done%2 == 0 {
					fmt.Printf("-")
					runtime.GC()
					debug.FreeOSMemory()
				}
			}
			prev_perc = perc_done

			fields := strings.Split(scanner.Text(), ",")

			// handle timestamp to find out when to ship the DBData to the channel
			time, _ := strconv.ParseInt(fields[9], 10, 64)

			// ignore all lines which do not abide by the temporal ordering
			if time < prev_block_stamp {
				prev_block_stamp = time
				continue
			}

			cur_block_stamp := time - (time % DB_WRITE_INTERVAL)

			// if the timestamp is in another interval, create a new DBData block
			if cur_block_stamp != active_block_stamp {
				if active_block_stamp != 0 {
					var tstampArr = []byte{
						uint8(active_block_stamp >> 56), uint8(active_block_stamp >> 48),
						uint8(active_block_stamp >> 40), uint8(active_block_stamp >> 32),
						uint8(active_block_stamp >> 24), uint8(active_block_stamp >> 16),
						uint8(active_block_stamp >> 8), uint8(active_block_stamp & 0xff),
					}

					// place the timestamp at the end of the arrays
					br = append(br, tstampArr...)
					bs = append(bs, tstampArr...)
					pr = append(pr, tstampArr...)
					ps = append(ps, tstampArr...)
					dip = append(dip, tstampArr...)
					sip = append(sip, tstampArr...)
					dport = append(dport, tstampArr...)
					l7proto = append(l7proto, tstampArr...)
					proto = append(proto, tstampArr...)

					// if the block was switched, write out the current arrays
					dataChan <- goDB.NewDBData(br, bs, pr, ps, dip, sip, dport, l7proto, proto, active_block_stamp, config.Iface)

					// reset the arrays
					br, bs, pr, ps = []byte{}, []byte{}, []byte{}, []byte{}
					dip, sip = []byte{}, []byte{}
					dport, l7proto, proto = []byte{}, []byte{}, []byte{}

					// the new timestamp becomes the active timestamp
					active_block_stamp = cur_block_stamp

					var tstampArrNew = []byte{
						uint8(active_block_stamp >> 56), uint8(active_block_stamp >> 48),
						uint8(active_block_stamp >> 40), uint8(active_block_stamp >> 32),
						uint8(active_block_stamp >> 24), uint8(active_block_stamp >> 16),
						uint8(active_block_stamp >> 8), uint8(active_block_stamp & 0xff),
					}

					// place the new timestamp at the beginning of the arrays
					br = append(br, tstampArrNew...)
					bs = append(bs, tstampArrNew...)
					pr = append(pr, tstampArrNew...)
					ps = append(ps, tstampArrNew...)
					dip = append(dip, tstampArrNew...)
					sip = append(sip, tstampArrNew...)
					dport = append(dport, tstampArrNew...)
					l7proto = append(l7proto, tstampArrNew...)
					proto = append(proto, tstampArrNew...)
				} else {
					active_block_stamp = cur_block_stamp

					var tstampArr = []byte{
						uint8(active_block_stamp >> 56), uint8(active_block_stamp >> 48),
						uint8(active_block_stamp >> 40), uint8(active_block_stamp >> 32),
						uint8(active_block_stamp >> 24), uint8(active_block_stamp >> 16),
						uint8(active_block_stamp >> 8), uint8(active_block_stamp & 0xff),
					}

					// place the timestamp at the beginning of the arrays
					br = append(br, tstampArr...)
					bs = append(bs, tstampArr...)
					pr = append(pr, tstampArr...)
					ps = append(ps, tstampArr...)
					dip = append(dip, tstampArr...)
					sip = append(sip, tstampArr...)
					dport = append(dport, tstampArr...)
					l7proto = append(l7proto, tstampArr...)
					proto = append(proto, tstampArr...)
				}
			}

			// handle counters
			br_int, _ := strconv.ParseUint(fields[0], 10, 64)
			br = append(br, uint8(br_int>>56), uint8(br_int>>48), uint8(br_int>>40), uint8(br_int>>32),
				uint8(br_int>>24), uint8(br_int>>16), uint8(br_int>>8), uint8(br_int&0xff))

			bs_int, _ := strconv.ParseUint(fields[1], 10, 64)
			bs = append(bs, uint8(bs_int>>56), uint8(bs_int>>48), uint8(bs_int>>40), uint8(bs_int>>32),
				uint8(bs_int>>24), uint8(bs_int>>16), uint8(bs_int>>8), uint8(bs_int&0xff))

			pr_int, _ := strconv.ParseUint(fields[5], 10, 64)
			pr = append(pr, uint8(pr_int>>56), uint8(pr_int>>48), uint8(pr_int>>40), uint8(pr_int>>32),
				uint8(pr_int>>24), uint8(pr_int>>16), uint8(pr_int>>8), uint8(pr_int&0xff))

			ps_int, _ := strconv.ParseUint(fields[6], 10, 64)
			ps = append(ps, uint8(ps_int>>56), uint8(ps_int>>48), uint8(ps_int>>40), uint8(ps_int>>32),
				uint8(ps_int>>24), uint8(ps_int>>16), uint8(ps_int>>8), uint8(ps_int&0xff))

			// handle IPs (IPv4 addresses are zero-padded to 16 bytes)
			dip_str := strings.Split(fields[2], ".")
			for i := 0; i < 16; i++ {
				if i < 4 {
					octet, _ := strconv.Atoi(dip_str[i])
					dip = append(dip, uint8(octet))
				} else {
					dip = append(dip, 0x00)
				}
			}

			sip_str := strings.Split(fields[8], ".")
			for i := 0; i < 16; i++ {
				if i < 4 {
					octet, _ := strconv.Atoi(sip_str[i])
					sip = append(sip, uint8(octet))
				} else {
					sip = append(sip, 0x00)
				}
			}

			// handle port and protocols
			prot_num, _ := strconv.Atoi(fields[7])
			proto = append(proto, uint8(prot_num))

			dport_num, _ := strconv.Atoi(fields[3])
			dport = append(dport, uint8(dport_num>>8), uint8(dport_num&0xff))

			l7p_num, _ := strconv.Atoi(fields[4])
			l7proto = append(l7proto, uint8(l7p_num>>8), uint8(l7p_num&0xff))

			lines_read++
			prev_block_stamp = cur_block_stamp
		}

		// push empty DBData onto channel to signal that we are done
		dataChan <- goDB.DBData{}
		fmt.Print("| 100%")
	}()

	// return if the data write failed or exited
	if <-doneChan {
		fmt.Println("\nExiting")
		return
	}

	return
}
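
// NOTE: The manual byte-shifting used above for timestamps, counters and IP
// addresses can equivalently be expressed with the standard library. The two
// helpers below are a minimal, hypothetical sketch (they are not part of the
// original goConvert code) and assume that "encoding/binary" and "net" are
// imported.

// appendUint64BE appends the 8-byte big-endian encoding of v to dst, matching
// the shift/append sequences used for the block timestamps and counters.
func appendUint64BE(dst []byte, v uint64) []byte {
	var buf [8]byte
	binary.BigEndian.PutUint64(buf[:], v)
	return append(dst, buf[:]...)
}

// appendIPv4Padded appends the four octets of a dotted-quad IPv4 string
// followed by twelve zero bytes, mirroring the 16-byte layout produced by the
// octet loops above. Unparsable input yields 16 zero bytes.
func appendIPv4Padded(dst []byte, s string) []byte {
	var b [16]byte
	if ip4 := net.ParseIP(s).To4(); ip4 != nil {
		copy(b[:4], ip4)
	}
	return append(dst, b[:]...)
}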
// goProbe's main routine --------------------------------------------------------
func main() {

	// CPU Profiling Calls
	// runtime.SetBlockProfileRate(10000000)             // PROFILING DEBUG
	// f, proferr := os.Create("GPCore.prof")            // PROFILING DEBUG
	// if proferr != nil {                               // PROFILING DEBUG
	//     fmt.Println("Profiling error: "+proferr.Error()) // PROFILING DEBUG
	// }                                                 // PROFILING DEBUG
	// pprof.StartCPUProfile(f)                          // PROFILING DEBUG
	// defer pprof.StopCPUProfile()                      // PROFILING DEBUG

	/// LOGGING SETUP ------------------------------------------------------------
	// initialize logger
	if err := goProbe.InitGPLog(); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to initialize Logger. Exiting!\n")
		return
	}

	goProbe.SysLog.Info("Started goProbe")

	// get command line arguments (all interface names)
	var IfaceNames []string = os.Args[1:]

	// exit the program if the interfaces have not been correctly passed
	// by the configuration file. In this case, it does not make any
	// sense to keep running the probe
	if len(IfaceNames) == 0 {
		goProbe.SysLog.Crit("No interfaces have been specified in the configuration file (mandatory). Exiting.")
		return
	}

	/// CHANNEL VARIABLE SETUP ---------------------------------------------------
	// channels for handling writes to the flow map and the database
	gpcThreadIsDoneWritingChan := make(chan bool, 1)
	isDoneWritingToDBChan := make(chan bool, 1)

	// base channel which will be filled with row data from the GPMatrix
	DBDataChan := make(chan goDB.DBData, 1024)

	// channel for handling SIGTERM, SIGINT from the OS
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGTERM, syscall.SIGUSR1, syscall.SIGUSR2, os.Interrupt)

	/// DB WRITER SETUP ----------------------------------------------------------
	toStorageWriter := goDB.NewDBStorageWrite(DBPath)

	/// CAPTURE ROUTINES MANAGER -------------------------------------------------
	// channel for handling termination signals from the individual interfaces
	gpcThreadTerminatedChan := make(chan string, len(IfaceNames))

	// create capture routine manager
	capManager := goProbe.NewGPCaptureManager(IfaceNames, SnapLen, PromiscMode, BpfFilterString,
		gpcThreadTerminatedChan, DBDataChan, gpcThreadIsDoneWritingChan, isDoneWritingToDBChan)

	quitCapFailureMonitorChan := make(chan bool, 1)

	// initialize dpi library
	if dpierr := goProbe.InitDPI(); dpierr != nil {
		goProbe.SysLog.Crit("DPI: " + dpierr.Error())
		return
	}

	// initiate capture routine spawning
	capManager.MonitorFailures(quitCapFailureMonitorChan)
	capManager.StartCapture(DBPath)

	goProbe.SysLog.Debug("Waiting for user signals")

	/// MAIN SELECT FOR WRITE OUT AND PROGRAM TERMINATION ------------------------
	ticker := time.NewTicker(time.Second * time.Duration(DB_WRITE_INTERVAL))

	for {
		select {

		/// TICKER WHICH INITIATES DB WRITING ///
		case t := <-ticker.C:
			goProbe.SysLog.Debug("Initiating flow data flush")

			// take the current timestamp and provide it to each capture thread
			// in order to prepare the data
			timestamp := t.Unix()

			// call the data write out routine
			var ifaces []string
			for iface := range capManager.GetActive() {
				ifaces = append(ifaces, iface)
			}
			capManager.WriteDataToDB(ifaces, timestamp, DBPath+"/"+PcapStatsFilename, toStorageWriter)

			// recover unavailable interfaces
			capManager.RecoverInactive()

			// call the garbage collectors
			runtime.GC()
			debug.FreeOSMemory()

		/// SIGNAL HANDLING ///
		// read signal from signal channel
		case s := <-sigChan:

			// take the current timestamp and provide it to each capture thread
			// in order to prepare the data
			timestamp := time.Now().Unix()

			// if SIGUSR1 is received, the program should write out a pcap stats report
			// to the stats file, which can be handled by the goprobe.init script
			if s == syscall.SIGUSR1 {
				goProbe.SysLog.Info("Received SIGUSR1 signal: writing out pcap handle stats")

				// get the stats data from the individual maps
				if statsString, err := capManager.GetPcapStats(timestamp); err != nil {
					goProbe.SysLog.Warning(err.Error())
				} else {
					// write pcap handle stats to file
					capManager.WriteToStatsFile(statsString, DBPath+"/"+PcapStatsFilename)
				}

				// reload was called
			} else if s == syscall.SIGUSR2 {
				goProbe.SysLog.Info("Received SIGUSR2 signal: updating configuration")

				// call config parsing and capture routine stop/start
				if err := capManager.UpdateRunning(CFG_PATH); err != nil {
					goProbe.SysLog.Err("config reload error: " + err.Error())
				}

				// call garbage collector to clean up the old activity map
				runtime.GC()
				debug.FreeOSMemory()

				// a termination signal was received: initiate a database flush
				// and terminate the program
			} else if s == syscall.SIGTERM || s == os.Interrupt {
				goProbe.SysLog.Info("Received SIGTERM/SIGINT signal: flushing out the last batch of flows")

				// call the data write out routine
				var ifaces []string
				for iface := range capManager.GetActive() {
					ifaces = append(ifaces, iface)
				}
				capManager.WriteDataToDB(ifaces, timestamp, DBPath+"/"+PcapStatsFilename, toStorageWriter)

				// terminate the capture routines
				capManager.StopCapturing(ifaces)

				// clean up
				goProbe.SysLog.Info("Freeing resources and exiting")

				// explicitly call garbage collectors
				runtime.GC()
				debug.FreeOSMemory()

				// stop monitoring capture failures
				quitCapFailureMonitorChan <- true

				// de-allocate the memory claimed by the dpi library
				goProbe.DeleteDPI()

				// close all channels
				close(gpcThreadIsDoneWritingChan)
				close(isDoneWritingToDBChan)
				close(DBDataChan)
				close(sigChan)
				close(gpcThreadTerminatedChan)

				return
			}
		}
	}
}
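
// The main loop above multiplexes two event sources: a ticker that triggers a
// flow flush to the database every DB_WRITE_INTERVAL seconds, and OS signals
// that trigger a pcap stats dump (SIGUSR1), a configuration reload (SIGUSR2),
// or a final flush followed by shutdown (SIGTERM/SIGINT). The function below
// is a hypothetical, stripped-down sketch of that select pattern only; the
// flush and handleSignal callbacks are placeholders and not part of goProbe.
func runEventLoop(interval time.Duration, flush func(timestamp int64), handleSignal func(s os.Signal) (terminate bool)) {

	// register for the same set of signals as goProbe's main routine
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGTERM, syscall.SIGUSR1, syscall.SIGUSR2, os.Interrupt)

	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		// periodic write out, analogous to the DB write ticker above
		case t := <-ticker.C:
			flush(t.Unix())

		// signal handling; handleSignal returns true when the program
		// should terminate, mirroring the SIGTERM/SIGINT branch above
		case s := <-sigChan:
			if handleSignal(s) {
				return
			}
		}
	}
}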