Example #1
// LoadCases will load data stored case by case from a csv reader into a
// feature matrix that has already been filled with the corresponding empty
// features. It is a lower-level method generally called after initial setup
// to parse a fm, arff, csv etc.
func (fm *FeatureMatrix) LoadCases(data *csv.Reader, rowlabels bool) {
	count := 0
	for {
		record, err := data.Read()
		if err == io.EOF {
			break
		} else if err != nil {
			log.Print("Error:", err)
			break
		}

		caselabel := fmt.Sprintf("%v", count)
		if rowlabels {
			caselabel = record[0]
			record = record[1:]
		}
		fm.CaseLabels = append(fm.CaseLabels, caselabel)

		for i, v := range record {
			fm.Data[i].Append(v)
		}

		count++
	}

}
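LoadCases depends on the surrounding FeatureMatrix type, but its read loop is the canonical encoding/csv pattern: read until io.EOF and treat any other error as terminal. A minimal, self-contained sketch of just that loop, on invented input:

package main

import (
	"encoding/csv"
	"fmt"
	"io"
	"log"
	"strings"
)

func main() {
	in := "a,1\nb,2\nc,3\n" // illustrative data
	r := csv.NewReader(strings.NewReader(in))
	count := 0
	for {
		record, err := r.Read()
		if err == io.EOF {
			break
		} else if err != nil {
			log.Print("Error:", err)
			break
		}
		fmt.Printf("case %d: %v\n", count, record)
		count++
	}
}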
Example #2
// Read takes a CSV reader and reads it into a typed List of structs. Each row is read into a struct named structName, described by headers. If the original data contained headers, it is expected that the input reader has already consumed them and is pointing at the first data row.
// If kinds is non-empty, it will be used to type the fields in the generated structs; otherwise, they will be left as string fields.
// In addition to the list, Read returns the typeRef for the structs in the list, and lastly the typeDef of the structs.
func Read(r *csv.Reader, structName string, headers []string, kinds KindSlice, vrw types.ValueReadWriter) (l types.List, typeRef, typeDef types.Type) {
	typeRef, typeDef = MakeStructTypeFromHeaders(headers, structName, kinds)
	valueChan := make(chan types.Value, 128) // TODO: Make this a function param?
	listType := types.MakeCompoundType(types.ListKind, typeRef)
	listChan := types.NewStreamingTypedList(listType, vrw, valueChan)

	structFields := typeDef.Desc.(types.StructDesc).Fields

	for {
		row, err := r.Read()
		if err == io.EOF {
			close(valueChan)
			break
		} else if err != nil {
			panic(err)
		}

		fields := make(map[string]types.Value)
		for i, v := range row {
			if i < len(headers) {
				f := structFields[i]
				fields[f.Name] = StringToType(v, f.T.Kind())
			}
		}
		valueChan <- types.NewStruct(typeRef, typeDef, fields)
	}

	return <-listChan, typeRef, typeDef
}
Example #3
// streamCsv streams a CSV reader into the returned channel. Each CSV
// row is sent along with the header. The channel is closed when the
// input is exhausted or a read error occurs.
//
// Args
//  csv    - The csv.Reader that will be read from.
//  buffer - The "lines" buffer factor. Send 0 for an unbuffered channel.
func streamCsv(csv *csv.Reader, buffer int) (lines chan *CsvLine) {
	lines = make(chan *CsvLine, buffer)

	go func() {
		// get Header
		header, err := csv.Read()
		if err != nil {
			close(lines)
			return
		}

		i := 0

		for {
			line, err := csv.Read()

			if len(line) > 0 {
				i++
				lines <- &CsvLine{Header: header, Line: line}
			}

			if err != nil {
				fmt.Printf("Sent %d lines\n", i)
				close(lines)
				return
			}
		}
	}()

	return
}
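The CsvLine type and the exact error handling above are specific to this package, but the producer/consumer shape can be sketched with the standard library alone. In this simplified, hypothetical reduction the goroutine closes the channel on any read error (including io.EOF), so the consumer can simply range over it:

package main

import (
	"encoding/csv"
	"fmt"
	"strings"
)

// streamRows is a hypothetical helper mirroring streamCsv's shape without
// the CsvLine type: it closes the channel on the first error, so consumers
// can range over it until it drains.
func streamRows(r *csv.Reader, buffer int) <-chan []string {
	rows := make(chan []string, buffer)
	go func() {
		defer close(rows)
		for {
			row, err := r.Read()
			if err != nil {
				return
			}
			rows <- row
		}
	}()
	return rows
}

func main() {
	r := csv.NewReader(strings.NewReader("h1,h2\n1,2\n3,4\n"))
	for row := range streamRows(r, 0) {
		fmt.Println(row)
	}
}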
Example #4
// unmarshalCensusData reads a single census CSV record into the struct pointed to by v.
func unmarshalCensusData(reader *csv.Reader, v interface{}) error {
	record, err := reader.Read()
	if err != nil {
		return err
	}
	s := reflect.ValueOf(v).Elem()
	if s.NumField() != len(record) {
		return &csvFieldMismatch{s.NumField(), len(record)}
	}
	for i := 0; i < s.NumField(); i++ {
		f := s.Field(i)
		switch f.Type().String() {
		case "string":
			f.SetString(record[i])
		case "int":
			ival, err := strconv.ParseInt(record[i], 10, 0)
			if err != nil {
				return err
			}
			f.SetInt(ival)
		case "float64":
			fval, err := strconv.ParseFloat(record[i], 64)
			if err != nil {
				return err
			}
			f.SetFloat(fval)
		default:
			return &csvUnsupportedType{f.Type().String()}
		}
	}
	return nil
}
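Because the fields are filled positionally via reflection, the target struct must declare exactly one field per CSV column, in column order. A self-contained usage sketch; the Person type and unmarshalRecord helper are invented for illustration, with the same reflection logic trimmed down:

package main

import (
	"encoding/csv"
	"fmt"
	"reflect"
	"strconv"
	"strings"
)

type Person struct { // invented for illustration
	Name string
	Age  int
	GPA  float64
}

// unmarshalRecord is a trimmed copy of the reflection logic above.
func unmarshalRecord(reader *csv.Reader, v interface{}) error {
	record, err := reader.Read()
	if err != nil {
		return err
	}
	s := reflect.ValueOf(v).Elem()
	if s.NumField() != len(record) {
		return fmt.Errorf("field count mismatch: %d vs %d", s.NumField(), len(record))
	}
	for i := 0; i < s.NumField(); i++ {
		f := s.Field(i)
		switch f.Type().String() {
		case "string":
			f.SetString(record[i])
		case "int":
			ival, err := strconv.ParseInt(record[i], 10, 0)
			if err != nil {
				return err
			}
			f.SetInt(ival)
		case "float64":
			fval, err := strconv.ParseFloat(record[i], 64)
			if err != nil {
				return err
			}
			f.SetFloat(fval)
		default:
			return fmt.Errorf("unsupported type %s", f.Type())
		}
	}
	return nil
}

func main() {
	r := csv.NewReader(strings.NewReader("Ada,36,3.9\n"))
	var p Person
	if err := unmarshalRecord(r, &p); err != nil {
		fmt.Println("Error:", err)
		return
	}
	fmt.Printf("%+v\n", p)
}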
Example #5
// Parse columns from first header row or from flags
func parseColumns(reader *csv.Reader, skipHeader bool, fields string) ([]string, error) {
	var err error
	var columns []string
	if fields != "" {
		columns = strings.Split(fields, ",")

		if skipHeader {
			reader.Read() // force-consume the header row
		}
	} else {
		columns, err = reader.Read()
		if err != nil {
			return nil, err
		}
	}

	for _, col := range columns {
		if containsDelimiter(col) {
			return columns, errors.New("Please specify the correct delimiter with -d.\nHeader column contains a delimiter character: " + col)
		}
	}

	for i, col := range columns {
		columns[i] = postgresify(col)
	}

	return columns, nil
}
Example #6
// NewUserFromCSV creates a new User read from a CSV reader. Fields are
// expected in the sequence in which they appear in the struct, with
// arrays represented as semicolon-separated lists.
func NewUserFromCSV(reader *csv.Reader) (user *User, done bool) {
	line, err := reader.Read()
	if err != nil {
		return nil, true
	}
	if len(line) != 7 {
		return nil, false
	}
	// Skip comment lines starting with '#'
	firstElement := strings.TrimSpace(line[0])
	if len(firstElement) > 0 && firstElement[0] == '#' {
		return nil, false
	}
	level := line[2]
	ValidFrom, _ := time.Parse("2006-01-02 15:04", line[4])
	ValidTo, _ := time.Parse("2006-01-02 15:04", line[5])
	if !isValidLevel(level) {
		log.Printf("Got invalid level '%s'", level)
		return nil, false
	}
	return &User{
			Name:        line[0],
			ContactInfo: line[1],
			UserLevel:   Level(level),
			Sponsors:    strings.Split(line[3], ";"),
			ValidFrom:   ValidFrom, // field 4
			ValidTo:     ValidTo,   // field 5
			Codes:       strings.Split(line[6], ";")},
		false
}
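Note that the two time.Parse errors above are discarded, so a malformed date silently becomes the zero time.Time. The layout string uses Go's reference time (Mon Jan 2 15:04:05 MST 2006); a quick self-contained illustration with invented values:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Go layouts describe the format by example, written against the
	// reference time Mon Jan 2 15:04:05 MST 2006.
	validFrom, err := time.Parse("2006-01-02 15:04", "2024-05-01 09:30")
	if err != nil {
		fmt.Println("Error:", err)
		return
	}
	fmt.Println(validFrom.UTC())
}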
Example #7
// ReadToList takes a CSV reader and reads it into a typed List of structs. Each row is read into a struct named structName, described by headers. If the original data contained headers, it is expected that the input reader has already consumed them and is pointing at the first data row.
// If kinds is non-empty, it will be used to type the fields in the generated structs; otherwise, they will be left as string fields.
// In addition to the list, ReadToList returns the type of the structs in the list.
func ReadToList(r *csv.Reader, structName string, headers []string, kinds KindSlice, vrw types.ValueReadWriter) (l types.List, t *types.Type) {
	t, fieldOrder, kindMap := MakeStructTypeFromHeaders(headers, structName, kinds)
	valueChan := make(chan types.Value, 128) // TODO: Make this a function param?
	listChan := types.NewStreamingList(vrw, valueChan)

	for {
		row, err := r.Read()
		if err == io.EOF {
			close(valueChan)
			break
		} else if err != nil {
			panic(err)
		}

		fields := make(types.ValueSlice, len(headers))
		for i, v := range row {
			if i < len(headers) {
				fieldOrigIndex := fieldOrder[i]
				val, err := StringToValue(v, kindMap[fieldOrigIndex])
				if err != nil {
					d.Chk.Fail(fmt.Sprintf("Error parsing value for column '%s': %s", headers[i], err))
				}
				fields[fieldOrigIndex] = val
			}
		}
		valueChan <- types.NewStructWithType(t, fields)
	}

	return <-listChan, t
}
Example #8
func read_record(csv_reader *csv.Reader) (loadedRecord, error) {
	row, err := csv_reader.Read()
	if err != nil {
		return loadedRecord{}, err
	}
	var id, key string
	if len(row) == 2 {
		id, key = row[0], row[1]
	} else {
		key = row[0]
	}

	numeric, _ := regexp.Compile("[0-9]")
	new_key := numeric.ReplaceAllString(key, "")
	new_key = strings.ToLower(new_key)

	record := loadedRecord{
		id:           id,
		key:          new_key,
		original_key: key,
		length:       len(new_key) - 2,
	}
	record.trigrams = create_trigram(new_key, record.length)

	return record, nil
}
Example #9
func readCsv(ch chan []string) {
	var reader *csv.Reader
	if inputFn == "" {
		reader = csv.NewReader(os.Stdin)
	} else {
		file, err := os.Open(inputFn)
		if err != nil {
			fmt.Println("Error:", err)
			os.Exit(1)
		}
		defer file.Close()
		reader = csv.NewReader(file)
	}
	if !strictLen {
		reader.FieldsPerRecord = -1
	}
	r, _ := utf8.DecodeRuneInString(inputSep)
	reader.Comma = r
	reader.LazyQuotes = lazyQuotes

	for {
		record, err := reader.Read()
		if err == io.EOF {
			close(ch)
			break
		} else if err != nil {
			fmt.Println("Error:", err)
			close(ch)
			break
		}
		ch <- record
	}
}
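The configuration lines are the interesting part here: FieldsPerRecord = -1 disables the per-record field-count check, Comma swaps the delimiter, and LazyQuotes tolerates stray quotes. A self-contained sketch of the same knobs on invented semicolon-separated input:

package main

import (
	"encoding/csv"
	"fmt"
	"io"
	"strings"
)

func main() {
	in := "a;b;c\nd;e\n" // rows with different field counts
	r := csv.NewReader(strings.NewReader(in))
	r.Comma = ';'          // non-default delimiter
	r.FieldsPerRecord = -1 // allow variable-length records
	r.LazyQuotes = true    // tolerate stray quotes
	for {
		record, err := r.Read()
		if err == io.EOF {
			break
		} else if err != nil {
			fmt.Println("Error:", err)
			break
		}
		fmt.Println(record)
	}
}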
Example #10
func (t *table) start(reader *csv.Reader) {
	defer t.Stop()
	defer close(t.rows)

	headers, err := reader.Read()
	if err != nil {
		if perr, ok := err.(*csv.ParseError); ok {
			// Modifies the underlying err
			perr.Err = fmt.Errorf("%s. %s", perr.Err, "This can happen when the CSV is malformed, or when the wrong delimiter is used")
		}
		t.handleErr(err)
		return
	}

	reader.FieldsPerRecord = len(headers)
	for {
		if t.stopped {
			break
		}
		line, err := reader.Read()
		if err != nil {
			t.handleErr(err)
			return
		}
		t.rows <- convertLineToRow(line, headers)
	}
}
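The type assertion above relies on encoding/csv returning the concrete *csv.ParseError for malformed input; its exported Line and Column fields carry position information. A minimal demonstration on deliberately malformed data:

package main

import (
	"encoding/csv"
	"fmt"
	"strings"
)

func main() {
	// The second row has an unterminated quote, which triggers a ParseError.
	r := csv.NewReader(strings.NewReader("a,b\n\"bad,row\n"))
	for {
		_, err := r.Read()
		if err == nil {
			continue
		}
		if perr, ok := err.(*csv.ParseError); ok {
			fmt.Printf("parse error at line %d, column %d: %v\n",
				perr.Line, perr.Column, perr.Err)
		}
		break
	}
}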
Example #11
func processFile(reader *csv.Reader, keyIndex, valueIndex int, headerRow bool) ([]MapEntry, error) {
	entries := []MapEntry{}
	for i := 0; true; i++ {
		row, err := reader.Read()
		if err == io.EOF {
			break
		}

		if err != nil {
			return nil, err
		}

		if headerRow && i == 0 {
			continue
		}

		numFields := len(row)
		if keyIndex > numFields {
			return nil, fmt.Errorf("key index '%d' > number of fields '%d'", keyIndex, numFields)
		}
		if valueIndex > numFields {
			return nil, fmt.Errorf("value index '%d' > number of fields '%d'", valueIndex, numFields)
		}

		key := row[keyIndex-1]
		value := row[valueIndex-1]
		entries = append(entries, MapEntry{key, value})
	}
	return entries, nil
}
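Note the 1-based indices: the key comes from row[keyIndex-1]. A usage sketch on invented input; MapEntry is stubbed as a plain key/value pair, since its real definition is not shown in the source:

package main

import (
	"encoding/csv"
	"fmt"
	"io"
	"strings"
)

// MapEntry is assumed to be a simple key/value pair for this sketch.
type MapEntry struct {
	Key   string
	Value string
}

func main() {
	in := "name,city\nada,london\nalan,manchester\n"
	reader := csv.NewReader(strings.NewReader(in))
	entries := []MapEntry{}
	for i := 0; ; i++ {
		row, err := reader.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			fmt.Println("Error:", err)
			return
		}
		if i == 0 { // skip the header row
			continue
		}
		entries = append(entries, MapEntry{row[0], row[1]}) // 1-based columns 1 and 2
	}
	fmt.Println(entries)
}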
Example #12
func RecsFromCSVReader(r *csv.Reader, recs map[string][]RepoRelation) error {
	for {
		record, err := r.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			return fmt.Errorf("failed to read csv record: %v", err)
		}
		if len(record[0]) < prefixLen {
			log.Printf("not valid rec record %v", record)
			continue
		}
		sp1 := ShortPathFromURL(record[0])
		sp2 := ShortPathFromURL(record[1])
		c, err := strconv.ParseFloat(record[2], 64)
		if err != nil {
			log.Printf("Failed to ParseFloat(%q, 64): %v", record[2], err)
		}
		recs[sp1] = append(recs[sp1], RepoRelation{sp2, c})
	}
	for k := range recs {
		sort.Sort(ByScore(recs[k]))
	}
	log.Printf("%v recs have been loaded", len(recs))
	return nil
}
Example #13
func (d *Data) readFeatures(in *csv.Reader) {
	for {
		record, err := in.Read()

		if err == io.EOF {
			break
		}

		if err != nil {
			fmt.Println("ERROR: ", err)
			continue
		}

		x := len(record)
		conv := make([]float64, x)
		correctData := true

		for i := 0; i < x; i++ {
			conv[i], err = strconv.ParseFloat(record[i], 64)
			if err != nil {
				fmt.Println("ERROR: ", err)
				correctData = false
			}
		}
		if correctData {
			d.AppendRow(conv)
		}
	}
}
Example #14
func (d *Data) readHeader(r *csv.Reader) {
	record, err := r.Read()

	if err != nil {
		fmt.Println("ERROR: ", err)
		return
	}

	d.setFeatures(record)
}
Example #15
func readOneRecordAtaTime(reader *csv.Reader) {
	for {
		row, err := reader.Read()
		if err != nil {
			break
		}
		printRow(row)
	}
}
Example #16
func read(r *csv.Reader, f func(record []string)) {
	for {
		record, err := r.Read()
		if err == io.EOF {
			break
		} else if err != nil {
			log.Fatal("Error reading defects file ", err)
		}
		if record[1] != "Id" {
			f(record)
		}
	}
}
Example #17
func copyCSVRows(i *Import, reader *csv.Reader, ignoreErrors bool, delimiter string, columns []string) (error, int, int) {
	success := 0
	failed := 0

	for {
		cols := make([]interface{}, len(columns))
		record, err := reader.Read()

		if err == io.EOF {
			break
		}

		if err != nil {
			line := strings.Join(record, delimiter)
			failed++

			if ignoreErrors {
				os.Stderr.WriteString(line)
				continue
			} else {
				err = fmt.Errorf("%s: %s", err, line)
				return err, success, failed
			}
		}

		// Loop ensures we don't insert too many values and that
		// values are properly converted into empty interfaces
		for i, col := range record {
			cols[i] = col
		}

		err = i.AddRow(cols...)

		if err != nil {
			line := strings.Join(record, delimiter)
			failed++

			if ignoreErrors {
				os.Stderr.WriteString(line)
				continue
			} else {
				err = fmt.Errorf("%s: %s", err, line)
				return err, success, failed
			}
		}

		success++
	}

	return nil, success, failed
}
Example #18
func SMerge(r *csv.Reader, seek io.Seeker) []int {
	var m = make(map[string]int)
	var list []string
	var repeat []string
	r.Read() // skip the header row
	var key string
	var err error
	var line int
	for {
		list, err = r.Read()
		if err != nil {
			if err != io.EOF {
				log.Println("读取文件内容失败,错误信息:", err.Error())
			}
			break
		}
		if len(list) != 5 {
			log.Println("无效数据:", list)
			line++
			continue
		}
		key = strings.TrimSpace(list[1] + list[3])
		if key != "" {
			if _, ok := m[key]; ok {
				repeat = append(repeat, key)
				if !first {
					m[key] = line
				}
			} else {
				m[key] = line
			}
		}
		line++
	}
	var lines = make([]int, 0, len(m))
	for _, v := range repeat {
		if unrepeat {
			delete(m, v)
		} else {
			lines = append(lines, m[v])
		}
	}
	if unrepeat {
		for _, v := range m {
			lines = append(lines, v)
		}
	}
	sort.Ints(lines)
	return lines
}
Example #19
func ReadToMap(r *csv.Reader, headersRaw []string, pkIdx int, kinds KindSlice, vrw types.ValueReadWriter) types.Map {
	headers := make([]string, 0, len(headersRaw)-1)
	for i, h := range headersRaw {
		if i != pkIdx {
			headers = append(headers, h)
		}
	}

	var pkKind types.NomsKind
	if len(kinds) == 0 {
		pkKind = types.StringKind
	} else {
		pkKind = kinds[pkIdx]
		kinds = append(kinds[:pkIdx], kinds[pkIdx+1:]...)
	}

	t, fieldOrder, kindMap := MakeStructTypeFromHeaders(headers, "", kinds)

	kvChan := make(chan types.Value, 128)
	mapChan := types.NewStreamingMap(vrw, kvChan)
	for {
		row, err := r.Read()
		if err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}

		fieldIndex := 0
		var pk types.Value
		fields := make(types.ValueSlice, len(headers))
		for x, v := range row {
			if x == pkIdx {
				pk, err = StringToValue(v, pkKind)
			} else if fieldIndex < len(headers) {
				fieldOrigIndex := fieldOrder[fieldIndex]
				fields[fieldOrigIndex], err = StringToValue(v, kindMap[fieldOrigIndex])
				fieldIndex++
			}
			if err != nil {
				d.Chk.Fail(fmt.Sprintf("Error parsing value for column '%s': %s", headers[x], err))
			}
		}
		kvChan <- pk
		kvChan <- types.NewStructWithType(t, fields)
	}

	close(kvChan)
	return <-mapChan
}
Example #20
func (con Sif2Cx) readSIF(reader *csv.Reader, w *bufio.Writer) {
	var netName string
	if con.Name == "" {
		netName = "CX from SIF file"
	} else {
		netName = con.Name
	}

	// Set delimiter and quoting behavior
	reader.Comma = con.Delimiter
	reader.LazyQuotes = true

	// nodes already serialized
	nodesExists := make(map[string]int64)

	nodeCounter := int64(0)

	w.Write([]byte("["))

	for {
		record, err := reader.Read()

		if err == io.EOF {
			// Add network attributes at the end of doc.
			netAttr := cx.NetworkAttribute{N: "name", V: netName}

			attrList := []cx.NetworkAttribute{netAttr}
			netAttrs := make(map[string][]cx.NetworkAttribute)

			netAttrs["networkAttributes"] = attrList

			json.NewEncoder(w).Encode(netAttrs)

			w.Write([]byte("]"))
			w.Flush()
			break
		}

		if err != nil {
			log.Fatal(err)
		}

		if len(record) == 3 {
			toJson(record, nodesExists, &nodeCounter, w)
		}

		w.Flush()
	}
}
Example #21
func Signal(reader *csv.Reader) ([]float64, error) {
	fields, err := reader.Read()
	if err != nil {
		return nil, err
	}
	ndim := len(fields)
	point := make([]float64, ndim)
	for i := 0; i < ndim; i++ {
		point[i], err = strconv.ParseFloat(fields[i], 64)
		if err != nil {
			return nil, err
		}
	}
	return point, nil
}
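Signal consumes exactly one row per call, so callers typically loop until io.EOF. A self-contained sketch on invented input; signal is a local copy of the parsing logic above:

package main

import (
	"encoding/csv"
	"fmt"
	"io"
	"strconv"
	"strings"
)

// signal mirrors the Signal function above: one CSV row in, one
// []float64 point out.
func signal(reader *csv.Reader) ([]float64, error) {
	fields, err := reader.Read()
	if err != nil {
		return nil, err
	}
	point := make([]float64, len(fields))
	for i, f := range fields {
		if point[i], err = strconv.ParseFloat(f, 64); err != nil {
			return nil, err
		}
	}
	return point, nil
}

func main() {
	r := csv.NewReader(strings.NewReader("1.0,2.5\n3.5,4.0\n"))
	for {
		point, err := signal(r)
		if err == io.EOF {
			break
		}
		if err != nil {
			fmt.Println("Error:", err)
			return
		}
		fmt.Println(point)
	}
}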
Example #22
func readHeaders(reader *csv.Reader, filter mapset.Set) (headers map[string]int) {
	line, err := reader.Read()
	util.Warn(err, "reading headers")
	headers = make(map[string]int, len(line))
	for i, k := range line {
		//if _, ok := c.fields[k]; !ok {
		k = util.Slugged(k, "_")
		if filter == nil || filter.Contains(k) {
			headers[k] = i
		}
		//}
	}
	util.Debug("Headers %v", headers)
	return
}
Example #23
func inferTypes(csv *csv.Reader, fields []string,
	numLines int) (map[string]interface{}, error) {

	template := make(map[string]interface{})
	learners := make([]*typeguessing.Learner, len(fields))
	for i := 0; i < len(learners); i++ {
		learners[i] = typeguessing.NewLearner()
	}
	for i := 0; i < numLines; i++ {
		line, err := csv.Read()
		if err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		for j := range fields {
			learners[j].Feed(line[j])
		}
	}
	for i, f := range fields {
		exampleVal := learners[i].BestGuess()
		template[f] = exampleVal
	}
	return template, nil
}
Example #24
func ReadToMap(r *csv.Reader, headers_raw []string, pkIdx int, kinds KindSlice, vrw types.ValueReadWriter) (m types.Map) {
	headers := make([]string, 0, len(headers_raw)-1)
	for i, h := range headers_raw {
		if i != pkIdx {
			headers = append(headers, types.EscapeStructField(h))
		}
	}

	var pkKind types.NomsKind
	if len(kinds) == 0 {
		pkKind = types.StringKind
	} else {
		pkKind = kinds[pkIdx]
		kinds = append(kinds[:pkIdx], kinds[pkIdx+1:]...)
	}

	t := MakeStructTypeFromHeaders(headers, "", kinds)
	kindMap := make(map[string]types.NomsKind, len(headers))
	t.Desc.(types.StructDesc).IterFields(func(name string, t *types.Type) {
		kindMap[name] = t.Kind()
	})

	m = types.NewMap()
	fields := map[string]types.Value{}
	var pk types.Value
	for {
		row, err := r.Read()
		if err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}

		fieldIndex := 0
		for x, v := range row {
			if x == pkIdx {
				pk = StringToType(v, pkKind)
			} else if fieldIndex < len(headers) {
				name := headers[fieldIndex]
				fields[name] = StringToType(v, kindMap[name])
				fieldIndex++
			}
		}
		m = m.Set(pk, types.NewStructWithType(t, fields))
	}
	return
}
Example #25
func QMerge(r *csv.Reader) {
	var m = make(map[string][]string)
	var list []string
	var repeat []string
	r.Read() // skip the header row
	var err error
	var key string
	for {
		list, err = r.Read()
		if err != nil {
			if err != io.EOF {
				log.Println("读取文件内容失败,错误信息:", err.Error())
			}
			break
		}
		if len(list) != 5 {
			log.Println("无效数据:", list)
			continue
		}
		key = strings.TrimSpace(list[1] + list[3])
		if key != "" {
			if _, ok := m[key]; ok {
				repeat = append(repeat, key)
				if !first {
					m[key] = list
				}
			} else {
				m[key] = list
			}
		}
	}
	for _, value := range repeat {
		if unrepeat {
			delete(m, value)
		} else {
			log.Println(m[value])
		}
	}

	if unrepeat {
		for _, value := range m {
			log.Println(value)
		}
	}
}
Example #26
// NewCSVReader starts a new table writer with a csv.Reader.
// This enables customisation such as reader.Comma = ';'.
// See http://golang.org/src/pkg/encoding/csv/reader.go?s=3213:3671#L94
func NewCSVReader(writer io.Writer, csvReader *csv.Reader) (*table, error) {
	// Read the first row
	headers, err := csvReader.Read()
	if err != nil {
		return &table{}, err
	}
	t := NewWriter(writer)
	t.SetHeader(headers)
	for {
		record, err := csvReader.Read()
		if err == io.EOF {
			break
		} else if err != nil {
			return &table{}, err
		}
		t.Append(record)
	}
	return t, nil
}
Example #27
// readCost attempts to read a single field of a single CSV record and
// parse it as a float64. It returns io.EOF if no record is found.
func readCost(cr *csv.Reader) (float64, error) {
	rec, err := cr.Read()
	if err == io.EOF {
		// Empty reply: cannot solve the challenges.
		return 0, io.EOF
	} else if err != nil {
		return 0, err
	}

	if len(rec) != 1 {
		return 0, fmt.Errorf("expected one field, got %v", rec)
	}
	cost, err := strconv.ParseFloat(rec[0], 64)
	if err != nil {
		return 0, fmt.Errorf("expected number, got %q", rec[0])
	}

	return cost, nil
}
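Because readCost passes io.EOF through unchanged, callers can treat it as an "empty reply" sentinel rather than a failure. A hypothetical caller that sums costs until the stream runs out, with readCost reproduced so the sketch stands alone:

package main

import (
	"encoding/csv"
	"fmt"
	"io"
	"strconv"
	"strings"
)

// readCost is reproduced from the example above.
func readCost(cr *csv.Reader) (float64, error) {
	rec, err := cr.Read()
	if err == io.EOF {
		return 0, io.EOF
	} else if err != nil {
		return 0, err
	}
	if len(rec) != 1 {
		return 0, fmt.Errorf("expected one field, got %v", rec)
	}
	cost, err := strconv.ParseFloat(rec[0], 64)
	if err != nil {
		return 0, fmt.Errorf("expected number, got %q", rec[0])
	}
	return cost, nil
}

func main() {
	cr := csv.NewReader(strings.NewReader("1.5\n2.25\n"))
	total := 0.0
	for {
		cost, err := readCost(cr)
		if err == io.EOF {
			break // empty reply: no more records
		}
		if err != nil {
			fmt.Println("Error:", err)
			return
		}
		total += cost
	}
	fmt.Println("total:", total)
}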
Example #28
func (p *Proc) procRecsFromCSVReader(r *csv.Reader) error {
	repoCount := len(p.ShortPathes)
	relationRaw := make([][]repoRelation, repoCount)
	relationNorm := make([][]repoRelation, repoCount)
	selfCounts := make([]float64, repoCount)
	n := 0
	r.Read() // skip the header row
	for {
		record, err := r.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			return fmt.Errorf("failed to read csv record: %v", err)
		}
		repo1 := p.repoIndexFromURL(record[0])
		repo2 := p.repoIndexFromURL(record[1])
		count, err := strconv.Atoi(record[2])
		if err != nil {
			log.Printf("Failed to ParseFloat(%q, 64): %v", record[2], err)
		}
		relationRaw[repo1] = append(relationRaw[repo1], repoRelation{repo2, float64(count)})
		if repo1 == repo2 {
			selfCounts[repo1] = float64(count)
		}
		n++
		if n%500000 == 0 {
			log.Printf("Processed %v recs", n)
		}
	}

	for i, rs := range relationRaw {
		for _, r := range rs {
			normScore := r.score / math.Sqrt(selfCounts[i]) / math.Sqrt(selfCounts[r.i])
			relationNorm[i] = append(relationNorm[i], repoRelation{r.i, normScore})
		}
	}

	p.RecsRaw = repoRelationsToRecs(relationRaw, p.TopN)
	p.RecsNorm = repoRelationsToRecs(relationNorm, p.TopN)

	return nil
}
Example #29
// Searches the first line for the given names and returns the column indices
func (r *CsvFileReader) getColIndex(reader *csv.Reader, keyColName string, valColName string) (keyColIndex int, valColIndex int, err error) {
	keyColIndex, valColIndex = -1, -1
	line, err := reader.Read()
	if err != nil {
		return
	}
	for k, v := range line {
		if v == keyColName {
			keyColIndex = k
		}
		if v == valColName {
			valColIndex = k
		}
	}
	if keyColIndex >= 0 && valColIndex >= 0 {
		return
	}
	err = fmt.Errorf("Failed to find index for columns '%s' and '%s!", keyColName, valColName)
	return
}
Example #30
/* Given a CSV reader, populate a histogram table of the field counts
 */
func countFields(csvReader *csv.Reader, histogram []int64) {
	histogramLen := len(histogram)
	csvReader.FieldsPerRecord = -1 // tell the CSV reader to expect an unknown field count
	for {
		strs, err := csvReader.Read()
		if err != nil {
			break
		}
		f := len(strs)
		if f < histogramLen {
			if f > 0 { // there is no such thing as a record with zero fields
				histogram[f]++
			}
		} else {
			fmt.Printf("\nWARNING: record with %d fields exceeds histogram length %d.", f, histogramLen)
		}
	}
}
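A usage sketch for the histogram on invented input with mixed field counts; index f counts the records that have exactly f fields (countFields is reproduced in trimmed form so the sketch stands alone):

package main

import (
	"encoding/csv"
	"fmt"
	"strings"
)

// countFields is a trimmed copy of the function above.
func countFields(csvReader *csv.Reader, histogram []int64) {
	csvReader.FieldsPerRecord = -1 // accept any field count
	for {
		strs, err := csvReader.Read()
		if err != nil {
			break
		}
		if f := len(strs); f > 0 && f < len(histogram) {
			histogram[f]++
		}
	}
}

func main() {
	in := "a,b\nc\nd,e,f\ng,h\n" // records with 2, 1, 3, and 2 fields
	hist := make([]int64, 8)
	countFields(csv.NewReader(strings.NewReader(in)), hist)
	for f, n := range hist {
		if n > 0 {
			fmt.Printf("%d record(s) with %d field(s)\n", n, f)
		}
	}
}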