// PathFinalOverflowKnotFromBytes scans the final-overflow section for the
// record anchored at `step` and returns its knot as a 2-element zipper:
// knot_zipper[allele] is a flat list of (variant_id, span) pairs.
//
// Layout (as read here): fin_ovf_bytes begins with rec_n one-byte codes,
// followed by the per-record data.  Each data record is a sequence of
// dlug-encoded uint64s: anchor_step, nallele, then per allele a length
// followed by that many (var_id, span) pairs.
//
// Returns nil when a record with anchor_step beyond the requested step is
// reached first (i.e. the step has no final-overflow record).
func PathFinalOverflowKnotFromBytes(rec_n uint64, fin_ovf_bytes []byte, step uint64) (knot_zipper [][]int) {
	knot_zipper = make([][]int, 2)

	code_bytes := fin_ovf_bytes[0:rec_n]
	data_bytes := fin_ovf_bytes[rec_n:]

	dat_n := 0
	for i := uint64(0); i < rec_n; i++ {
		// NOTE(review): records whose code byte is non-zero are skipped
		// WITHOUT advancing dat_n.  This is only correct if non-zero-code
		// records contribute no bytes to data_bytes -- confirm against the
		// writer / format spec.
		if code_bytes[i] != 0 {
			continue
		}

		ele_anchor_step, dn := dlug.ConvertUint64(data_bytes[dat_n:])
		dat_n += dn

		// Records are assumed sorted by anchor step; once we pass the
		// requested step there is nothing to find.
		if ele_anchor_step > step {
			return nil
		}

		ele_nallele, dn := dlug.ConvertUint64(data_bytes[dat_n:])
		dat_n += dn

		// NOTE(review): knot_zipper has exactly 2 allele slots; an
		// ele_nallele > 2 would panic below -- presumably the format
		// guarantees nallele <= 2, verify.
		for a := uint64(0); a < ele_nallele; a++ {
			ele_knot_allele_len, dn := dlug.ConvertUint64(data_bytes[dat_n:])
			dat_n += dn

			for knot_allele := uint64(0); knot_allele < ele_knot_allele_len; knot_allele++ {
				ele_var_id, dn := dlug.ConvertUint64(data_bytes[dat_n:])
				dat_n += dn
				ele_span, dn := dlug.ConvertUint64(data_bytes[dat_n:])
				dat_n += dn

				// Only the record anchored exactly at the requested step
				// contributes to the result; earlier records are decoded
				// purely to advance dat_n.
				if ele_anchor_step == step {
					knot_zipper[a] = append(knot_zipper[a], int(ele_var_id))
					knot_zipper[a] = append(knot_zipper[a], int(ele_span))
				}
			}
		}

		if ele_anchor_step == step {
			break
		}
	}

	return
}
func GetPathOverflow(path_bytes []byte) PathOverflowStruct { st := 0 name, dn := byte2string(path_bytes[st:]) _ = name st += dn vec_len := byte2uint64(path_bytes[st : st+8]) st += 8 st += 8 * int(vec_len) po := PathOverflowStruct{} po.Length = byte2uint64(path_bytes[st : st+8]) st += 8 po.Stride = byte2uint64(path_bytes[st : st+8]) st += 8 ofs_len := (po.Length + po.Stride - 1) / po.Stride fmt.Printf("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! %d\n", ofs_len) if (po.Length % po.Stride) != 0 { ofs_len++ } fmt.Printf("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! %d\n", ofs_len) po.Offset = make([]uint64, ofs_len) for i := (0); i < len(po.Offset); i++ { po.Offset[i] = byte2uint64(path_bytes[st : st+8]) st += 8 } pos_len := (po.Length + po.Stride - 1) / po.Stride po.Position = make([]uint64, pos_len) for i := (0); i < len(po.Position); i++ { po.Position[i] = byte2uint64(path_bytes[st : st+8]) st += 8 } start_map := st po.IntMap = make([]int, po.Length) for i := uint64(0); i < po.Length; i++ { u, dn := dlug.ConvertUint64(path_bytes[st:]) st += dn po.IntMap[i] = int(u) } po.Map = path_bytes[start_map:st] return po }
func CGFBegPeel(cgf_bytes []byte) (magic uint64, cgfver []byte, libver []byte, path_n uint64, tmap_len uint64, tmap []byte, step_per_path_b []byte, path_off_b []byte, path_b []byte) { n := uint64(0) magic = byte2uint64(cgf_bytes[n : n+8]) n += 8 _n0, dn := dlug.ConvertUint64(cgf_bytes[n:]) n += uint64(dn) cgfver = cgf_bytes[n : n+_n0] n += _n0 _n1, dn := dlug.ConvertUint64(cgf_bytes[n:]) n += uint64(dn) libver = cgf_bytes[n : n+_n1] n += _n1 path_n = byte2uint64(cgf_bytes[n : n+8]) n += 8 tmap_len = byte2uint64(cgf_bytes[n : n+8]) n += 8 tmap = cgf_bytes[n : n+tmap_len] n += tmap_len step_per_path_b = cgf_bytes[n : n+8*path_n] n += 8 * path_n path_off_b = cgf_bytes[n : n+8*(path_n+1)] n += 8 * (path_n + 1) path_b = cgf_bytes[n:] return }
func PathBegInfo(path_bytes []byte) ([]byte, uint64, []byte, uint64) { n := uint64(0) // Skip name (length + string bytes) // name_n, dn := dlug.ConvertUint64(path_bytes[n:]) n += uint64(dn) name := path_bytes[n : n+name_n] n += name_n ntile := byte2uint64(path_bytes[n : n+8]) n += 8 veclen := (ntile + 31) / 32 vec_bytes := path_bytes[n : n+8*veclen] n += 8 * veclen return name, ntile, vec_bytes, n }
//func unpack_tilemap(tilemap_bytes []byte) []TileMapEntry { func UnpackTileMap(tilemap_bytes []byte) []TileMapEntry { m := make([]TileMapEntry, 0, 1024) var n0, n1, vid, span uint64 var dn int count := 0 pos := 0 for pos < len(tilemap_bytes) { tme := TileMapEntry{} n0, dn = dlug.ConvertUint64(tilemap_bytes[pos:]) pos += dn n1, dn = dlug.ConvertUint64(tilemap_bytes[pos:]) pos += dn tme.TileMap = pos tme.Variant = make([][]int, 2) tme.Span = make([][]int, 2) for i := uint64(0); i < n0; i++ { vid, dn = dlug.ConvertUint64(tilemap_bytes[pos:]) pos += dn tme.Variant[0] = append(tme.Variant[0], int(vid)) span, dn = dlug.ConvertUint64(tilemap_bytes[pos:]) pos += dn tme.Span[0] = append(tme.Span[0], int(span)) } for i := uint64(0); i < n1; i++ { vid, dn = dlug.ConvertUint64(tilemap_bytes[pos:]) pos += dn tme.Variant[1] = append(tme.Variant[1], int(vid)) span, dn = dlug.ConvertUint64(tilemap_bytes[pos:]) pos += dn tme.Span[1] = append(tme.Span[1], int(span)) } m = append(m, tme) count++ } return m }
// Peel traces a single (path, step) position through a CGF blob, printing
// everything it finds to stdout: header fields, the packed cache-vector
// word for the step, then -- depending on the cache value -- the overflow
// map entry, the final-overflow knot, and any low-quality knot.  This is a
// diagnostic routine: it returns nothing and its only effect is the
// printed trace.
func Peel(cgf_bytes []byte, path, step int) {
	b8 := make([]byte, 8)
	n := uint64(0)

	//===========
	// CGF HEADER
	//
	magic, cgfver, libver, path_n, tmap_len, tmap, step_per_path_b, path_off_b, path_b := CGFBegPeel(cgf_bytes)

	//DEBUG
	//
	tobyte64(b8, magic)
	fmt.Printf("magic: %08x %v (%s)\n", magic, b8, b8)
	fmt.Printf("cgfver: %s\n", cgfver)
	fmt.Printf("libver: %s\n", libver)
	fmt.Printf("path_n: %d\n", path_n)
	fmt.Printf("tmap_len: %d (%d)\n", tmap_len, len(tmap))
	fmt.Printf("step_per_path (%d bytes)\n", len(step_per_path_b))
	fmt.Printf("path_off_b (%d bytes)\n", len(path_off_b))
	fmt.Printf("path_b (%d bytes)\n", len(path_b))
	//
	//DEBUG

	// Locate this path's byte block via the offset table.
	path_b_s := byte2uint64(path_off_b[path*8:])

	//DEBUG
	//
	fmt.Printf("path_b_s %d\n", path_b_s)
	//
	//DEBUG

	path_bytes := path_b[path_b_s:]

	//DEBUG
	//
	fmt.Printf(">>> path %x, s %x, e %x\n", path, path_b_s, -1)
	//
	//DEBUG

	//============
	// PATH HEADER
	//
	pathname, ntile, vec_bytes, dn := PathBegInfo(path_bytes)
	n += dn

	//DEBUG
	fmt.Printf("PathBeg '%s' %v %v %v (+%v)\n", pathname, ntile, len(vec_bytes), n, dn)

	ovf_bytes := path_bytes[n:]

	// Each 8-byte vector word covers 32 steps: high 32 bits are per-step
	// "non-canonical" flags, low 32 bits hold up to eight 4-bit hexits.
	vec_byte_pos := step / 32
	vec_byte_pos *= 8
	step_off := step % 32
	vec_val := byte2uint64(vec_bytes[vec_byte_pos:])
	canon_bit := (vec_val & (1 << (32 + uint(step_off))))

	//DEBUG
	fmt.Printf("... vec_val %8x %8x\n", vec_val>>32, vec_val&0xffffffff)

	// Count how many non-canonical steps precede this one in the word;
	// that index selects the hexit in the low 32 bits.
	noncan_count := 0
	for i := 0; i < step_off; i++ {
		fmt.Printf("(%d) %x %v\n", i, vec_val&(1<<(32+uint(i))), vec_val&(1<<(32+uint(step_off))) > 0)
		if vec_val&(1<<(32+uint(i))) > 0 {
			noncan_count++
		}
	}
	zval := uint64(0)
	if noncan_count < 8 {
		zval = vec_val & (0xf << (4 * uint(noncan_count)))
	}
	fmt.Printf("... ......b %8x %8x noncan_count %d, step_off %d\n", (vec_val&(1<<(32+uint(step_off))))>>32, zval, noncan_count, step_off)

	// Canonical tile: nothing more to look up.
	if canon_bit == 0 {
		//DEBUG
		fmt.Printf("%x.%x canon (0,0)\n", path, step)
		return
	}

	cache_map_val := CacheMapVal(vec_val, uint(step_off))

	// 0 marks a spanning tile; 1..0xc are direct tile-map cache hits.
	if cache_map_val == 0 {
		//DEBUG
		fmt.Printf("%x.%x spanning (*)\n", path, step)
		return
	}

	if cache_map_val > 0 && cache_map_val < 0xd {
		//DEBUG
		fmt.Printf("%x.%x cache (%x)\n", path, step, cache_map_val)
		return
	}

	//DEBUG
	if cache_map_val == 0xd {
		fmt.Printf("complex (%x)\n", cache_map_val)
	}

	// 0xd = complex; nothing further decoded here.
	if cache_map_val == 0xd {
		return
	}

	//==============
	// OVERFLOW INFO
	//
	ovf_len, ovf_stride, ovf_mbc, ovf_off_b, ovf_pos_b, ovf_map_b, ovf_dn := PathOverflowPeel(ovf_bytes)
	n += ovf_dn
	fin_ovf_bytes := path_bytes[n:]

	fin_ovf_rec_n, fin_ovf_byte_len, fin_ovf_record_bytes, dn := PathFinalOverflowPeel(fin_ovf_bytes)
	n += dn
	loq_info_bytes := path_bytes[n:]

	loq_count, loq_code, loq_stride, loq_offset_bytes, loq_step_pos_bytes, loq_hom_flag_bytes, loq_aux_flag_count, loq_aux_flag_bytes, loq_info_byte_count, loq_bytes, dn := PathLowQualityPeel(loq_info_bytes)
	_ = loq_bytes

	fmt.Printf("loq(count %d, code %d, stride %d, aux_flag_bc %d, byte_count %d)\n", loq_count, loq_code, loq_stride, loq_aux_flag_count, loq_info_byte_count)
	fmt.Printf("loq_offs: %v\n", loq_offset_bytes)
	fmt.Printf("loq_step: %v\n", loq_step_pos_bytes)
	fmt.Printf("loq_homf: %v\n", loq_hom_flag_bytes)
	fmt.Printf("loq_auxf: %v\n", loq_aux_flag_bytes)

	//DEBUG
	// (empty placeholder branch, kept as-is)
	if cache_map_val == -1 {
	}

	loq_flag := false

	//DEBUG
	//
	off_z := (ovf_len + ovf_stride - 1) / ovf_stride
	off64 := make([]uint64, off_z)
	pos64 := make([]uint64, off_z)
	for i := uint64(0); i < off_z; i++ {
		off64[i] = byte2uint64(ovf_off_b[8*i:])
		pos64[i] = byte2uint64(ovf_pos_b[8*i:])
	}
	fmt.Printf(" off: %v\n pos: %v\n", off64, pos64)
	//
	//DEBUG

	//DEBUG
	//
	fmt.Printf("OvfBeg %v %v %v %v %v %v\n", ovf_len, ovf_stride, ovf_mbc, len(ovf_off_b), len(ovf_pos_b), len(ovf_map_b))
	//
	//DEBUG

	// Binary search the overflow position table for the step, then walk
	// forward through the dlug-encoded map entries.
	_bpos := _bsrch8(ovf_pos_b, uint64(step))
	pos_entry := byte2uint64(ovf_pos_b[_bpos : _bpos+8])
	map_offset_b := byte2uint64(ovf_off_b[_bpos : _bpos+8])

	//DEBUG
	fmt.Printf("cache ovf: step %d, _bpos %d, pos_entry %d, map_offset_b %d\n", step, _bpos, pos_entry, map_offset_b)

	del_overflow := RelativeOvfCount(vec_bytes, pos_entry, uint64(step))
	fmt.Printf("del_overflow: %d\n", del_overflow)

	// map_val ends up holding the overflow map entry for this step.
	map_val := uint64(0)
	for ovf_entry := 0; ovf_entry < del_overflow; ovf_entry++ {
		var dn int
		map_val, dn = dlug.ConvertUint64(ovf_map_b[map_offset_b:])
		map_offset_b += uint64(dn)
		fmt.Printf("... ovf_entry %v (del_overlfow %v), map_val %v\n", ovf_entry, del_overflow, map_val)
	}

	//fin_ovf_rec_n, fin_ovf_byte_len, fin_ovf_record_bytes,dn :=
	//  PathFinalOverflowPeel(fin_ovf_bytes)
	//n+=dn
	//loq_count,loq_code,loq_stride,loq_offset_bytes,loq_step_pos_bytes,loq_hom_flag_bytes,loq_info_byte_count,loq_bytes,dn :=
	//  PathLowQualityPeel(loq_info_bytes)

	// 1024 = spanning sentinel, 1025 = "look in final overflow".
	if map_val == 1024 {
		// spanning
		//
		fmt.Printf("Spanning? (map_val %v)\n", map_val)
		return
	} else if map_val == 1025 {
		// Final overflow lookup
		//
		fmt.Printf("FinalOvf? (map_val %v)\n", map_val)

		_ = fin_ovf_record_bytes

		knot_zipper := PathFinalOverflowKnotFromBytes(fin_ovf_rec_n, fin_ovf_bytes, uint64(step))
		if knot_zipper != nil {
			//DEBUG
			fmt.Printf("fin_ovf_len %d, fin_ovf_rec_n %d, fin_ovf_byte_len %d\n", fin_ovf_rec_n, fin_ovf_byte_len, dn)
			fmt.Printf("cache overflow (%d)\n", cache_map_val)
			fmt.Printf("knot_zipper: %v\n", knot_zipper)
		} else {
			// parse fastj
			//
			fmt.Printf("FastJ output?\n")
		}
	}

	//var _dn int
	//map_val,_dn = dlug.ConvertUint64(ovf_map_b[map_offset_b:])
	//map_offset_b+=uint64(_dn)

	//DEBUG
	fmt.Printf(">>>>> map_val %v, pos_entry %v, map_ffset_b %v\n", map_val, pos_entry, map_offset_b)

	if cache_map_val == -1 {
		/*
			for pos_entry < step {
				v,dn := dlug.ConvertUint64(ovf_map_b[map_offset_b:])
				map_offset_b+=dn
				pos_entry++
			}
		*/
	} else {
		// 0xe in the cache marks a low-quality tile.
		if cache_map_val == 0xe {
			loq_flag = true
		}
		//DEBUG
		fmt.Printf("... (%x) (loq %v)\n", cache_map_val, loq_flag)
	}

	// LOW QUALITY INFO

	// Re-derive the low-quality flag from the aux flag bitmap (one bit per
	// step) rather than trusting the cache value above.
	loq_flag = false
	loq_q := step / 8
	loq_r := step % 8
	if loq_aux_flag_bytes[loq_q]&(1<<uint(loq_r)) > 0 {
		loq_flag = true
	}

	if loq_flag {
		fmt.Printf("LOWQ\n")
	}

	if loq_flag {
		loq_knot_zipper := PathLowQualityKnotZipper(loq_info_bytes, uint64(step))
		for i := 0; i < len(loq_knot_zipper); i++ {
			for j := 0; j < len(loq_knot_zipper[i]); j++ {
				fmt.Printf("[%d][%d]", i, j)
				for k := 0; k < len(loq_knot_zipper[i][j]); k += 2 {
					fmt.Printf(" {%d+%d}", loq_knot_zipper[i][j][k], loq_knot_zipper[i][j][k+1])
					//fmt.Printf("[%d][%d][%d] %d\n", i, j, k, loq_knot_zipper[i][j][k])
				}
				fmt.Printf("\n")
			}
		}
	}

	//DEBUG
	fmt.Printf("\n")
}
// PathLowQualityKnotZipper extracts the low-quality (no-call) regions for
// the tile anchored at anchor_step.  The result is indexed as
// [allele][tile][flat list of (delta_pos, length) pairs]; for hom records a
// single encoded record is mirrored into both allele slots.
//
// Strategy: binary-search the stride-sampled step/offset tables to find a
// starting record, count how many low-quality steps lie between that base
// step and the target (via the aux flag bitmap), then decode records
// forward, only materializing the final one.
func PathLowQualityKnotZipper(loq_info_bytes []byte, anchor_step uint64) [][][]int {
	loq_knot := make([][][]int, 2)

	loq_rec_count, loq_code, loq_stride, loq_offset_bytes, loq_step_pos_bytes, loq_hom_flag_bytes, loq_aux_flag_count, loq_aux_flag_bytes, loq_info_byte_count, loq_bytes, dn := PathLowQualityPeel(loq_info_bytes)
	_ = loq_rec_count
	_ = loq_code
	_ = loq_stride
	_ = loq_offset_bytes
	_ = loq_step_pos_bytes
	_ = loq_hom_flag_bytes
	_ = loq_aux_flag_count
	_ = loq_aux_flag_bytes
	_ = loq_info_byte_count
	_ = loq_bytes
	_ = dn

	//fmt.Printf(">>> loq_rec_count %d\n", loq_rec_count)

	// Find the sampled entry at or before anchor_step.  _bpos is a byte
	// offset into the 8-byte-per-entry tables; _pos is the entry index.
	_bpos := _bsrch8(loq_step_pos_bytes, anchor_step)
	base_step := byte2uint64(loq_step_pos_bytes[_bpos : _bpos+8])
	_ = base_step
	byte_offset := byte2uint64(loq_offset_bytes[_bpos : _bpos+8])
	_ = byte_offset

	_pos := _bpos / 8

	// Count low-quality steps strictly between base_step and anchor_step;
	// that many records must be skipped before the one we want.
	loq_count := uint64(0)
	for s := base_step; s < anchor_step; s++ {
		if loq_aux_flag_bytes[s/8]&(1<<uint(s%8)) > 0 {
			loq_count++
		}
	}

	loq_base_pos := loq_stride * _pos

	// Decode loq_count+1 records; only the last (i == loq_count) is kept.
	for i := uint64(0); i <= loq_count; i++ {
		loq_pos := loq_base_pos + i
		hom_flag := false
		if loq_hom_flag_bytes[loq_pos/8]&(1<<uint(loq_pos%8)) > 0 {
			hom_flag = true
		}
		//DEBUG
		//fmt.Printf(">>>> loq_pos %d [%d,%d] hom %v\n", loq_pos, loq_pos/8, loq_pos%8, hom_flag)

		if hom_flag {
			// Hom record: one ntile count, one entry list, mirrored into
			// both alleles.
			ntile, dn := dlug.ConvertUint64(loq_bytes[byte_offset:])
			byte_offset += uint64(dn)

			if i == loq_count {
				loq_knot[0] = make([][]int, ntile)
				loq_knot[1] = make([][]int, ntile)
			}

			//DEBUG
			//fmt.Printf("  ntile %d\n", ntile)

			for tile_idx := uint64(0); tile_idx < ntile; tile_idx++ {
				ent_len, dn := dlug.ConvertUint64(loq_bytes[byte_offset:])
				byte_offset += uint64(dn)

				//DEBUG
				//fmt.Printf("  t%d[%d]", tile_idx, ent_len)

				// NOTE(review): the writer emits ONE length per (delpos,
				// loqlen) pair, but this loop advances ent_idx by 2 per
				// pair, reading only ~ent_len/2 pairs -- confirm whether
				// ent_len counts pairs or individual values.
				for ent_idx := uint64(0); ent_idx < ent_len; ent_idx += 2 {
					delpos, dn := dlug.ConvertUint64(loq_bytes[byte_offset:])
					_ = delpos
					byte_offset += uint64(dn)
					loqlen, dn := dlug.ConvertUint64(loq_bytes[byte_offset:])
					_ = loqlen
					byte_offset += uint64(dn)

					//DEBUG
					//fmt.Printf(" {%d+%d}", delpos, loqlen)

					if i == loq_count {
						loq_knot[0][tile_idx] = append(loq_knot[0][tile_idx], int(delpos))
						loq_knot[0][tile_idx] = append(loq_knot[0][tile_idx], int(loqlen))
						loq_knot[1][tile_idx] = append(loq_knot[1][tile_idx], int(delpos))
						loq_knot[1][tile_idx] = append(loq_knot[1][tile_idx], int(loqlen))
					}
				}

				//DEBUG
				//fmt.Printf("\n")
			}
		} else {
			// het record: independent ntile counts and entry lists for each
			// allele.
			var dn int
			ntile := [2]uint64{0, 0}
			ntile[0], dn = dlug.ConvertUint64(loq_bytes[byte_offset:])
			byte_offset += uint64(dn)
			ntile[1], dn = dlug.ConvertUint64(loq_bytes[byte_offset:])
			byte_offset += uint64(dn)

			//DEBUG
			//fmt.Printf("  ntile [%d,%d]\n", ntile[0], ntile[1])

			if i == loq_count {
				loq_knot[0] = make([][]int, ntile[0])
				loq_knot[1] = make([][]int, ntile[1])
			}

			for zz := 0; zz < 2; zz++ {
				for tile_idx := uint64(0); tile_idx < ntile[zz]; tile_idx++ {
					ent_len, dn := dlug.ConvertUint64(loq_bytes[byte_offset:])
					byte_offset += uint64(dn)

					//DEBUG
					//fmt.Printf("  t(%d)%d[%d]", zz, tile_idx, ent_len)

					// NOTE(review): same ent_idx += 2 stride concern as the
					// hom branch above.
					for ent_idx := uint64(0); ent_idx < ent_len; ent_idx += 2 {
						delpos, dn := dlug.ConvertUint64(loq_bytes[byte_offset:])
						_ = delpos
						byte_offset += uint64(dn)
						loqlen, dn := dlug.ConvertUint64(loq_bytes[byte_offset:])
						_ = loqlen
						byte_offset += uint64(dn)

						//DEBUG
						//fmt.Printf(" {%d+%d}", delpos, loqlen)

						if i == loq_count {
							loq_knot[zz][tile_idx] = append(loq_knot[zz][tile_idx], int(delpos))
							loq_knot[zz][tile_idx] = append(loq_knot[zz][tile_idx], int(loqlen))
						}
					}

					//DEBUG
					//fmt.Printf("\n")
				}
			}
		}
	}

	return loq_knot
}
// debug_print_loq_bytes dumps the low-quality section starting at
// loq_bytes[0] to stdout: header fields (count, code, stride), the
// stride-sampled offset and step tables, the hom-flag and loq-flag
// bitmaps, and then each record's per-tile (delpos, loqlen) entries.
// Returns the number of bytes consumed and a nil error.
func debug_print_loq_bytes(loq_bytes []byte) (int, error) {
	var dummy uint64
	var dn int
	_ = dn

	n := 0

	dummy = byte2uint64(loq_bytes[n : n+8])
	n += 8
	count := int(dummy)
	// ("countc" is an existing typo in the output label, left untouched.)
	fmt.Printf("countc: %d\n", count)

	dummy = byte2uint64(loq_bytes[n : n+8])
	n += 8
	code := int(dummy)
	fmt.Printf("code: %d\n", code)

	dummy = byte2uint64(loq_bytes[n : n+8])
	n += 8
	stride := int(dummy)
	fmt.Printf("stride: %d\n", stride)

	// Offset table: ceil(count/stride) 8-byte entries.
	n_offset := (count + stride - 1) / stride
	offset := make([]int, n_offset)
	for i := 0; i < n_offset; i++ {
		dummy = byte2uint64(loq_bytes[n : n+8])
		n += 8
		offset[i] = int(dummy)
	}
	fmt.Printf("offset[%d]:", len(offset))
	for i := 0; i < len(offset); i++ {
		fmt.Printf(" %d", offset[i])
	}
	fmt.Printf("\n")

	// Step-position table, same shape as the offset table.
	n_step_pos := ((count + stride - 1) / stride)
	step_pos := make([]int, n_step_pos)
	for i := 0; i < n_step_pos; i++ {
		dummy = byte2uint64(loq_bytes[n : n+8])
		n += 8
		step_pos[i] = int(dummy)
	}
	fmt.Printf("step_pos[%d]:", len(step_pos))
	for i := 0; i < len(step_pos); i++ {
		fmt.Printf(" %d", step_pos[i])
	}
	fmt.Printf("\n")

	// Hom-flag bitmap: one bit per record.
	n_hom_bytes := (count + 7) / 8
	hom_flag := loq_bytes[n : n+n_hom_bytes]
	n += n_hom_bytes
	fmt.Printf("homflag[%d]:\n", n_hom_bytes)
	for i := 0; i < len(hom_flag); i++ {
		if (i % 16) == 0 {
			fmt.Printf("\n")
		}
		fmt.Printf(" %2x |", hom_flag[i])
	}
	fmt.Printf("\n\n")

	// Low-quality flag bitmap (one bit per step), length-prefixed.
	dummy = byte2uint64(loq_bytes[n : n+8])
	n += 8
	loq_flag_byte_count := int(dummy)
	fmt.Printf("loq_flag_byte_count[%d]:\n", loq_flag_byte_count)
	loq_flag := loq_bytes[n : n+loq_flag_byte_count]
	n += loq_flag_byte_count
	for i := 0; i < len(loq_flag); i++ {
		if (i % 16) == 0 {
			fmt.Printf("\n")
		}
		fmt.Printf(" %2x |", loq_flag[i])
	}
	fmt.Printf("\n")

	dummy = byte2uint64(loq_bytes[n : n+8])
	n += 8
	loq_info_byte_count := int(dummy)
	fmt.Printf("loq_inf_byte_count: %d\n", loq_info_byte_count)

	// Records: hom records carry one ntile count, het records two.
	for cur_rec := 0; cur_rec < count; cur_rec++ {
		fmt.Printf(" [%d]\n", cur_rec)
		nallele := 1
		if (hom_flag[cur_rec/8] & (1 << uint(cur_rec%8))) == 0 {
			nallele = 2
		}

		dummy, dn = dlug.ConvertUint64(loq_bytes[n:])
		n += dn
		ntile0 := int(dummy)

		ntile1 := 0
		if nallele == 2 {
			dummy, dn = dlug.ConvertUint64(loq_bytes[n:])
			n += dn
			ntile1 = int(dummy)
		}

		fmt.Printf(" [ntile0:%d]\n", ntile0)
		for tileidx := 0; tileidx < ntile0; tileidx++ {
			dummy, dn = dlug.ConvertUint64(loq_bytes[n:])
			n += dn
			m0 := int(dummy)
			fmt.Printf(" [%d] {len:%d}", tileidx, m0)
			// NOTE(review): ii advances by 2 per (delpos,loqlen) pair read;
			// confirm whether m0 counts pairs or individual values.
			for ii := 0; ii < m0; ii += 2 {
				dummy, dn = dlug.ConvertUint64(loq_bytes[n:])
				n += dn
				delpos := int(dummy)
				dummy, dn = dlug.ConvertUint64(loq_bytes[n:])
				n += dn
				loqlen := int(dummy)
				fmt.Printf(" %d+%d", delpos, loqlen)
			}
			fmt.Printf("\n")
		}

		if ntile1 > 0 {
			fmt.Printf(" [ntile1:%d]\n", ntile1)
			for tileidx := 0; tileidx < ntile1; tileidx++ {
				dummy, dn = dlug.ConvertUint64(loq_bytes[n:])
				n += dn
				m1 := int(dummy)
				fmt.Printf(" [%d] {len:%d}", tileidx, m1)
				for ii := 0; ii < m1; ii += 2 {
					dummy, dn = dlug.ConvertUint64(loq_bytes[n:])
					n += dn
					delpos := int(dummy)
					dummy, dn = dlug.ConvertUint64(loq_bytes[n:])
					n += dn
					loqlen := int(dummy)
					fmt.Printf(" %d+%d", delpos, loqlen)
				}
				fmt.Printf("\n")
			}
		}
	}

	return n, nil
}
// debug_unpack_bytes walks an entire CGF blob and prints a human-readable
// dump of every section: magic, versions, tile map, step-per-path table,
// path offsets, and then for each non-empty path its cache vector,
// overflow map, final-overflow records, and (via debug_print_loq_bytes)
// the low-quality section.  Returns nil, or the error propagated from
// debug_print_loq_bytes.
func debug_unpack_bytes(cgf_bytes []byte) error {
	var s string
	var dn int

	buf := make([]byte, 128)
	_ = buf

	n := 0

	// Magic: printed in hex and as its eight ASCII bytes.
	magic := byte2uint64(cgf_bytes[n : n+8])
	n += 8
	fmt.Printf("Magic: %08x (", magic)
	for i := 0; i < 8; i++ {
		b := uint8((magic & (0xff << (8 * uint(i)))) >> (8 * uint(i)))
		fmt.Printf(" %c", b)
	}
	fmt.Printf(" )\n")

	s, dn = byte2string(cgf_bytes[n:])
	n += dn
	fmt.Printf("Version(%d): %s\n", len(s), s)

	s, dn = byte2string(cgf_bytes[n:])
	n += dn
	fmt.Printf("LibraryVersion(%d): %s\n", len(s), s)

	pathcount := byte2uint64(cgf_bytes[n : n+8])
	n += 8
	fmt.Printf("PathCount: %d\n", pathcount)

	tmaplen := byte2uint64(cgf_bytes[n : n+8])
	n += 8
	fmt.Printf("TileMapLen: %d\n", tmaplen)

	//tmea := unpack_tilemap(cgf_bytes[n:n+int(tmaplen)])
	tmea := UnpackTileMap(cgf_bytes[n : n+int(tmaplen)])
	n += int(tmaplen)
	fmt.Printf("TileMap(%d):", len(tmea))
	for i := 0; i < len(tmea); i++ {
		if (i % 16) == 0 {
			fmt.Printf("\n [%03x]", i)
		}
		fmt.Printf(" ")
		for j := 0; j < len(tmea[i].Variant); j++ {
			if j > 0 {
				fmt.Printf(":")
			}
			for k := 0; k < len(tmea[i].Variant[j]); k++ {
				if k > 0 {
					fmt.Printf(";")
				}
				fmt.Printf("%x", tmea[i].Variant[j][k])
				// Only spans > 1 are shown explicitly.
				if tmea[i].Span[j][k] > 1 {
					fmt.Printf("+%x", tmea[i].Span[j][k])
				}
			}
		}
	}
	fmt.Printf("\n")

	StepPerPath := make([]uint64, pathcount)
	for i := uint64(0); i < pathcount; i++ {
		StepPerPath[i] = byte2uint64(cgf_bytes[n : n+8])
		n += 8
	}
	fmt.Printf("StepPerPath(%d):", pathcount)
	for i := uint64(0); i < pathcount; i++ {
		fmt.Printf(" %d", StepPerPath[i])
	}
	fmt.Printf("\n")

	// pathcount+1 offsets bracket each path's byte range.
	path_struct_offset := make([]uint64, pathcount+1)
	for i := uint64(0); i <= pathcount; i++ {
		path_struct_offset[i] = byte2uint64(cgf_bytes[n : n+8])
		n += 8
	}
	fmt.Printf("PathOffset(%d, offset %d):", len(path_struct_offset), n)
	for i := uint64(0); i <= pathcount; i++ {
		fmt.Printf(" %d", path_struct_offset[i])
	}
	fmt.Printf("\n")

	skip_dots := 0

	path_bytes := cgf_bytes[n:]
	for i := 1; i < len(path_struct_offset); i++ {
		var z uint64
		tn := 0

		offset := path_struct_offset[i]
		_ = offset

		// Zero-length paths are skipped entirely.
		if path_struct_offset[i-1] == path_struct_offset[i] {
			continue
		}

		path_b := path_bytes[path_struct_offset[i-1]:path_struct_offset[i]]

		s, dn = byte2string(path_b)
		tn += dn

		ntile := byte2uint64(path_b[tn : tn+8])
		tn += 8

		vectorlen := (ntile + 31) / 32

		// Paths with no vector print as a run of dots, 40 per line.
		if vectorlen == 0 {
			if skip_dots%40 == 0 {
				fmt.Printf("\n")
			}
			fmt.Printf(".")
			skip_dots++
			continue
		}
		skip_dots = 0

		fmt.Printf("\n")
		fmt.Printf(" [%x] %s\n", i-1, s)
		fmt.Printf(" NTile: %d\n", ntile)
		fmt.Printf(" VectorLen: %d", vectorlen)

		if vectorlen > 0 {
			for ii := uint64(0); ii < vectorlen; ii++ {
				if ii%8 == 0 {
					fmt.Printf("\n [%4x]", ii*8)
				}
				b := tn + int(ii*8)
				e := tn + int(ii*8) + 8
				//vec_val := byte2uint64(path_b[tn+int(ii):tn+int(ii)+8])
				vec_val := byte2uint64(path_b[b:e])
				fmt.Printf(" %8x %8x |", (vec_val >> 32), vec_val&0xffffffff)
			}
			fmt.Printf("\n")
			tn += int(vectorlen) * 8
		} else {
			fmt.Printf("\n")
		}

		// Overflow
		//
		path_byte_begin := tn

		z = byte2uint64(path_b[tn : tn+8])
		tn += 8
		fmt.Printf(" Overflow.Length: %d\n", z)
		ovf_len := z

		z = byte2uint64(path_b[tn : tn+8])
		tn += 8
		fmt.Printf(" Overflow.Stride: %d\n", z)
		ovf_stride := z

		z = byte2uint64(path_b[tn : tn+8])
		tn += 8
		fmt.Printf(" Overflow.MapByteCount: %d\n", z)
		map_byte_count := z
		_ = map_byte_count

		map_byte_n := 0

		if ovf_len > 0 {
			n_pos := (ovf_len + ovf_stride - 1) / ovf_stride
			n_offset := n_pos
			//if (ovf_len%ovf_stride)!=0 { n_offset++ }

			fmt.Printf(" Offset:")
			for i := uint64(0); i < n_offset; i++ {
				x := byte2uint64(path_b[tn : tn+8])
				fmt.Printf(" [%d,%d]", i, x)
				tn += 8
			}
			fmt.Printf("\n")

			fmt.Printf(" Position:")
			for i := uint64(0); i < n_pos; i++ {
				x := byte2uint64(path_b[tn : tn+8])
				fmt.Printf(" [%d,%d]", i, x)
				tn += 8
			}
			fmt.Printf("\n")

			//fmt.Printf("  Map(%d):", int(ovf_len) - (tn-path_byte_begin))
			fmt.Printf(" Map(# %d,b %d, e %d):", ovf_len, map_byte_count, (tn - path_byte_begin))
			//for uint64(tn - path_byte_begin) < ovf_len {
			for ele_count := uint64(0); ele_count < ovf_len; ele_count++ {
				map_ele, dn := dlug.ConvertUint64(path_b[tn:])
				//map_ele := byte2uint64(path_b[tn:tn+8])
				//dn:=8
				tn += dn
				map_byte_n += dn
				fmt.Printf(" %x", map_ele)
			}
			fmt.Printf("\n")
		}

		fmt.Printf("read map bytes: %d\n", map_byte_n)

		// Final Overflow
		//
		z = byte2uint64(path_b[tn : tn+8])
		tn += 8
		fmt.Printf(" FinalOverflow.Length: %d\n", z)
		ovf_len = z

		z = byte2uint64(path_b[tn : tn+8])
		tn += 8
		fmt.Printf(" FinalOverflow.ByteLength: %d\n", z)
		//ovf_byte_len := z

		if ovf_len > 0 {
			// One code byte per record, then the dlug-encoded records.
			code_bytes := make([]byte, ovf_len)
			for i := uint64(0); i < ovf_len; i++ {
				code_bytes[i] = path_b[tn]
				tn++
			}

			var dummy uint64
			var dn int

			for i := 0; i < int(ovf_len); i++ {
				dummy, dn = dlug.ConvertUint64(path_b[tn:])
				tn += dn
				anchor_step := int(dummy)

				dummy, dn = dlug.ConvertUint64(path_b[tn:])
				tn += dn
				nallele := int(dummy)

				fmt.Printf(" %x (%d)\n", anchor_step, nallele)

				for allele := 0; allele < nallele; allele++ {
					dummy, dn = dlug.ConvertUint64(path_b[tn:])
					tn += dn
					allele_rec_len := int(dummy)

					fmt.Printf(" [%d] {%d}", allele, allele_rec_len)
					for ii := 0; ii < allele_rec_len; ii++ {
						dummy, dn = dlug.ConvertUint64(path_b[tn:])
						tn += dn
						varid := int(dummy)

						dummy, dn = dlug.ConvertUint64(path_b[tn:])
						tn += dn
						span := int(dummy)

						fmt.Printf(" %x+%x", varid, span)
					}
					fmt.Printf("\n")
				}
			}
		}

		// NOTE: a large commented-out block that hand-parsed the
		// FinalOverflow offset/position tables and record data inline was
		// removed here; the live loop above covers the same records.

		// Low-quality section: delegated to debug_print_loq_bytes.
		var e error
		dn, e = debug_print_loq_bytes(path_b[tn:])
		if e != nil {
			return e
		}
		tn += dn

		// NOTE: a second large commented-out block that hand-parsed the
		// LowQualityInfo section inline was removed here; it duplicated
		// debug_print_loq_bytes.
	}

	return nil
}
func debug_print_final_overflow_bytes(fovf_bytes []byte) error { var dummy uint64 var dn int n := 0 dummy = byte2uint64(fovf_bytes[n : n+8]) n += 8 datarecordn := int(dummy) dummy = byte2uint64(fovf_bytes[n : n+8]) n += 8 datarecordbytelen := int(dummy) code_bytes := fovf_bytes[n : n+datarecordn] n += datarecordn fmt.Printf("datarecordn: %d\n", datarecordn) fmt.Printf("datarecordbytelen: %d\n", datarecordbytelen) cur_rec := 0 for (cur_rec < datarecordn) && (n < len(fovf_bytes)) { dummy, dn = dlug.ConvertUint64(fovf_bytes[n:]) n += dn anchor_step := int(dummy) dummy, dn = dlug.ConvertUint64(fovf_bytes[n:]) n += dn nallele := int(dummy) fmt.Printf("[%d] %x (nallele %d) code[%d]: %d\n", cur_rec, anchor_step, nallele, cur_rec, code_bytes[cur_rec]) for allele := 0; allele < nallele; allele++ { dummy, dn = dlug.ConvertUint64(fovf_bytes[n:]) n += dn m := int(dummy) fmt.Printf(" [%d] (%d):", allele, m) for i := 0; i < m; i++ { dummy, dn = dlug.ConvertUint64(fovf_bytes[n:]) n += dn varid := int(dummy) dummy, dn = dlug.ConvertUint64(fovf_bytes[n:]) n += dn span := int(dummy) fmt.Printf(" %x+%d", varid, span) } fmt.Printf("\n") } fmt.Printf("\n") cur_rec++ } return nil }
func byte2string(buf []byte) (string, int) { sn, dn := dlug.ConvertUint64(buf) s := string(buf[dn : sn+uint64(dn)]) return s, int(sn) + dn }
// Will overwrite cgf path structure if it exists, create a new path if it doesn't. // It will create a new PathStruct if one doesn't already exist. // //func update_vector_path_simple(ctx *CGFContext, path_idx int, allele_path [][]TileInfo) error { func (ctx *CGFContext) UpdateVectorPathSimple(path_idx int, allele_path [][]TileInfo) error { cgf := ctx.CGF sglf := ctx.SGLF //DEBUG //fmt.Printf("INTERMEDIATE\n") //emit_intermediate(ctx, path_idx, allele_path) //path_bytes,err := emit_path_bytes(ctx, path_idx, allele_path) path_bytes, err := ctx.EmitPathBytes(path_idx, allele_path) _ = path_bytes _ = err if err != nil { return err } g_debug := false if len(cgf.Path) < path_idx { tpath := make([]PathStruct, path_idx-len(cgf.Path)+1) cgf.Path = append(cgf.Path, tpath...) if g_debug { fmt.Printf(">>>>> len cgf.Path %d, path_idx %d\n", len(cgf.Path), path_idx) } } var ok bool var path0 int var path1 int var step_idx0 int var step_idx1 int var step0 int var step1 int // Overflow tier of variants. // These are variants that can't fit // into the packed vector but still // appear in the tile map. 
// overflow := OverflowStruct{} overflow.Stride = 256 overflow.Offset = make([]uint64, 0, 16) overflow.Position = make([]uint64, 0, 16) overflow.Map = make([]byte, 0, 256) overflow_count := 0 final_overflow := FinalOverflowStruct{} final_overflow.Stride = 256 final_overflow.Offset = make([]uint64, 0, 16) final_overflow.Position = make([]uint64, 0, 16) final_overflow.DataRecord.Code = make([]byte, 0, 256) final_overflow.DataRecord.Data = make([]byte, 0, 2048) final_overflow_count := 0 _ = final_overflow loq_info := LowQualityInfoStruct{} _ = loq_info buf := make([]byte, 1024) //sglf_info0 := SGLFInfo{} //sglf_info1 := SGLFInfo{} sglf_info0 := cglf.SGLFInfo{} sglf_info1 := cglf.SGLFInfo{} //tile_zipper := make([][]SGLFInfo, 2) tile_zipper := make([][]cglf.SGLFInfo, 2) tile_zipper_seq := make([][]string, 2) span_sum := 0 var_a0 := make([]int, 0, 16) var_a1 := make([]int, 0, 16) span_a0 := make([]int, 0, 16) span_a1 := make([]int, 0, 16) ivec := make([]int, 0, 1024*16) anchor_step := 0 update_anchor := true end_check := false loq_tile := false step_idx_info_for_loq := make([][]int, 2) step_idx_info_for_loq[0] = make([]int, 0, 1024) step_idx_info_for_loq[1] = make([]int, 0, 1024) loq_byte_length := uint64(0) loq_bytes := make([]byte, 0, 1024) loq_offset_bytes := make([]byte, 0, 1024) _ = loq_offset_bytes loq_position_bytes := make([]byte, 0, 1024) _ = loq_position_bytes loq_hom_flag_bytes := make([]byte, 0, 1024) _ = loq_hom_flag_bytes // We might need to fill this in after the fact // loq_offset := make([]uint64, 0, 1024) _ = loq_offset loq_position := make([]uint64, 0, 1024) _ = loq_position loq_hom_flag := make([]bool, 0, 1024) loq_count := uint64(0) loq_stride := uint64(256) cur_hexit_count := 0 // First is allele // second is 3 element (varid,span,start,len) // //AlleleNocallInfo := make([][]int, 2) // //for (step_idx0<len(allele_path[0])) && (step_idx1<len(allele_path[1])) { for (step_idx0 < len(allele_path[0])) || (step_idx1 < len(allele_path[1])) { end_check = 
false if span_sum >= 0 { ti0 := allele_path[0][step_idx0] if len(ti0.NocallStartLen) > 0 { loq_tile = true } step_idx_info_for_loq[0] = append(step_idx_info_for_loq[0], step_idx0) // sglf_info1 only holds a valid path and step // if step_idx0 > 0 { sglf_info0, ok = sglf.PfxTagLookup[ti0.PfxTag] } else { sglf_info0, ok = sglf.SfxTagLookup[ti0.SfxTag] } if !ok { return fmt.Errorf("could not find prefix (%s) in sglf (allele_idx %d, step_idx %d (%x))\n", ti0.PfxTag, 0, step_idx0, step_idx0) } path0 = sglf_info0.Path step0 = sglf_info0.Step if update_anchor { anchor_step = step0 update_anchor = false } // We need to search for the variant in the Lib to find // the rest of the information, including span // var_idx0, e := lookup_variant_index(ti0.Seq, sglf.Lib[path0][step0]) if e != nil { return e } sglf_info0 = sglf.LibInfo[path0][step0][var_idx0] span0 := sglf_info0.Span sglf_info0.Variant = var_idx0 seq0 := sglf.Lib[path0][step0][var_idx0] tile_zipper[0] = append(tile_zipper[0], sglf_info0) tile_zipper_seq[0] = append(tile_zipper_seq[0], seq0) var_a0 = append(var_a0, var_idx0) span_a0 = append(span_a0, span0) allele_path[0][step_idx0].VarId = var_idx0 span_sum -= span0 step_idx0++ } else { ti1 := allele_path[1][step_idx1] if len(ti1.NocallStartLen) > 0 { loq_tile = true } step_idx_info_for_loq[1] = append(step_idx_info_for_loq[1], step_idx1) // sglf_info1 only holds a valid path and step // if step_idx1 > 0 { sglf_info1, ok = sglf.PfxTagLookup[ti1.PfxTag] } else { sglf_info1, ok = sglf.SfxTagLookup[ti1.SfxTag] } if !ok { return fmt.Errorf("could not find prefix (%s) in sglf (allele_idx %d, step_idx %d (%x))\n", ti1.PfxTag, 1, step_idx1, step_idx1) } path1 = sglf_info1.Path step1 = sglf_info1.Step // We need to search for the variant in the Lib to find // the rest of the information, including span // var_idx1, e := lookup_variant_index(ti1.Seq, sglf.Lib[path1][step1]) if e != nil { return e } sglf_info1 = sglf.LibInfo[path1][step1][var_idx1] sglf_info1.Variant = 
var_idx1 seq1 := sglf.Lib[path1][step1][var_idx1] tile_zipper[1] = append(tile_zipper[1], sglf_info1) tile_zipper_seq[1] = append(tile_zipper_seq[1], seq1) span1 := sglf_info1.Span var_a1 = append(var_a1, var_idx1) span_a1 = append(span_a1, span1) allele_path[1][step_idx1].VarId = var_idx1 span_sum += span1 step_idx1++ } if span_sum == 0 { // ===================== // ===================== // --------------------- // STORE LOQ INFORMATION // if loq_tile { buf := make([]byte, 16) var dn int if (loq_count % loq_stride) == 0 { tobyte64(buf, uint64(len(loq_bytes))) loq_offset_bytes = append(loq_offset_bytes, buf[0:8]...) tobyte64(buf, uint64(anchor_step)) loq_position_bytes = append(loq_position_bytes, buf[0:8]...) } loq_count++ hom_flag := true if len(step_idx_info_for_loq[0]) != len(step_idx_info_for_loq[1]) { hom_flag = false } else { for ii := 0; ii < len(step_idx_info_for_loq[0]); ii++ { step_idx0 := step_idx_info_for_loq[0][ii] step_idx1 := step_idx_info_for_loq[1][ii] step0 := allele_path[0][step_idx0].Step step1 := allele_path[1][step_idx1].Step if step0 != step1 { hom_flag = false break } if len(allele_path[0][step_idx0].NocallStartLen) != len(allele_path[1][step_idx1].NocallStartLen) { hom_flag = false break } for jj := 0; jj < len(allele_path[0][step_idx0].NocallStartLen); jj++ { if allele_path[0][step_idx0].NocallStartLen[jj] != allele_path[1][step_idx1].NocallStartLen[jj] { hom_flag = false break } } } } loq_hom_flag = append(loq_hom_flag, hom_flag) // (NTile) // Number of tile in this record // dn = dlug.FillSliceUint64(buf, uint64(len(step_idx_info_for_loq[0]))) loq_bytes = append(loq_bytes, buf[0:dn]...) if !hom_flag { // (NTile) (B allele) // Number of Allele B tiles in this record // dn = dlug.FillSliceUint64(buf, uint64(len(step_idx_info_for_loq[1]))) loq_bytes = append(loq_bytes, buf[0:dn]...) 
//fmt.Printf("++ Ballele %d\n", len(step_idx_info_for_loq[1])) } for i := 0; i < len(step_idx_info_for_loq[0]); i++ { step_idx0 := step_idx_info_for_loq[0][i] ti := allele_path[0][step_idx0] // (LoqTile[].Len) // Number of noc entries for this tile // dn = dlug.FillSliceUint64(buf, uint64(len(ti.NocallStartLen)/2)) loq_bytes = append(loq_bytes, buf[0:dn]...) prev_start := 0 for jj := 0; jj < len(ti.NocallStartLen); jj += 2 { // (LoqTile[].LoqEntry[].DelPos) // Start position // dn = dlug.FillSliceUint64(buf, uint64(ti.NocallStartLen[jj]-prev_start)) loq_bytes = append(loq_bytes, buf[0:dn]...) // (LoqTile[].LoqEntry[].LoqLen) // Length of noc // dn = dlug.FillSliceUint64(buf, uint64(ti.NocallStartLen[jj+1])) loq_bytes = append(loq_bytes, buf[0:dn]...) prev_start = ti.NocallStartLen[jj] } } if !hom_flag { for i := 0; i < len(step_idx_info_for_loq[1]); i++ { step_idx1 := step_idx_info_for_loq[1][i] ti := allele_path[1][step_idx1] // (LoqTile[].Len) // Number of noc entries // dn = dlug.FillSliceUint64(buf, uint64(len(ti.NocallStartLen)/2)) loq_bytes = append(loq_bytes, buf[0:dn]...) prev_start := 0 for jj := 0; jj < len(ti.NocallStartLen); jj += 2 { // (LoqTile[].LoqEntry[].DelPos) // Start of nocall // dn = dlug.FillSliceUint64(buf, uint64(ti.NocallStartLen[jj]-prev_start)) loq_bytes = append(loq_bytes, buf[0:dn]...) // (LoqTile[].LoqEntry[].LoqLen) // Noc length // dn = dlug.FillSliceUint64(buf, uint64(ti.NocallStartLen[jj+1])) loq_bytes = append(loq_bytes, buf[0:dn]...) 
prev_start = ti.NocallStartLen[jj] } } } } // // STORE LOQ INFORMATION // --------------------- // ===================== // ===================== end_check = true tilemap_key := create_tilemap_string_lookup2(var_a0, span_a0, var_a1, span_a1) if g_debug { fmt.Printf(">> (%d,%x) {%v,%v} {%v,%v} %s\n", anchor_step, anchor_step, var_a0, span_a0, var_a1, span_a1, tilemap_key) } n := len(ivec) // Fill in spanning // for ; n <= anchor_step; n++ { ivec = append(ivec, -1) OVERFLOW_INDICATOR_SPAN_VALUE := 1025 if (n % 32) == 0 { cur_hexit_count = 0 } cur_hexit_count++ if cur_hexit_count >= (32 / 4) { PathOverflowAdd(&overflow, overflow_count, anchor_step, OVERFLOW_INDICATOR_SPAN_VALUE) overflow_count++ } } final_overflow_flag := true tilemap_pos := len(ctx.TileMapLookup) if _, ok := ctx.TileMapLookup[tilemap_key]; ok { final_overflow_flag = false tilemap_pos = ctx.TileMapPosition[tilemap_key] } if (!loq_tile) && (tilemap_pos < 13) && (cur_hexit_count < (32 / 4)) { // It's not a low quality tile and it can fit in a hexit // ivec[anchor_step] = tilemap_pos if (anchor_step % 32) == 0 { cur_hexit_count = 0 } if cur_hexit_count >= (32 / 4) { PathOverflowAdd(&overflow, overflow_count, anchor_step, tilemap_pos) overflow_count++ cur_hexit_count++ } } else { // It's overflown. // If the entry doesn't appear in the tile map, the tilemap_pos // will be set to the length of the tilemap (e.g. 
1024) as an indicator // that the final overflow table should be consulted for the entry // // overflow // if loq_tile { ivec[anchor_step] = -254 } else { ivec[anchor_step] = 254 } // -------- // OVERFLOW // if (anchor_step % 32) == 0 { cur_hexit_count = 0 } PathOverflowAdd(&overflow, overflow_count, anchor_step, tilemap_pos) overflow_count++ cur_hexit_count++ // // OVERFLOW // -------- } if final_overflow_flag { //final overflow // if loq_tile { ivec[anchor_step] = -255 } else { ivec[anchor_step] = 255 } // -------------- // FINAL OVERFLOW // // We haven't found the TileMap entry, so store the entry // here. // TODO: store raw sequence if it's not found in the Tile Library // p := final_overflow_count if (uint64(p) % overflow.Stride) == 0 { //final_overflow.Offset = append(final_overflow.Offset, uint64(len(final_overflow.Offset))) final_overflow.Offset = append(final_overflow.Offset, uint64(len(final_overflow.DataRecord.Data))) //final_overflow.Position = append(final_overflow.Position, uint64(final_overflow_count)) final_overflow.Position = append(final_overflow.Position, uint64(anchor_step)) } final_overflow.DataRecord.Code = append(final_overflow.DataRecord.Code, uint8(1)) dn := dlug.FillSliceUint64(buf, uint64(len(var_a0))) final_overflow.DataRecord.Data = append(final_overflow.DataRecord.Data, buf[:dn]...) dn = dlug.FillSliceUint64(buf, uint64(len(var_a1))) final_overflow.DataRecord.Data = append(final_overflow.DataRecord.Data, buf[:dn]...) for ii := 0; ii < len(var_a0); ii++ { dn = dlug.FillSliceUint64(buf, uint64(var_a0[ii])) final_overflow.DataRecord.Data = append(final_overflow.DataRecord.Data, buf[:dn]...) dn = dlug.FillSliceUint64(buf, uint64(span_a0[ii])) final_overflow.DataRecord.Data = append(final_overflow.DataRecord.Data, buf[:dn]...) } for ii := 0; ii < len(var_a1); ii++ { dn = dlug.FillSliceUint64(buf, uint64(var_a1[ii])) final_overflow.DataRecord.Data = append(final_overflow.DataRecord.Data, buf[:dn]...) 
dn = dlug.FillSliceUint64(buf, uint64(span_a1[ii])) final_overflow.DataRecord.Data = append(final_overflow.DataRecord.Data, buf[:dn]...) } final_overflow_count++ // // FINAL OVERFLOW // -------------- } var_a0 = var_a0[0:0] var_a1 = var_a1[0:0] span_a0 = span_a0[0:0] span_a1 = span_a1[0:0] update_anchor = true tile_zipper[0] = tile_zipper[0][0:0] tile_zipper[1] = tile_zipper[1][0:0] tile_zipper_seq[0] = tile_zipper_seq[0][0:0] tile_zipper_seq[1] = tile_zipper_seq[1][0:0] loq_tile = false step_idx_info_for_loq[0] = step_idx_info_for_loq[0][0:0] step_idx_info_for_loq[1] = step_idx_info_for_loq[1][0:0] } } if !end_check { return fmt.Errorf("There are trailing tiles that could not be matched up") } if g_debug { for i := 0; i < len(ivec); i++ { fmt.Printf("[%d] %d\n", i, ivec[i]) } } // Now we know the final size of the overflow structures so // fill in their length // /* overflow.Length = 8 + 8 + uint64(len(overflow.Offset)*8) + uint64(len(overflow.Position)*8) + uint64(len(overflow.Map)) */ overflow.Length = uint64(overflow_count) // Add in final byte position of Map //if int( (uint64(len(overflow.Offset)) + overflow.Stride - 1) / overflow.Stride ) > len(overflow.Offset) { if (overflow.Length % overflow.Stride) != 0 { overflow.Offset = append(overflow.Offset, uint64(len(overflow.Map))) } /* final_overflow.Length = 8 + 8 + uint64(len(final_overflow.Offset)*8) + uint64(len(final_overflow.Position)*8) + uint64(len(final_overflow.DataRecord.Code)) + uint64(len(final_overflow.DataRecord.Data)) */ final_overflow.Length = uint64(final_overflow_count) // Add in final byte position of Map //if int( (uint64(len(final_overflow.Offset)) + final_overflow.Stride - 1) / final_overflow.Stride ) > len(final_overflow.Offset) { if (final_overflow.Length % final_overflow.Stride) != 0 { final_overflow.Offset = append(final_overflow.Offset, uint64(len(final_overflow.DataRecord.Data))) } packed_len := (len(ivec) + 31) / 32 packed_vec := make([]uint64, packed_len) for i := 0; i < (packed_len 
- 1); i++ { hexit_ovf_count := uint(0) for jj := 0; jj < 32; jj++ { ivec_pos := 32*i + jj if ivec[ivec_pos] == 0 { continue } packed_vec[i] |= (1 << (32 + uint(jj))) // 32/4 hexits available // fill in from right to left // if hexit_ovf_count < (32 / 4) { if ivec[ivec_pos] == -1 { // spanning tile, 0 hexit } else if (ivec[ivec_pos] == 255) || (ivec[ivec_pos] == 254) { // hiq overflow // packed_vec[i] |= (0xf << (4 * hexit_ovf_count)) } else if (ivec[ivec_pos] == -255) || (ivec[ivec_pos] == -254) { // loq overflow // packed_vec[i] |= (0xe << (4 * hexit_ovf_count)) } else if ivec[ivec_pos] == 256 { // complex // packed_vec[i] |= (0xd << (4 * hexit_ovf_count)) } else { // otherwise we can encode the tile lookup in the hexit // //packed_vec[i] |= (uint64(ivec[ivec_pos]&0xff)<<(4*hexit_ovf_count)) packed_vec[i] |= (uint64(ivec[ivec_pos]&0xf) << (4 * hexit_ovf_count)) } } hexit_ovf_count++ } } fin_loq_bytes := make([]byte, 0, 1024) //TODO: // final packed bit vector population if g_debug { for i := 0; i < len(packed_vec); i++ { fmt.Printf("[%d,%x (%d)] |%8x|%8x|\n", 32*i, 32*i, i, packed_vec[i]>>32, packed_vec[i]&0xffffffff) } fmt.Printf("\n\n") fmt.Printf("Overflow.Length: %d (0x%x)\n", overflow.Length, overflow.Length) fmt.Printf("Overflow.Stride: %d\n", overflow.Stride) fmt.Printf("Overflow.Offset:") for ii := 0; ii < len(overflow.Offset); ii++ { fmt.Printf(" [%d] %d\n", ii, overflow.Offset[ii]) } fmt.Printf("\n") fmt.Printf("Overflow.Position:") for ii := 0; ii < len(overflow.Position); ii++ { fmt.Printf(" [%d] %d\n", ii, overflow.Position[ii]) } idx := 0 for b_offset := 0; b_offset < len(overflow.Map); { tm, dn := dlug.ConvertUint64(overflow.Map[b_offset:]) fmt.Printf(" [%d] %d (0x%x)\n", idx, tm, tm) b_offset += dn idx++ } fmt.Printf("\n\n") fmt.Printf("FinalOverflow.Length: %d (0x%x)\n", final_overflow.Length, final_overflow.Length) fmt.Printf("FinalOverflow.Stride: %d\n", final_overflow.Stride) fmt.Printf("FinalOverflow.Offset:\n") for ii := 0; ii < 
len(final_overflow.Offset); ii++ { fmt.Printf(" [%d] %d\n", ii, final_overflow.Offset[ii]) } fmt.Printf("\n") fmt.Printf("FinalOverflow.Position:\n") for ii := 0; ii < len(final_overflow.Position); ii++ { fmt.Printf(" [%d] %d\n", ii, final_overflow.Position[ii]) } fmt.Printf("\n") fmt.Printf("FinalOverflow.DataRecord:\n") byte_map_offset := 0 for ii := 0; ii < len(final_overflow.DataRecord.Code); ii++ { fmt.Printf(" [%d] Code: %d, Data:", ii, final_overflow.DataRecord.Code[ii]) if final_overflow.DataRecord.Code[ii] == 1 { n0, dn := dlug.ConvertUint64(final_overflow.DataRecord.Data[byte_map_offset:]) byte_map_offset += dn n1, dn := dlug.ConvertUint64(final_overflow.DataRecord.Data[byte_map_offset:]) byte_map_offset += dn fmt.Printf(" (%d,%d)", n0, n1) fmt.Printf(" [") for jj := uint64(0); jj < n0; jj++ { vid, dn := dlug.ConvertUint64(final_overflow.DataRecord.Data[byte_map_offset:]) byte_map_offset += dn span, dn := dlug.ConvertUint64(final_overflow.DataRecord.Data[byte_map_offset:]) byte_map_offset += dn fmt.Printf(" %x+%x", vid, span) } fmt.Printf(" ]") fmt.Printf(" [") for jj := uint64(0); jj < n1; jj++ { vid, dn := dlug.ConvertUint64(final_overflow.DataRecord.Data[byte_map_offset:]) byte_map_offset += dn span, dn := dlug.ConvertUint64(final_overflow.DataRecord.Data[byte_map_offset:]) byte_map_offset += dn fmt.Printf(" %x+%x", vid, span) } fmt.Printf(" ]") fmt.Printf("\n") } else { panic("unsupported") } } // ------------- var byt byte for i := 0; i < len(loq_hom_flag); i++ { if (i > 0) && ((i % 8) == 0) { loq_hom_flag_bytes = append(loq_hom_flag_bytes, byt) byt = 0 } if loq_hom_flag[i] { byt |= (1 << uint8(i%8)) } } if len(loq_hom_flag) > 0 { loq_hom_flag_bytes = append(loq_hom_flag_bytes, byt) } fmt.Printf("# loq_bytes(%d)", len(loq_bytes)) // Create final low quality information // //fin_loq_bytes := make([]byte, 0, 1024) loq_byte_length = 0 loq_byte_length += 8 // length (this field) loq_byte_length += 8 // NRecord loq_byte_length += 8 // code loq_byte_length 
+= 8 // stride loq_byte_length += uint64(len(loq_offset_bytes)) loq_byte_length += uint64(len(loq_position_bytes)) loq_byte_length += uint64(len(loq_hom_flag_bytes)) loq_byte_length += uint64(len(loq_bytes)) tobyte64(buf, loq_byte_length) fin_loq_bytes = append(fin_loq_bytes, buf[0:8]...) tobyte64(buf, uint64(loq_count)) fin_loq_bytes = append(fin_loq_bytes, buf[0:8]...) code := uint64(0) tobyte64(buf, code) fin_loq_bytes = append(fin_loq_bytes, buf[0:8]...) stride := uint64(256) tobyte64(buf, stride) fin_loq_bytes = append(fin_loq_bytes, buf[0:8]...) fin_loq_bytes = append(fin_loq_bytes, loq_offset_bytes...) fin_loq_bytes = append(fin_loq_bytes, loq_position_bytes...) fin_loq_bytes = append(fin_loq_bytes, loq_hom_flag_bytes...) fin_loq_bytes = append(fin_loq_bytes, loq_bytes...) for i := 0; i < len(fin_loq_bytes); i++ { if (i % 16) == 0 { fmt.Printf("\n") } fmt.Printf(" %2x", fin_loq_bytes[i]) } fmt.Printf("\n") print_low_quality_information(fin_loq_bytes) //DEBUG //err := ioutil.WriteFile("./loq_bytes.bin", fin_loq_bytes, 0644) //if err!=nil { panic(err); } } ctx.CGF.Path[path_idx].Name = fmt.Sprintf("%04x", path_idx) ctx.CGF.Path[path_idx].Vector = packed_vec ctx.CGF.Path[path_idx].Overflow = overflow ctx.CGF.Path[path_idx].FinalOverflow = final_overflow ctx.CGF.Path[path_idx].LowQualityBytes = fin_loq_bytes return nil }
// print_low_quality_information dumps a human-readable rendering of a
// serialized low-quality (nocall) information blob, as produced by the
// encoder in this file (header, offset/position tables, hom-flag bitfield,
// then per-record dlug-encoded loq entries).
//
// The expected layout, mirroring what the encoder emits, is:
//
//   uint64 Length        total byte length field
//   uint64 NRecord       number of loq records
//   uint64 Code          format code
//   uint64 Stride        records per offset/position entry
//   uint64 Offset[ceil(NRecord/Stride)]
//   uint64 StepPosition[ceil(NRecord/Stride)]
//   byte   HomFlag[ceil(NRecord/8)]   one bit per record, LSB first
//   dlug-encoded LoqInfo records to end of buffer
//
// Debug/diagnostic only: output goes to stdout and nothing is returned.
// NOTE(review): assumes b is well-formed; a truncated buffer will panic on
// slice bounds — acceptable for a debug printer, but verify before reuse.
func print_low_quality_information(b []byte) {
	// n is the running byte cursor into b; every read advances it.
	n := 0

	loq_len := byte2uint64(b[n : n+8])
	n += 8
	fmt.Printf("Length: %d (len(b) %d)\n", loq_len, len(b))

	loq_rec := byte2uint64(b[n : n+8])
	n += 8
	fmt.Printf("NRecord: %d\n", loq_rec)

	code := byte2uint64(b[n : n+8])
	n += 8
	fmt.Printf("Code: %d\n", code)

	stride := byte2uint64(b[n : n+8])
	n += 8
	fmt.Printf("Stride: %d\n", stride)

	// Number of offset/position table entries: one per stride block.
	n_ele := uint64((loq_rec + (stride - 1)) / stride)

	fmt.Printf("Offset:")
	for i := uint64(0); i < n_ele; i++ {
		k := byte2uint64(b[n : n+8])
		n += 8
		fmt.Printf(" %d", k)
	}
	fmt.Printf("\n")

	fmt.Printf("StepPosition:")
	for i := uint64(0); i < n_ele; i++ {
		k := byte2uint64(b[n : n+8])
		n += 8
		fmt.Printf(" %d", k)
	}
	fmt.Printf("\n")

	// Expand the hom-flag bitfield into one bool per record (LSB first,
	// matching the encoder's `byt |= 1 << (i%8)` packing). Trailing bits
	// of the last byte expand to padding entries that are never indexed.
	hom_flag := make([]bool, 0, 8)
	fmt.Printf("HomFlag:")
	for i := uint64(0); i < ((loq_rec + 7) / 8); i++ {
		fmt.Printf(" (%d)(%d):%02x", i*8, i, b[n:n+1])
		v := uint8(b[n])
		for ii := uint8(0); ii < 8; ii++ {
			if (v & (1 << ii)) != 0 {
				hom_flag = append(hom_flag, true)
			} else {
				hom_flag = append(hom_flag, false)
			}
		}
		n++
	}
	fmt.Printf("\n")

	fmt.Printf("LoqInfo[]:\n")

	dn := 0
	var ntile uint64
	rec_no := 0
	var n_entry, delpos, loqlen uint64
	var ntilea, ntileb uint64

	// Walk the remaining dlug-encoded records. A "hom" record carries one
	// tile count shared by both alleles; a non-hom record carries separate
	// A and B tile counts followed by their entry lists.
	for n < len(b) {
		if hom_flag[rec_no] {
			// Homozygous record: NTile, then per tile a count of
			// (delpos, loqlen) pairs.
			ntile, dn = dlug.ConvertUint64(b[n:])
			n += dn
			fmt.Printf(" [%d] (rec_no: %d, (rec_no/8): %d, byte: %d/%d)\n", ntile, rec_no, rec_no/8, n, len(b))
			for i := uint64(0); i < ntile; i++ {
				n_entry, dn = dlug.ConvertUint64(b[n:])
				n += dn
				fmt.Printf(" [%d]", n_entry)
				for j := uint64(0); j < n_entry; j++ {
					// delpos is delta-encoded from the previous start;
					// printed raw here.
					delpos, dn = dlug.ConvertUint64(b[n:])
					n += dn
					loqlen, dn = dlug.ConvertUint64(b[n:])
					n += dn
					fmt.Printf(" %d+%d", delpos, loqlen)
				}
				fmt.Printf("\n")
			}
		} else {
			// Heterozygous record: NTileA and NTileB, then allele A's
			// tiles followed by allele B's tiles.
			ntilea, dn = dlug.ConvertUint64(b[n:])
			n += dn
			ntileb, dn = dlug.ConvertUint64(b[n:])
			n += dn
			fmt.Printf(" [%d,%d] (rec_no: %d, (rec_no/8): %d, byte: %d/%d)\n", ntilea, ntileb, rec_no, rec_no/8, n, len(b))
			for i := uint64(0); i < ntilea; i++ {
				n_entry, dn = dlug.ConvertUint64(b[n:])
				n += dn
				fmt.Printf(" A [%d]", n_entry)
				for j := uint64(0); j < n_entry; j++ {
					delpos, dn = dlug.ConvertUint64(b[n:])
					n += dn
					loqlen, dn = dlug.ConvertUint64(b[n:])
					n += dn
					fmt.Printf(" %d+%d", delpos, loqlen)
				}
				fmt.Printf("\n")
			}
			for i := uint64(0); i < ntileb; i++ {
				n_entry, dn = dlug.ConvertUint64(b[n:])
				n += dn
				fmt.Printf(" B [%d]", n_entry)
				for j := uint64(0); j < n_entry; j++ {
					delpos, dn = dlug.ConvertUint64(b[n:])
					n += dn
					loqlen, dn = dlug.ConvertUint64(b[n:])
					n += dn
					fmt.Printf(" %d+%d", delpos, loqlen)
				}
				fmt.Printf("\n")
			}
		}
		rec_no++
		fmt.Printf("\n")
	}
}
func LookupTileMap(cgf_bytes []byte, path, ver, step int) (int, error) { path_bytes, e := CGFPathBytes(cgf_bytes, path) if e != nil { return -1, e } st := 0 path_name, dn := byte2string(path_bytes[st:]) _ = path_name st += dn vec_len := byte2uint64(path_bytes[st:]) st += 8 vec_entry := step / 32 vecbits := byte2uint64(path_bytes[st+vec_entry*8 : st+(vec_entry+1)*8]) st += int(vec_len) * 8 m := uint8(step % 32) //DEBUG fmt.Printf(">>>>>> step %d (%d)\n", step, m) // Canonical, return 0 // if (vecbits & (1 << (32 + m))) == 0 { return 0, nil } //DEBUG fmt.Printf(">>>>>> vecbits %8x\n", vecbits) // See if the value is stored in the hexit // hexit_shift := uint(0) for i := uint8(0); i < m; i++ { if (vecbits & (1 << (32 + i))) != 0 { hexit_shift++ } } //DEBUG fmt.Printf(">>>>>> hexitpos %d\n", hexit_shift) // Return with the hexit if we can. // loq_flag := false _ = loq_flag if hexit_shift < (32 / 8) { hexit := (vecbits & (0xf << (4 * hexit_shift))) >> (4 * hexit_shift) if hexit == 0 { return -2, nil } if hexit < 0xd { return int(hexit), nil } if hexit == 0xe { loq_flag = true } } // Find it in the Overflow bytes // po := GetPathOverflow(path_bytes) debug_print_path_overflow(&po) start_idx := 0 start_step := po.Position[0] for i := 1; i < len(po.Position); i++ { if step >= int(po.Position[i-1]) { break } start_idx = i start_step = po.Position[i] } fmt.Printf(">>> start_idx %d, start_step %d\n", start_idx, start_step) n_ovf, e := CountOverflow(cgf_bytes, path, int(start_step), step) _ = n_ovf if e != nil { return -1, nil } n_ovf++ fmt.Printf(">> n_ovf %d\n", n_ovf) tme := 0 seen_rec := 0 byte_offset := po.Offset[start_idx] for p := int(byte_offset); (p < len(po.Map)) && (seen_rec < n_ovf); seen_rec++ { u, dn := dlug.ConvertUint64(po.Map[p:]) p += dn fmt.Printf("????? %d\n", u) tme = int(u) } fmt.Printf(">>>> seen_rec %d, n_ovf %d\n", seen_rec, n_ovf) fmt.Printf(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> tme %d\n", tme) return tme, nil return -1, nil }