Example #1
// Seek implements http.File.
func (f *HTTPFile) Seek(offset int64, whence int) (int64, error) {
	if f.closed {
		return 0, os.ErrInvalid
	}

	if offset < 0 {
		return int64(f.off), fmt.Errorf("cannot seek before start of file")
	}

	switch whence {
	case 0:
		noff := int64(f.off) + offset
		if noff > mathutil.MaxInt {
			return int64(f.off), fmt.Errorf("seek target overflows int: %d", noff)
		}

		f.off = mathutil.Min(int(offset), len(f.content))
		if f.off == int(offset) {
			return offset, nil
		}

		return int64(f.off), io.EOF
	case 1:
		noff := int64(f.off) + offset
		if noff > mathutil.MaxInt {
			return int64(f.off), fmt.Errorf("seek target overflows int: %d", noff)
		}

		off := mathutil.Min(f.off+int(offset), len(f.content))
		if off == f.off+int(offset) {
			f.off = off
			return int64(off), nil
		}

		f.off = off
		return int64(off), io.EOF
	case 2:
		noff := int64(f.off) - offset
		if noff < 0 {
			return int64(f.off), fmt.Errorf("cannot seek before start of file")
		}

		f.off = len(f.content) - int(offset)
		return int64(f.off), nil
	default:
		return int64(f.off), fmt.Errorf("seek: invalid whence %d", whence)
	}
}
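
A side note on the raw whence values used above: 0, 1 and 2 correspond to the standard library constants io.SeekStart, io.SeekCurrent and io.SeekEnd, so the switch could use those names instead. A minimal sketch, assuming nothing beyond the standard library:

package main

import (
	"fmt"
	"io"
)

func main() {
	// The named forms of the three whence values handled by Seek above.
	fmt.Println(io.SeekStart, io.SeekCurrent, io.SeekEnd) // prints: 0 1 2
}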
Example #2
// pickCellsToLearnOn picks cells to form distal connections to.
func (tm *TemporalMemory) pickCellsToLearnOn(n int, segment int,
	winnerCells []int, connections *TemporalMemoryConnections) []int {

	candidates := make([]int, len(winnerCells))
	copy(candidates, winnerCells)

	for _, val := range connections.SynapsesForSegment(segment) {
		syn := connections.DataForSynapse(val)
		for idx, val := range candidates {
			if val == syn.SourceCell {
				candidates = append(candidates[:idx], candidates[idx+1:]...)
				break
			}
		}
	}

	//Shuffle candidates
	for i := range candidates {
		j := rand.Intn(i + 1)
		candidates[i], candidates[j] = candidates[j], candidates[i]
	}

	n = mathutil.Min(n, len(candidates))
	return candidates[:n]
}
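
The shuffle loop above is a Fisher-Yates shuffle. Since Go 1.10 an equivalent uniform shuffle can be delegated to rand.Shuffle; a minimal sketch (the slice here is illustrative, not from the original project):

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	candidates := []int{10, 20, 30, 40, 50}
	// rand.Shuffle performs the same kind of index swaps as the manual loop above.
	rand.Shuffle(len(candidates), func(i, j int) {
		candidates[i], candidates[j] = candidates[j], candidates[i]
	})
	fmt.Println(candidates)
}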
Example #3
func (f *mem) ReadAt(b []byte, off int64) (n int, err error) {
	avail := f.size - off
	pi := off >> f.pgBits
	po := int(off) & f.pgMask
	rem := len(b)
	if int64(rem) >= avail {
		rem = int(avail)
		err = io.EOF
	}
	var zeroPage *[]byte
	for rem != 0 && avail > 0 {
		pg := f.m[pi]
		if pg == nil {
			if zeroPage == nil {
				zeroPage = buffer.CGet(f.pgSize)
				defer buffer.Put(zeroPage)
			}
			pg = zeroPage
		}
		nc := copy(b[:mathutil.Min(rem, f.pgSize)], (*pg)[po:])
		pi++
		po = 0
		rem -= nc
		n += nc
		b = b[nc:]
	}
	return n, err
}
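
Examples #3 to #5 (and #11 and #12 below) all follow the same paged ReadAt pattern: the page index is off >> pgBits, the in-page offset is off & pgMask, each iteration copies at most min(rem, pageSize) bytes, and missing pages read as zeros. A minimal, self-contained sketch of that pattern under assumed names (pagedBuf and minInt are illustrative, not from the original code):

package main

import (
	"fmt"
	"io"
)

const (
	pgBits = 4 // 16-byte pages; deliberately tiny for illustration
	pgSize = 1 << pgBits
	pgMask = pgSize - 1
)

// pagedBuf is a hypothetical stand-in for the paged filers above:
// sparse fixed-size pages keyed by page index, missing pages read as zeros.
type pagedBuf struct {
	pages map[int64][]byte
	size  int64
}

func minInt(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func (p *pagedBuf) ReadAt(b []byte, off int64) (n int, err error) {
	avail := p.size - off
	rem := len(b)
	if int64(rem) >= avail {
		rem = int(avail)
		err = io.EOF
	}
	pi, po := off>>pgBits, int(off&pgMask)
	for rem > 0 {
		pg := p.pages[pi]
		if pg == nil {
			pg = make([]byte, pgSize) // zero page
		}
		nc := copy(b[:minInt(rem, pgSize)], pg[po:])
		pi, po = pi+1, 0
		rem -= nc
		n += nc
		b = b[nc:]
	}
	return n, err
}

func main() {
	p := &pagedBuf{pages: map[int64][]byte{0: []byte("0123456789abcdef")}, size: 20}
	buf := make([]byte, 8)
	n, err := p.ReadAt(buf, 12)
	fmt.Printf("%d %v %q\n", n, err, buf[:n]) // 8 EOF "cdef\x00\x00\x00\x00"
}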
Example #4
func (f *file) ReadAt(b []byte, off int64) (n int, err error) {
	avail := f.size - off
	pi := off >> f.pgBits
	po := int(off) & f.pgMask
	rem := len(b)
	if int64(rem) >= avail {
		rem = int(avail)
		err = io.EOF
	}
	for rem != 0 && avail > 0 {
		pg := f.m[pi]
		if pg == nil {
			if pg, err = f.page(pi); err != nil {
				return n, err
			}
		}
		nc := copy(b[:mathutil.Min(rem, f.pgSize)], pg[po:])
		pi++
		po = 0
		rem -= nc
		n += nc
		b = b[nc:]
	}
	return n, err
}
Example #5
func (f *bitFiler) ReadAt(b []byte, off int64) (n int, err error) {
	avail := f.size - off
	pgI := off >> bfBits
	pgO := int(off & bfMask)
	rem := len(b)
	if int64(rem) >= avail {
		rem = int(avail)
		err = io.EOF
	}
	for rem != 0 && avail > 0 {
		pg := f.m[pgI]
		if pg == nil {
			pg = &bitPage{}
			pg.pdata = buffer.CGet(bfSize)
			pg.data = *pg.pdata
			if f.parent != nil {
				_, err = f.parent.ReadAt(pg.data, off&^bfMask)
				if err != nil && !fileutil.IsEOF(err) {
					return
				}

				err = nil
			}
			f.m[pgI] = pg
		}
		nc := copy(b[:mathutil.Min(rem, bfSize)], pg.data[pgO:])
		pgI++
		pgO = 0
		rem -= nc
		n += nc
		b = b[nc:]
		off += int64(nc)
	}
	return
}
Example #6
File: etc.go Project: pmezard/exp
func (t *treeCache) getTree(db *DB, prefix int, name string, canCreate bool, cacheSize int) (r *lldb.BTree, err error) {
	m := t.get()
	r, ok := m[name]
	if ok {
		return
	}

	root, err := db.root()
	if err != nil {
		return
	}

	val, err := root.get(prefix, name)
	if err != nil {
		return
	}

	switch x := val.(type) {
	case nil:
		if !canCreate {
			return
		}

		var h int64
		r, h, err = lldb.CreateBTree(db.alloc, collate)
		if err != nil {
			return nil, err
		}

		if err = root.set(h, prefix, name); err != nil {
			return nil, err
		}
	case int64:
		if r, err = lldb.OpenBTree(db.alloc, collate, x); err != nil {
			return nil, err
		}
	default:
		return nil, &lldb.ErrINVAL{Src: "corrupted root directory value for", Val: fmt.Sprintf("%q, %q", prefix, name)}
	}

	if len(m) > cacheSize {
		i, j, n := 0, cacheSize/2, mathutil.Min(cacheSize/20, 10)
	loop:
		for k := range m {
			if i++; i >= j {
				delete(m, k)
				if n == 0 {
					break loop
				}

				n--
			}
		}
	}

	m[name] = r
	return
}
Example #7
func (c *cache) get2(n int) (r *node, isZeroed bool) {
	s := *c
	lens := len(s)
	if lens == 0 {
		return &node{b: make([]byte, n, mathutil.Min(2*n, maxBuf))}, true
	}

	i := sort.Search(lens, func(x int) bool { return len(s[x].b) >= n })
	if i == lens {
		i--
		s[i].b, isZeroed = make([]byte, n, mathutil.Min(2*n, maxBuf)), true
	}

	r = s[i]
	r.b = r.b[:n]
	copy(s[i:], s[i+1:])
	s = s[:lens-1]
	*c = s
	return
}
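
get2 above uses sort.Search to find the smallest cached buffer whose length is at least n; sort.Search returns the first index at which the predicate is true, or the slice length if there is none. A minimal sketch with an illustrative slice (not from the original cache):

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Buffer lengths in ascending order, as the cache above keeps them.
	sizes := []int{8, 16, 64, 256}
	n := 32
	i := sort.Search(len(sizes), func(x int) bool { return sizes[x] >= n })
	fmt.Println(i) // 2, i.e. the 64-byte buffer; len(sizes) would mean "no fit"
}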
Example #8
File: vm.go Project: JamesLinus/pl0
// Execute implements Instruction.
//
// Stack frame before Execute:
//
//	        +----------------+
//	        | Return address |
//	        +----------------+
//	  SP -> |                |
//
//
// Stack frame after Execute:
//
//	        +----------------+
//	        | Return address |
//	        +----------------+
//	        | Previous FP    |
//	        +----------------+
//	        | [LNL]int       | Parent Frame Pointers
//	        +----------------+
//	        | LNL            | Lexical Nesting Level
//	        +----------------+
//	  FP -> | [NVars]int     | Local variables
//	        +----------------+
//	  SP -> |                | Evaluation stack
func (n *Enter) Execute(m *VM) {
	m.push(m.FP)
	cur := 0
	if n.LNL != 0 {
		cur = m.read(m.FP - 1)
		fp0x := m.FP - 1 - cur
		for i := 0; i < mathutil.Min(cur, n.LNL); i++ {
			m.push(m.read(fp0x))
			fp0x++
		}
	}
	if n.LNL > cur {
		m.push(m.FP)
	}
	m.push(n.LNL)
	m.FP = m.SP()
	m.Stack = append(m.Stack, make([]int, n.NVars)...)
}
Example #9
File: db.go Project: cznic/xc
// PutBytes stores b in the DB and returns its id.  Zero length byte slices are
// guaranteed to return zero ID. PutBytes Locks the DB before updating it.
func (d *MemDB) PutBytes(b []byte) int {
	if len(b) == 0 {
		return 0
	}

	if len(b) == 1 {
		return int(b[0]) + 1
	}

	d.mu.Lock() // W+

	id := d.nextID
	pi := id >> dbPageShift
	if pi < len(d.pages) {
		p := d.pages[pi]
		off := id & dbPageMask
		if n := cap(p) - off - maxUvarint; n >= len(b) {
			p = p[:cap(p)]
			l := binary.PutUvarint(p[off:], uint64(len(b)))
			copy(p[off+l:], b)
			n = l + len(b)
			d.pages[pi] = p[:off+n]
			d.nextID += n

			d.mu.Unlock() // W-

			return id + 257
		}

		pi++
	}

	p := make([]byte, mathutil.Max(dbPageSize, maxUvarint+len(b)))
	p = p[:binary.PutUvarint(p, uint64(len(b)))]
	p = append(p, b...)
	d.pages = append(d.pages, p)
	id = pi << dbPageShift
	d.nextID = id + mathutil.Min(dbPageSize, len(p))

	d.mu.Unlock() // W-

	return id + 257
}
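
PutBytes above stores each value as an unsigned-varint length prefix followed by the payload (binary.PutUvarint, with maxUvarint bytes of headroom). A minimal sketch of just that encoding step, with illustrative names:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	b := []byte("hello")
	// binary.MaxVarintLen64 plays the role of the maxUvarint headroom above.
	p := make([]byte, binary.MaxVarintLen64+len(b))
	l := binary.PutUvarint(p, uint64(len(b))) // write the length prefix
	n := copy(p[l:], b)                       // write the payload
	fmt.Println(l, n, p[:l+n]) // 1 5 [5 104 101 108 108 111]
}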
Example #10
// Readdir implements http.File.
func (f *HTTPFile) Readdir(count int) ([]os.FileInfo, error) {
	if f.isFile {
		return nil, fmt.Errorf("not a directory: %s", f.name)
	}

	if count <= 0 {
		r := f.dirEntries
		f.dirEntries = f.dirEntries[:0]
		return r, nil
	}

	rq := mathutil.Min(count, len(f.dirEntries))
	r := f.dirEntries[:rq]
	f.dirEntries = f.dirEntries[rq:]
	if len(r) != 0 {
		return r, nil
	}

	return nil, io.EOF
}
Example #11
// ReadAt implements Filer.
func (f *MemFiler) ReadAt(b []byte, off int64) (n int, err error) {
	avail := f.size - off
	pgI := off >> pgBits
	pgO := int(off & pgMask)
	rem := len(b)
	if int64(rem) >= avail {
		rem = int(avail)
		err = io.EOF
	}
	for rem != 0 && avail > 0 {
		pg := f.m[pgI]
		if pg == nil {
			pg = &zeroPage
		}
		nc := copy(b[:mathutil.Min(rem, pgSize)], pg[pgO:])
		pgI++
		pgO = 0
		rem -= nc
		n += nc
		b = b[nc:]
	}
	return
}
Example #12
File: file.go Project: pmezard/exp
func (f *File) readAt(b []byte, off int64, bits bool) (n int, err error) {
	var fsize int64
	if !bits {
		fsize, err = f.Size()
		if err != nil {
			return
		}
	}

	avail := fsize - off
	pgI := off >> pgBits
	pgO := int(off & pgMask)
	rem := len(b)
	if !bits && int64(rem) >= avail {
		rem = int(avail)
		err = io.EOF
	}
	for rem != 0 && (bits || avail > 0) {
		v, err := (*Array)(f).Get(pgI)
		if err != nil {
			return n, err
		}

		pg, _ := v.([]byte)
		if len(pg) == 0 {
			pg = zeroPage[:]
		}

		nc := copy(b[:mathutil.Min(rem, pgSize)], pg[pgO:])
		pgI++
		pgO = 0
		rem -= nc
		n += nc
		b = b[nc:]
	}
	return
}
Example #13
func BenchmarkRollbackFiler(b *testing.B) {
	rng := rand.New(rand.NewSource(42))
	type t struct {
		off int64
		b   []byte
	}
	a := []t{}
	for rem := b.N; rem > 0; {
		off := rng.Int63()
		n := mathutil.Min(rng.Intn(1e3)+1, rem)
		a = append(a, t{off, rndBytes(rng, n)})
		rem -= n
	}

	var r *RollbackFiler
	f := NewMemFiler()

	checkpoint := func(sz int64) (err error) {
		return f.Truncate(sz)
	}

	r, err := NewRollbackFiler(f, checkpoint, f)
	if err != nil {
		b.Fatal(err)
	}

	if err := r.BeginUpdate(); err != nil {
		b.Fatal(err)
	}

	b.ResetTimer()
	for _, v := range a {
		if _, err := r.WriteAt(v.b, v.off); err != nil {
			b.Fatal(err)
		}
	}
}
Example #14
func main1(in string) (err error) {
	var out io.Writer
	if nm := *oOut; nm != "" {
		var f *os.File
		var e error
		if f, err = os.Create(nm); err != nil {
			return err
		}

		defer func() {
			if e1 := f.Close(); e1 != nil && err == nil {
				err = e1
			}
		}()
		w := bufio.NewWriter(f)
		defer func() {
			if e1 := w.Flush(); e1 != nil && err == nil {
				err = e1
			}
		}()
		buf := bytes.NewBuffer(nil)
		out = buf
		defer func() {
			var dest []byte
			if dest, e = format.Source(buf.Bytes()); e != nil {
				dest = buf.Bytes()
			}

			if _, e = w.Write(dest); e != nil && err == nil {
				err = e
			}
		}()
	}

	var rep io.Writer
	if nm := *oReport; nm != "" {
		f, err1 := os.Create(nm)
		if err1 != nil {
			return err1
		}

		defer func() {
			if e := f.Close(); e != nil && err == nil {
				err = e
			}
		}()
		w := bufio.NewWriter(f)
		defer func() {
			if e := w.Flush(); e != nil && err == nil {
				err = e
			}
		}()
		rep = w
	}

	var xerrors []byte
	if nm := *oXErrors; nm != "" {
		b, err1 := ioutil.ReadFile(nm)
		if err1 != nil {
			return err1
		}

		xerrors = b
	}

	p, err := y.ProcessFile(token.NewFileSet(), in, &y.Options{
		//NoDefault:   *oNoDefault,
		AllowConflicts: true,
		Closures:       *oClosures,
		LA:             *oLA,
		Reducible:      *oReducible,
		Report:         rep,
		Resolved:       *oResolved,
		XErrorsName:    *oXErrors,
		XErrorsSrc:     xerrors,
	})
	if err != nil {
		return err
	}

	if fn := *oXErrorsGen; fn != "" {
		f, err := os.OpenFile(fn, os.O_RDWR|os.O_CREATE, 0666)
		if err != nil {
			return err
		}

		b := bufio.NewWriter(f)
		if err := p.SkeletonXErrors(b); err != nil {
			return err
		}

		if err := b.Flush(); err != nil {
			return err
		}

		if err := f.Close(); err != nil {
			return err
		}
	}

	msu := make(map[*y.Symbol]int, len(p.Syms)) // sym -> usage
	for nm, sym := range p.Syms {
		if nm == "" || nm == "ε" || nm == "$accept" || nm == "#" {
			continue
		}

		msu[sym] = 0
	}
	var minArg, maxArg int
	for _, state := range p.Table {
		for _, act := range state {
			msu[act.Sym]++
			k, arg := act.Kind()
			if k == 'a' {
				continue
			}

			if k == 'r' {
				arg = -arg
			}
			minArg, maxArg = mathutil.Min(minArg, arg), mathutil.Max(maxArg, arg)
		}
	}
	su := make(symsUsed, 0, len(msu))
	for sym, used := range msu {
		su = append(su, symUsed{sym, used})
	}
	sort.Sort(su)

	// ----------------------------------------------------------- Prologue
	f := strutil.IndentFormatter(out, "\t")
	f.Format("// CAUTION: Generated file - DO NOT EDIT.\n\n")
	f.Format("%s", injectImport(p.Prologue))
	f.Format(`
type %[1]sSymType %i%s%u

type %[1]sXError struct {
	state, xsym int
}
`, *oPref, p.UnionSrc)

	// ---------------------------------------------------------- Constants
	nsyms := map[string]*y.Symbol{}
	a := make([]string, 0, len(msu))
	maxTokName := 0
	for sym := range msu {
		nm := sym.Name
		if nm == "$default" || nm == "$end" || sym.IsTerminal && nm[0] != '\'' && sym.Value > 0 {
			maxTokName = mathutil.Max(maxTokName, len(nm))
			a = append(a, nm)
		}
		nsyms[nm] = sym
	}
	sort.Strings(a)
	f.Format("\nconst (%i\n")
	for _, v := range a {
		nm := v
		switch nm {
		case "error":
			nm = *oPref + "ErrCode"
		case "$default":
			nm = *oPref + "Default"
		case "$end":
			nm = *oPref + "EofCode"
		}
		f.Format("%s%s = %d\n", nm, strings.Repeat(" ", maxTokName-len(nm)+1), nsyms[v].Value)
	}
	minArg-- // eg: [-13, 42], minArg -14 maps -13 to 1 so zero cell values -> empty.
	f.Format("\n%sMaxDepth = 200\n", *oPref)
	f.Format("%sTabOfs   = %d\n", *oPref, minArg)
	f.Format("%u)")

	// ---------------------------------------------------------- Variables
	f.Format("\n\nvar (%i\n")

	// Lex translation table
	f.Format("%sXLAT = map[int]int{%i\n", *oPref)
	xlat := make(map[int]int, len(su))
	var errSym int
	for i, v := range su {
		if v.sym.Name == "error" {
			errSym = i
		}
		xlat[v.sym.Value] = i
		f.Format("%6d: %3d, // %s (%dx)\n", v.sym.Value, i, v.sym.Name, msu[v.sym])
	}
	f.Format("%u}\n")

	// Symbol names
	f.Format("\n%sSymNames = []string{%i\n", *oPref)
	for _, v := range su {
		f.Format("%q,\n", v.sym.Name)
	}
	f.Format("%u}\n")

	// Reduction table
	f.Format("\n%sReductions = []struct{xsym, components int}{%i\n", *oPref)
	for _, rule := range p.Rules {
		f.Format("{%d, %d},\n", xlat[rule.Sym.Value], len(rule.Components))
	}
	f.Format("%u}\n")

	// XError table
	f.Format("\n%[1]sXErrors = map[%[1]sXError]string{%i\n", *oPref)
	for _, xerr := range p.XErrors {
		state := xerr.Stack[len(xerr.Stack)-1]
		xsym := -1
		if xerr.Lookahead != nil {
			xsym = xlat[xerr.Lookahead.Value]
		}
		f.Format("%[1]sXError{%d, %d}: \"%s\",\n", *oPref, state, xsym, xerr.Msg)
	}
	f.Format("%u}\n\n")

	// Parse table
	tbits := 32
	switch n := mathutil.BitLen(maxArg - minArg + 1); {
	case n < 8:
		tbits = 8
	case n < 16:
		tbits = 16
	}
	f.Format("%sParseTab = [%d][]uint%d{%i\n", *oPref, len(p.Table), tbits)
	nCells := 0
	var tabRow sortutil.Uint64Slice
	for si, state := range p.Table {
		tabRow = tabRow[:0]
		max := 0
		for _, act := range state {
			sym := act.Sym
			xsym, ok := xlat[sym.Value]
			if !ok {
				panic("internal error 001")
			}

			max = mathutil.Max(max, xsym)
			kind, arg := act.Kind()
			switch kind {
			case 'a':
				arg = 0
			case 'r':
				arg *= -1
			}
			tabRow = append(tabRow, uint64(xsym)<<32|uint64(arg-minArg))
		}
		nCells += max
		tabRow.Sort()
		col := -1
		if si%5 == 0 {
			f.Format("// %d\n", si)
		}
		f.Format("{")
		for i, v := range tabRow {
			xsym := int(uint32(v >> 32))
			arg := int(uint32(v))
			if col+1 != xsym {
				f.Format("%d: ", xsym)
			}
			switch {
			case i == len(tabRow)-1:
				f.Format("%d", arg)
			default:
				f.Format("%d, ", arg)
			}
			col = xsym
		}
		f.Format("},\n")
	}
	f.Format("%u}\n")
	fmt.Fprintf(os.Stderr, "Parse table entries: %d of %d, x %d bits == %d bytes\n", nCells, len(p.Table)*len(msu), tbits, nCells*tbits/8)
	if n := p.ConflictsSR; n != 0 {
		fmt.Fprintf(os.Stderr, "conflicts: %d shift/reduce\n", n)
	}
	if n := p.ConflictsRR; n != 0 {
		fmt.Fprintf(os.Stderr, "conflicts: %d reduce/reduce\n", n)
	}

	f.Format(`%u)

var %[1]sDebug = 0

type %[1]sLexer interface {
	Lex(lval *%[1]sSymType) int
	Errorf(format string, a ...interface{})
	Errors() []error
}

type %[1]sLexerEx interface {
	%[1]sLexer
	Reduced(rule, state int, lval *%[1]sSymType) bool
}

func %[1]sSymName(c int) (s string) {
	x, ok := %[1]sXLAT[c]
	if ok {
		return %[1]sSymNames[x]
	}

	return __yyfmt__.Sprintf("%%d", c)
}

func %[1]slex1(yylex %[1]sLexer, lval *%[1]sSymType) (n int) {
	n = yylex.Lex(lval)
	if n <= 0 {
		n = %[1]sEofCode
	}
	if %[1]sDebug >= 3 {
		__yyfmt__.Printf("\nlex %%s(%%#x %%d), %[4]s: %[3]s\n", %[1]sSymName(n), n, n, %[4]s)
	}
	return n
}

func %[1]sParse(yylex %[1]sLexer, parser *Parser) int {
	const yyError = %[2]d

	yyEx, _ := yylex.(%[1]sLexerEx)
	var yyn int
	parser.yylval = %[1]sSymType{}
	parser.yyVAL = %[1]sSymType{}
	yyS := parser.cache

	Nerrs := 0   /* number of errors */
	Errflag := 0 /* error recovery flag */
	yyerrok := func() { 
		if %[1]sDebug >= 2 {
			__yyfmt__.Printf("yyerrok()\n")
		}
		Errflag = 0
	}
	_ = yyerrok
	yystate := 0
	yychar := -1
	var yyxchar int
	var yyshift int
	yyp := -1
	goto yystack

ret0:
	return 0

ret1:
	return 1

yystack:
	/* put a state and value onto the stack */
	yyp++
	if yyp >= len(yyS) {
		nyys := make([]%[1]sSymType, len(yyS)*2)
		copy(nyys, yyS)
		yyS = nyys
		parser.cache = yyS
	}
	yyS[yyp] = parser.yyVAL
	yyS[yyp].yys = yystate

yynewstate:
	if yychar < 0 {
		yychar = %[1]slex1(yylex, &parser.yylval)
		var ok bool
		if yyxchar, ok = %[1]sXLAT[yychar]; !ok {
			yyxchar = len(%[1]sSymNames) // > tab width
		}
	}
	if %[1]sDebug >= 4 {
		var a []int
		for _, v := range yyS[:yyp+1] {
			a = append(a, v.yys)
		}
		__yyfmt__.Printf("state stack %%v\n", a)
	}
	row := %[1]sParseTab[yystate]
	yyn = 0
	if yyxchar < len(row) {
		if yyn = int(row[yyxchar]); yyn != 0 {
			yyn += %[1]sTabOfs
		}
	}
	switch {
	case yyn > 0: // shift
		yychar = -1
		parser.yyVAL = parser.yylval
		yystate = yyn
		yyshift = yyn
		if %[1]sDebug >= 2 {
			__yyfmt__.Printf("shift, and goto state %%d\n", yystate)
		}
		if Errflag > 0 {
			Errflag--
		}
		goto yystack
	case yyn < 0: // reduce
	case yystate == 1: // accept
		if %[1]sDebug >= 2 {
			__yyfmt__.Println("accept")
		}
		goto ret0
	}

	if yyn == 0 {
		/* error ... attempt to resume parsing */
		switch Errflag {
		case 0: /* brand new error */
			if %[1]sDebug >= 1 {
				__yyfmt__.Printf("no action for %%s in state %%d\n", %[1]sSymName(yychar), yystate)
			}
			msg, ok := %[1]sXErrors[%[1]sXError{yystate, yyxchar}]
			if !ok {
				msg, ok = %[1]sXErrors[%[1]sXError{yystate, -1}]
			}
			if !ok && yyshift != 0 {
				msg, ok = %[1]sXErrors[%[1]sXError{yyshift, yyxchar}]
			}
			if !ok {
				msg, ok = %[1]sXErrors[%[1]sXError{yyshift, -1}]
			}
			if !ok || msg == "" {
				msg = "syntax error"
			}
			// ignore goyacc error message
			yylex.Errorf("")
			Nerrs++
			fallthrough

		case 1, 2: /* incompletely recovered error ... try again */
			Errflag = 3

			/* find a state where "error" is a legal shift action */
			for yyp >= 0 {
				row := %[1]sParseTab[yyS[yyp].yys]
				if yyError < len(row) {
					yyn = int(row[yyError])+%[1]sTabOfs
					if yyn > 0 { // hit
						if %[1]sDebug >= 2 {
							__yyfmt__.Printf("error recovery found error shift in state %%d\n", yyS[yyp].yys)
						}
						yystate = yyn /* simulate a shift of "error" */
						goto yystack
					}
				}

				/* the current p has no shift on "error", pop stack */
				if %[1]sDebug >= 2 {
					__yyfmt__.Printf("error recovery pops state %%d\n", yyS[yyp].yys)
				}
				yyp--
			}
			/* there is no state on the stack with an error shift ... abort */
			if %[1]sDebug >= 2 {
				__yyfmt__.Printf("error recovery failed\n")
			}
			goto ret1

		case 3: /* no shift yet; clobber input char */
			if %[1]sDebug >= 2 {
				__yyfmt__.Printf("error recovery discards %%s\n", %[1]sSymName(yychar))
			}
			if yychar == %[1]sEofCode {
				goto ret1
			}

			yychar = -1
			goto yynewstate /* try again in the same state */
		}
	}

	r := -yyn
	x0 := %[1]sReductions[r]
	x, n := x0.xsym, x0.components
	yypt := yyp
	_ = yypt // guard against "declared and not used"

	yyp -= n
	if yyp+1 >= len(yyS) {
		nyys := make([]%[1]sSymType, len(yyS)*2)
		copy(nyys, yyS)
		yyS = nyys
		parser.cache = yyS
	}
	parser.yyVAL = yyS[yyp+1]

	/* consult goto table to find next state */
	exState := yystate
	yystate = int(%[1]sParseTab[yyS[yyp].yys][x])+%[1]sTabOfs
	/* reduction by production r */
	if %[1]sDebug >= 2 {
		__yyfmt__.Printf("reduce using rule %%v (%%s), and goto state %%d\n", r, %[1]sSymNames[x], yystate)
	}

	switch r {%i
`,
		*oPref, errSym, *oDlvalf, *oDlval)
	for r, rule := range p.Rules {
		if rule.Action == nil {
			continue
		}

		action := rule.Action.Values
		if len(action) == 0 {
			continue
		}

		if len(action) == 1 {
			part := action[0]
			if part.Type == parser.ActionValueGo {
				src := part.Src
				src = src[1 : len(src)-1] // Remove lead '{' and trail '}'
				if strings.TrimSpace(src) == "" {
					continue
				}
			}
		}

		components := rule.Components
		typ := rule.Sym.Type
		max := len(components)
		if p1 := rule.Parent; p1 != nil {
			max = rule.MaxParentDlr
			components = p1.Components
		}
		f.Format("case %d: ", r)
		for _, part := range action {
			num := part.Num
			switch part.Type {
			case parser.ActionValueGo:
				f.Format("%s", part.Src)
			case parser.ActionValueDlrDlr:
				f.Format("parser.yyVAL.%s", typ)
				if typ == "" {
					panic("internal error 002")
				}
			case parser.ActionValueDlrNum:
				typ := p.Syms[components[num-1]].Type
				if typ == "" {
					panic("internal error 003")
				}
				f.Format("yyS[yypt-%d].%s", max-num, typ)
			case parser.ActionValueDlrTagDlr:
				f.Format("parser.yyVAL.%s", part.Tag)
			case parser.ActionValueDlrTagNum:
				f.Format("yyS[yypt-%d].%s", max-num, part.Tag)
			}
		}
		f.Format("\n")
	}
	f.Format(`%u
	}

	if yyEx != nil && yyEx.Reduced(r, exState, &parser.yyVAL) {
		return -1
	}
	goto yystack /* stack new state and value */
}

%[2]s
`, *oPref, p.Tail)
	_ = oNoLines //TODO Ignored for now
	return nil
}
Example #15
File: file.go Project: uabassguy/ql
// []interface{}{qltype, ...}->[]interface{}{lldb scalar type, ...}
// + long blobs are (pre)written to a chain of chunks.
func (s *file) flatten(data []interface{}) (err error) {
	for i, v := range data {
		tag := 0
		var b []byte
		switch x := v.(type) {
		case []byte:
			tag = qBlob
			b = x
		case *big.Int:
			tag = qBigInt
			b, err = s.codec.encode(x)
		case *big.Rat:
			tag = qBigRat
			b, err = s.codec.encode(x)
		case time.Time:
			tag = qTime
			b, err = s.codec.encode(x)
		case time.Duration:
			tag = qDuration
			b, err = s.codec.encode(x)
		default:
			continue
		}
		if err != nil {
			return
		}

		const chunk = 1 << 16
		chunks := 0
		var next int64
		var buf []byte
		for rem := len(b); rem > shortBlob; {
			n := mathutil.Min(rem, chunk)
			part := b[rem-n:]
			b = b[:rem-n]
			rem -= n
			switch next {
			case 0: // last chunk
				buf, err = lldb.EncodeScalars([]interface{}{part}...)
			default: // middle chunk
				buf, err = lldb.EncodeScalars([]interface{}{next, part}...)
			}
			if err != nil {
				return
			}

			h, err := s.a.Alloc(buf)
			if err != nil {
				return err
			}

			next = h
			chunks++
		}

		switch next {
		case 0: // single chunk
			buf, err = lldb.EncodeScalars([]interface{}{tag, b}...)
		default: // multi chunks
			buf, err = lldb.EncodeScalars([]interface{}{tag, next, b}...)
		}
		if err != nil {
			return
		}

		data[i] = buf
	}
	return
}
Example #16
/*
Free up some synapses in this segment. We always free up inactive
synapses (lowest permanence freed up first) before we start to free up
active ones.

param numToFree number of synapses to free up
param inactiveSynapseIndices list of the inactive synapse indices.
*/
func (s *Segment) freeNSynapses(numToFree int, inactiveSynapseIndices []int) {
	//Make sure numToFree isn't larger than the total number of syns we have
	if numToFree > len(s.syns) {
		panic("Number to free cannot be larger than existing synapses.")
	}

	if s.tp.params.Verbosity >= 5 {
		fmt.Println("freeNSynapses with numToFree=", numToFree)
		fmt.Println("inactiveSynapseIndices= ", inactiveSynapseIndices)
	}

	var candidates []int
	// Remove the lowest perm inactive synapses first
	if len(inactiveSynapseIndices) > 0 {
		perms := make([]float64, len(inactiveSynapseIndices))
		for idx, _ := range perms {
			perms[idx] = s.syns[idx].Permanence
		}
		var indexes []int
		floats.Argsort(perms, indexes)
		//sort perms
		cSize := mathutil.Min(numToFree, len(perms))
		candidates = make([]int, cSize)
		//indexes[0:cSize]
		for i := 0; i < cSize; i++ {
			candidates[i] = inactiveSynapseIndices[indexes[i]]
		}
	}

	// Do we need more? if so, remove the lowest perm active synapses too
	var activeSynIndices []int
	if len(candidates) < numToFree {
		for i := 0; i < len(s.syns); i++ {
			if !utils.ContainsInt(i, inactiveSynapseIndices) {
				activeSynIndices = append(activeSynIndices, i)
			}
		}

		perms := make([]float64, len(activeSynIndices))
		for i := range perms {
			perms[i] = s.syns[i].Permanence
		}
		var indexes []int
		floats.Argsort(perms, indexes)

		moreToFree := numToFree - len(candidates)
		//moreCandidates := make([]int, moreToFree)
		for i := 0; i < moreToFree; i++ {
			candidates = append(candidates, activeSynIndices[indexes[i]])
		}
	}

	if s.tp.params.Verbosity >= 4 {
		fmt.Printf("Deleting %v synapses from segment to make room for new ones: %v \n",
			len(candidates), candidates)
		fmt.Println("Before:", s.ToString())
	}

	// Delete candidate syns by copying undeleted to new slice
	var newSyns []Synapse
	for idx, val := range s.syns {
		if !utils.ContainsInt(idx, candidates) {
			newSyns = append(newSyns, val)
		}
	}
	s.syns = newSyns

	if s.tp.params.Verbosity >= 4 {
		fmt.Println("After:", s.ToString())
	}

}
Example #17
func minmax(a, b int) {
	fmt.Println("Min:", mathutil.Min(a, b))
	fmt.Println("Max:", mathutil.Max(a, b))
}
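
A minimal usage sketch for the helper above; the import path is assumed to be github.com/cznic/mathutil (later releases of the package live at modernc.org/mathutil):

package main

import (
	"fmt"

	"github.com/cznic/mathutil" // assumed import path for the mathutil used in these examples
)

func minmax(a, b int) {
	fmt.Println("Min:", mathutil.Min(a, b))
	fmt.Println("Max:", mathutil.Max(a, b))
}

func main() {
	minmax(3, 7) // Min: 3, Max: 7
}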
Example #18
func (tp *TrivialPredictor) infer(activeColumns []int) {

	numColsToPredict := int(0.5 + tp.AverageDensity*float64(tp.NumOfCols))

	//for method in self.methods:
	for _, method := range tp.Methods {
		// Copy t-1 into t
		copy(tp.State[method].ActiveStateLast, tp.State[method].ActiveState)
		copy(tp.State[method].PredictedStateLast, tp.State[method].PredictedState)
		copy(tp.State[method].ConfidenceLast, tp.State[method].Confidence)

		utils.FillSliceBool(tp.State[method].ActiveState, false)
		utils.FillSliceBool(tp.State[method].PredictedState, false)
		utils.FillSliceFloat64(tp.State[method].Confidence, 0.0)

		for _, val := range activeColumns {
			tp.State[method].ActiveState[val] = true
		}

		var predictedCols []int

		switch method {
		case Random:
			// Randomly predict N columns
			//predictedCols = RandomInts(numColsToPredict, tp.NumOfCols)
			break
		case Zeroth:
			// Always predict the top N most frequent columns
			var inds []int
			ints.Argsort(tp.ColumnCount, inds)
			predictedCols = inds[len(inds)-numColsToPredict:]
			break
		case Last:
			// Always predict the last input
			for idx, val := range tp.State[method].ActiveState {
				if val {
					predictedCols = append(predictedCols, idx)
				}
			}
			break
		case All:
			// Always predict all columns
			for i := 0; i < tp.NumOfCols; i++ {
				predictedCols = append(predictedCols, i)
			}
			break
		case Lots:
			// Always predict 2 * the top N most frequent columns
			numColsToPredict := mathutil.Min(2*numColsToPredict, tp.NumOfCols)
			var inds []int
			ints.Argsort(tp.ColumnCount, inds)
			predictedCols = inds[len(inds)-numColsToPredict:]

			break
		default:
			panic("prediction method not implemented")
		}

		for _, val := range predictedCols {
			tp.State[method].PredictedState[val] = true
			tp.State[method].Confidence[val] = 1.0
		}

		if tp.Verbosity > 1 {
			fmt.Println("Random prediction:", method)
			fmt.Println(" numColsToPredict:", numColsToPredict)
			fmt.Println(predictedCols)
		}

	}

}
Example #19
// NewSpatialPooler creates a new spatial pooler.
func NewSpatialPooler(spParams SpParams) *SpatialPooler {
	sp := SpatialPooler{}
	//Validate inputs
	sp.numColumns = utils.ProdInt(spParams.ColumnDimensions)
	sp.numInputs = utils.ProdInt(spParams.InputDimensions)

	if sp.numColumns < 1 {
		panic("Must have at least 1 column")
	}
	if sp.numInputs < 1 {
		panic("must have at least 1 input")
	}
	if spParams.NumActiveColumnsPerInhArea < 1 && (spParams.LocalAreaDensity < 1) && (spParams.LocalAreaDensity >= 0.5) {
		panic("Num active colums invalid")
	}

	sp.InputDimensions = spParams.InputDimensions
	sp.ColumnDimensions = spParams.ColumnDimensions
	sp.PotentialRadius = int(mathutil.Min(spParams.PotentialRadius, sp.numInputs))
	sp.PotentialPct = spParams.PotentialPct
	sp.GlobalInhibition = spParams.GlobalInhibition
	sp.LocalAreaDensity = spParams.LocalAreaDensity
	sp.NumActiveColumnsPerInhArea = spParams.NumActiveColumnsPerInhArea
	sp.StimulusThreshold = spParams.StimulusThreshold
	sp.SynPermInactiveDec = spParams.SynPermInactiveDec
	sp.SynPermActiveInc = spParams.SynPermActiveInc
	sp.SynPermBelowStimulusInc = spParams.SynPermConnected / 10.0
	sp.SynPermConnected = spParams.SynPermConnected
	sp.MinPctOverlapDutyCycles = spParams.MinPctOverlapDutyCycle
	sp.MinPctActiveDutyCycles = spParams.MinPctActiveDutyCycle
	sp.DutyCyclePeriod = spParams.DutyCyclePeriod
	sp.MaxBoost = spParams.MaxBoost
	sp.Seed = spParams.Seed
	sp.SpVerbosity = spParams.SpVerbosity

	// Extra parameter settings
	sp.SynPermMin = 0
	sp.SynPermMax = 1
	sp.SynPermTrimThreshold = sp.SynPermActiveInc / 2.0
	if sp.SynPermTrimThreshold >= sp.SynPermConnected {
		panic("Syn perm threshold >= syn connected.")
	}
	sp.UpdatePeriod = 50
	sp.InitConnectedPct = 0.5

	/*
			# Internal state
		    version = 1.0
		    iterationNum = 0
		    iterationLearnNum = 0
	*/

	/*
			 Store the set of all inputs that are within each column's potential pool.
		     'potentialPools' is a matrix, whose rows represent cortical columns, and
		     whose columns represent the input bits. if potentialPools[i][j] == 1,
		     then input bit 'j' is in column 'i's potential pool. A column can only be
		     connected to inputs in its potential pool. The indices refer to a
		     flattened version of both the inputs and columns. Namely, irrespective
		     of the topology of the inputs and columns, they are treated as being a
		     one dimensional array. Since a column is typically connected to only a
		     subset of the inputs, many of the entries in the matrix are 0. Therefore
		     the potentialPool matrix is stored using the SparseBinaryMatrix
		     class, to reduce memory footprint and computation time of algorithms that
		     require iterating over the data structure.
	*/
	sp.potentialPools = NewDenseBinaryMatrix(sp.numColumns, sp.numInputs)

	/*
			 Initialize the permanences for each column. Similar to the
		     'potentialPools', the permanences are stored in a matrix whose rows
		     represent the cortical columns, and whose columns represent the input
		     bits. if permanences[i][j] = 0.2, then the synapse connecting
		     cortical column 'i' to input bit 'j' has a permanence of 0.2. Here we
		     also use the SparseMatrix class to reduce the memory footprint and
		     computation time of algorithms that require iterating over the data
		     structure. This permanence matrix is only allowed to have non-zero
		     elements where the potential pool is non-zero.
	*/
	//Assumes 70% sparsity
	elms := make(map[int]float64, int(float64(sp.numColumns*sp.numInputs)*0.3))
	sp.permanences = matrix.MakeSparseMatrix(elms, sp.numColumns, sp.numInputs)

	/*
			 Initialize a tiny random tie breaker. This is used to determine winning
		     columns where the overlaps are identical.
	*/

	sp.tieBreaker = make([]float64, sp.numColumns)
	for i := 0; i < len(sp.tieBreaker); i++ {
		sp.tieBreaker[i] = 0.01 * rand.Float64()
	}

	/*
			 'connectedSynapses' is a similar matrix to 'permanences'
		     (rows represent cortical columns, columns represent input bits) whose
		     entries represent whether the cortical column is connected to the input
		     bit, i.e. its permanence value is greater than 'synPermConnected'. While
		     this information is readily available from the 'permanence' matrix,
		     it is stored separately for efficiency purposes.
	*/
	sp.connectedSynapses = NewDenseBinaryMatrix(sp.numColumns, sp.numInputs)

	/*
			 Stores the number of connected synapses for each column. This is simply
		     a sum of each row of 'ConnectedSynapses'. again, while this
		     information is readily available from 'ConnectedSynapses', it is
		     stored separately for efficiency purposes.
	*/
	sp.connectedCounts = make([]int, sp.numColumns)

	/*
			 Initialize the set of permanence values for each column. Ensure that
		     each column is connected to enough input bits to allow it to be
		     activated
	*/

	for i := 0; i < sp.numColumns; i++ {
		potential := sp.mapPotential(i, true)
		sp.potentialPools.ReplaceRow(i, potential)
		perm := sp.initPermanence(potential, sp.InitConnectedPct)
		sp.updatePermanencesForColumn(perm, i, true)
	}

	sp.overlapDutyCycles = make([]float64, sp.numColumns)
	sp.activeDutyCycles = make([]float64, sp.numColumns)
	sp.minOverlapDutyCycles = make([]float64, sp.numColumns)
	sp.minActiveDutyCycles = make([]float64, sp.numColumns)
	sp.boostFactors = make([]float64, sp.numColumns)
	for i := 0; i < len(sp.boostFactors); i++ {
		sp.boostFactors[i] = 1.0
	}

	/*
			The inhibition radius determines the size of a column's local
		    neighborhood. A cortical column must overcome the overlap
		    score of columns in its neighborhood in order to become active. This
		    radius is updated every learning round. It grows and shrinks with the
		    average number of connected synapses per column.
	*/
	sp.inhibitionRadius = 0
	sp.updateInhibitionRadius(sp.avgConnectedSpanForColumnND, sp.avgColumnsPerInput)

	if sp.spVerbosity > 0 {
		sp.printParameters()
	}

	return &sp
}
Example #20
func TestMarshal(t *testing.T) {
	now := time.Now()
	dur := time.Millisecond
	schema8 := testSchema8{
		A: true,
		//B: 1,
		C: 2,
		D: 3,
		E: 4,
		F: 5,
		//G: 6,
		H: 7,
		I: 8,
		J: 9,
		K: 10,
		L: 11,
		M: 12,
		N: -1,
		O: -2,
		P: []byte("abc"),
		Q: *big.NewInt(1),
		R: *big.NewRat(3, 2),
		S: "string",
		T: now,
		U: dur,
	}
	schema8.PA = &schema8.A
	//schema8.PB = &schema8.B
	schema8.PC = &schema8.C
	schema8.PD = &schema8.D
	schema8.PE = &schema8.E
	schema8.PF = &schema8.F
	//schema8.PG = &schema8.G
	schema8.PH = &schema8.H
	schema8.PI = &schema8.I
	schema8.PJ = &schema8.J
	schema8.PK = &schema8.K
	schema8.PL = &schema8.L
	schema8.PM = &schema8.M
	schema8.PN = &schema8.N
	schema8.PO = &schema8.O
	schema8.PP = &schema8.P
	schema8.PQ = &schema8.Q
	schema8.PR = &schema8.R
	schema8.PS = &schema8.S
	schema8.PT = &schema8.T
	schema8.PU = &schema8.U

	type u int
	tab := []struct {
		inst interface{}
		err  bool
		r    []interface{}
	}{
		{42, true, nil},
		{new(u), true, nil},
		{testSchema8{}, false, []interface{}{
			false,
			//int64(0),
			int8(0),
			int16(0),
			int32(0),
			int64(0),
			//uint64(0),
			uint8(0),
			uint16(0),
			uint32(0),
			uint64(0),
			float32(0),
			float64(0),
			complex64(0),
			complex128(0),
			[]byte(nil),
			big.Int{},
			big.Rat{},
			"",
			time.Time{},
			time.Duration(0),
			nil,
			//nil,
			nil,
			nil,
			nil,
			nil,
			//nil,
			nil,
			nil,
			nil,
			nil,
			nil,
			nil,
			nil,
			nil,
			nil,
			nil,
			nil,
			nil,
			nil,
			nil,
		}},
		{&testSchema8{}, false, []interface{}{
			false,
			//int64(0),
			int8(0),
			int16(0),
			int32(0),
			int64(0),
			//uint64(0),
			uint8(0),
			uint16(0),
			uint32(0),
			uint64(0),
			float32(0),
			float64(0),
			complex64(0),
			complex128(0),
			[]byte(nil),
			big.Int{},
			big.Rat{},
			"",
			time.Time{},
			time.Duration(0),
			nil,
			//nil,
			nil,
			nil,
			nil,
			nil,
			//nil,
			nil,
			nil,
			nil,
			nil,
			nil,
			nil,
			nil,
			nil,
			nil,
			nil,
			nil,
			nil,
			nil,
			nil,
		}},
		{schema8, false, []interface{}{
			true,
			//int64(1),
			int8(2),
			int16(3),
			int32(4),
			int64(5),
			//uint64(6),
			uint8(7),
			uint16(8),
			uint32(9),
			uint64(10),
			float32(11),
			float64(12),
			complex64(-1),
			complex128(-2),
			[]byte("abc"),
			*big.NewInt(1),
			*big.NewRat(3, 2),
			"string",
			now,
			dur,
			true,
			//int64(1),
			int8(2),
			int16(3),
			int32(4),
			int64(5),
			//uint64(6),
			uint8(7),
			uint16(8),
			uint32(9),
			uint64(10),
			float32(11),
			float64(12),
			complex64(-1),
			complex128(-2),
			[]byte("abc"),
			*big.NewInt(1),
			*big.NewRat(3, 2),
			"string",
			now,
			dur,
		}},
		{&schema8, false, []interface{}{
			true,
			//int64(1),
			int8(2),
			int16(3),
			int32(4),
			int64(5),
			//uint64(6),
			uint8(7),
			uint16(8),
			uint32(9),
			uint64(10),
			float32(11),
			float64(12),
			complex64(-1),
			complex128(-2),
			[]byte("abc"),
			*big.NewInt(1),
			*big.NewRat(3, 2),
			"string",
			now,
			dur,
			true,
			//int64(1),
			int8(2),
			int16(3),
			int32(4),
			int64(5),
			//uint64(6),
			uint8(7),
			uint16(8),
			uint32(9),
			uint64(10),
			float32(11),
			float64(12),
			complex64(-1),
			complex128(-2),
			[]byte("abc"),
			*big.NewInt(1),
			*big.NewRat(3, 2),
			"string",
			now,
			dur,
		}},
	}
	for iTest, test := range tab {
		r, err := Marshal(test.inst)
		if g, e := err != nil, test.err; g != e {
			t.Fatal(iTest, g, e)
		}

		if err != nil {
			t.Log(err)
			continue
		}

		for i := 0; i < mathutil.Min(len(r), len(test.r)); i++ {
			g, e := r[i], test.r[i]
			use(e)
			switch x := g.(type) {
			case bool:
				switch y := e.(type) {
				case bool:
					if x != y {
						t.Fatal(iTest, x, y)
					}
				default:
					t.Fatalf("%d: %T <-> %T", iTest, x, y)
				}
			case int:
				switch y := e.(type) {
				case int64:
					if int64(x) != y {
						t.Fatal(iTest, x, y)
					}
				default:
					t.Fatalf("%d: %T <-> %T", iTest, x, y)
				}
			case int8:
				switch y := e.(type) {
				case int8:
					if x != y {
						t.Fatal(iTest, x, y)
					}
				default:
					t.Fatalf("%d: %T <-> %T", iTest, x, y)
				}
			case int16:
				switch y := e.(type) {
				case int16:
					if x != y {
						t.Fatal(iTest, x, y)
					}
				default:
					t.Fatalf("%d: %T <-> %T", iTest, x, y)
				}
			case int32:
				switch y := e.(type) {
				case int32:
					if x != y {
						t.Fatal(iTest, x, y)
					}
				default:
					t.Fatalf("%d: %T <-> %T", iTest, x, y)
				}
			case int64:
				switch y := e.(type) {
				case int64:
					if x != y {
						t.Fatal(iTest, x, y)
					}
				default:
					t.Fatalf("%d: %T <-> %T", iTest, x, y)
				}
			case uint:
				switch y := e.(type) {
				case uint64:
					if uint64(x) != y {
						t.Fatal(iTest, x, y)
					}
				default:
					t.Fatalf("%d: %T <-> %T", iTest, x, y)
				}
			case uint8:
				switch y := e.(type) {
				case uint8:
					if x != y {
						t.Fatal(iTest, x, y)
					}
				default:
					t.Fatalf("%d: %T <-> %T", iTest, x, y)
				}
			case uint16:
				switch y := e.(type) {
				case uint16:
					if x != y {
						t.Fatal(iTest, x, y)
					}
				default:
					t.Fatalf("%d: %T <-> %T", iTest, x, y)
				}
			case uint32:
				switch y := e.(type) {
				case uint32:
					if x != y {
						t.Fatal(iTest, x, y)
					}
				default:
					t.Fatalf("%d: %T <-> %T", iTest, x, y)
				}
			case uint64:
				switch y := e.(type) {
				case uint64:
					if x != y {
						t.Fatal(iTest, x, y)
					}
				default:
					t.Fatalf("%d: %T <-> %T", iTest, x, y)
				}
			case float32:
				switch y := e.(type) {
				case float32:
					if x != y {
						t.Fatal(iTest, x, y)
					}
				default:
					t.Fatalf("%d: %T <-> %T", iTest, x, y)
				}
			case float64:
				switch y := e.(type) {
				case float64:
					if x != y {
						t.Fatal(iTest, x, y)
					}
				default:
					t.Fatalf("%d: %T <-> %T", iTest, x, y)
				}
			case complex64:
				switch y := e.(type) {
				case complex64:
					if x != y {
						t.Fatal(iTest, x, y)
					}
				default:
					t.Fatalf("%d: %T <-> %T", iTest, x, y)
				}
			case complex128:
				switch y := e.(type) {
				case complex128:
					if x != y {
						t.Fatal(iTest, x, y)
					}
				default:
					t.Fatalf("%d: %T <-> %T", iTest, x, y)
				}
			case []byte:
				switch y := e.(type) {
				case []byte:
					if bytes.Compare(x, y) != 0 {
						t.Fatal(iTest, x, y)
					}
				default:
					t.Fatalf("%d: %T <-> %T", iTest, x, y)
				}
			case big.Int:
				switch y := e.(type) {
				case big.Int:
					if x.Cmp(&y) != 0 {
						t.Fatal(iTest, &x, &y)
					}
				default:
					t.Fatalf("%d: %T <-> %T", iTest, x, y)
				}
			case big.Rat:
				switch y := e.(type) {
				case big.Rat:
					if x.Cmp(&y) != 0 {
						t.Fatal(iTest, &x, &y)
					}
				default:
					t.Fatalf("%d: %T <-> %T", iTest, x, y)
				}
			case string:
				switch y := e.(type) {
				case string:
					if x != y {
						t.Fatal(iTest, x, y)
					}
				default:
					t.Fatalf("%d: %T <-> %T", iTest, x, y)
				}
			case time.Time:
				switch y := e.(type) {
				case time.Time:
					if !x.Equal(y) {
						t.Fatal(iTest, x, y)
					}
				default:
					t.Fatalf("%d: %T <-> %T", iTest, x, y)
				}
			case time.Duration:
				switch y := e.(type) {
				case time.Duration:
					if x != y {
						t.Fatal(iTest, x, y)
					}
				default:
					t.Fatalf("%d: %T <-> %T", iTest, x, y)
				}
			case nil:
				switch y := e.(type) {
				case nil:
					// ok
				default:
					t.Fatalf("%d: %T <-> %T", iTest, x, y)
				}
			default:
				panic(fmt.Errorf("%T", x))
			}
		}

		if g, e := len(r), len(test.r); g != e {
			t.Fatal(iTest, g, e)
		}

	}
}