コード例 #1
0
ファイル: earley_test.go プロジェクト: dtromb/parser
// TestEarleyDFAEpsilons builds a small grammar with an epsilon production
// (E -> `e) and verifies that an epsilon-LR0 DFA can be constructed from it,
// dumping the resulting states for inspection.
func TestEarleyDFAEpsilons(t *testing.T) {
	// Build a simple BNF grammar description grammar.
	gb := parser.OpenGrammarBuilder()

	gb.Name("a4").
		Terminals("a").
		Nonterminals("S", "A", "E").
		Rule().Lhs("`*").Rhs("S", "`.").
		Rule().Lhs("S").Rhs("A", "A", "A", "A").
		Rule().Lhs("A").Rhs("a").
		Rule().Lhs("A").Rhs("E").
		Rule().Lhs("E").Rhs("`e")

	g, err := gb.Build()
	if err != nil {
		// Grammar construction failed; g is unusable, so stop here
		// (the original fell through with a possibly-nil grammar).
		t.Error(err)
		return
	}
	grammar := parser.GetIndexedGrammar(g)

	dfa, err := BuildEpsilonLR0Dfa(grammar)
	if err != nil {
		// Report the actual error; the original called t.Error() with
		// no arguments and lost the failure cause.
		t.Error(err)
		return
	}

	fmt.Printf("DFA has %d states\n", len(dfa.states))
	for i := 0; i < len(dfa.states); i++ {
		fmt.Println(dfa.states[i].String())
	}
}
コード例 #2
0
ファイル: earley_test.go プロジェクト: dtromb/parser
// NewSimpleBnfLexer constructs a SimpleBnfLexer over g, resolving the four
// terminals the simple-BNF grammar must declare (IDENTIFIER, NONTERM, COLEQ,
// PIPE) through the grammar's name index. It returns an error if the name
// index is unavailable or any required terminal is missing.
func NewSimpleBnfLexer(g parser.Grammar) (*SimpleBnfLexer, error) {
	ig := parser.GetIndexedGrammar(g)
	idx, err := ig.GetIndex(index.NAME_INDEX)
	if err != nil {
		return nil, err
	}
	nidx := idx.(*index.NameIndex)
	// terminal resolves one required named terminal, producing the uniform
	// "missing … terminal" error when the grammar does not declare it.
	// This replaces four copies of the same lookup/check/assert pattern.
	terminal := func(name string) (parser.GrammarTerminal, error) {
		t := nidx.Terminal(name)
		if t == nil {
			return nil, errors.New("bnf grammar missing " + name + " terminal")
		}
		return t.(parser.GrammarTerminal), nil
	}
	identifier, err := terminal("IDENTIFIER")
	if err != nil {
		return nil, err
	}
	nonterm, err := terminal("NONTERM")
	if err != nil {
		return nil, err
	}
	coleq, err := terminal("COLEQ")
	if err != nil {
		return nil, err
	}
	pipe, err := terminal("PIPE")
	if err != nil {
		return nil, err
	}
	return &SimpleBnfLexer{
		grammar:    g,
		identifier: identifier,
		nonterm:    nonterm,
		coleq:      coleq,
		pipe:       pipe,
	}, nil
}
コード例 #3
0
ファイル: earley_test.go プロジェクト: dtromb/parser
// TestEarleyDFA builds the simple-BNF description grammar and verifies that
// an epsilon-LR0 DFA can be constructed from it, printing the states.
func TestEarleyDFA(t *testing.T) {
	gb := parser.OpenGrammarBuilder()

	gb.Terminals("NONTERM", "COLEQ", "PIPE", "IDENTIFIER").
		Nonterminals("bnf", "ntdecl", "def", "ntort").
		Rule().Lhs("bnf").Rhs("ntdecl").
		Rule().Lhs("bnf").Rhs("ntdecl", "bnf").
		Rule().Lhs("ntdecl").Rhs("NONTERM", "COLEQ", "def").
		Rule().Lhs("ntdecl").Rhs("ntdecl", "PIPE", "def").
		Rule().Lhs("def").Rhs("ntort").
		Rule().Lhs("def").Rhs("ntort", "def").
		Rule().Lhs("ntort").Rhs("IDENTIFIER").
		Rule().Lhs("ntort").Rhs("NONTERM").
		Rule().Lhs("`*").Rhs("bnf", "`.").
		Name("simple-bnf")

	g, err := gb.Build()
	if err != nil {
		// Stop on build failure; the original fell through and would
		// have indexed a possibly-nil grammar.
		t.Error(err)
		return
	}

	grammar := parser.GetIndexedGrammar(g)
	dfa, err := BuildEpsilonLR0Dfa(grammar)
	if err != nil {
		// Report the actual error; the original bare t.Error() lost it.
		t.Error(err)
		return
	}

	fmt.Printf("DFA has %d states\n", len(dfa.states))
	for i := 0; i < len(dfa.states); i++ {
		fmt.Println(dfa.states[i].String())
	}
}
コード例 #4
0
ファイル: lr0.go プロジェクト: dtromb/parser
// BuildDfa constructs the canonical LR(0) DFA for a context-free grammar g.
// When passEpsilons is true, state closure advances through epsilon-deriving
// particles. It returns an error for non-context-free grammars or if any
// grammar index or transition construction fails.
func BuildDfa(g parser.Grammar, passEpsilons bool) (*LR0Dfa, error) {

	// State 0 is the initial state; all other states get ids from nextId.
	nextId := 1

	ig := parser.GetIndexedGrammar(g)
	idx, err := ig.GetIndex(index.GRAMMAR_CLASS_INDEX)
	if err != nil {
		return nil, err
	}
	gcidx := idx.(*index.GrammarClassIndex)
	if !gcidx.Class().ContextFree() {
		return nil, errors.New("cannot create lr0 dfa for a non-context-free grammar")
	}
	idx, err = ig.GetIndex(index.BASIC_INDEX)
	if err != nil {
		return nil, err
	}
	bidx := idx.(*index.BasicGrammarIndex)

	// canon is the canonical set of states; lookups deduplicate states that
	// compare equal so transitions always point at the canonical instance.
	canon := tree.NewTree()

	start := bidx.LhsStart(ig.Asterisk(), 0)
	initState := &LR0State{id: 0, items: tree.NewTree(), transitions: make(map[parser.GrammarParticle]*LR0State)}
	initState.items.Insert(InitialLR0Item(start))
	CloseLR0State(initState, ig, passEpsilons)
	canon.Insert(initState)
	newStates := tree.NewTree()
	newStates.Insert(initState)
	for newStates.Size() > 0 {
		// Pop an unprocessed state from the worklist.
		ns := newStates.First().Next().(*LR0State)
		newStates.Delete(ns)
		// BUG FIX: the original compared the result of MakeTransitions to
		// nil but returned the stale outer `err` (nil here), discarding the
		// real error. Capture and return the actual error.
		if terr := MakeTransitions(ns, ig); terr != nil {
			return nil, terr
		}
		for part, next := range ns.transitions {
			CloseLR0State(next, ig, passEpsilons)
			if cn, has := canon.Lookup(c.LTE, next); has {
				// Already-known state: reuse the canonical instance.
				next = cn.(*LR0State)
			} else {
				next.id = nextId
				nextId++
				canon.Insert(next)
				newStates.Insert(next)
			}
			ns.transitions[part] = next
		}
	}

	// Collect the canonical states into the id-indexed state table.
	dfa := &LR0Dfa{}
	dfa.states = make([]*LR0State, nextId)
	for x := canon.First(); x.HasNext(); {
		sx := x.Next().(*LR0State)
		dfa.states[sx.id] = sx
	}
	return dfa, nil
}
コード例 #5
0
ファイル: nnf.go プロジェクト: dtromb/parser
// IsNihilistic reports whether the grammar particle gp can only ever derive
// the empty string: either it is the epsilon particle itself, or the
// first/follow index records no "in" particles for it. An error is returned
// if the first/follow index cannot be obtained.
func IsNihilistic(gp parser.GrammarParticle) (bool, error) {
	if gp.Epsilon() {
		return true, nil
	}
	indexed := parser.GetIndexedGrammar(gp.Grammar())
	ffIf, err := indexed.GetIndex(index.FIRST_FOLLOW_INDEX)
	if err != nil {
		return false, err
	}
	ff := ffIf.(*index.FFIndex)
	return ff.NumIns(gp) == 0, nil
}
コード例 #6
0
ファイル: nnf.go プロジェクト: dtromb/parser
// IsNihilisticNormalForm reports whether grammar g is in nihilistic normal
// form: every nonterminal that can derive epsilon must be purely nihilistic
// (derive only the empty string). An error is returned if an index lookup or
// the per-nonterminal nihilism check fails.
func IsNihilisticNormalForm(g parser.Grammar) (bool, error) {
	indexed := parser.GetIndexedGrammar(g)
	basicIf, err := indexed.GetIndex(index.BASIC_INDEX)
	if err != nil {
		return false, err
	}
	basic := basicIf.(*index.BasicGrammarIndex)
	for _, nonterm := range indexed.Nonterminals() {
		nihilistic, err := IsNihilistic(nonterm)
		if err != nil {
			return false, err
		}
		// An epsilon-deriving nonterminal that is not purely nihilistic
		// violates normal form.
		if basic.Epsilon(nonterm) && !nihilistic {
			return false, nil
		}
	}
	return true, nil
}
コード例 #7
0
ファイル: lr0_test.go プロジェクト: dtromb/parser
// TestGrammarIndex builds the simple-BNF description grammar and verifies
// that a plain (non-epsilon-passing) LR(0) DFA can be constructed from it,
// dumping the states for inspection.
func TestGrammarIndex(t *testing.T) {
	// Build a simple BNF grammar description grammar.
	gb := parser.OpenGrammarBuilder()

	gb.Terminals("NONTERM", "COLEQ", "PIPE", "IDENTIFIER").
		Nonterminals("bnf", "ntdecl", "def", "ntort").
		Rule().Lhs("bnf").Rhs("ntdecl").
		Rule().Lhs("bnf").Rhs("ntdecl", "bnf").
		Rule().Lhs("ntdecl").Rhs("NONTERM", "COLEQ", "def").
		Rule().Lhs("ntdecl").Rhs("ntdecl", "PIPE", "def").
		Rule().Lhs("def").Rhs("ntort").
		Rule().Lhs("def").Rhs("ntort", "def").
		Rule().Lhs("ntort").Rhs("IDENTIFIER").
		Rule().Lhs("ntort").Rhs("NONTERM").
		Rule().Lhs("`*").Rhs("bnf", "`.").
		Name("simple-bnf")

	g, err := gb.Build()
	if err != nil {
		// Stop on build failure; the original fell through and would
		// have indexed a possibly-nil grammar.
		t.Error(err)
		return
	}
	grammar := parser.GetIndexedGrammar(g)
	// NOTE(review): %X on a particle value is an odd verb choice (go vet may
	// flag it); kept as-is since this is diagnostic output only.
	fmt.Printf("`*: 0x%8.8X\n", grammar.Asterisk())
	dfa, err := BuildDfa(grammar, false)

	if err != nil {
		t.Error(err)
		return
	}
	fmt.Printf("DFA has %d states\n", len(dfa.states))

	for i := 0; i < len(dfa.states); i++ {
		fmt.Println(dfa.states[i])
	}
	fmt.Println("Done.")
}
コード例 #8
0
ファイル: lexl.go プロジェクト: dtromb/parser
// GenerateLexer builds a Lexer for grammar by resolving each of the dfa's
// terminal names against the grammar's terminal index. It returns an error
// if the term index is unavailable or a terminal name cannot be resolved.
func (dfa *stdDfa) GenerateLexer(grammar parser.Grammar) (parser.Lexer, error) {
	ig := parser.GetIndexedGrammar(grammar)
	idxIf, err := ig.GetIndex(parser.GrammarIndexTypeTerm)
	if err != nil {
		return nil, err
	}
	terms := idxIf.(parser.TermGrammarIndex)
	// Diagnostic dump of the grammar's terminal names.
	for _, name := range terms.GetTerminalNames() {
		fmt.Println(" * " + name)
	}
	result := &stdDfaLexer{
		grammar:   grammar,
		dfa:       dfa,
		terminals: make([]parser.Term, len(dfa.terminals)),
	}
	// Map each DFA terminal slot to the grammar's corresponding term.
	for i, name := range dfa.terminals {
		term, err := terms.GetTerminal(name)
		if err != nil {
			return nil, err
		}
		result.terminals[i] = term
	}
	return result, nil
}
コード例 #9
0
ファイル: ffset.go プロジェクト: dtromb/parser
// Initialize computes the FIRST, LAST, IN and FOLLOW sets for every
// nonterminal of the context-free grammar g and stores them on the index.
// It returns an error for non-context-free grammars or failed index lookups.
func (ff *FFIndex) Initialize(g parser.Grammar) error {
	ff.grammar = g
	index := parser.GetIndexedGrammar(g)
	idx, err := index.GetIndex(GRAMMAR_CLASS_INDEX)
	if err != nil {
		return err
	}
	cidx := idx.(*GrammarClassIndex)
	if cidx.Class() >= parser.CONTEXT_SENSITIVE {
		return errors.New("cannot first/follow index a non-context-free grammar")
	}
	idx, err = index.GetIndex(BASIC_INDEX)
	// BUG FIX: the error must be checked BEFORE the type assertion; the
	// original asserted idx.(*BasicGrammarIndex) first, which panics on a
	// nil idx when GetIndex fails instead of returning the error.
	if err != nil {
		return err
	}
	bidx := idx.(*BasicGrammarIndex)

	// FIRST set calculation: terminals that can begin a derivation of nt.
	// Walks each production left-to-right, passing through epsilon-deriving
	// nonterminals.
	ff.firstSets = make(map[parser.GrammarParticle][]parser.GrammarParticle)
	for _, nt := range index.Nonterminals() {
		fs := tree.NewTree()
		ntseen := tree.NewTree()
		ntpending := []parser.GrammarParticle{nt}
		for len(ntpending) > 0 {
			cnt := ntpending[0]
			ntpending = ntpending[1:]
			for i := 0; i < bidx.NumLhsStarts(cnt); i++ {
				p := bidx.LhsStart(cnt, i)
				for j := 0; j < p.RhsLen(); j++ {
					rt := p.Rhs(j)
					if rt.Terminal() {
						fs.Insert(rt)
						break
					} else if rt.Nonterminal() {
						if _, has := ntseen.Lookup(c.LTE, rt); !has {
							ntseen.Insert(rt)
							fs.Insert(rt)
							ntpending = append(ntpending, rt)
						}
						// Only continue past rt if it can derive epsilon.
						if !bidx.Epsilon(rt) {
							break
						}
					} else {
						break
					}
				}
			}
		}
		ff.firstSets[nt] = make([]parser.GrammarParticle, 0, fs.Size())
		// Cursor renamed from `c` to avoid shadowing the comparator
		// package alias `c` used for c.LTE above.
		for cur := fs.First(); cur.HasNext(); {
			ff.firstSets[nt] = append(ff.firstSets[nt], cur.Next().(parser.GrammarParticle))
		}
	}

	// LAST set calculation: mirror of FIRST, walking right-to-left.
	ff.lastSets = make(map[parser.GrammarParticle][]parser.GrammarParticle)
	for _, nt := range index.Nonterminals() {
		fs := tree.NewTree()
		ntseen := tree.NewTree()
		ntpending := []parser.GrammarParticle{nt}
		for len(ntpending) > 0 {
			cnt := ntpending[0]
			ntpending = ntpending[1:]
			for i := 0; i < bidx.NumLhsStarts(cnt); i++ {
				p := bidx.LhsStart(cnt, i)
				for j := p.RhsLen() - 1; j >= 0; j-- {
					rt := p.Rhs(j)
					if rt.Terminal() {
						fs.Insert(rt)
						break
					}
					if rt.Nonterminal() {
						if _, has := ntseen.Lookup(c.LTE, rt); !has {
							ntseen.Insert(rt)
							fs.Insert(rt)
							ntpending = append(ntpending, rt)
							// NOTE(review): unlike the FIRST loop, the
							// epsilon/break check sits inside the !has
							// branch here — preserved as-is; confirm this
							// asymmetry is intentional.
							if !bidx.Epsilon(rt) {
								break
							}
						}
					}
				}
			}
		}
		ff.lastSets[nt] = make([]parser.GrammarParticle, 0, fs.Size())
		for cur := fs.First(); cur.HasNext(); {
			ff.lastSets[nt] = append(ff.lastSets[nt], cur.Next().(parser.GrammarParticle))
		}
	}

	// IN set calculation: every particle reachable anywhere inside a
	// derivation of nt (no early breaks).
	ff.inSets = make(map[parser.GrammarParticle][]parser.GrammarParticle)
	for _, nt := range index.Nonterminals() {
		fs := tree.NewTree()
		ntseen := tree.NewTree()
		ntpending := []parser.GrammarParticle{nt}
		for len(ntpending) > 0 {
			cnt := ntpending[0]
			ntpending = ntpending[1:]
			for i := 0; i < bidx.NumLhsStarts(cnt); i++ {
				p := bidx.LhsStart(cnt, i)
				for j := p.RhsLen() - 1; j >= 0; j-- {
					rt := p.Rhs(j)
					if rt.Terminal() {
						fs.Insert(rt)
					}
					if rt.Nonterminal() {
						if _, has := ntseen.Lookup(c.LTE, rt); !has {
							ntseen.Insert(rt)
							fs.Insert(rt)
							ntpending = append(ntpending, rt)
						}
					}
				}
			}
		}
		ff.inSets[nt] = make([]parser.GrammarParticle, 0, fs.Size())
		for cur := fs.First(); cur.HasNext(); {
			ff.inSets[nt] = append(ff.inSets[nt], cur.Next().(parser.GrammarParticle))
		}
	}

	// FOLLOW set calculation. First pass records direct followers within
	// each production and, for each production's last particle, which lhs
	// its FOLLOW set should inherit from.
	followRefs := make(map[parser.GrammarParticle]tree.Tree)
	followSets := make(map[parser.GrammarParticle]tree.Tree)
	for _, p := range g.Productions() { // First-pass.
		for i := 0; i < p.RhsLen()-1; i++ {
			for j := i + 1; j < p.RhsLen(); j++ {
				if _, has := followSets[p.Rhs(i)]; !has {
					followSets[p.Rhs(i)] = tree.NewTree()
				}
				followSets[p.Rhs(i)].Insert(p.Rhs(j))
				if !bidx.Epsilon(p.Rhs(j)) {
					break
				}
			}
		}
		tp := p.Rhs(p.RhsLen() - 1)
		if _, has := followRefs[tp]; !has {
			followRefs[tp] = tree.NewTree()
		}
		followRefs[tp].Insert(p.Lhs(0))
	}
	// Iterate to a fixed point, propagating FOLLOW sets through the
	// inheritance references recorded above.
	var changed bool = true
	for changed { // Take closure.
		changed = false
		for p, prt := range followRefs {
			for cr := prt.First(); cr.HasNext(); {
				fp := cr.Next().(parser.GrammarParticle) // x in Follow(fp) -> x in Follow(p)
				if fromSet, has := followSets[fp]; has {
					if _, has := followSets[p]; !has {
						followSets[p] = tree.NewTree()
					}
					for k := fromSet.First(); k.HasNext(); {
						x := k.Next().(parser.GrammarParticle)
						if _, has := followSets[p].Lookup(c.LTE, x); !has {
							changed = true
							followSets[p].Insert(x)
						}
					}
				}
			}
		}
	}
	ff.followSets = make(map[parser.GrammarParticle][]parser.GrammarParticle)
	for r, v := range followSets { // Collect results.
		ff.followSets[r] = make([]parser.GrammarParticle, 0, v.Size())
		for cur := v.First(); cur.HasNext(); {
			ff.followSets[r] = append(ff.followSets[r], cur.Next().(parser.GrammarParticle))
		}
	}

	return nil
}
コード例 #10
0
ファイル: earley.go プロジェクト: dtromb/parser
// BuildEpsilonLR0Dfa constructs an epsilon-LR0 DFA for the context-free
// grammar g. Each state that contains items whose caret sits before an
// epsilon-deriving particle is split into a base state plus an epsilon
// successor reached via an epsilon transition. Returns an error for
// non-context-free grammars or failed index/transition construction.
func BuildEpsilonLR0Dfa(g parser.Grammar) (*Elr0Dfa, error) {
	ig := parser.GetIndexedGrammar(g)
	idx, err := ig.GetIndex(index.BASIC_INDEX)
	if err != nil {
		return nil, err
	}
	bidx := idx.(*index.BasicGrammarIndex)
	idx, err = ig.GetIndex(index.GRAMMAR_CLASS_INDEX)
	if err != nil {
		return nil, err
	}
	cidx := idx.(*index.GrammarClassIndex)
	if !cidx.Class().ContextFree() {
		return nil, errors.New("cannot build an ε-lr0 dfa for a non-context-free grammar")
	}

	// advanceIndexForward moves an item's caret past an epsilon-deriving
	// particle; nil means the item cannot be advanced this way.
	advanceIndexForward := func(item *lr0.LR0Item) *lr0.LR0Item {
		if !item.HasNext() {
			return nil
		}
		if bidx.Epsilon(item.Caret()) {
			return item.Next()
		}
		return nil
	}

	canon := tree.NewTree()
	newStates := tree.NewTree()

	nextId := 1
	start := bidx.LhsStart(ig.Asterisk(), 0)
	initState := lr0.SeedState(0, start)
	lr0.CloseLR0State(initState, ig, false)
	// SplitLR0State returns nil when the state has no epsilon-advanceable
	// items, so eInit may legitimately be nil below.
	eInit := lr0.SplitLR0State(nextId, initState, advanceIndexSelect, advanceIndexForward)
	if eInit != nil {
		//nextId++  Tweaky juggling... this is the first new id, we want to use #1
		lr0.CloseLR0State(eInit, ig, true)
		lr0.SetTransition(initState, eInit, ig.Epsilon())
	}
	canon.Insert(initState)
	newStates.Insert(initState)
	//	if eInit != nil {
	//		canon.Insert(eInit)
	//		newStates.Insert(eInit)
	//	}

	fmt.Println("INIT STATE: " + initState.String())
	// BUG FIX: guard the debug print — eInit is nil when the init state
	// needed no epsilon split, and the original dereferenced it anyway.
	if eInit != nil {
		fmt.Println("INIT-e STATE: " + eInit.String())
	}
	for newStates.Size() > 0 {
		fmt.Printf("NS LEN: %d\n", newStates.Size())
		cs := newStates.First().Next().(*lr0.LR0State)
		_, has := newStates.Delete(cs)
		if !has {
			fmt.Println("LOOKING AT " + cs.String())
			tree.DumpTree(newStates.(*tree.AvlTree))
			panic("FAILED DELETE")
		}
		fmt.Printf("NS LEN after del : %d\n", newStates.Size())
		fmt.Println("TOOK STATE: " + cs.String())
		// BUG FIX: the original compared MakeTransitions' result to nil but
		// returned the stale outer `err` (nil here), losing the real error.
		if terr := lr0.MakeTransitions(cs, ig); terr != nil {
			return nil, terr
		}
		for part, next := range lr0.GetTransitions(cs) {
			fmt.Println("A")
			lr0.CloseLR0State(next, ig, false)
			if nxt, has := canon.Lookup(c.LTE, next); has {
				// Already-canonical target: just retarget the transition.
				next = nxt.(*lr0.LR0State)
				lr0.SetTransition(cs, next, part)
				continue
			}
			if !part.Epsilon() {
				fmt.Println("B?")
				eNext := lr0.SplitLR0State(nextId, next, advanceIndexSelect, advanceIndexForward)
				if eNext == nil {
					// No epsilon split needed; register next as a new state.
					fmt.Println("B-")
					canon.Insert(next)
					newStates.Insert(next)
					lr0.AssignId(next, nextId)
					nextId++
					continue
				}
				fmt.Println("B+")
				lr0.CloseLR0State(eNext, ig, true)
				if nxt, has := canon.Lookup(c.LTE, eNext); has {
					eNext = nxt.(*lr0.LR0State)
				} else {
					canon.Insert(eNext)
					newStates.Insert(eNext)
					nextId++
				}
				if nxt, has := canon.Lookup(c.LTE, next); has {
					next = nxt.(*lr0.LR0State)
				} else {
					canon.Insert(next)
					newStates.Insert(next)
					lr0.AssignId(next, nextId)
					nextId++
				}
				lr0.SetTransition(next, eNext, ig.Epsilon())
			} else {
				if nxt, has := canon.Lookup(c.LTE, next); has {
					next = nxt.(*lr0.LR0State)
				} else {
					canon.Insert(next)
					newStates.Insert(next)
					lr0.AssignId(next, nextId)
					nextId++
				}
				lr0.SetTransition(cs, next, part)
			}
		}
	}
	// Collect the canonical states into the id-indexed state table.
	dfa := &Elr0Dfa{}
	dfa.states = make([]*lr0.LR0State, nextId)
	for x := canon.First(); x.HasNext(); {
		sx := x.Next().(*lr0.LR0State)
		dfa.states[sx.Id()] = sx
	}
	return dfa, nil
}
コード例 #11
0
ファイル: nnf.go プロジェクト: dtromb/parser
// GetNihilisticAugmentGrammar transforms g into an augmented grammar in which
// each "exceptional" epsilon nonterminal (one that can derive epsilon but is
// not purely nihilistic) gets a parallel "<name>-ε" twin nonterminal, with
// every production expanded over all choices of replaced occurrences. It
// returns the augmented grammar together with a syntax-tree transform that
// maps parse trees of the augmented grammar back onto the original grammar's
// particles and productions.
func GetNihilisticAugmentGrammar(g parser.Grammar) (parser.Grammar, parser.SyntaxTreeTransform, error) {
	exceptionalEpsilons := []parser.GrammarParticle{}
	idxg := parser.GetIndexedGrammar(g)
	idx, err := idxg.GetIndex(index.BASIC_INDEX)
	if err != nil {
		return nil, nil, err
	}
	bidx := idx.(*index.BasicGrammarIndex)
	idx, err = idxg.GetIndex(index.NAME_INDEX)
	if err != nil {
		return nil, nil, err
	}
	nidx := idx.(*index.NameIndex)

	// Collect the epsilon-deriving nonterminals that are not purely
	// nihilistic; only these need augmented "-ε" twins.
	for _, nt := range idxg.Nonterminals() {
		n, err := IsNihilistic(nt)
		if err != nil {
			return nil, nil, err
		}
		if bidx.Epsilon(nt) && !n {
			exceptionalEpsilons = append(exceptionalEpsilons, nt)
		}
	}

	// Re-declare the original symbols in a fresh builder, skipping the
	// asterisk and epsilon particles (presumably supplied by the builder
	// itself — TODO confirm).
	gb := parser.OpenGrammarBuilder()
	for _, nt := range g.Nonterminals() {
		if nt.Asterisk() {
			continue
		}
		gb.Nonterminals(nt.Name())
	}
	for _, t := range g.Terminals() {
		if t.Epsilon() {
			continue
		}
		gb.Terminals(t.Name())
	}
	// augmentMap: original name -> twin name; invMap is its inverse.
	augmentMap := make(map[string]string)
	invMap := make(map[string]string)
	for _, e := range exceptionalEpsilons {
		eName := e.Name() + "-ε"
		augmentMap[e.Name()] = eName
		invMap[eName] = e.Name()
		gb.Nonterminals(eName)
		//gb.Rule().Lhs(eName).Rhs("`e")
	}
	// Expand each production of the original grammar. For a production with
	// k occurrences of exceptional epsilons on its rhs, emit one variant per
	// subset selection of those occurrences replaced by their twins.
	for _, nt := range g.Nonterminals() {
		for i := 0; i < bidx.NumLhsStarts(nt); i++ {
			prod := bidx.LhsStart(nt, i)
			//fmt.Printf("LHSTART(%s,%d): %s\n", nt.String(), i, prod.String())
			exIdx := []int{}
			rhs := []string{}
			for j := 0; j < prod.RhsLen(); j++ {
				t := prod.Rhs(j)
				if _, has := augmentMap[t.Name()]; has {
					exIdx = append(exIdx, j)
					//fmt.Println("exidx gets "+t.String())
				}
				rhs = append(rhs, t.Name())
			}
			if nt.Asterisk() { // Initial rule is special-cased.
				//fmt.Println("rule transfers:  "+prod.String())
				//fmt.Println("Args: {"+prod.Lhs(0).Name()+"}, {"+strings.Join(rhs,",")+"}")
				gb.Rule().Lhs(prod.Lhs(0).Name()).Rhs(rhs...)
				initSym := nidx.Nonterminal(rhs[0])
				if initSym != nil {
					if bidx.Epsilon(initSym) {
						gb.Rule().Lhs(initSym.Name()).Rhs(augmentMap[initSym.Name()])
					}
				}
			} else {
				rhsinst := make([]string, len(rhs))
				//fmt.Printf("index len %d\n", len(exIdx))
				// Enumerate all subsets of exceptional positions.
				s := cnt.FirstSelection(uint(len(exIdx)))
				for {
					// nnCount counts rhs symbols that are NOT nihilistic;
					// replaced occurrences are subtracted below, and if none
					// remain the whole variant derives only epsilon, so its
					// lhs becomes the twin nonterminal.
					nnCount := 0
					for j := 0; j < len(rhs); j++ {
						rhsinst[j] = rhs[j]
						if nidx.Terminal(rhs[j]) != nil {
							nnCount++
						} else {
							nt := nidx.Nonterminal(rhs[j])
							nihil, err := IsNihilistic(nt)
							if err != nil {
								return nil, nil, err
							}
							if !nihil {
								nnCount++
							}
						}
					}
					copy(rhsinst, rhs)
					for j := 0; j < len(exIdx); j++ {
						if s.Test(j) {
							//fmt.Printf("idx %d replacement: %s->%s\n", exIdx[j], rhsinst[exIdx[j]], augmentMap[rhsinst[exIdx[j]]])
							rhsinst[exIdx[j]] = augmentMap[rhsinst[exIdx[j]]]
							nnCount--
						}
					}
					var head string
					if nnCount == 0 {
						head = augmentMap[prod.Lhs(0).Name()]
					} else {
						head = prod.Lhs(0).Name()
					}
					//fmt.Println("rule transforms:  "+prod.String())
					//fmt.Println("Args: {"+head+"}, {"+strings.Join(rhs,",")+"}")
					gb.Rule().Lhs(head).Rhs(rhsinst...)
					if s.HasNext() {
						s = s.Next()
					} else {
						break
					}
				}
			}
		}
	}
	gb.Name(g.Name() + "-ε")
	augmentedGrammar, err := gb.Build()
	if err != nil {
		return nil, nil, err
	}
	augidx := parser.GetIndexedGrammar(augmentedGrammar)
	idx, err = augidx.GetIndex(index.NAME_INDEX)
	if err != nil {
		return nil, nil, err
	}
	anidx := idx.(*index.NameIndex)

	idx, err = augidx.GetIndex(index.BASIC_INDEX)
	if err != nil {
		return nil, nil, err
	}
	abidx := idx.(*index.BasicGrammarIndex)

	// Map each augmented twin particle back to its original nonterminal.
	reverseMap := make(map[parser.GrammarParticle]parser.GrammarParticle)
	for k, v := range augmentMap {
		reverseMap[anidx.Nonterminal(v)] = nidx.Nonterminal(k)
	}
	// prodMap: augmented production -> the original production it came from,
	// found by matching the de-augmented rhs names plus the lhs.
	prodMap := make(map[parser.Production]parser.Production)
	for _, p := range augmentedGrammar.Productions() {

		// Ignore the special case start rule for grammars that accept nil input.
		if p.LhsLen() == 1 && p.RhsLen() == 1 {
			initSym := abidx.LhsStart(augmentedGrammar.Asterisk(), 0).Rhs(0)
			if p.Lhs(0) == initSym && invMap[p.Rhs(0).Name()] == initSym.Name() {
				continue
			}
		}
		if p.RhsLen() == 1 && p.Rhs(0).Epsilon() {
			continue
		}
		rhs := make([]string, 0, p.RhsLen())
		for i := 0; i < p.RhsLen(); i++ {
			rp := p.Rhs(i)
			if ot, has := reverseMap[rp]; has {
				rhs = append(rhs, ot.Name())
			} else {
				rhs = append(rhs, rp.Name())
			}
		}
		//fmt.Printf("Searching for preimage rhs %s\n", strings.Join(rhs,","))
		var target parser.Production
		for _, cp := range nidx.RhsNames(rhs) {
			//fmt.Println("  considering "+cp.String())
			//for _, p := range g.Productions() {
			//fmt.Println(p.String())
			//}
			if nt, has := reverseMap[p.Lhs(0)]; has {
				if cp.Lhs(0) == nt {
					target = cp
					break
				}
			} else {
				if cp.Lhs(0).Name() == p.Lhs(0).Name() {
					target = cp
					break
				}
			}
		}
		if target == nil {
			return nil, nil, errors.New("Could not find preimage of augmented production rule: " + p.String())
		}
		prodMap[p] = target
	}
	// rtrans recursively rewrites an augmented-grammar syntax tree node into
	// one over the original grammar, swapping twin particles and productions
	// back via reverseMap/prodMap while preserving token spans and values.
	var rtrans func(treeNode parser.SyntaxTreeNode) (parser.SyntaxTreeNode, error)

	rtrans = func(treeNode parser.SyntaxTreeNode) (parser.SyntaxTreeNode, error) {
		part := treeNode.Part()
		if op, has := reverseMap[part]; has {
			part = op
		}
		exp := make([]parser.SyntaxTreeNode, treeNode.NumChildren())
		for i := 0; i < len(exp); i++ {
			st := treeNode.Child(i)
			if _, has := reverseMap[st.Part()]; has {
				r, err := rtrans(st)
				if err != nil {
					return nil, err
				}
				exp[i] = r
			} else {
				exp[i] = st
			}
		}
		return &parser.BasicSyntaxTreeNode{
			Particle:       part,
			FirstTokenIdx:  treeNode.First(),
			LastTokenIdx:   treeNode.Last(),
			SyntacticValue: treeNode.Value(),
			Prod:           prodMap[treeNode.Rule()],
			Expansion:      exp,
		}, nil
	}
	return augmentedGrammar, rtrans, nil
}
コード例 #12
0
ファイル: index_test.go プロジェクト: dtromb/parser
// TestGrammarIndex builds the simple-BNF description grammar and exercises
// the indexed-grammar facilities: the basic index (RHS starts/ends/contains),
// the grammar-class index, and the first/follow index, dumping each result.
func TestGrammarIndex(t *testing.T) {
	// Build a simple BNF grammar description grammar.
	gb := parser.OpenGrammarBuilder()

	gb.Terminals("NONTERM", "COLEQ", "PIPE", "IDENTIFIER").
		Nonterminals("bnf", "ntdecl", "def", "ntort").
		Rule().Lhs("bnf").Rhs("ntdecl").
		Rule().Lhs("bnf").Rhs("ntdecl", "bnf").
		Rule().Lhs("ntdecl").Rhs("NONTERM", "COLEQ", "def").
		Rule().Lhs("ntdecl").Rhs("ntdecl", "PIPE", "def").
		Rule().Lhs("def").Rhs("ntort").
		Rule().Lhs("def").Rhs("ntort", "def").
		Rule().Lhs("ntort").Rhs("IDENTIFIER").
		Rule().Lhs("ntort").Rhs("NONTERM").
		Rule().Lhs("`*").Rhs("bnf", "`.").
		Name("simple-bnf")

	g, err := gb.Build()
	if err != nil {
		// Stop on build failure; the original fell through and would have
		// dereferenced a possibly-nil grammar below.
		t.Error(err)
		return
	}
	grammar := parser.GetIndexedGrammar(g)

	fmt.Println("Name: " + grammar.Name())
	terms := make([]string, grammar.NumTerminals())
	for i, t := range grammar.Terminals() {
		terms[i] = t.String()
	}
	nterms := make([]string, grammar.NumNonterminals())
	for i, t := range grammar.Nonterminals() {
		nterms[i] = t.String()
	}
	fmt.Println("Terminals: " + strings.Join(terms, ", "))
	fmt.Println("Nonterminals: " + strings.Join(nterms, ", "))
	fmt.Println("Productions:")
	for _, p := range grammar.Productions() {
		fmt.Println("   " + p.String())
	}

	idx, err := grammar.GetIndex("basic")
	if err != nil {
		// Report the actual error; the original bare t.Error() lost it.
		t.Error(err)
		return
	}
	basicIndex := idx.(*BasicGrammarIndex)
	fmt.Println("Basic index type: '" + basicIndex.IndexName() + "'")
	fmt.Println("Production RHS starts: ")
	for idx := 0; idx < grammar.NumTerminals(); idx++ {
		term := grammar.Terminal(idx)
		starts := basicIndex.RhsStarts(term)
		if len(starts) == 0 {
			continue
		}
		fmt.Println("  " + term.String() + ":")
		for _, p := range starts {
			fmt.Println("   " + p.String())
		}
	}
	fmt.Println("\nProduction RHS ends: ")
	for idx := 0; idx < grammar.NumTerminals(); idx++ {
		term := grammar.Terminal(idx)
		starts := basicIndex.RhsEnds(term)
		if len(starts) == 0 {
			continue
		}
		fmt.Println("  " + term.String() + ":")
		for _, p := range starts {
			fmt.Println("   " + p.String())
		}
	}
	fmt.Println("\nProduction RHS contains: ")
	for idx := 0; idx < grammar.NumTerminals(); idx++ {
		term := grammar.Terminal(idx)
		starts := basicIndex.RhsContains(term)
		if len(starts) == 0 {
			continue
		}
		fmt.Println("  " + term.String() + ":")
		for _, p := range starts {
			fmt.Println("   " + p.String())
		}
	}
	fmt.Println("Grammar class:")
	idx, err = grammar.GetIndex(GRAMMAR_CLASS_INDEX)
	if err != nil {
		t.Error(err)
		return
	}
	gcidx := idx.(*GrammarClassIndex)
	fmt.Println("       type: " + gcidx.Class().String())
	fmt.Println(" regularity: " + gcidx.Regularity().String())
	idx, err = grammar.GetIndex(FIRST_FOLLOW_INDEX)
	if err != nil {
		t.Error(err)
		return
	}
	ffidx := idx.(*FFIndex)
	fmt.Println("FIRST(x): ")
	for _, p := range g.Nonterminals() {
		fmt.Println("  " + p.String())
		for _, k := range ffidx.Firsts(p) {
			fmt.Println("    " + k.String())
		}
	}
	fmt.Println("FOLLOW(x): ")
	for _, p := range g.Nonterminals() {
		fmt.Println("  " + p.String())
		for _, k := range ffidx.Follows(p) {
			fmt.Println("    " + k.String())
		}
	}
	for _, p := range g.Terminals() {
		fmt.Println("  " + p.String())
		for _, k := range ffidx.Firsts(p) {
			fmt.Println("    " + k.String())
		}
	}
}