From 2add8f71fb5604d86b232a981fcb441a9d171e62 Mon Sep 17 00:00:00 2001
From: Kia <kia@special-circumstanc.es>
Date: Tue, 19 Jan 2021 00:08:02 -0700
Subject: [PATCH] start work on state machine, based on previous iteration of
 the parser

---
 unoptimized_lr/simple_lr_automaton.py | 288 ++++++++++++++++++++++++++
 1 file changed, 288 insertions(+)
 create mode 100644 unoptimized_lr/simple_lr_automaton.py

diff --git a/unoptimized_lr/simple_lr_automaton.py b/unoptimized_lr/simple_lr_automaton.py
new file mode 100644
index 0000000..574af2b
--- /dev/null
+++ b/unoptimized_lr/simple_lr_automaton.py
@@ -0,0 +1,288 @@
+from nmigen import *
+from nmigen.cli import main
+
+
+from functools import reduce
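+
+# NOTE: this file leans on helper Elaboratables from the previous iteration of
+# the parser (StacksInSync, HitOrMiss, RuleExecutor, RegisteredSkidBuffer,
+# TreeSerializer, GOTOtable).  They are not imported yet, so the module will not
+# elaborate on its own until those imports are added.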
+
+
+# We know this is unlikely to synthesize well, but we implement it here to get a
+# gold-standard/reference implementation against which we can test our further
+# optimizations, including compression and non-standard representations.
+
+# TODO PLAN:
+# 1) get this working in simulation
+# 2) make it synthesizable (so it can use FPGA block RAM with reasonable bit-width)
+# 3) make it optimized
+
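+# Step (1) of the plan above is simulation.  The helper below is a minimal,
+# hypothetical harness sketch (not the project's actual test setup) showing how
+# we expect to drive an Elaboratable in simulation; it assumes a recent nMigen
+# that provides nmigen.sim.  `process` is a generator function that drives and
+# samples the DUT's signals.
+def simulate(dut, process, vcd_name="dump.vcd", clock_period=1e-6):
+    from nmigen.sim import Simulator
+
+    sim = Simulator(dut)
+    sim.add_clock(clock_period)      # drive the "sync" clock domain
+    sim.add_sync_process(process)    # generator function stepping the DUT
+    with sim.write_vcd(vcd_name):    # dump a waveform for inspection
+        sim.run()
+
+# Once the helper submodules noted above are importable, the same harness can be
+# pointed at MasterStateMachine (or at DummyPlug at the bottom of this file).
+
+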
+class LRAutomaton(Elaboratable):
+    def __init__(self, table_width, table_depth, rasterized):
+        self.table_width = table_width
+        self.table_depth = table_depth
+
+        # Rasterized (flattened) table storage
+        self.mem = Memory(width=self.table_width, depth=self.table_depth, init=rasterized)
+
+
+    def elaborate(self, platform):
+        m = Module()
+        m.submodules.rport = rport = self.mem.read_port()
+        m.submodules.wport = wport = self.mem.write_port()
+
+
+        return m
+
+
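+# The MasterStateMachine below decouples its input stream from the parsing FSM
+# through a one-deep skid buffer.  The real RegisteredSkidBuffer comes from the
+# previous parser iteration and is not defined here; the class below is only a
+# hypothetical sketch of such a buffer, using the port names the state machine
+# wires up, so the handshake is easier to follow.  It registers only the "skid"
+# word that arrives while downstream is stalled and leaves the data path
+# combinational.
+class SkidBufferSketch(Elaboratable):
+    def __init__(self, width):
+        # Upstream (producer-facing) handshake
+        self.upstream_data_in   = Signal(width)
+        self.upstream_valid_in  = Signal(1)
+        self.upstream_ready_out = Signal(1)
+
+        # Downstream (consumer-facing) handshake
+        self.downstream_data_out  = Signal(width)
+        self.downstream_valid_out = Signal(1)
+        self.downstream_ready_in  = Signal(1)
+
+    def elaborate(self, platform):
+        m = Module()
+
+        # One spare register absorbs the word that arrives on the cycle in which
+        # the downstream side deasserts ready.
+        skid_data  = Signal.like(self.upstream_data_in)
+        skid_valid = Signal(1)
+
+        m.d.comb += self.upstream_ready_out.eq(~skid_valid)
+        m.d.comb += self.downstream_valid_out.eq(skid_valid | self.upstream_valid_in)
+        m.d.comb += self.downstream_data_out.eq(Mux(skid_valid, skid_data, self.upstream_data_in))
+
+        with m.If(skid_valid & self.downstream_ready_in):
+            # The parked word finally drains downstream.
+            m.d.sync += skid_valid.eq(0)
+        with m.Elif(~skid_valid & self.upstream_valid_in & ~self.downstream_ready_in):
+            # Downstream stalled while upstream presented a word: park it.
+            m.d.sync += skid_data.eq(self.upstream_data_in)
+            m.d.sync += skid_valid.eq(1)
+
+        return m
+
+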
+class MasterStateMachine(Elaboratable):
+    def __init__(self, item_width, indices_width, stack_depth, serialized_tree_length,
+                 stack_state_descriptions, validitem_ruleset, forceshift_ruleset,
+                 pairwise_priority_ruleset, reduce_ruleset, execute_rules,
+                 startofparse_marker, endofparse_marker):
+        self.item_width    = item_width
+        self.stack_depth   = stack_depth
+        self.indices_width = indices_width
+
+        # Rule pack
+        self.validitem_ruleset         = validitem_ruleset
+        self.forceshift_ruleset        = forceshift_ruleset
+        self.pairwise_priority_ruleset = pairwise_priority_ruleset
+        self.reduce_ruleset            = reduce_ruleset
+        self.execute_rules             = execute_rules
+        self.stack_state_descriptions  = stack_state_descriptions
+
+        self.endofparse_marker = endofparse_marker
+        self.startofparse_marker = startofparse_marker
+
+        # Data stream in
+        self.data_in = Signal(item_width)
+        self.data_in_valid = Signal(1)
+        self.data_in_ready = Signal(1)
+
+        # Data stream out
+        #self.data_out = Signal(item_width)
+        #self.data_out_valid = Signal(1)
+        #self.data_out_ready = Signal(1)
+
+        # Sideband signals
+        self.parse_complete_out = Signal(1)
+        self.parse_success_out  = Signal(1)
+        self.internal_fault     = Signal(1)
+        self.last_index_to_smem = Signal(32)
+        self.serializer = TreeSerializer(item_width=self.item_width,
+                                         indices_width=(self.indices_width+1),
+                                         stack_depth=self.stack_depth,
+                                         serialized_tree_length=serialized_tree_length,
+                                         serializing_ruleset=[])
+        self.tapir = self.serializer.mem
+
+
+
+
+    def elaborate(self, platform):
+        m = Module()
+        #stack         = InspectableStack(item_width=self.item_width,    stack_depth=self.stack_depth)
+        doublestacks  = StacksInSync(bigstack_item_width=self.item_width, sidestack_item_width=self.indices_width + 1, stack_depth = self.stack_depth)
+
+        rule_matcher = HitOrMiss(item_width=self.item_width, stack_depth=self.stack_depth,
+            validitem_ruleset         = self.validitem_ruleset,
+            forceshift_ruleset        = self.forceshift_ruleset,
+            pairwise_priority_ruleset = self.pairwise_priority_ruleset,
+            reduce_ruleset            = self.reduce_ruleset,
+            reduction_rule_count=len(self.execute_rules), stack_state_descriptions = self.stack_state_descriptions, endofparse_marker = self.endofparse_marker)
+        rex = RuleExecutor(item_width=self.item_width, stack_depth=self.stack_depth, execution_ruleset=self.execute_rules)
+        skbuffer = RegisteredSkidBuffer(width = self.item_width)
+#        serializer = TreeSerializer(item_width=self.item_width, indices_width=(self.indices_width+1), stack_depth=self.stack_depth, serializing_ruleset=[])
+        #m.submodules.Stack = stack
+        m.submodules.Stacks = doublestacks
+        m.submodules.RuleMatcher = rule_matcher
+        m.submodules.RuleExecute = rex
+        m.submodules.skidbuffer = skbuffer
+
+        serializer = m.submodules.Serializer = self.serializer
+
+
+        # Skid buffer
+        fsm_ready = Signal(1)
+        new_item_valid = Signal(1)
+        new_item = Signal(self.item_width)
+
+
+        m.d.comb += skbuffer.upstream_valid_in.eq(self.data_in_valid)
+        m.d.comb += self.data_in_ready.eq(skbuffer.upstream_ready_out)
+        m.d.comb += skbuffer.upstream_data_in.eq(self.data_in)
+
+        m.d.comb += skbuffer.downstream_ready_in.eq(fsm_ready)
+        m.d.comb += new_item.eq(skbuffer.downstream_data_out)
+        m.d.comb += new_item_valid.eq(skbuffer.downstream_valid_out)
+
+
+        execution_result = Signal(self.item_width)
+        number_to_pop = Signal(8)
+
+
+        m.d.comb += rex.match_index_in.eq(rule_matcher.match_index_out)
+        m.d.comb += rule_matcher.occupancy_bitmap_in.eq(doublestacks.occupancy_bitmap)
+        for idx, x in enumerate(doublestacks.bigperm_out):
+            m.d.comb += rule_matcher.stack_view_in[idx].eq(x)
+            m.d.comb += rex.stack_view_in[idx].eq(x)
+
+        m.d.comb += rule_matcher.new_item_in.eq(new_item)
+
+
+        # Rule Matcher output signals are:
+        #
+        # internal_fault     (one bit)
+        # invalid_item       (one bit)
+        # force_shift        (one bit)
+        # endofparse_reached (one bit)
+        # match_index_out    (one-hot)
+
+
+        # LR parser state machine
+        with m.FSM() as fsm:
+
+            with m.State("INITIALIZE"):
+                m.next = "SHIFTREDUCE"
+                m.d.comb += fsm_ready.eq(0)
+                m.d.comb += doublestacks.command_in_strobe.eq(1)
+                m.d.comb += doublestacks.big_push_port.eq(self.startofparse_marker)
+                m.d.comb += doublestacks.side_push_port.eq(0x0) # XXX FIXME
+                m.d.comb += doublestacks.command_in.eq(2)
+
+                with m.If((doublestacks.internal_fault | serializer.internal_fault) == 1):
+                    m.next = "ABORT"
+
+
+            with m.State("SHIFTREDUCE"):
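+                # Note: the m.If blocks below are not mutually exclusive.  nMigen
+                # resolves overlaps by letting the statements added last win, so
+                # when several conditions are true the later block takes effect
+                # (the end-of-parse check at the bottom overrides everything,
+                # and a pending reduce overrides a forced shift).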
+                with m.If(new_item_valid == 1):
+                    with m.If(rule_matcher.internal_fault == 1):
+                        m.next = "ABORT"
+                        m.d.comb += fsm_ready.eq(0)
+                        m.d.comb += doublestacks.command_in_strobe.eq(0)
+
+                    with m.If((doublestacks.internal_fault | serializer.internal_fault) == 1):
+                        m.next = "ABORT"
+
+                    with m.If(rule_matcher.invalid_item   == 1):
+                        m.next = "ABORT"
+                        m.d.comb += fsm_ready.eq(0)
+                        m.d.comb += doublestacks.command_in_strobe.eq(0)
+
+                    with m.If(rule_matcher.force_shift    == 1):
+                        m.d.comb += doublestacks.command_in.eq(2)
+                        m.d.comb += doublestacks.command_in_strobe.eq(1)
+                        m.d.comb += doublestacks.big_push_port.eq(new_item)
+                        m.d.comb += doublestacks.side_push_port.eq(0x0) # XXX FIXME
+                        m.d.comb += fsm_ready.eq(1)
+
+                    with m.If((rule_matcher.force_shift == 0) & (rule_matcher.match_index_out == 0)):
+                        m.next = "ABORT"
+                        m.d.comb += fsm_ready.eq(0)
+                        m.d.comb += doublestacks.command_in_strobe.eq(0)
+
+                    with m.If((serializer.ready_out == 1) & (rule_matcher.match_index_out != 0)):
+                        m.next = "REDUCE"
+                        m.d.comb += fsm_ready.eq(0)
+                        #m.d.comb += stack.command_in_strobe.eq(0)
+                        # register the data from the combinatorial CAMs since the stack will change when we manipulate it
+
+                        m.d.sync += number_to_pop.eq(rex.number_to_pop - 1)
+                        m.d.sync += execution_result.eq(rex.created_item)
+
+
+                        m.d.comb += serializer.start_reduction.eq(1)
+                        m.d.comb += serializer.destroyed_item_valid_in.eq(1)
+
+                        m.d.comb += serializer.destroyed_item_in.eq(doublestacks.bigpop_port)
+                        m.d.comb += serializer.destroyed_item_index_in.eq(doublestacks.sidepop_port)
+
+
+                        m.d.comb += serializer.number_to_pop.eq(rex.number_to_pop)
+                        m.d.comb += serializer.item_created_by_reduce_rule.eq(rex.created_item)
+                        m.d.comb += serializer.reduce_rule_number.eq(rule_matcher.match_index_out)
+
+                        with m.If(rex.number_to_pop != 0):
+                            m.d.comb += doublestacks.command_in.eq(1)
+                            m.d.comb += doublestacks.command_in_strobe.eq(1)
+
+                    with m.If(rule_matcher.endofparse_reached == 1):
+                        m.next = "SUCCESS"
+                        m.d.comb += self.internal_fault.eq(0)
+                        m.d.comb += self.parse_success_out.eq(1)
+                        m.d.comb += self.parse_complete_out.eq(1)
+
+            with m.State("SUCCESS"):
+                m.next = "SUCCESS"
+                m.d.comb += self.internal_fault.eq(0)
+                m.d.comb += self.parse_success_out.eq(1)
+                m.d.comb += self.parse_complete_out.eq(1)
+                m.d.sync += self.last_index_to_smem.eq(serializer.serialized_index)
+
+                with m.If((doublestacks.internal_fault | serializer.internal_fault) == 1):
+                    m.next = "ABORT"
+
+
+            with m.State("REDUCE"):
+                m.d.comb += fsm_ready.eq(0)
+
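+                # Pop the matched handle one item per cycle, streaming each popped
+                # item (and its side-stack index) into the serializer; once the pop
+                # count is exhausted, push the nonterminal created by the reduce
+                # rule, tagged with its serializer index, and go back to shifting.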
+                # FIXME XXX make this into an FSM
+                with m.If(number_to_pop != 0):
+
+                    m.d.comb += serializer.destroyed_item_valid_in.eq(1)
+                    m.d.comb += serializer.destroyed_item_in.eq(doublestacks.bigpop_port)
+                    m.d.comb += serializer.destroyed_item_index_in.eq(doublestacks.sidepop_port)
+
+                    m.d.comb += doublestacks.command_in.eq(1)
+                    m.d.comb += doublestacks.command_in_strobe.eq(1)
+                    m.d.sync += number_to_pop.eq(number_to_pop - 1)
+
+                with m.If(number_to_pop == 0):
+                    m.d.comb += doublestacks.command_in.eq(2)
+                    m.d.comb += doublestacks.big_push_port.eq(execution_result)
+                    m.d.comb += doublestacks.side_push_port.eq(serializer.serialized_index | (1<<(self.indices_width)))
+                    m.d.comb += doublestacks.command_in_strobe.eq(1)
+
+#                    m.d.comb += fsm_ready.eq(1)
+                    
+                    m.next = "SHIFTREDUCE"
+
+                with m.If((doublestacks.internal_fault | serializer.internal_fault) == 1):
+                    m.next = "ABORT"
+
+
+            with m.State("ABORT"):
+                m.d.comb += fsm_ready.eq(0)
+                m.d.comb += doublestacks.command_in_strobe.eq(0)
+
+                m.d.comb += self.internal_fault.eq(1)
+                m.d.comb += self.parse_success_out.eq(0)
+                m.d.comb += self.parse_complete_out.eq(1)
+
+        return m
+
+
+
+
+
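+# DummyPlug below pokes a GOTO table (the LR table mapping (state, nonterminal)
+# pairs to the next automaton state).  The real GOTOtable class comes from the
+# previous parser iteration; the class below is only a hypothetical sketch of
+# what it might look like, inferred from the call GOTOtable(3, 3, [[...], ...])
+# and the in_valid/state_in/nonterminal_in ports driven below.  The output port
+# names (state_out, out_valid) and the 8-bit signal widths are guesses.
+class GOTOtableSketch(Elaboratable):
+    def __init__(self, n_states, n_nonterminals, table):
+        self.n_nonterminals = n_nonterminals
+
+        # Rasterize (flatten) the 2-D table row-major, so that
+        # address = state * n_nonterminals + nonterminal.
+        rasterized = [entry for row in table for entry in row]
+        self.mem = Memory(width=8, depth=n_states * n_nonterminals, init=rasterized)
+
+        self.in_valid       = Signal(1)
+        self.state_in       = Signal(8)
+        self.nonterminal_in = Signal(8)
+        self.state_out      = Signal(8)
+        self.out_valid      = Signal(1)
+
+    def elaborate(self, platform):
+        m = Module()
+        m.submodules.rport = rport = self.mem.read_port()
+
+        m.d.comb += rport.addr.eq(self.state_in * self.n_nonterminals + self.nonterminal_in)
+        m.d.comb += self.state_out.eq(rport.data)
+        # The synchronous read port has one cycle of latency, so delay the
+        # valid flag to line up with the data.
+        m.d.sync += self.out_valid.eq(self.in_valid)
+
+        return m
+
+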
+class DummyPlug(Elaboratable):
+
+    #def __init__(self):
+
+
+
+    def elaborate(self, platform):
+        m = Module()
+
+        m.submodules.table = table = GOTOtable(3,3,[[1,2,3],[4,5,6],[7,0,1]])
+        counter = Signal(8)
+        m.d.sync += counter.eq(counter+1)
+
+        with m.If(counter == 3):
+            m.d.comb += table.in_valid.eq(1)
+            m.d.comb += table.state_in.eq(0)
+            m.d.comb += table.nonterminal_in.eq(0)
+        with m.If(counter == 4):
+            m.d.comb += table.in_valid.eq(1)
+            m.d.comb += table.state_in.eq(0)
+            m.d.comb += table.nonterminal_in.eq(1)
+        with m.If(counter == 6):
+            m.d.comb += table.in_valid.eq(1)
+            m.d.comb += table.state_in.eq(2)
+            m.d.comb += table.nonterminal_in.eq(0)
+        return m
+
+
+
+if __name__ == '__main__':
+    baka = DummyPlug()
+    main(baka)
+    #platform.build(DummyPlug())
-- 
GitLab