diff --git a/rtl_lib/arbitrary_width_memory.py b/rtl_lib/arbitrary_width_memory.py
index 81b3f2f37df3a8c428f751df0b906cfd029829fa..26e0111aab355859176009496bc420c6c11a1ce8 100644
--- a/rtl_lib/arbitrary_width_memory.py
+++ b/rtl_lib/arbitrary_width_memory.py
@@ -77,7 +77,7 @@ class ArbitraryWidthMemory(Elaboratable):
         unwrapped_bit_index = Signal(range(self.backing_memory_length*self.backing_memory_data_width)) #probably cannot be made shorter.
-        left_bit_index = Signal(self.backing_memory_data_width)
+        LS_bit_index = Signal(self.backing_memory_data_width)
         right_bit_index = Signal(self.backing_memory_data_width)
         end_bit_pseudo_index = Signal(range(self.backing_memory_length*self.backing_memory_data_width)) # can be made shorter
         additional_words = Signal(self.backing_memory_address_width) # can also be amde shorter
@@ -116,11 +116,11 @@ class ArbitraryWidthMemory(Elaboratable):
 
         m.d.sync += shreg.eq(shreg << (shreg_new_bits) | current_slice)
 
-        m.d.comb += lower_bits_cut.eq(read_port.data>>left_bit_index)
+        m.d.comb += lower_bits_cut.eq(read_port.data>>LS_bit_index)
 
         m.d.comb += current_slice.eq(((lower_bits_cut<<right_bit_index)&0xff)>>right_bit_index)
 
-        m.d.comb += shreg_new_bits.eq(right_bit_index-left_bit_index+1)
+        m.d.comb += shreg_new_bits.eq(right_bit_index-LS_bit_index+1)
 
         # is like a gear box but with variable upstream elnght
 
@@ -159,11 +159,11 @@ class ArbitraryWidthMemory(Elaboratable):
         # We start our cut at the unwrapped bit index modulo the memory data width.
         # since the memory data width is a power of two, this is the K-least-significant-bits
         # of the unwrapped bit index
-        m.d.comb += left_bit_index.eq(unwrapped_bit_index[:self.backing_memory_data_width_bits])
+        m.d.comb += LS_bit_index.eq(unwrapped_bit_index[:self.backing_memory_data_width_bits])
 
         # Here's where they start trying to trick you. We need to handle the case where the end of the
         # fake word goes beyond a real memory word.
-        m.d.comb += end_bit_pseudo_index.eq(left_bit_index + self.fake_data_width - 1)
+        m.d.comb += end_bit_pseudo_index.eq(LS_bit_index + self.fake_data_width - 1)
 
         # So here we determine if there's any need for additional memory words:
         m.d.comb += additional_words.eq(end_bit_pseudo_index[self.backing_memory_data_width_bits:])
@@ -190,14 +190,14 @@ class ArbitraryWidthMemory(Elaboratable):
         # we handle both the full-word fetches and the final (potentially partial word) fetch here
         with m.If(additional_words_regd == 1):
             # We start from zero...
-            m.d.comb += left_bit_index.eq(0)
+            m.d.comb += LS_bit_index.eq(0)
             # But this is the last word, so we may not have to include the whole word!
             m.d.comb += right_bit_index.eq(end_bit_pseudo_index_regd[:self.backing_memory_data_width_bits])
             m.d.comb += fetch_address.eq(next_address)
             m.next = "STALL"
         with m.Else():
             # non-special case, fetch the whole word
-            m.d.comb += left_bit_index.eq(0)
+            m.d.comb += LS_bit_index.eq(0)
             m.d.comb += right_bit_index.eq(self.backing_memory_data_width-1)
             # and increment the address and decrement the remaining words counter
             m.d.sync += next_address.eq(next_address + 1)
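
A short plain-Python sketch of the index arithmetic that the renamed signal participates in, for sanity-checking the rename against the comments above. This is only an illustration under the power-of-two word-width assumption stated in the file; the names below (slice_indices, data_width_bits, and so on) are placeholders for this note and are not part of the patch.

    # Sketch: how LS_bit_index, end_bit_pseudo_index and additional_words relate.
    def slice_indices(unwrapped_bit_index, fake_data_width, data_width_bits):
        data_width = 1 << data_width_bits                      # backing word width, a power of two
        ls_bit_index = unwrapped_bit_index & (data_width - 1)  # the K least-significant bits, i.e. index mod data_width
        end_bit_pseudo_index = ls_bit_index + fake_data_width - 1
        additional_words = end_bit_pseudo_index >> data_width_bits
        return ls_bit_index, end_bit_pseudo_index, additional_words

    # Example: 8-bit backing words (data_width_bits=3), 13-bit fake words.
    # A fake word starting at unwrapped bit 10 begins at bit 2 of its backing word
    # and spills into one additional backing word.
    assert slice_indices(10, 13, 3) == (2, 14, 1)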