diff --git a/gdb-port/top-level-parse.py b/gdb-port/top-level-parse.py
index d3ca70b0afaccafbb186ce8529f84770cbece4c5..79684a5575759b0af5d49265beb6f741e7d2d0b6 100644
--- a/gdb-port/top-level-parse.py
+++ b/gdb-port/top-level-parse.py
@@ -408,9 +408,16 @@ class TopLevelParse:
 		print("h=len(charbuf):", len(charbuf), "w :", w)
 		tokenmap_values_sorted = sorted(tokenmap_val_list, key=lambda token: (token['start'], token['end']))
 		print("len(tokenmap_values_sorted):", len(tokenmap_values_sorted), "rows:", numrows)
-		for i in range(0, numrows):
+		hpr_strings = []
+		for i in range(0, numrows):
 			token_length = tokenmap_values_sorted[i]['end'] - tokenmap_values_sorted[i]['start']
 			charbuf[i][tokenmap_values_sorted[i]['start']:tokenmap_values_sorted[i]['end']] = ['X'] * min(token_length, w)
+			hparseresult_addr = tokenmap_values_sorted[i]['hparseresult']
+			try:
+				hpr = HParseResult(int(hparseresult_addr, 16))
+				hpr_strings.append(hpr.str_no_deref())
+			except Exception:
+				hpr_strings.append("invalid")
 #		for i in range(0, w):
 #			active_tokens_unsorted = [token for token in tokenmap_values if (token['start'] <= i and i <= token['end'])]
 #			# TODO: sorting this on each column seems like a lot of wasted effort
@@ -431,11 +438,13 @@ class TopLevelParse:
 					#	print("end: ", end, "start+w", start+w)
 					#	print("w:", w, "h=rows:", h)
 					#	raise ie
-		charbufrows_token_debug = [ "".join(row) + "\n" + str(tokenmap_values_sorted[index]) for index, row in enumerate(charbuf)]
+		charbufrows_token_debug = [ "".join(row) + "\n" + " ".join([hpr_strings[index], str(tokenmap_values_sorted[index])]) for index, row in enumerate(charbuf)]
 		charbufrows = ["".join(row) for row in charbuf]
 		#charbuf_final = "\n".join(charbufrows)
 		charbuf_final = "\n".join(charbufrows_token_debug)
 		print(charbuf_final)
+		#print(hpr_strings)
+		#print(len(hpr_strings), len(charbuf), len(tokenmap_values_sorted))
 
 	def print_input_map(self, token, rec_depth=0, parent_bounds=None):
 		w = gdb.parameter("width")