Lots more python updates

This commit is contained in:
Jordan Carlin 2024-12-17 21:31:12 -08:00
parent 21e35c9068
commit 51c3d59605
No known key found for this signature in database
18 changed files with 483 additions and 560 deletions

View file

@ -65,10 +65,7 @@ with open(resultfile, mode='w', newline='') as csvfile:
# Loop through each architecture and run the make commands
for arch in arch_list:
if(str in arch):
xlen_value='32'
else:
xlen_value='64'
xlen_value = "32" if str in arch else "64"
os.system("make clean")
make_all = f"make all XLEN={xlen_value} ARCH={arch}"
os.system(make_all)

View file

@ -30,12 +30,8 @@ def tabulate_arch_sweep(directory):
file = case+"_"+arch+".json"
file_path = os.path.join(directory, file)
lines = []
try:
f = open(file_path)
with open(file_path) as f:
lines = f.readlines()
except:
f.close()
#print(file_path+" does not exist")
for line in lines:
#print("File: "+file+" Line: "+line)
#p = re.compile('".*" : .*,')

View file

@ -16,7 +16,7 @@ def loadCoremark():
keywordlist = ["CoreMark 1.0", "CoreMark Size", "MTIME", "MINSTRET", "Branches Miss Predictions", "BTB Misses"]
for keyword in keywordlist:
bashInst = "cat " + coremarkPath + " | grep \"" + keyword + "\" | cut -d \':\' -f 2 | cut -d \" \" -f 2 | tail -1"
bashInst = "cat " + coremarkPath + ' | grep "' + keyword + "\" | cut -d ':' -f 2 | cut -d \" \" -f 2 | tail -1"
result = subprocess.run(bashInst, stdout=subprocess.PIPE, shell=True)
if (debug): print(result)
coremarkData[keyword] = int(result.stdout)
@ -25,7 +25,7 @@ def loadCoremark():
def loadEmbench(embenchPath, embenchData):
"""loads the embench data dictionary"""
f = open(embenchPath)
with open(embenchPath) as f:
embenchData = json.load(f)
if (debug): print(embenchData)
return embenchData

View file

@ -284,7 +284,7 @@ def main(args):
elif lninfo[1] == 'A':
atoms += 1
if not result == lninfo[2]:
if result != lninfo[2]:
print(f"Result mismatch at address {lninfo[0]}. Wally: {lninfo[2]}, Sim: {result}")
mismatches += 1
if args.dist:

View file

@ -473,7 +473,7 @@ class TestRunner:
if failed_configs:
md_file.write("## Failed Configurations\n\n")
for config, log_file in failed_configs:
md_file.write(f"- <span class=\"failure\" style=\"color: red;\">{config}</span> ({log_file})\n")
md_file.write(f'- <span class="failure" style="color: red;">{config}</span> ({log_file})\n')
md_file.write("\n")
else:
md_file.write("## Failed Configurations\n")
@ -481,7 +481,7 @@ class TestRunner:
md_file.write("\n## Passed Configurations\n")
for config in passed_configs:
md_file.write(f"- <span class=\"success\" style=\"color: green;\">{config}</span>\n")
md_file.write(f'- <span class="success" style="color: green;">{config}</span>\n')
self.logger.info("writing test outputs to markdown")
@ -526,7 +526,7 @@ class TestRunner:
md_file.write("\n")
except subprocess.CalledProcessError as e:
# Handle if the command fails
md_file.write(f"Failed to identify host and Operating System information: {str(e)}")
md_file.write(f"Failed to identify host and Operating System information: {e!s}")
# Which tests did we run
md_file.write(f"\n**Tests made:** `make {test_type}`\n")
@ -548,7 +548,7 @@ class TestRunner:
if len(item) == 0:
md_file.write("\n")
md_file.write("* <span class=\"no-failure\" style=\"color: green;\">No failures</span>\n")
md_file.write('* <span class="no-failure" style="color: green;">No failures</span>\n')
md_file.write("\n")
else:
for failed_test in item:
@ -556,7 +556,7 @@ class TestRunner:
log_file = failed_test[1]
md_file.write("\n")
md_file.write(f"* <span class=\"failure\" style=\"color: red;\">{config}</span> ({log_file})\n")
md_file.write(f'* <span class="failure" style="color: red;">{config}</span> ({log_file})\n')
md_file.write("\n")
# Successful Tests
@ -571,14 +571,14 @@ class TestRunner:
if len(item) == 0:
md_file.write("\n")
md_file.write("* <span class=\"no-successes\" style=\"color: red;\">No successes</span>\n")
md_file.write('* <span class="no-successes" style="color: red;">No successes</span>\n')
md_file.write("\n")
else:
for passed_tests in item:
config = passed_tests
md_file.write("\n")
md_file.write(f"* <span class=\"success\" style=\"color: green;\">{config}</span>\n")
md_file.write(f'* <span class="success" style="color: green;">{config}</span>\n')
md_file.write("\n")
self.logger.info("Combining markdown files")
@ -800,7 +800,7 @@ def main():
logger.info(f"The total failures for all tests ran are: {total_number_failures}")
# Copy actual test logs from sim/questa, sim/verilator, sim/vcs
if not args.tests == "test_lint":
if args.tests != 'test_lint':
test_runner.copy_sim_logs([test_runner.cvw / "sim/questa/logs", test_runner.cvw / "sim/verilator/logs", test_runner.cvw / "sim/vcs/logs"])
#############################################

View file

@ -46,7 +46,7 @@ def ParseBranchListFile(path):
is formatted in row columns. Each row is a trace with the file, branch predictor type, and the parameters.
parameters can be any number and depend on the predictor type. Returns a list of lists.'''
lst = []
BranchList = open(path)
with open(path) as BranchList:
for line in BranchList:
tokens = line.split()
predictorLog = os.path.dirname(path) + '/' + tokens[0]
@ -62,9 +62,9 @@ def ProcessFile(fileName):
# 1 find lines with Read memfile and extract test name
# 2 parse counters into a list of (name, value) tuples (dictionary maybe?)
benchmarks = []
transcript = open(fileName)
HPMClist = { }
testName = ''
with open(fileName) as transcript:
for line in transcript.readlines():
lineToken = line.split()
if(len(lineToken) > 3 and lineToken[1] == 'Read' and lineToken[2] == 'memfile'):

View file

@ -268,21 +268,12 @@ def addTests(tests, sim):
for test in tests:
config = test[0]
suites = test[1]
if len(test) >= 3:
args = f" --args {test[2]}"
else:
args = ""
if len(test) >= 4:
gs = test[3]
else:
gs = "All tests ran without failures"
args = f" --args {test[2]}" if len(test) >= 3 else ""
gs = test[3] if len(test) >= 4 else "All tests ran without failures"
cmdPrefix=f"wsim --sim {sim} {coverStr} {config}"
for t in suites:
sim_log = f"{sim_logdir}{config}_{t}.log"
if len(test) >= 5:
grepfile = sim_logdir + test[4]
else:
grepfile = sim_log
grepfile = sim_logdir + test[4] if len(test) >= 5 else sim_log
tc = TestCase(
name=t,
variant=config,
@ -535,9 +526,7 @@ def main():
os.system('rm -f questa/fcov_ucdb/* questa/fcov_logs/* questa/fcov/*')
elif args.buildroot:
TIMEOUT_DUR = 60*1440 # 1 day
elif args.testfloat:
TIMEOUT_DUR = 30*60 # seconds
elif args.nightly:
elif args.testfloat or args.nightly:
TIMEOUT_DUR = 30*60 # seconds
else:
TIMEOUT_DUR = 10*60 # seconds
@ -545,10 +534,7 @@ def main():
# Scale the number of concurrent processes to the number of test cases, but
# max out at a limited number of concurrent processes to not overwhelm the system
# right now fcov, ccov, nightly all use Imperas
if (args.ccov or args.fcov or args.nightly):
ImperasDVLicenseCount = 16 # limit number of concurrent processes to avoid overloading ImperasDV licenses
else:
ImperasDVLicenseCount = 10000 # effectively no license limit for non-lockstep tests
ImperasDVLicenseCount = 16 if args.ccov or args.fcov or args.nightly else 10000
with Pool(processes=min(len(configs),multiprocessing.cpu_count(), ImperasDVLicenseCount)) as pool:
num_fail = 0
results = {}

View file

@ -67,8 +67,8 @@ def main():
parser.add_argument('-d', "--dist", action='store_true', help="Report distribution of operations")
parser.add_argument('-s', "--sim", help="Simulator", choices=["questa", "verilator", "vcs"], default="verilator")
args = parser.parse_args()
simargs = "I_CACHE_ADDR_LOGGER=1\\\'b1 D_CACHE_ADDR_LOGGER=1\\\'b1"
testcmd = "wsim --sim " + args.sim + " rv64gc {} --params \"" + simargs + "\" > /dev/null"
simargs = "I_CACHE_ADDR_LOGGER=1\\'b1 D_CACHE_ADDR_LOGGER=1\\'b1"
testcmd = "wsim --sim " + args.sim + ' rv64gc {} --params "' + simargs + '" > /dev/null'
#cachecmd = "CacheSim.py 64 4 56 44 -f {} --verbose"
cachecmd = "CacheSim.py 64 4 56 44 -f {}"
mismatches = 0

View file

@ -13,7 +13,7 @@ if not os.path.isfile(sys.path[0]+'/slack-webhook-url.txt'):
print('Tutorial for slack webhook urls: https://bit.ly/BenSlackNotifier')
print('==============================================================')
else:
urlFile = open(sys.path[0]+'/slack-webhook-url.txt')
with open(sys.path[0]+'/slack-webhook-url.txt') as urlFile:
url = urlFile.readline().strip('\n')
# Traverse 3 parents up the process tree
@ -25,7 +25,7 @@ else:
result = subprocess.check_output('ps -o cmd -p '+PPID3,shell=True)
cmdName = str(result).split('\\n')[1]
# Get current time
timezone_offset = -8.0 # Pacific Standard Time (UTC08:00)
timezone_offset = -8.0 # Pacific Standard Time (UTC-08:00)
tzinfo = timezone(timedelta(hours=timezone_offset))
time = datetime.now(tzinfo).strftime('%I:%M %p')
# Send message

View file

@ -24,7 +24,7 @@ def runFindCommand(cmd):
res = subprocess.check_output(cmd, shell=True, )
res = str(res)
res = res.replace("\\n", " ") # replace newline with space
res = res.replace("\'", "") # strip off quotation marks
res = res.replace("'", "") # strip off quotation marks
res = res[1:] # strip off leading b from byte string
return res
@ -81,13 +81,10 @@ def processArgs(wkdir, args):
def setupParamOverrides(wkdir, args):
paramOverrideFile = os.path.join(wkdir, "param_overrides.txt")
with open(paramOverrideFile, "w", encoding="utf-8") as f:
with open(paramOverrideFile, "w") as f:
for param in args.params.split():
[param, value] = param.split("=")
if r"\'" in value: # for bit values
value = value.replace(r"\'", "'")
else: # for strings
value = f'"{value}"'
value = value.replace("\\'", "'") if "\\'" in value else f'"{value}"' # transform quotes/bit indicators
f.write(f"assign {value} {args.tb}/{param}\n")
return f" -parameters {wkdir}/param_overrides.txt "

View file

@ -27,7 +27,7 @@ def synthsintocsv():
specReg = re.compile('[a-zA-Z0-9]+')
metricReg = re.compile('-?\d+\.\d+[e]?[-+]?\d*')
file = open("Summary.csv", "w")
with open("Summary.csv", "w") as file:
writer = csv.writer(file)
writer.writerow(['Width', 'Config', 'Mod', 'Tech', 'Target Freq', 'Delay', 'Area'])
@ -36,10 +36,7 @@ def synthsintocsv():
# print("From " + oneSynth + " Find ")
# for d in descrip:
# print(d)
if (descrip[3] == "sram"):
base = 4
else:
base = 3
base = 4 if descrip[3] == "sram" else 3
width = descrip[base][:4]
config = descrip[base][4:]
if descrip[base+1][-2:] == 'nm':
@ -68,7 +65,6 @@ def synthsintocsv():
delay = 1000/int(freq) - metrics[0]
area = metrics[1]
writer.writerow([width, config, mod, tech, freq, delay, area])
file.close()
def synthsfromcsv(filename):
@ -93,7 +89,7 @@ def freqPlot(tech, width, config):
freqsL, delaysL, areasL = ([[], []] for i in range(3))
for oneSynth in allSynths:
if (width == oneSynth.width) & (config == oneSynth.config) & (tech == oneSynth.tech) & ('orig' == oneSynth.mod):
if (width == oneSynth.width) & (config == oneSynth.config) & (tech == oneSynth.tech) & (oneSynth.mod == 'orig'):
ind = (1000/oneSynth.delay < (0.95*oneSynth.freq)) # when delay is within target clock period
freqsL[ind] += [oneSynth.freq]
delaysL[ind] += [oneSynth.delay]
@ -101,10 +97,7 @@ def freqPlot(tech, width, config):
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
allFreqs = list(flatten(freqsL))
if allFreqs != []:
median = np.median(allFreqs)
else:
median = 0
median = np.median(allFreqs) if allFreqs != [] else 0
for ind in [0,1]:
areas = areasL[ind]
@ -169,8 +162,7 @@ def plotFeatures(tech, width, config):
delays, areas, labels = ([] for i in range(3))
freq = techdict[tech].targfreq
for oneSynth in allSynths:
if (tech == oneSynth.tech) & (freq == oneSynth.freq):
if (oneSynth.config == config) & (width == oneSynth.width):
if (tech == oneSynth.tech) & (freq == oneSynth.freq) & (oneSynth.config == config) & (width == oneSynth.width):
delays += [oneSynth.delay]
areas += [oneSynth.area]
labels += [oneSynth.mod]

View file

@ -50,7 +50,7 @@ def synthsintocsv():
specReg = re.compile("[a-zA-Z0-9]+")
metricReg = re.compile("-?\d+\.\d+[e]?[-+]?\d*")
file = open("ppaData.csv", "w")
with open("ppaData.csv", "w") as file:
writer = csv.writer(file)
writer.writerow(
[
@ -92,7 +92,6 @@ def synthsintocsv():
[area, lpower, denergy] = [n / 2 for n in [area, lpower, denergy]]
writer.writerow([module, tech, width, freq, delay, area, lpower, denergy])
file.close()
def cleanup():
@ -129,10 +128,7 @@ def getVals(tech, module, var, freq=None, width=None):
works at a specified target frequency or if none is given, uses the synthesis with the best achievable delay for each width
"""
if width is not None:
widthsToGet = width
else:
widthsToGet = widths
widthsToGet = width if width is not None else widths
metric = []
widthL = []
@ -171,21 +167,15 @@ def csvOfBest(filename):
m = np.Inf # large number to start
best = None
for oneSynth in allSynths: # best achievable, rightmost green
if (
(oneSynth.width == w)
& (oneSynth.tech == tech)
& (oneSynth.module == mod)
):
if (oneSynth.delay < m) & (
1000 / oneSynth.delay > oneSynth.freq
):
if (oneSynth.width == w) & (oneSynth.tech == tech) & (oneSynth.module == mod):
if (oneSynth.delay < m) & (1000 / oneSynth.delay > oneSynth.freq):
m = oneSynth.delay
best = oneSynth
if (best is not None) & (best not in bestSynths):
bestSynths += [best]
file = open(filename, "w")
with open(filename, "w") as file:
writer = csv.writer(file)
writer.writerow(
[
@ -201,7 +191,6 @@ def csvOfBest(filename):
)
for synth in bestSynths:
writer.writerow(list(synth))
file.close()
return bestSynths
@ -229,7 +218,7 @@ def genLegend(fits, coefs, r2=None, spec=None, ale=False):
eq = ""
ind = 0
for k in eqDict.keys():
for k in eqDict:
if k in fits:
if str(coefsr[ind]) != "0":
eq += " + " + coefsr[ind] + eqDict[k]
@ -277,10 +266,7 @@ def oneMetricPlot(
modFit = fitDict[module]
fits = modFit[ale]
if freq:
ls = "--"
else:
ls = "-"
ls = "--" if freq else "-"
for spec in techSpecs:
# print(f"Searching for module of spec {spec} and module {module} and var {var}")
@ -403,7 +389,7 @@ def makeCoefTable():
"""writes CSV with each line containing the coefficients for a regression fit
to a particular combination of module, metric (including both techs, normalized)
"""
file = open("ppaFitting.csv", "w")
with open("ppaFitting.csv", "w") as file:
writer = csv.writer(file)
writer.writerow(
["Module", "Metric", "Target", "1", "N", "N^2", "log2(N)", "Nlog2(N)", "R^2"]
@ -436,8 +422,6 @@ def makeCoefTable():
row = [module, var, target] + coefsToWrite + [r2]
writer.writerow(row)
file.close()
def sigfig(num, figs):
return "{:g}".format(float("{:.{p}g}".format(num, p=figs)))
@ -447,7 +431,7 @@ def makeEqTable():
"""writes CSV with each line containing the equations for fits for each metric
to a particular module (including both techs, normalized)
"""
file = open("ppaEquations.csv", "w")
with open("ppaEquations.csv", "w") as file:
writer = csv.writer(file)
writer.writerow(
[
@ -486,9 +470,6 @@ def makeEqTable():
row = [module] + eqs
writer.writerow(row)
file.close()
def genFuncs(fits="clsgn"):
"""helper function for regress()
returns array of functions with one for each term desired in the regression fit
@ -819,10 +800,7 @@ def stdDevError():
norm = techdict[var]
metL += [m / norm for m in metric]
if ale:
ws = [w / normAddWidth for w in widths]
else:
ws = widths
ws = [w / normAddWidth for w in widths] if ale else widths
ws = ws * 2
mat = []
for w in ws:
@ -896,7 +874,7 @@ if __name__ == "__main__":
"flop": ["c", "l", "l"],
"binencoder": ["cg", "l", "l"],
}
fitDict.update(dict.fromkeys(["mux2", "mux4", "mux8"], ["cg", "l", "l"]))
fitDict.update({key: ["cg", "l", "l"] for key in ["mux2", "mux4", "mux8"]})
TechSpec = namedtuple("TechSpec", "tech color shape delay area lpower denergy")
# FO4 delay information information

View file

@ -21,8 +21,7 @@ args=parser.parse_args()
fin_path = glob.glob(f"{os.getenv('WALLY')}/src/**/{args.DESIGN}.sv",recursive=True)[0]
fin = open(fin_path)
with open(fin_path) as fin:
lines = fin.readlines()
# keeps track of what line number the module header begins
@ -35,7 +34,7 @@ lineModuleEnd = 0
moduleName = ""
# string that will keep track of the running module header
buf = "import cvw::*;\n`include \"config.vh\"\n`include \"parameter-defs.vh\"\n"
buf = 'import cvw::*;\n`include "config.vh"\n`include "parameter-defs.vh"\n'
# are we writing into the buffer
writeBuf=False
@ -63,11 +62,5 @@ buf += f"\t{moduleName} #(P) dut(.*);\nendmodule"
# path to wrapper
wrapperPath = f"{args.HDLPATH}/{moduleName}wrapper.sv"
fout = open(wrapperPath, "w")
with open(wrapperPath, "w") as fout:
fout.write(buf)
fin.close()
fout.close()
#print(buf)

View file

@ -7,10 +7,7 @@ import argparse
def runSynth(config, mod, tech, freq, maxopt, usesram):
global pool
if (usesram):
prefix = "syn_sram_"
else:
prefix = "syn_"
prefix = "syn_sram_" if usesram else "syn_"
cfg = prefix + config
command = f"make synth DESIGN=wallypipelinedcore CONFIG={cfg} MOD={mod} TECH={tech} DRIVE=FLOP FREQ={freq} MAXOPT={maxopt} USESRAM={usesram} MAXCORES=1"
pool.map(mask, [command])

View file

@ -6,7 +6,8 @@ import fileinput
address = 0
for line in fileinput.input('-'):
with fileinput.input('-') as f:
for line in f:
# the 14- is to reverse the byte order to little endian
formatedLine = ' '.join(line[14-i:14-i+2] for i in range(0, len(line), 2))
sys.stdout.write(f'@{address:08x} {formatedLine:s}\n')

View file

@ -40,9 +40,9 @@ mem_addr = mem_start_addr
def wl(line="", comment=None, fname=test_name):
with open(fname, "a") as f:
instr = False if (":" in line or
instr = not (":" in line or
".align" in line or
"# include" in line) else True
"# include" in line)
indent = 6 if instr else 0
comment = "// " + comment if comment is not None else ""
to_write = " " * indent + line + comment + "\n"

View file

@ -76,11 +76,8 @@ def create_vectors(my_config):
operation = my_config.op_code
rounding_mode = "X"
flags = "XX"
# use name to create our new tv
dest_file = open(f"{dest_dir}cvw_{my_config.bits}_{vector1[:-2]}.tv", 'w')
# open vectors
src_file1 = open(source_dir1 + vector1)
src_file2 = open(source_dir2 + vector2)
# use name to create our new tv and open vectors
with open(f"{dest_dir}cvw_{my_config.bits}_{vector1[:-2]}.tv", 'w') as dest_file, open(source_dir1 + vector1) as src_file1, open(source_dir2 + vector2) as src_file2:
# for each test in the vector
reading = True
src_file2.readline() #skip first bc junk
@ -196,7 +193,7 @@ def create_vectors(my_config):
answer = src_file2.readline().strip()
# print(f"Answer: {answer}")
#print(answer1,answer2)
if not (answer == "6f5ca309"): # if there is still stuff to read
if answer != '6f5ca309': # if there is still stuff to read
# parse through .S file
detected = False
done = False
@ -285,9 +282,6 @@ def create_vectors(my_config):
# print("read false")
reading = False
# print("out")
dest_file.close()
src_file1.close()
src_file2.close()
config_list = [
Config(32, "M", "div", "div-", 0),

View file

@ -39,10 +39,8 @@ for vector in div_vectors:
config_list = vector.split(".")[0].split("_")
operation = "1" #float div
rounding_mode = round_dict[str(config_list[2])]
# use name to create our new tv
dest_file = open(dest_dir + "cvw_" + vector, 'a')
# open vector
src_file = open(source_dir + vector)
# use name to create our new tv and open vector
with open(dest_dir + "cvw_" + vector, 'a') as dest_file, open(source_dir + vector) as src_file:
# for each test in the vector
for i in src_file.readlines():
translation = "" # this stores the test that we are currently working on
@ -50,8 +48,6 @@ for vector in div_vectors:
# put it all together, strip nec for removing \n on the end of the flags
translation = f"{operation}_{ext_bits(input_1)}_{ext_bits(input_2)}_{ext_bits(answer)}_{flags.strip()}_{rounding_mode}"
dest_file.write(translation + "\n")
dest_file.close()
src_file.close()
print("creating testfloat sqrt test vectors")
@ -64,10 +60,8 @@ for vector in sqrt_vectors:
config_list = vector.split(".")[0].split("_")
operation = "2" #sqrt
rounding_mode = round_dict[str(config_list[2])]
# use name to create our new tv
dest_file = open(dest_dir + "cvw_" + vector, 'a')
# open vector
src_file = open(source_dir + vector)
# use name to create our new tv and open vector
with open(dest_dir + "cvw_" + vector, 'a') as dest_file, open(source_dir + vector) as src_file:
# for each test in the vector
for i in src_file.readlines():
translation = "" # this stores the test that we are currently working on
@ -75,5 +69,3 @@ for vector in sqrt_vectors:
# put it all together, strip nec for removing \n on the end of the flags
translation = "{}_{}_{}_{}_{}_{}".format(operation, ext_bits(input_1), "X"*32, ext_bits(answer), flags.strip(), rounding_mode)
dest_file.write(translation + "\n")
dest_file.close()
src_file.close()