#! /usr/bin/env python3
"""Regression interface.
Usage:
regression.py (-h | --help)
regression.py --version
Options:
-h --help Show this screen.
--version Show version.
"""
from docopt import docopt
import os
import re
from pathlib import Path
from dict_recursive_update import recursive_update
import pandas as pd
from datetime import datetime
import pytz
import commentjson as json
import sys_flow_v2.flow_utils as futils
import sys_flow_v2.flow_constants as fconsts
import sys_flow_v2.gen_regression_json as gen_reg_json
import snoop
DEBUG = bool(os.environ.get("REGRESSION_DEBUG", False))
snoop.install(enabled=DEBUG)
def add_prefix(dir_old, dir_base):
"""Help to normalize to full path."""
if dir_old[0] == "/":
return dir_old
else:
return "{}/{}".format(dir_base, dir_old)
# TODELETE: hasn't been used for years.
def open_nested_json(jsonfile):
"""Load nested json into one.
If a value is "nested_xx.json",
it will be replaced with the content of this json.
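    Example (hypothetical filenames): if config.json contains
    {"knerex": "nested_knerex.json"}, the loaded config becomes
    {"knerex": <parsed contents of nested_knerex.json>}.
    Nested files are resolved relative to the parent json's directory.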
"""
jsonconfig = {}
with open(jsonfile) as json_file:
jsonconfig = json.load(json_file)
for k, v in jsonconfig.items():
# replace "nested_xxx.json" to real json
if type(v) == str and v.startswith("nested_") and v.endswith(".json"):
fv = os.path.dirname(jsonfile) + "/" + v
jsonconfig[k] = open_nested_json(fv)
return jsonconfig
def get_ip_eval_bw(fn_json, hw_mode):
"""Get bitwidth info from json."""
with open(fn_json, "r") as f:
j = json.load(f)
if hw_mode in [520]:
bw_weight = "weight_bandwidth_GBps"
bw_rdma = "DMA_Bandwith_GBps"
bw_wdma = "DMA_Bandwith_GBps"
else:
bw_weight = "GETW_bandwidth_GB/s"
bw_rdma = "RDMA_bandwidth_GB/s"
bw_wdma = "WDMA_bandwidth_GB/s"
results = {}
results["bw_weight"] = j.get(bw_weight, None)
results["bw_rdma"] = j.get(bw_rdma, None)
results["bw_wdma"] = j.get(bw_wdma, None)
return results
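# Example (hypothetical json contents): for a non-520 hw_mode, a json with
#   {"GETW_bandwidth_GB/s": 8, "RDMA_bandwidth_GB/s": 4, "WDMA_bandwidth_GB/s": 4}
# yields {"bw_weight": 8, "bw_rdma": 4, "bw_wdma": 4}; missing keys become None.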
class regression:
"""Regression class is for whole regression, parse configs, preparations.
The config will passed to each test cases.
"""
def __init__(self, fn_json, bin_ready=True, keywords=None):
"""Init configs for this regression run."""
        # if not ready, some checks will be skipped
self.bin_ready = bin_ready
self.load_user_config(fn_json)
self.set_debug()
self.set_report_path()
self.check_platform()
self.check_regression_config()
self.check_path_config()
self.check_dynasty_config()
self.check_compiler_config()
self.check_knerex_config()
self.check_knerex_dumplevel()
self.check_snr_config()
self.check_nef()
self.load_model_id()
self.load_snr_reference()
self.print()
self.update_info()
self.sanity_check()
# at end of init, regression.config are ready to use.
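        # Minimal usage sketch (hypothetical config path):
        #   reg = regression("config_regression.json")
        #   cases = reg.filter_cases()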
def load_user_config(self, fn_json):
"""Load configs and combine for this regression.
This method handles two main tasks:
1. Updates self.config: The actual configuration that will be used to run regression
2. Sets self.fn_config_json: Only used in reports to indicate which json was used
- For dict input: set to "json_in_memory"
- For file input: set to the resolved path
"""
        # set the config source depending on the type of fn_json
if isinstance(fn_json, dict):
self.fn_config_json = "json_in_memory"
config_new = fn_json
else:
self.fn_config_json = Path(fn_json).resolve()
config_new = open_nested_json(fn_json)
self.load_default_config()
recursive_update(self.config, config_new)
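        # recursive_update merges nested dicts key by key, e.g.
        #   recursive_update({"a": {"x": 1, "y": 2}}, {"a": {"y": 3}})
        #   -> {"a": {"x": 1, "y": 3}}
        # so a user config only needs to override the keys it changes.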
def load_default_config(self):
"""Load the default config of regression first."""
# p_flow will be released
self.p_flow = Path(__file__).resolve().parent
# p_regression will not be released (internal use only)
self.p_regression = self.p_flow.parent
p_default = self.p_flow / "config_default.json"
if not p_default.exists():
            raise FileNotFoundError(p_default)
with open(p_default, "r") as f:
self.config = json.load(f)
def set_debug(self):
"""Prepare for debug mode."""
if DEBUG:
self.config["regression"]["logging_level"] = "DEBUG"
self.config["regression"]["print_command"] = True
self.config["regression"]["print_error"] = True
def check_platform(self):
"""Check any dynasty mode in 520 / 720 / 530.
test cases can refer to:
- 520 in config["hw_mode_on"]
- for hw_mode in config["hw_mode_on"] where they are on for sure.
"""
self.config["hw_mode_on"] = []
for hw_mode in fconsts.MODE_HARDWARE:
# for each platform (520/720/530)
# if any 520 / 520dq / 520wq / 520wqdq is true
# this hw_mode_on include 520
for k in self.config["mode_run"]:
                # if ANY related mode is turned on
if str(hw_mode) in k and self.config["mode_run"][k]:
self.config["hw_mode_on"].append(hw_mode)
break
# convert decomp to graphopt
mode_graphopt = []
for k, v in self.config["mode_run"].items():
if k.endswith("decomp") and v:
k2 = k.replace("decomp", "graphopt")
print(f"WARNING: mode `{k}` is obsoleted. Please use `{k2}` mode.")
# TODO: raise error?
# turn graphopt
mode_graphopt.append(k2)
# patch graphopt
for m in mode_graphopt:
self.config["mode_run"][m] = True
set_on = set(self.config["hw_mode_on"])
set_all = set(fconsts.MODE_HARDWARE)
        assert set_on.issubset(set_all), f"platforms: {set_on} should be in {set_all}"
def check_regression_config(self):
"""Sanity check on configs, as name implies.
"USE_YOUR_OWN" must be replaced.
"""
assert self.config["tag"] != "USE_YOUR_OWN", "Please update the 'tag' in your config!"
for k in ["report", "cases"]:
assert self.config["path"][k] != "USE_YOUR_OWN", f"Please update the '{k}' in your config!"
if self.config["module_run"]["only_ip_evaluator"]:
self.logger.warning("regression run on ip evaluator only!")
for k in self.config["module_run"]:
self.config["module_run"][k] = False
for k in ["only_ip_evaluator", "gen_nef", "compiler_frontend"]:
# only two modules will run
self.config["module_run"][k] = True
self.config["compiler_piano"]["ip_evaluator"] = True
elif self.config["module_run"]["only_dongle"]:
self.logger.warning("regression run on dongle only!")
# never ever clean up
for k in self.config["pre_clean_up"]:
self.config["pre_clean_up"][k] = False
for k in self.config["module_run"]:
self.config["module_run"][k] = False
for k in ["run_nef_kneron_plus", "only_dongle"]:
                # only one module will run.
self.config["module_run"][k] = True
self.config["regression"]["skip_success"] = False
self.config["regression"]["only_success"] = True
if self.config["module_run"]["piano_knerex"]:
# combo: we need to run compiler frontend + knerex + compiler_piano together
self.config["module_run"]["compiler_frontend"] = True
self.config["module_run"]["gen_nef"] = True
if self.config["module_run"]["csim"]:
            # usually when running csim, we do bit-true-match between dynasty and csim.
# better prepare for PLD
# TODO for PLD
self.config["module_run"]["export_PLD_binary"] = True
def check_dynasty_config(self):
"""As name said.
NOTE:
dynasty inference will use gnu parallel. not python. so n_parallel_model and n_parallel_input is unrelated now.
"""
assert self.config["dynasty"]["regression_input"] in ["default", "all"]
assert self.config["dynasty"]["round_mode"] in [0, 1]
assert type(self.config["dynasty"]["num_input_samples"]) is int, "num_input_samples must be integer."
if self.config["dynasty"]["sample_seed"]:
assert type(self.config["dynasty"]["sample_seed"]) is int, "sample_seed must be integer"
assert self.config["dynasty"]["input_shape"] in ["onnx_shape", "channel_last"]
onnx_source = self.config["dynasty"]["piano_dynasty"]["onnx_source"]
assert onnx_source in ["piano_onnx", "piano_bie"], f"piano_dynasty/onnx_source must be piano_onnx|piano_bie, but got {onnx_source}"
def check_compiler_config(self):
"""As name said."""
assert self.config["compiler_piano"]["model_format"] in fconsts.MODEL_FORMAT, "wrong model_format: {}".format(self.config["compiler_piano"]["model_format"])
assert self.config["compiler_piano"]["model_optimize"] in fconsts.MODEL_RELEASE.keys(), "wrong model_optimze: {}".format(self.config["compiler_piano"]["model_optimize"])
# TODO: add support for "wqbi-s" / "hwbi" / etc
if self.config["compiler_piano"]["model_optimize"] in ["wqbi"]:
for hw_code in self.config["hw_mode_on"]:
self.config["mode_run"][f"{hw_code}-wqbi"] = True
if not self.config["path"]["internal"]:
# should not timeout for customer when fm_cut
# use 24 hours for now.
self.config["compiler_piano"]["timeout"] = 3600*24
if self.config["compiler_piano"]["ip_evaluator"]:
# internal debug
do_s1 = self.config["compiler_piano"]["ip_evaluator_debug"] == "stage_1"
jp = {}
ip_bw = {}
for hw_code in self.config["hw_mode_on"]:
jp[hw_code] = Path(self.config["path"]["binary"]["ip_eval"][hw_code])
if do_s1:
                    if self.config["path"]["use_toolchain"]:
self.logger.error("ip_evaluator_debug set to stage_1, but not supported in toolchain.")
else:
jp_s1 = jp[hw_code].parent / f"ip_eval_{hw_code}_s1.json"
self.create_ip_eval_s1(jp[hw_code], jp_s1, override=False)
jp[hw_code] = jp_s1 # replaced
ip_bw[hw_code] = get_ip_eval_bw(jp[hw_code], hw_code)
self.config["compiler_piano"]["ip_evaluator_json"] = jp
self.config["compiler_piano"]["ip_evaluator_bw"] = ip_bw
@staticmethod
def create_ip_eval_s1(fn_ori, fn_s1, override=False):
"""As name implies."""
if fn_s1.exists() and (not override):
return
with open(fn_ori, "r") as f0, open(fn_s1, "w") as f1:
j = json.load(f0)
j["detailed_info"] = True
json.dump(j, f1, indent=4, sort_keys=False)
def check_knerex_config(self):
"""Make sure knerex config is correct."""
if self.config["knerex"]["datapath_range_method"] == "percentage":
# set to 0 to save time
self.config["knerex"]["percentile"] = 0
# DELETED: bitwidth_mode/weight_4bit_enable since 0.21.1
ks = ["bitwidth_mode", "weight_4bit_enable"]
for k in ks:
if k in self.config["knerex"]:
self.logger.error(f"config: knerex/{k} is not supported. please change according to #18108.")
raise NotImplementedError
fsm = self.config["knerex"]["fixed_scale_mode"]
if fsm not in ["0", "co", "all"]:
raise ValueError(f"fixed_scale_model must be: 0, co, all, but got {fsm}")
# verify datapath_bitwidth_mode / weight_bitwidth_mode since 0.21.1
bw_wght = self.config["knerex"]["weight_bitwidth_mode"]
bw_data = self.config["knerex"]["datapath_bitwidth_mode"]
bw_in = self.config["knerex"]["model_in_bitwidth_mode"]
bw_out = self.config["knerex"]["model_out_bitwidth_mode"]
bw_cpu = self.config["knerex"]["cpu_bitwidth_mode"]
if bw_wght in ["mix_interleave_8", "mix_interleave_16"]:
raise ValueError(f"weight bw ({bw_wght}) which should be expanded to weight_bitwidth_mode (mix interleave) + weight_mix_percentile")
for hw_mode in self.config["hw_mode_on"]:
d_bw = gen_reg_json.check_bitwidth_mode(bw_data, bw_wght, bw_in, bw_out, bw_cpu, hw_mode)
# TODO: the d_bw `weight_bitwidth_mode` and `weight_mix_percentile` may need to change.
def check_knerex_dumplevel(self):
"""Knerex dump level is complicated."""
def require_quan_onnx(conf):
"""Return true if configed to run any wq related mode."""
modes_wq = [f"{hw}{md}" for hw in [520, 720, 530, 730, 630, 540] for md in ["wq", "wqdq"]]
DUMP_QUAN = 3 # 1+2
for m in modes_wq:
if futils.get_switch_value(conf, m, False):
return DUMP_QUAN
return 0
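        # e.g., conf = {"720wq": True} -> require_quan_onnx returns 3 (DUMP_QUAN, bits 1+2).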
def require_bias_adjust_onnx(conf):
"""Return true if configed to run any wq related mode.
we are interested in modes, e.g.,
* 520wq-wqbi
* 520-wqbi
* 720-hwbi
* 530-hwbi-mse
In a short, they all ended with "wqbi"
"""
BIAS_ADJUST_DUMPLEVEL = {
# stage: knerex_dump_level
"wqbi": 0b10000000011, # 1+2+1024
"wqbi-s": 0b00000000111, # 1+2+4
"hwbi": 0b00000100011, # 1+2+32
"hwbi-mse": 0b00001000011, # 1+2+64
}
switch_value = 0
for bi_stage, k_switch in BIAS_ADJUST_DUMPLEVEL.items():
for k, v in conf.items():
if v and k.endswith(bi_stage):
switch_value |= k_switch
break
return switch_value
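        # e.g., conf = {"520-wqbi": True, "720-hwbi": True}
        # -> switch_value == 0b10000000011 | 0b00000100011 == 0b10000100011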
# TODELETE
# def require_decomp(conf):
# DUMP_DECOMPOSE = 0b10000000
# for k, v in conf.items():
# if v and "decomp" in k:
# return DUMP_DECOMPOSE
# return 0
def require_onnx(model_fmt):
DUMP_ONNX_AND_JSON = 0b1000
if model_fmt == "onnx":
return DUMP_ONNX_AND_JSON
else:
return 0
# if it is written as a string, convert it to number.
# e.g., "11" -> 3
if "dump_level" in self.config["knerex"]:
if type(self.config["knerex"]["dump_level"]) is str:
self.config["knerex"]["dump_level"] = int(self.config["knerex"]["dump_level"], 2)
# for scaled.quantized onnx
self.config["knerex"]["dump_level"] |= require_quan_onnx(self.config["mode_run"])
# will run any bias_adjust stages?
any_bi = require_bias_adjust_onnx(self.config["mode_run"])
self.config["knerex"]["dump_level"] |= any_bi
self.config["module_run"]["any_bi_enable"] = any_bi > 0
# check DECOMP # no more decomp after 0.26.0
# self.config["knerex"]["dump_level"] |= require_decomp(self.config["mode_run"])
# check ONNX+JSON
self.config["knerex"]["dump_level"] |= require_onnx(self.config["compiler_piano"]["model_format"])
def check_snr_config(self):
"""As name said."""
# should I do SNR?
# NOTE: need to rethink relationship between dynasty and snr calculation.
if not self.config["module_run"]["snr_calculation"]:
d_modes_run = [f"mode_{k}_piano" for k, v in self.config["mode_run"].items() if v]
if self.config["module_run"]["piano_dynasty"] and fconsts.contain_valid_snr_pairs(d_modes_run):
# if run dynasty_float only, then no need to run SNR_calculation
# if run float and 520, then need to run SNR_calculation
self.config["module_run"]["snr_calculation"] = True
else:
# if the snr_calculation is True already, run it anyway!
pass
if self.config["module_run"]["snr_calculation"] and self.config["module_run"]["verify_decomp_snr"]:
self.config["mode_run"]["float"] = True
for hw_mode in self.config["hw_mode_on"]:
self.config["mode_run"][f"{hw_mode}graphopt"] = True
        # here are the snr related reports.
        # the snr reports are separated for better viewing.
self.snr_csv = {}
self.snr_csv["bias_adjust"] = f"{self.p_report}/{self.run_tag}_bias_adjust_snr.csv"
for snr_target in list([str(a) for a in fconsts.MODE_HARDWARE] + list(fconsts.SNR_BI_IMPROVE.keys())):
self.snr_csv[snr_target] = f"{self.p_report}/{self.run_tag}_{snr_target}_snr.csv"
for col in self.config["snr"]["report_snr_col"]:
assert col in fconsts.SNR_REPORT_COLS, f"wrong snr col: {col}, must be: {fconsts.SNR_REPORT_COLS}"
        # after dynasty reference, all snr pairs are computed.
        # but for the final SNR report, we are interested in ONLY 1 ref + 1 deg
        # e.g., float vs 520, 530graphopt vs 530-wqbi
        # there may be two types of reference: float or 520graphopt (preferred!)
        # the deg may be scaled/wqbi/hwbi/etc
self.config["snr"]["ref"] = {}
self.config["snr"]["deg"] = {}
for hw_mode in self.config["hw_mode_on"]:
            # priority: 520graphopt > float
refs_all = [f"{hw_mode}graphopt", "float"]
refs_on = [ref for ref in refs_all if futils.get_switch_value(self.config["mode_run"], ref, False)]
if len(refs_on) == 0:
if not self.config["module_run"]["only_ip_evaluator"]:
self.logger.error(f"ERROR: some {hw_mode} related modes on. but BOTH float or {hw_mode}graphopt are OFF. SNR report may fail.")
# make reference as float but which is not turned on yet.
self.config["snr"]["ref"][hw_mode] = "float"
# TODO: should I force mode_run/float on?
else:
self.config["snr"]["ref"][hw_mode] = refs_on[0]
model_opt = futils.get_switch_value(self.config["compiler_piano"], "model_optimize", "scaled")
deg = f"{hw_mode}{fconsts.MODEL_RELEASE[model_opt]}"
if not futils.get_switch_value(self.config["mode_run"], deg, False):
self.logger.error(f"ERROR: mode_run / {deg} not turned on, but compiler was set to use {deg}. ")
self.logger.error(f"ERROR: If the test cases does not have {deg} bie exists, snr verification / compiler will fail!!!")
self.logger.info("Suggest: Change compile/model_optimize to scaled.")
# TODO: what if not turned on?
self.config["snr"]["deg"][hw_mode] = deg
def check_nef(self):
"""Nef inference with kneron+ is internal only."""
if not self.config["path"]["internal"]:
run_kneron_plus = self.config["module_run"]["run_nef_kneron_plus"]
assert not run_kneron_plus, "kneron plus is for kneron internal use."
def load_snr_reference(self):
"""Load snr reference.
        NOTE: to update the latest snr record,
        just save the csv to the snr_reference folder, then update the link pointing to the latest.
"""
p_snr = Path("/opt/data/piano_bin_cache/reg_configs/snr_reference")
if self.config["path"]["internal"] and "big_model" in self.config["path"]["cases"] and p_snr.exists():
# only has snr reference for big models.
def clean_snr(s):
return [s1.split("//")[0] for s1 in s]
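            # e.g., clean_snr(["35.2//old note", "40.1"]) -> ["35.2", "40.1"]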
def load_csv(fn_snr):
df = pd.read_csv(fn_snr)
df["name"] = df.apply(lambda x: futils.clean_case_name(x.case), axis=1)
df.set_index("name", inplace=True)
col_snr = [a for a in df.columns if "SNR" in a]
return df[col_snr].apply(clean_snr, axis=0).transpose().to_dict()
snr_ref = {}
for fn_snr in p_snr.glob("*.csv"):
snr_ref.update(load_csv(fn_snr))
else:
snr_ref = None
self.config["snr_ref"] = snr_ref
def set_report_path(self):
"""Set up report details for this regression: timestamp, filenames."""
# set timestamp
def get_timestamp():
try:
                # in bash: export regression_timestamp=$(date +%Y%m%d_%H%M%S)
                # if set in bash, multiple regressions may share the SAME timestamp and appear in the same report
timestamp = os.environ["regression_timestamp"]
except KeyError:
timezone = pytz.timezone("America/Los_Angeles")
timestamp = datetime.now(timezone).strftime("%Y%m%d_%H%M%S")
return timestamp
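        # e.g., timestamp == "20260128_061604" (Los Angeles local time)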
def patch_report_path():
"""Make CI could specify branch."""
p1 = os.environ.get("REGRESSION_REPORT", None)
if p1 is not None:
p_report_base = Path(p1)
if p_report_base.exists():
# override by shell environ
self.config["path"]["report"] = p_report_base
return p_report_base
return Path(self.config["path"]["report"])
self.config["timestamp"] = self.timestamp = get_timestamp()
run_tag = self.config.get("tag", "kneron_flow")
self.config["run_tag"] = self.run_tag = run_tag
# all configs are in same folder of timestamp.
p_report = patch_report_path() / self.timestamp
p_report.mkdir(parents=True, exist_ok=True)
self.p_report = p_report
self.log_file = p_report / f"{run_tag}_regression.info"
self.logger = futils.create_logger("config", None, self.config["regression"]["logging_level"])
self.logger.info("checking regression configs.")
# this is the report for bit-true-match
self.report_csv = p_report / f"{run_tag}_status.csv"
self.commit_info = []
def get_docker_version(self):
"""As name implied."""
        try:
            hostname = os.environ["HOST_HOSTNAME"]
        except KeyError:
            hostname = os.environ["HOSTNAME"]
p_fn = Path("/workspace/version.txt")
if p_fn.exists():
is_inside_docker = True
with open(str(p_fn), "r") as f:
docker_version = f.readline().strip()
else:
docker_version = "N/A"
is_inside_docker = False
return hostname, is_inside_docker, docker_version
def load_model_id(self):
"""Load predefined model-id for internal use. debug only."""
p_record = Path("/opt/data/model_source/big_model/model_id.pkl")
if p_record.exists():
map_model_id = futils.pkl2df(p_record)
else:
map_model_id = {}
self.config["map_model_id"] = map_model_id
def check_binary_set(self):
"""Sometime we want to specify a folder of prebuild binary to run."""
bin_set = fconsts.BIN_SET
bin_set.update(self.get_regression_scripts())
# bin_set contains more info for internal regression. so put into config/path/binary
def get_compiler_commit_from_print():
"""Get compiler commit by calling the compiler binary itself."""
try:
bin_dir = bin_set["compiler"]["bin_dir"]
lib_dir = bin_set["compiler"]["lib_dir"]
bin_compiler = bin_set["compiler"]["compiler"]
cmd = f"""export COMPILER_BIN_DIR={bin_dir} && \
export LD_LIBRARY_PATH="{lib_dir}:${{LD_LIBRARY_PATH}}" && \
export KNERON_COMPILER_PATH={bin_compiler} && \
{bin_compiler} -v"""
cp = futils.run_bash_script(cmd)
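                # the version line is assumed to end with ": <commit>)",
                # e.g. "... (commit: abc123)" -> "abc123"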
                search = re.compile(r": (.*?)\)$")
                return search.findall(cp.stderr.strip())[0]
            except Exception:
                return "temp"
# outputs
self.config["path"]["binary"] = bin_set
self.commit_info.append(f"{fconsts.bin_msg}\n")
self.config["path"]["use_prebuild"] = fconsts.is_use_prebuild
self.config["path"]["use_piano"] = fconsts.is_use_piano
self.config["path"]["use_toolchain"] = fconsts.is_use_toolchain
        # to be used when calling batch compiler v1
self.config["path"]["compiler_commit"] = get_compiler_commit_from_print()
self.config["path"]["kneron_piano_commit"] = fconsts.PIANO_COMMIT
def check_path_config(self):
"""Find out realpath for case folder.
sometime search in linked folder does not work.
"""
self.config["path"]["cases"] = os.path.realpath(self.config["path"]["cases"])
self.config["path"]["flow"] = self.p_flow
self.config["path"]["regression"] = self.p_regression
self.config["path"]["template"] = self.p_flow / "template"
hostname, is_inside_docker, docker_version = self.get_docker_version()
self.config["path"]["is_inside_docker"] = is_inside_docker
self.config["path"]["toolchain"] = {"version": docker_version}
self.config["path"]["hostname"] = hostname
self.config["path"]["fn_config"] = f"@{hostname}:{self.fn_config_json}"
self.commit_info.append(f"docker, version: {docker_version}\n")
self.commit_info.append(f"""config: {self.config["path"]["fn_config"]}\n""")
self.check_binary_set()
if self.bin_ready:
self.check_binary_exist()
def get_regression_scripts(self):
"""Find internal regression needed scripts."""
d = {}
d1 = {}
d1["dump.py"] = self.p_flow / "util" / "dump_tflite_results.py"
d1["onnxruntime.py"] = self.p_flow / "util" / "dump_onnxruntime_results.py"
d["tflite"] = d1
if self.config["path"]["internal"]:
d2 = {}
d2["nef_client.py"] = self.p_flow / "../nef_utils/inference_client.py"
d["nef"] = d2
else:
d["nef"] = None
return d
def sanity_check(self):
"""Sanity check on misc."""
if "repo" in self.config:
self.logger.error("\"repo\" in config is obsoleted! please remove from your config to avoid confusing.")
def check_binary_exist(self):
"""Check binary of each repo.
as there are different settings, only check the minimum part.
toolchain has only 520/720 related settings.
"""
# knerex
if self.config["module_run"]["piano_knerex"]:
fn_check = self.config["path"]["binary"]["knerex"]["normal"]
assert Path(fn_check).exists(), fn_check
        # dynasty
        # same repo as knerex; it should exist at the same time.
if self.config["module_run"]["piano_dynasty"]:
fn_check = self.config["path"]["binary"]["dynasty"]["binary"]
assert Path(fn_check).exists(), fn_check
# compiler
if self.config["module_run"]["compiler_piano"]:
fn_check = self.config["path"]["binary"]["data_converter"]["v2"]
assert Path(fn_check).exists(), fn_check
for k in ["compiler", "batch_compiler", "gen_py"]:
# NOTE: k = "gen_test_conf_py" is for 630/730 only. so only should exist in compiled-version, not in toolchain
v = self.config["path"]["binary"]["compiler"][k]
# examine compiler / gen_py / batch_compiler
assert Path(v).exists(), v
# TODO: compiler must be compiled as -DCMAKE_BUILD_TYPE=Debug
# csim, different binary for diff platform
if self.config["module_run"]["csim"]:
for hw_code in self.config["hw_mode_on"]:
fn_check = self.config["path"]["binary"]["csim"][hw_code]
assert Path(fn_check).exists(), fn_check
def filter_cases(self, cli_kw=None):
"""Find all test cases, but will filter with ALL keywords.
1. pass in from command line. will be obsoleted
2. put in json: path->search: ["k1", "k2", "k3"]
"""
# TODO: call
keywords = self.config["path"]["search"]
if type(cli_kw) is list:
keywords.extend(cli_kw)
dir_base = Path(self.config["path"]["cases"])
case_selected, case_all, case_success, case_not_success = futils.filter_cases(dir_base, keywords)
self.logger.debug(f"Selected {len(case_selected)}/{len(case_all)} cases.")
if self.config["regression"]["skip_success"]:
            # for cases without an output/success file.
return case_not_success
if self.config["regression"]["only_success"]:
            # for those cases WITH an output/success file (empty though)
            # useful for a follow-up stage like: only_dongle.
return case_success
return case_selected
def update_info(self):
"""Save important information to .info file PER RUN.
This info may be displayed in html reports.
"""
hostname = self.config["path"]["hostname"]
p_tc = self.config["path"]["cases"]
self.commit_info.append(f"test case path: @{hostname}:{p_tc}\n")
fmt = self.config["compiler_piano"]["model_format"]
opt = self.config["compiler_piano"]["model_optimize"]
self.commit_info.append(f"bit-true-match are using: *.{opt}.{fmt}\n")
def write_info(self):
"""Run when regression finish. with run time updated."""
with open(self.log_file, "w") as f:
f.writelines(self.commit_info)
def print(self):
"""Print function for this regression."""
self.logger.info(f"running time tag is: {self.timestamp}/{self.run_tag}")
self.logger.info("test case folder is: {}".format(self.config["path"]["cases"]))
if __name__ == "__main__":
arguments = docopt(__doc__, version="regression 6.0")
print(arguments)