#!/usr/bin/env python3
import argparse
from cmath import nan
import json
import copy
import os
import sys
import subprocess
import shutil
import get_run_val as rval
from enum import Enum, auto

OPT_NMEM_CONFLICT_VAL = False

# used for HAPS or real chip
DRAM_BASE_SHIFT = 0
# DRAM_BASE_SHIFT = 0x60000000


class NoValue(Enum):
    def __repr__(self):
        return '<%s.%s>' % (self.__class__.__name__, self.name)


class CType(NoValue):
    MOZART = "520"
    BEETHOVEN = "720"
    BACH = "530"
    SCHUBERT = "630"
    WAGNER = "730"
    DVORAK = "540"


class Version520(NoValue):
    SINGLE = "single"
    MULTI = "multi"
    MODEL = "model"
    MODEL_OPT = "model_opt"
    MODEL_REL = "model_rel"
    MODEL_DBG = "model_dbg"
    MODEL_DBG_EDA = "model_dbg_eda"


class Version720(NoValue):
    SINGLE = "single"
    MULTI = "multi"
    MODEL = "model"
    MODEL_OPT = "model_opt"
    MODEL_REL = "model_rel"
    MULTI_DBG = "multi_dbg"
    MODEL_DBG = "model_dbg"
    MODEL_DBG_EDA = "model_dbg_eda"
    MODEL_ENC = "model_enc"
    MODEL_CPU = "model_cpu"


class VersionX30(NoValue):
    """Versions for 530, 630 and 730."""
    SINGLE = "single"
    MULTI = "multi"
    MODEL = "model"
    MODEL_OPT = "model_opt"
    MODEL_REL = "model_rel"
    MULTI_DBG = "multi_dbg"
    MODEL_DBG = "model_dbg"
    MODEL_DBG_EDA = "model_dbg_eda"
    MODEL_ENC = "model_enc"
    MODEL_CPU = "model_cpu"


def dbg_print_args():
    bin = os.path.basename(sys.argv[0])
    args = " ".join(sys.argv[1:])
    print(f"=> [{bin}] arguments: {args}")


def load_json_file(path):
    with open(path) as f:
        return json.load(f)


def to_mebibyte(hex_value):
    if hex_value is None:
        return None
    return int(hex_value, 16) / (2 ** 20)


def to_fw_addr(size):
    return f"{hex(size)}00000"


def get_version_cfg(ctype, version):
    bcfg = get_basic_cfg(ctype)
    if ctype == CType.MOZART.value:
        config = get_520_version_cfg(bcfg, version)
    elif ctype == CType.BEETHOVEN.value:
        config = get_720_version_cfg(bcfg, version)
    elif ctype == CType.BACH.value:
        config = get_530_version_cfg(bcfg, version)
    elif ctype == CType.SCHUBERT.value:
        config = get_630_version_cfg(bcfg, version)
    elif ctype == CType.WAGNER.value:
        config = get_730_version_cfg(bcfg, version)
    elif ctype == CType.DVORAK.value:
        config = get_540_version_cfg(bcfg, version)
    return config


def get_basic_cfg(ctype):
    BASIC_CFG_520 = {
        "cmd_addr": "0x000000",
        "input_addr": "0x300000",
        "output_addr": "0x800000",
        "dram_base_addr": "0x1700000",
        "weight_addr": "0x3500000",
        "sram_size": 512,
        "cmd_optimize": False,
        "debug_mode": False,
        "skip_backend": False,
        "hw_sim": True,
        "input_fmt": "seq8",
        "dynamic_fp": True,
        "dynamic_json_path": "XXXXX.json",
    }
    BASIC_CFG_720 = {
        "dram_base_addr": "0x10000",
        "dedicated_outbuf": True,
        "sram_size": 1024,
        "weight_compact": False,
        "weight_compress": False,
        "dynamic_fp": True,
        "dynamic_json_path": "XXXXX.json",
        "skip_backend": False,
        "debug_mode": False,
        "gen_setup_fbs": True,
        "optimize": {
            "cmd_size": False,
            "dma": False,
            "getw": False,
            "pooling": False,
            "parallel": False
        },
    }
    BASIC_CFG_530 = {
        "dram_base_addr": "0x80000000",
        "dedicated_outbuf": True,
        "sram_size": 512,
        "weight_compact": False,
        "dynamic_fp": True,
        "dynamic_json_path": "XXXXX.json",
        "skip_backend": False,
        "debug_mode": False,
        "optimize": {
            "cmd_size": False,
            "dma": False,
            "getw": False,
            "pooling": False,
            "parallel": False
        },
        "enable_ncore": True,
    }
    BASIC_CFG_630 = {
        "dram_base_addr": "0x80000000",
        "dedicated_outbuf": True,
        "sram_size": 256,
        "weight_compact": False,
        "weight_compress": False,
        "dynamic_fp": True,
        "dynamic_json_path": "XXXXX.json",
        "skip_backend": False,
        "debug_mode": False,
        "optimize": {
            "cmd_size": False,
            "dma": False,
            "getw": False,
            "pooling": False,
            "parallel": False,
"nmem_conflict": False }, "enable_ncore": True, } BASIC_CFG_730 = { "dram_base_addr": "0x80000000", "dedicated_outbuf": True, "sram_size": 1024, "weight_compact": False, "weight_compress": False, "dynamic_fp": True, "dynamic_json_path": "XXXXX.json", "skip_backend": False, "debug_mode": False, "optimize": { "cmd_size": False, "dma": False, "getw": False, "pooling": False, "parallel": False, "nmem_conflict": False }, "enable_ncore": True, } BASIC_CFG_540 = { "dram_base_addr": "0x80000000", "dedicated_outbuf": True, "sram_size": 512, "weight_compact": False, "weight_compress": False, "dynamic_fp": True, "dynamic_json_path": "XXXXX.json", "skip_backend": False, "debug_mode": False, "optimize": { "cmd_size": False, "dma": False, "getw": False, "pooling": False, "parallel": False, "nmem_conflict": False }, "enable_ncore": True, } if ctype == CType.MOZART.value: return BASIC_CFG_520 elif ctype == CType.BEETHOVEN.value: return BASIC_CFG_720 elif ctype == CType.BACH.value: return BASIC_CFG_530 elif ctype == CType.SCHUBERT.value: return BASIC_CFG_630 elif ctype == CType.WAGNER.value: return BASIC_CFG_730 elif ctype == CType.DVORAK.value: return BASIC_CFG_540 else: raise ValueError(f"invalid versionctype [{ctype}]") def get_520_version_cfg(basic, version): SINGLE_CFG = merge_dict(basic, { "single_layer_test": True, }) MULTI_CFG = merge_dict(basic, { "single_layer_test": False, }) MODEL_CFG = merge_dict( basic, { # "input_fmt": "seq8", "input_fmt": "rgba8", }) MODEL_OPT_CFG = merge_dict(MODEL_CFG, {"cmd_optimize": True}) MODEL_REL_CFG = merge_dict( MODEL_OPT_CFG, { "dram_base_addr" : "0x60000000", "input_fmt": "rgba8", "hw_sim": False, }, ) MODEL_DBG_CFG = merge_dict(MODEL_CFG, { "debug_mode": True, }) MODEL_DBG_EDA_CFG = MODEL_DBG_CFG if version == "single": return SINGLE_CFG elif version == "multi": return MULTI_CFG elif version == "model": return MODEL_CFG elif version == "model_opt": return MODEL_OPT_CFG elif version == "model_rel": return MODEL_REL_CFG elif version == "model_dbg" or version == "model_dbg_eda": return MODEL_DBG_CFG else: raise ValueError(f"invalid version [{version}]") def get_720_version_cfg(basic, version): SINGLE_CFG = merge_dict(basic, { "single_layer_test": True }) MULTI_CFG = merge_dict(basic, {"optimize": { "getw": True, "parallel": True }}) MODEL_CFG = merge_dict( basic, { "optimize": { "dma": True, "getw": True, "pooling": True, "parallel": True } }) MODEL_OPT_CFG = merge_dict(MODEL_CFG, {"optimize": { "cmd_size": True }}) MODEL_REL_CFG = merge_dict(MODEL_OPT_CFG, { "dram_base_addr": "0x80220000", # = 2050.125MiB }) MULTI_DBG_CFG = merge_dict(MULTI_CFG, { "debug_mode": True, }) MODEL_DBG_CFG = merge_dict(MODEL_CFG, { "debug_mode": True, }) MODEL_DBG_EDA_CFG = merge_dict(MODEL_CFG, {"optimize": { "getw": False, }}) MODEL_ENC_CFG = merge_dict( MODEL_CFG, { "weight_compress": True, "encryption_flag": True, "encryption_key": "0x1234", # any file with size > 256 bytes can be used "encryption_file": os.path.realpath(__file__), }) if version == "single": return SINGLE_CFG elif version == "multi": return MULTI_CFG elif version == "model": return MODEL_CFG elif version == "model_opt" or version == "model_cpu": return MODEL_OPT_CFG elif version == "model_rel": return MODEL_REL_CFG elif version == "multi_dbg": return MULTI_DBG_CFG elif version == "model_dbg": return MODEL_DBG_CFG elif version == "model_dbg_eda": return MODEL_DBG_EDA_CFG elif version == "model_enc": return MODEL_ENC_CFG else: raise ValueError(f"invalid version [{version}]") def get_530_version_cfg(basic, version): 
def get_530_version_cfg(basic, version):
    SINGLE_CFG = merge_dict(basic, {
        "single_layer_test": True,
        "enable_ncore": False
    })
    MULTI_CFG = merge_dict(basic, {"optimize": {
        "getw": True,
        "pooling": True,
        "parallel": True
    }})
    MODEL_CFG = merge_dict(
        basic, {
            "optimize": {
                "dma": True,
                "getw": True,
                "pooling": True,
                "parallel": True
            }
        })
    MULTI_DBG_CFG = merge_dict(MULTI_CFG, {
        "debug_mode": True,
    })
    MODEL_DBG_CFG = merge_dict(
        MODEL_CFG, {
            "debug_mode": True,
        })
    MODEL_DBG_EDA_CFG = merge_dict(MODEL_CFG, {"optimize": {
        "getw": False,
    }})
    MODEL_OPT_CFG = merge_dict(MODEL_CFG, {"optimize": {"cmd_size": True}})
    MODEL_REL_CFG = merge_dict(MODEL_OPT_CFG, {
        "dram_base_addr": "0x80220000",
    })
    MODEL_ENC_CFG = merge_dict(
        MODEL_CFG, {
            "encryption_flag": True,
            "encryption_key": "0x1234",
            "encryption_file": "/home/rick/working/kdp720_compiler/resource/keys.bin",
        })

    if version == "single":
        return SINGLE_CFG
    elif version == "multi":
        return MULTI_CFG
    elif version == "model":
        return MODEL_CFG
    elif version == "model_opt" or version == "model_cpu":
        return MODEL_OPT_CFG
    elif version == "model_rel":
        return MODEL_REL_CFG
    elif version == "multi_dbg":
        return MULTI_DBG_CFG
    elif version == "model_dbg":
        return MODEL_DBG_CFG
    elif version == "model_dbg_eda":
        return MODEL_DBG_EDA_CFG
    elif version == "model_enc":
        return MODEL_ENC_CFG
    else:
        raise ValueError(f"invalid version [{version}]")


def get_630_version_cfg(basic, version):
    basic = update_run_val(basic)
    SINGLE_CFG = merge_dict(basic, {
        "single_layer_test": True,
        "enable_ncore": False
    })
    MULTI_CFG = merge_dict(basic, {"optimize": {
        "getw": True,
        "pooling": True,
        "parallel": True,
        "nmem_conflict": OPT_NMEM_CONFLICT_VAL,
    }})
    MODEL_CFG = merge_dict(basic, {
        "optimize": {
            "dma": True,
            "getw": True,
            "pooling": True,
            "parallel": True,
            "nmem_conflict": OPT_NMEM_CONFLICT_VAL,
        }
    })
    MULTI_DBG_CFG = merge_dict(MULTI_CFG, {
        "debug_mode": True,
    })
    MODEL_DBG_CFG = merge_dict(MODEL_CFG, {
        "debug_mode": True,
        # yolov3_keras_3c_608w_608h: needs large data_buffer
    })
    MODEL_DBG_EDA_CFG = merge_dict(MODEL_CFG, {
        "optimize": {
            "getw": False,
        }
    })
    MODEL_OPT_CFG = merge_dict(MODEL_CFG, {
        "optimize": {
            "cmd_size": True
        }
    })
    MODEL_REL_CFG = merge_dict(MODEL_OPT_CFG, {
        "dram_base_addr": "0x2000000",
    })
    MODEL_ENC_CFG = merge_dict(MODEL_CFG, {
        "weight_compress": True,
        "encryption_flag": True,
        "encryption_key": "0x1234",
        "encryption_file": "/home/rick/working/kdp720_compiler/resource/keys.bin",
    })

    if version == "single":
        return SINGLE_CFG
    elif version == "multi":
        return MULTI_CFG
    elif version == "model":
        return MODEL_CFG
    elif version == "model_opt" or version == "model_cpu":
        return MODEL_OPT_CFG
    elif version == "model_rel":
        return MODEL_REL_CFG
    elif version == "multi_dbg":
        return MULTI_DBG_CFG
    elif version == "model_dbg":
        return MODEL_DBG_CFG
    elif version == "model_dbg_eda":
        return MODEL_DBG_EDA_CFG
    elif version == "model_enc":
        return MODEL_ENC_CFG
    else:
        raise ValueError(f"invalid version [{version}]")
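# --------------------------------------------------------------------------
# Illustrative sketch (added comment, not original): the 530/630/730/540
# helpers share the same version names; 630, 730 and 540 additionally fold
# the result of rval.get_run_val() into the basic config via
# update_run_val().  Assuming this file is importable as gen_config:
#
#   >>> cfg = gen_config.get_version_cfg("530", "model_opt")
#   >>> sorted(k for k, v in cfg["optimize"].items() if v)
#   ['cmd_size', 'dma', 'getw', 'parallel', 'pooling']
#
# The "530" case is used here because it does not pull board-specific values
# from the sibling get_run_val module.
# --------------------------------------------------------------------------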
def get_730_version_cfg(basic, version):
    basic = update_run_val(basic)
    SINGLE_CFG = merge_dict(basic, {
        "single_layer_test": True,
        "enable_ncore": False
    })
    MULTI_CFG = merge_dict(basic, {
        "optimize": {
            "getw": True,
            "pooling": True,
            "parallel": True,
            "nmem_conflict": OPT_NMEM_CONFLICT_VAL,
        }})
    MODEL_CFG = merge_dict(basic, {
        "optimize": {
            "dma": True,
            "getw": True,
            "pooling": True,
            "parallel": True,
            "nmem_conflict": OPT_NMEM_CONFLICT_VAL,
        }
    })
    MULTI_DBG_CFG = merge_dict(MULTI_CFG, {
        "debug_mode": True,
    })
    MODEL_DBG_CFG = merge_dict(MODEL_CFG, {
        "debug_mode": True,
    })
    MODEL_DBG_EDA_CFG = merge_dict(MODEL_CFG, {"optimize": {
        "getw": False,
    }})
    MODEL_OPT_CFG = merge_dict(MODEL_CFG, {"optimize": {
        "cmd_size": True
    }})
    MODEL_REL_CFG = merge_dict(MODEL_OPT_CFG, {})
    MODEL_ENC_CFG = merge_dict(MODEL_CFG, {
        "weight_compress": True,
        "encryption_flag": True,
        "encryption_key": "0x1234",
        "encryption_file": "/home/rick/working/kdp720_compiler/resource/keys.bin",
    })

    if version == "single":
        return SINGLE_CFG
    elif version == "multi":
        return MULTI_CFG
    elif version == "model":
        return MODEL_CFG
    elif version == "model_opt" or version == "model_cpu":
        return MODEL_OPT_CFG
    elif version == "model_rel":
        return MODEL_REL_CFG
    elif version == "multi_dbg":
        return MULTI_DBG_CFG
    elif version == "model_dbg":
        return MODEL_DBG_CFG
    elif version == "model_dbg_eda":
        return MODEL_DBG_EDA_CFG
    elif version == "model_enc":
        return MODEL_ENC_CFG
    else:
        raise ValueError(f"invalid version [{version}]")


def get_540_version_cfg(basic, version):
    basic = update_run_val(basic)
    SINGLE_CFG = merge_dict(basic, {
        "single_layer_test": True,
        "enable_ncore": False
    })
    MULTI_CFG = merge_dict(
        basic, {"optimize": {
            "getw": True,
            "pooling": True,
            "parallel": True,
            "nmem_conflict": OPT_NMEM_CONFLICT_VAL,
        }})
    MODEL_CFG = merge_dict(
        basic, {
            "optimize": {
                "dma": True,
                "getw": True,
                "pooling": True,
                "parallel": True,
                "nmem_conflict": OPT_NMEM_CONFLICT_VAL,
            }
        })
    MULTI_DBG_CFG = merge_dict(
        MULTI_CFG, {
            "debug_mode": True,
        })
    MODEL_DBG_CFG = merge_dict(
        MODEL_CFG, {
            "debug_mode": True,
        })
    MODEL_DBG_EDA_CFG = merge_dict(MODEL_CFG, {"optimize": {
        "getw": False,
    }})
    MODEL_OPT_CFG = merge_dict(MODEL_CFG, {"optimize": {
        "cmd_size": True
    }})
    MODEL_REL_CFG = merge_dict(MODEL_OPT_CFG, {
        "dram_base_addr": "0x04000000",
    })
    MODEL_ENC_CFG = merge_dict(
        MODEL_CFG, {
            "weight_compress": True,
            "encryption_flag": True,
            "encryption_key": "0x1234",
            "encryption_file": "/home/rick/working/kdp720_compiler/resource/keys.bin",
        })
    MODEL_CPU_CFG = merge_dict(MODEL_OPT_CFG, {
        "dram_base_addr": "0x20000000"
    })

    if version == "single":
        return SINGLE_CFG
    elif version == "multi":
        return MULTI_CFG
    elif version == "model":
        return MODEL_CFG
    elif version == "model_opt":
        return MODEL_OPT_CFG
    elif version == "model_rel":
        return MODEL_REL_CFG
    elif version == "multi_dbg":
        return MULTI_DBG_CFG
    elif version == "model_dbg":
        return MODEL_DBG_CFG
    elif version == "model_dbg_eda":
        return MODEL_DBG_EDA_CFG
    elif version == "model_enc":
        return MODEL_ENC_CFG
    elif version == "model_cpu":
        return MODEL_CPU_CFG
    else:
        raise ValueError(f"invalid version [{version}]")


def update_run_val(config):
    run_val = rval.get_run_val()
    return merge_dict(
        config, {
            "run_val": run_val,
        })


def get_add_cfg(add_json):
    cfg = {}
    if add_json:
        cfg = json.loads(add_json)
    return cfg


def get_hack_cfg(hack_cfg):
    cfg = {}
    if hack_cfg:
        cfg["hack_cfg"] = os.path.abspath(hack_cfg)
        hcfg = load_json_file(hack_cfg)
        if "input_fmt" in hcfg:
            cfg["input_fmt"] = hcfg["input_fmt"]
        if "output_fmt" in hcfg:
            cfg["output_fmt"] = hcfg["output_fmt"]
        if "pool_opt" in hcfg:
            if "optimize" not in cfg:
                cfg["optimize"] = {}
            cfg["optimize"]["pooling"] = hcfg["pool_opt"] == 1
    return cfg


def get_radix_cfg(radix_cfg):
    cfg = {}
    if radix_cfg is None:
        cfg["dynamic_fp"] = False
    else:
        if not os.path.isfile(radix_cfg):
            raise FileNotFoundError(radix_cfg)
        cfg = {
            "dynamic_fp": True,
            "dynamic_json_path": os.path.abspath(radix_cfg)
        }
    return cfg
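# --------------------------------------------------------------------------
# Illustrative sketch (added comment, not original): delete_elements() and
# merge_dict() below are the building blocks for all of the config layering
# above.  merge_dict() deep-copies the base and merges nested dicts key by
# key, with the additional dict taking precedence:
#
#   >>> merge_dict({"optimize": {"dma": False, "getw": False}},
#   ...            {"optimize": {"getw": True}})
#   {'optimize': {'dma': False, 'getw': True}}
#
# delete_elements() removes the listed keys (from dicts) or items (from
# lists) before the merge, recursing into nested containers for dict-shaped
# entries in keys_to_delete.
# --------------------------------------------------------------------------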
def delete_elements(data, elements_to_delete=[]):
    for element in elements_to_delete:
        if isinstance(element, list):
            delete_elements(data, element)
            continue
        elif isinstance(element, dict):
            for key, val in element.items():
                delete_elements(data[key], [val])
            continue
        if element not in data:
            continue
        if isinstance(data, list):
            data.remove(element)
        elif isinstance(data, dict):
            del data[element]


def merge_dict(base_dictionary, additional_dictionary, keys_to_delete=[]):
    if additional_dictionary is None:
        return base_dictionary

    result = copy.deepcopy(base_dictionary)
    delete_elements(result, keys_to_delete)

    # merges additional_dictionary into result
    for key in additional_dictionary:
        if key not in result:
            result[key] = additional_dictionary[key]
            continue
        if isinstance(result[key], dict) and isinstance(additional_dictionary[key], dict):
            result[key] = merge_dict(result[key], additional_dictionary[key])
            continue
        result[key] = additional_dictionary[key]
    return result


def env_set_cfg(cfg, ctype):
    if os.getenv("RUN_IP_EVAL") == "1":
        add_ip_eval_cfg(cfg, ctype)
    if os.getenv("WT_COMPRESS") == "1":
        cfg["weight_compress"] = True
    return cfg


def add_ip_eval_cfg(cfg, ctype):
    key = "ip_evaluator_cfg"
    if key not in cfg:
        cfg[key] = get_ip_eval_cfg(ctype)
        return True
    return False


def remove_ip_eval_cfg(cfg):
    key = "ip_evaluator_cfg"
    if key in cfg:
        del cfg[key]


def get_ip_eval_cfg(ctype):
    fpath = f"ip_eval/ip_eval_{ctype}.json"
    return get_resource_fpath(fpath)


def get_resource_fpath(rel_path):
    # /home/kai/compiler/test/gen_rtl_test/utils/gen_config.py
    script_path = os.path.realpath(__file__)
    proj_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(script_path))))
    return os.path.join(proj_dir, "resource", rel_path)


def hack_dram_addr(cfg):
    if DRAM_BASE_SHIFT <= 0:
        return cfg

    print(f"Hack dram addr config by adding \"0x{DRAM_BASE_SHIFT:x}\":", file=sys.stderr)
    for key, val in cfg.items():
        if key[-5:] == "_addr":
            addr = hex(DRAM_BASE_SHIFT + int(val, 16))
            print(f"  {key}: {val} => {addr}", file=sys.stderr)
            # write the shifted address back so the hack actually takes effect
            cfg[key] = addr
    print("", file=sys.stderr)
    return cfg


def save_config(path, cfg):
    with open(path, "w") as fp:
        json.dump(cfg, fp, indent=4, sort_keys=True)
        fp.write('\n')
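# --------------------------------------------------------------------------
# Illustrative sketch (added comment, not original): gen_best_config() below
# shells out to opt_compile.py with the arguments
#
#   opt_compile.py <ctype> <model> <out_dir>/tmp_cfg.json <out_dir>/opt_compile
#
# e.g. (hypothetical paths):
#
#   opt_compile.py 720 /path/to/model ./out/tmp_cfg.json ./out/opt_compile
#
# COMPILER_BIN_DIR is used and checked inside opt_compile.py itself, while
# OPT_COMPILE_DIR optionally points at a non-default opt_compile.py location.
# --------------------------------------------------------------------------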
# use opt_compile.py to find best compiler config
def gen_best_config(ctype, opt_model, cfg, cfg_path):
    def get_script_path():
        opt_compile_dir = os.getenv("OPT_COMPILE_DIR")
        if opt_compile_dir and os.path.exists(opt_compile_dir):
            opt_compile = os.path.join(opt_compile_dir, 'opt_compile.py')
        else:
            # get opt_compile.py in default path. ex: compiler/test/gen_rtl_test/utils/gen_config.py
            script_path = os.path.realpath(__file__)
            proj_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(script_path))))
            opt_compile = os.path.join(proj_dir, 'bin/opt_compile/opt_compile.py')
        assert os.path.exists(
            opt_compile
        ), f"opt_compile.py [{opt_compile}] doesn't exist ! please export OPT_COMPILE_DIR=XXXX"
        return opt_compile

    if not opt_model:
        return 0

    print('=> [opt_compile]')

    # env [COMPILER_BIN_DIR] is used and checked in opt_compile.py
    assert os.path.exists(opt_model), f"opt_model [{opt_model}] doesn't exist"
    if ctype not in ['720', '530', '730', '630', '540']:
        print('[Not support] only support for 720|530|730|630|540 !')
        return -1

    # create tmp_cfg, and add ip_evaluator_cfg key
    out_dir = os.path.dirname(os.path.realpath(cfg_path))
    tmp_cfg = copy.deepcopy(cfg)
    is_add_ip_eval = add_ip_eval_cfg(tmp_cfg, ctype)
    tmp_cfg_path = os.path.join(out_dir, 'tmp_cfg.json')
    save_config(tmp_cfg_path, tmp_cfg)

    # run opt_compile.py
    script = get_script_path()
    tmp_out_dir = os.path.join(out_dir, 'opt_compile')
    command = f"{script} {ctype} {opt_model} {tmp_cfg_path} {tmp_out_dir}"
    print(command)
    cmd_args = command.split()
    o = subprocess.run(cmd_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    # export opt.log, compile.log and image_cut_search/Summary.txt
    opt_log = f"{tmp_out_dir}/opt.log"
    if os.path.exists(opt_log):
        shutil.copy(opt_log, out_dir, follow_symlinks=True)
    img_cut_search_summary = f"{tmp_out_dir}/image_cut_search/Summary.txt"
    if os.path.exists(img_cut_search_summary):
        shutil.copy(img_cut_search_summary, out_dir + "/summary_image_cut_search.txt", follow_symlinks=True)
    compiler_log = f"{tmp_out_dir}/image_cut_search/compile.log"
    if os.path.exists(compiler_log):
        shutil.copy(compiler_log, out_dir + "/opt_compile.log", follow_symlinks=True)

    if o.returncode != 0:
        print('[Failed] failed to run opt_compile !')
        shutil.rmtree(tmp_out_dir)
        sys.exit(o.returncode)

    # overwrite cfg_path with best_cfg
    best_cfg_path = os.path.join(tmp_out_dir, 'best_config.json')
    assert os.path.exists(best_cfg_path), "best_config.json doesn't exist"
    best_cfg = load_json_file(best_cfg_path)

    # module dependency: "image_cut_search" sub-module
    # copy image_cut_search config file to the directory where compiler config lives
    if 'fmap_cut' in best_cfg and best_cfg['fmap_cut']['mode'] == 'manual':
        org_fmap_cut_cfg = best_cfg['fmap_cut']['cfg']
        new_fmap_cut_cfg = os.path.join(out_dir, 'best_image_cut_config.json')
        shutil.copyfile(org_fmap_cut_cfg, new_fmap_cut_cfg)
        best_cfg['fmap_cut']['cfg'] = new_fmap_cut_cfg

    if is_add_ip_eval:
        remove_ip_eval_cfg(best_cfg)
    save_config(cfg_path, best_cfg)
    shutil.rmtree(tmp_out_dir)
    return 0


def gen_all():
    versions = {
        CType.MOZART: Version520,
        CType.BEETHOVEN: Version720,
        CType.BACH: VersionX30,
        CType.SCHUBERT: VersionX30,
        CType.WAGNER: VersionX30,
        # 540 supports the same version set as the x30 family
        # (see get_540_version_cfg)
        CType.DVORAK: VersionX30,
    }
    for ctype in versions:
        output_directory = f"./{ctype.value}"
        if not os.path.exists(output_directory):
            os.makedirs(output_directory, exist_ok=True)
        for version in versions[ctype]:
            config = get_version_cfg(ctype.value, version.value)
            file_name = f"config_{ctype.value}_{version.value}.json"
            config_path = f"{output_directory}/{file_name}"
            save_config(config_path, config)
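# --------------------------------------------------------------------------
# Illustrative usage (added comment, not original; file and path names are
# hypothetical, the flags come from the argument parser below):
#
#   # generate the default "model" config for the 720 compiler
#   ./gen_config.py -t 720 -v model -o config.json
#
#   # same idea, but overlay a radix config and an inline JSON override
#   ./gen_config.py -t 530 -v model_opt -r radix.json \
#       -a '{"single_layer_test":true}' -o config.json
#
#   # dump every ctype/version combination for inspection
#   ./gen_config.py -g
# --------------------------------------------------------------------------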
"--radix_cfg", help="path to radix config") parser.add_argument("-k", "--hack_cfg", help="path to hack config") parser.add_argument("-a", "--add_json", action='append', help="additional config in json str format, e.g. '{\"single_layer_test\":true,\"hstride\":true}'") parser.add_argument( "-m", "--opt_model", help= "model path. run opt_compile to find best compiler config. please export COMPILER_BIN_DIR=XXXX to specify compiler bin dir, and export OPT_COMPILE_DIR=XXXX to specify opt compile bin dir." ) parser.add_argument("-g", "--gen-all-version", action="store_true", help="Generate basic configs for all version with all ctype to verify or to debug.") # dbg_print_args() args = parser.parse_args() if args.gen_all_version: gen_all() print("Generation done.") sys.exit() ctype = args.ctype tmpl_cfg = args.tmpl_cfg if tmpl_cfg: cfg = load_json_file(tmpl_cfg) else: cfg = get_version_cfg(ctype, args.version) radix_cfg = get_radix_cfg(args.radix_cfg) cfg = merge_dict(cfg, radix_cfg) hack_cfg = get_hack_cfg(args.hack_cfg) cfg = merge_dict(cfg, hack_cfg) if args.add_json: for js in args.add_json: add_cfg = get_add_cfg(js) cfg = merge_dict(cfg, add_cfg) cfg = env_set_cfg(cfg, ctype) cfg = hack_dram_addr(cfg) # save to file cfg_path = args.output save_config(cfg_path, cfg) # find best compiler config and replace the old one if 0 != gen_best_config(ctype, args.opt_model, cfg, cfg_path): sys.exit(-1)