"""
|
|
Runs an image through an entire testing flow.
|
|
"""
|
|
import argparse
|
|
import importlib
|
|
import json
|
|
import math
|
|
import multiprocessing as mp
|
|
import pathlib
|
|
import random
|
|
import re
|
|
import sys
|
|
import traceback
|
|
import os
|
|
from typing import Callable, List, Mapping, Optional, Tuple, Union
|
|
|
|
import tqdm
|
|
|
|
from python_flow.common import config
|
|
from python_flow.common import exceptions
|
|
from python_flow.common import hidden_print as hp
|
|
from python_flow.internal import internal
|
|
from python_flow.utils import utils
|
|
|
|
from python_flow.dongle import dongle_config
|
|
|
|
DEFAULT_OPT = "app/template_app/example_options.json"
|
|
CLEAR_OPT = ["all_x_result", "before", "none"]
|
|
FORMAT_OPT = ["ALL", "INF", "NIR", "RGB"]
|
|
INPUT_OPT = ["binary", "bin_txt", "image", "image_txt"]
|
|
# 0 - channel last, 1 - onnx shape
|
|
INPUT_SHAPE_OPT = [0, 1]
|
|
PLATFORM_OPT = ["alg", "sys520", "sys530", "sys540", "sys630", "sys720", "sys730"]
|
|
|
|
BIN_EXT = ["*.bin"]
|
|
IMAGE_EXT = ["*.bmp", "*.BMP", "*.jpeg", "*.JPEG",
|
|
"*.jpg", "*.JPG", "*.png", "*.PNG",
|
|
"*.ppm", "*.PPM", "*.wav", "*.WAV"]
|
|
|
|
LOG_FOLDER = ".hidden/"


def sorted_nicely(my_list: List[Union[List[str], Mapping]],
                  dict_key: str = "") -> List[Union[List[str], Mapping]]:
    """Sort the input image list in the way humans expect.

    Assumes my_list is a list of lists of strings if dict_key is not specified;
    otherwise, assumes my_list is a list of dictionaries.

    https://stackoverflow.com/questions/2669059/how-to-sort-alpha-numeric-set-in-python

    Arguments:
        my_list: List of lists of strings or list of dictionaries.
        dict_key: String key of the dictionary whose values will be sorted on.

    Returns:
        A list with the same data but sorted in an alphanumeric way.
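
    Example (illustrative): numeric runs compare as integers, so "img2" sorts
    before "img10":

        >>> sorted_nicely([["img10.png"], ["img2.png"], ["img1.png"]])
        [['img1.png'], ['img2.png'], ['img10.png']]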
    """
    convert = lambda text: int(text) if text.isdigit() else text
    if dict_key == "":
        alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key[0])]
    else:
        if isinstance(my_list[0][dict_key], list):
            alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key[dict_key][0])]
        else:
            alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key[dict_key])]
    return sorted(my_list, key=alphanum_key)


def setup_user_flow(app: pathlib.Path, options: Mapping,
                    input_image_list: bool) -> Tuple[Callable, Mapping]:
    """Get the test to run and the config from the user's flow.

    Arguments:
        app: Pathlib Path to the folder of the application to run.
        options: Mapping of strings to user parsed options.
        input_image_list: Flag indicating if an input image JSON is provided.

    Returns:
        A tuple of size 2 with the first value being the user's application function to
        test and the second value being a mapping where the keys are the same as the keys
        defined in the "user_config" dictionary in the user's flow.py and each value is a
        TestConfig object that holds the user's input config and options.
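
    Example:
        A minimal sketch of the flow.py contract assumed here; the "application"
        function name and the JSON path are hypothetical:

            # flow.py at the root of the app folder
            user_config = {"model": "config/{platform}/model.json"}

            def application(paths, config):
                ...  # test one input; optionally return a result mapping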
    """
    try:
        # additional flows should be named in the format flow_xxx.py
        flow_module = "_".join(["flow", str(options["flow"])])
        if options["flow"] is None:  # default will be 'flow.py'
            flow_module = "flow"
        flow = importlib.import_module(flow_module)
        my_test = getattr(flow, options["test"])
        my_config = getattr(flow, "user_config")
    except AttributeError as error:
        sys.exit(f"{error}\nPlease check your input test function or flow.py from {app}...")
    except ModuleNotFoundError as error:
        sys.exit(f"{error}\nError trying to import flow.py from {app}...")

    test_config = setup_config(my_config, app, options, input_image_list)

    # only load the runner if the team is using it
    any_config = list(test_config.items())[0][1].config
    if "pre_mode" in any_config["pre"] and any_config["pre"]["pre_mode"] != "":
        internal.setup_team_runner(test_config, app)

    if options["debug"]:
        test_config["debug"] = options["debug"]

    # load the bin info once if specified
    if ((options["inputs"] == "binary" or options["inputs"] == "bin_txt") and
            options["bin_input"] is not None):
        try:
            with open(options["bin_input"]) as bin_info:
                data = json.load(bin_info)
            test_config["bin_input"] = data
        except FileNotFoundError:
            sys.exit(f"{options['bin_input']} does not exist... Please check your -b option path.")
        except json.JSONDecodeError:
            sys.exit(f"Error parsing {options['bin_input']}... Please check the file contents.")

    return my_test, test_config


def setup_config(user_config: Mapping, app: pathlib.Path, options: Mapping,
                 input_image_list: bool) -> Mapping:
    """Initialize a TestConfig object for each given JSON path.

    Arguments:
        user_config: Mapping taken from the "user_config" dictionary in the user's flow.py.
        app: Pathlib Path to the folder of the application to run.
        options: Mapping of strings to user parsed options.
        input_image_list: Flag indicating if an input image JSON is provided.

    Returns:
        A mapping where the keys are the same as the keys defined in the "user_config"
        dictionary in the user's flow.py and each value is a TestConfig object that holds
        the user's input config and options.
    """
    test_config = {}
    for key, data_path in user_config.items():
        if options["platform"] is not None:  # runner subfolder support
            data_path = data_path.replace("{platform}", options["platform"])
        json_file = app / data_path
        try:
            with open(json_file) as data_file:
                data = json.load(data_file)
            test_config[key] = config.TestConfig(data, options, json_file, input_image_list)
        except exceptions.ConfigError as error:
            sys.exit(error)
        except FileNotFoundError:
            sys.exit(f"{json_file} does not exist... Check your user_config in flow.py.")
        except json.JSONDecodeError:
            traceback.print_exc()
            sys.exit(f"Error parsing {json_file}")
        if options["clear"] != "none":
            test_config[key].clear_results(True)

    # outside the loop so the notice prints only once
    if options["toolchain"] != "current":
        print(f"--------------------------------------------------\n"
              f"Running {options['toolchain']} libraries for this test")

    return test_config


def setup_parser(test_args: Optional[List]) -> argparse.Namespace:
    """Set up the command line parser.

    If test_args is not provided, the parser will grab arguments from the command line.

    Arguments:
        test_args: List of values to be used as parser arguments.

    Returns:
        An argparse.Namespace object that has all of the provided arguments as attributes.
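
    Example:
        An illustrative invocation, assuming this module is run as a script named
        run.py (the script name and paths are hypothetical):

            python run.py app/my_app datasets/val application -f RGB -w 4 -s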
    """
    parser = argparse.ArgumentParser(description="Runs through a solution provided a dataset.",
                                     formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("app_folder", help="directory of your application")
    parser.add_argument("image_directory", help="directory of images to test")
    parser.add_argument("test", help="type of test to be run, should be one of the "
                        "functions in your app's flow.py")

    parser.add_argument("-j", "--json_options", help="JSON file holding the command line options"
                        "\nNOTE: any options provided will overwrite the options in the JSON file")

    parser.add_argument("-b", "--bin_input", help="JSON file to specify the color format and "
                        "dimensions of the binary inputs")
    parser.add_argument("-c", "--clear", default="before", choices=CLEAR_OPT,
                        help="when to clear CSIM or Dynasty dumps, default: 'before'")
    parser.add_argument("--cuda", action="store_true", help="use the CUDA compiled Dynasty "
                        "library, only works if you have CUDA installed on your device")
    parser.add_argument("--debug", action="store_true", help="enable the debug flag for the runners")
    parser.add_argument("-d", "--debug_print", action="store_true", help="flag to enable prints")
    parser.add_argument("--dump", action="store_true",
                        help="flag to dump intermediate node outputs for the simulator")
    parser.add_argument("-f", "--fmt", default="ALL", choices=FORMAT_OPT,
                        help="format of the input images to test, default: 'ALL'")
    parser.add_argument("-fl", "--flow", help="suffix of the flow.py to run")
    parser.add_argument("--fusion", action="store_true", help="flag to enable fusion test input")
    parser.add_argument("-il", "--image_json", help="JSON file holding all of the test "
                        "images\nNOTE: using this option will ignore the provided image directory")
    parser.add_argument("-i", "--inputs", default="image", choices=INPUT_OPT,
                        help="type of inputs to be tested, default: 'image'")
    parser.add_argument("--input_shape", default=0, type=int, choices=INPUT_SHAPE_OPT,
                        help="shape of the input data from preprocess, default: 0"
                             "\n(0) channel last\n(1) ONNX shape\n")
    parser.add_argument("-is", "--input_single", nargs="*", help="list of files to test one "
                        "singular input, mostly applicable when using the run_solution wrapper")
    parser.add_argument("-n", "--num_images", default=0, type=int, help="number of images to test")
    parser.add_argument("--ort", action="store_true", help="use the onnxruntime Dynasty inferencer")
    parser.add_argument("-p", "--platform", choices=PLATFORM_OPT,
                        help="platform to use for the input JSON subfolder, only internal runners "
                             "are affected, default will use no subfolder")
    parser.add_argument("-pv", "--platform_version", default=0, type=int, help="which version of "
                        "a specific platform to use")
    parser.add_argument("-r", "--resume", action="store_true",
                        help="flag to resume the dataset from where it left off previously")
    # used in runner.py to dump images
    parser.add_argument("--runner_dump", nargs="*", help="name of runners to dump results")
    parser.add_argument("--rgba", action="store_true",
                        help="flag to dump the preprocessed RGBA binary")
    parser.add_argument("-s", "--sort", action="store_true",
                        help="sort the images in alphanumerical order")
    parser.add_argument("-tc", "--toolchain", default="current",
                        help="version of the CSIM and Dynasty libraries to run, "
                             "NOTE: unused after the 0.21.0 API change")
    parser.add_argument("-v", "--video", nargs=3, default=["none", 60, 60],
                        help="parameters to help drop images for video\n(1) 'excess', 'randomly', "
                             "or 'periodically'\n(2) app fps\n(3) video fps")
    parser.add_argument("-w", "--workers", default=1, type=int,
                        help="number of worker processes to run, default: 1")

    # the following are used to run inference on the dongle
    parser.add_argument("-g", "--group", default=None, type=str,
                        help="group using this E2E platform (CI/SYS/ALGO)")
    parser.add_argument("-in", "--num_inference", default=1, type=int,
                        help="number of devices (520/530/720) to run inference, default: 1")
    parser.add_argument("-dfix", "--dongle_fixed_mode", action="store_true",
                        help="flag to have the dongle return fixed-point results and convert them "
                             "to float using the CSIM utility function")
    parser.add_argument("-ddebug", "--dongle_debug_mode", action="store_true", help="flag to "
                        "enable dongle debug mode to dump intermediate inference results")

    return parser.parse_args(test_args)


def parse_json_options(json_file: str, parsed_options: Mapping) -> Mapping:
    """Parse command line options from the input JSON file.

    Options explicitly set on the command line overwrite the ones set in json_file.

    Arguments:
        json_file: String path to the JSON with the command line configurations.
        parsed_options: Mapping of the already parsed options provided by the user.

    Returns:
        A mapping of the options provided in the json_file, with any options explicitly
        set on the command line overwritten by the values from parsed_options.
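
    Example:
        An illustrative options file; the keys mirror the command line option names
        and the values shown are hypothetical:

            {"clear": "none", "fmt": "RGB", "inputs": "image", "workers": 4}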
    """
    json_file = pathlib.Path(json_file).resolve()
    if not json_file.exists():
        sys.exit(f"JSON options {json_file} does not exist... Please check the -j option.")

    with open(DEFAULT_OPT) as default_json:
        json_data = json.load(default_json)

    # check for options set on the command line that should override the ones in the config
    overwritten_keys = set()
    keys_with_defaults = {
        "-c": "clear", "--clear": "clear",
        "-f": "fmt", "--fmt": "fmt",
        "-i": "inputs", "--inputs": "inputs",
        "--input_shape": "input_shape",
        "-n": "num_images", "--num_images": "num_images",
        "-p": "platform", "--platform": "platform",
        "-pv": "platform_version", "--platform_version": "platform_version",
        "-v": "video", "--video": "video",
        "-w": "workers", "--workers": "workers",
        "-g": "group", "--group": "group",
        "-in": "num_inference", "--num_inference": "num_inference"
    }
    for key in keys_with_defaults:
        if key in sys.argv[1:]:
            overwritten_keys.add(keys_with_defaults[key])
    for key in json_data:
        if json_data[key] != parsed_options[key]:
            overwritten_keys.add(key)

    with open(json_file) as json_config:
        custom_data = json.load(json_config)

    json_data.update(custom_data)
    # validate the parameters that have a limited set of options
    if json_data["clear"] not in CLEAR_OPT:
        sys.exit(f"'clear' parameter should be one of {CLEAR_OPT}. Please check {json_file}")

    if json_data["fmt"] not in FORMAT_OPT:
        sys.exit(f"'fmt' parameter should be one of {FORMAT_OPT}. Please check {json_file}")

    if json_data["inputs"] not in INPUT_OPT:
        sys.exit(f"'inputs' parameter should be one of {INPUT_OPT}. Please check {json_file}")

    if json_data["input_shape"] not in INPUT_SHAPE_OPT:
        sys.exit(f"'input_shape' parameter should be one of {INPUT_SHAPE_OPT}. "
                 f"Please check {json_file}")

    if json_data["platform"] is not None and json_data["platform"] not in PLATFORM_OPT:
        sys.exit(f"'platform' parameter should be one of {PLATFORM_OPT}. Please check {json_file}")

    # overwrite if the option was provided on the command line
    for key in overwritten_keys:
        json_data[key] = parsed_options[key]

    return json_data


def drop_video(input_images: List[Union[List[str], Mapping]], drop_method: str, app_fps: int,
               video_fps: int, dict_key: str = "") -> List[Union[List[str], Mapping]]:
    """Drops images when video inference is set, to mimic hardware limitations.

    Assumes the images are already sorted prior to this function.

    Arguments:
        input_images: List of lists of strings or list of dictionaries.
        drop_method: String method with which to drop frames. One of "excess", "randomly",
            or "periodically".
        app_fps: Integer frames per second of the application to mimic.
        video_fps: Integer frames per second of the video input.
        dict_key: String key of the dictionary whose values will be sorted on.

    Returns:
        A list with the same data but with some frames dropped, sorted in an alphanumeric
        way.
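
    Example (illustrative; deterministic here because the batches divide evenly):
        >>> frames = [[f"f{i}.png"] for i in range(1, 9)]
        >>> drop_video(frames, "periodically", 2, 4)
        [['f1.png'], ['f3.png'], ['f5.png'], ['f7.png']]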
    """
    # keep every frame of the video
    if app_fps >= video_fps:
        return input_images

    # process one second of video (video_fps frames) at a time
    selected_images = []
    for i in range(0, len(input_images), video_fps):
        video_batch = input_images[i:i + video_fps]
        if drop_method == "excess" or len(video_batch) < app_fps:
            app_batch = video_batch[:app_fps]
        elif drop_method == "randomly":
            app_batch = random.sample(video_batch, app_fps)
        elif drop_method == "periodically":
            app_batch = video_batch[::math.ceil(video_fps / app_fps)]
            left_batch = [image for image in video_batch if image not in app_batch]
            app_batch += random.sample(left_batch, (app_fps - len(app_batch)))
        selected_images.extend(app_batch)

    return sorted_nicely(selected_images, dict_key)


def setup_images(args_dict: Mapping, directory: pathlib.Path, log: str) -> List[List[str]]:
    """Gets the list of input images to test.

    Arguments:
        args_dict: Mapping of strings to user parsed options.
        directory: Pathlib Path to the folder to look for input images.
        log: String path to the log of completed images.

    Returns:
        A list of lists of string paths to the input images.
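
    Example:
        An illustrative -b JSON for fusion runs; "sequence" sets the pairing order
        and "file_type" the extension of each pair member (values are hypothetical):

            {"sequence": ["RGB", "NIR"], "file_type": ["png", "bin"]}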
    """
    prefix = args_dict["fmt"] if args_dict["fmt"] != "ALL" else ""
    dir_to_skip = "@eaDir"

    # get the prefix that sets the fusion order
    if args_dict["fusion"]:
        fusion_order = ["RGB", "NIR"]
    else:
        fusion_order = [prefix]
    file_type = None
    if args_dict["bin_input"] is not None:
        try:
            with open(args_dict["bin_input"]) as bin_info:
                data = json.load(bin_info)
        except FileNotFoundError:
            sys.exit(f"{args_dict['bin_input']} does not exist... Please check your -b option path.")
        except json.JSONDecodeError:
            sys.exit(f"Error parsing {args_dict['bin_input']}... Please check the file contents.")
        if "sequence" in data:
            fusion_order = data["sequence"]
        if "file_type" in data:
            file_type = data["file_type"]
    first = fusion_order[0]

    inputs = []
    if args_dict["fusion"]:
        second = fusion_order[1]
        # if file extensions are specified in the bin input, use them
        if file_type is not None:
            images = [files for files in directory.rglob(first + "*." + file_type[0])
                      if dir_to_skip not in str(files)]
            for image in images:
                pair = pathlib.Path(str(image).replace(first, second)).with_suffix("." + file_type[1])
                if pair.exists():
                    inputs.append([str(image.resolve()), str(pair.resolve())])
        else:
            suffixes = [*BIN_EXT, *IMAGE_EXT]
            images = []
            for suffix in suffixes:
                images.extend([files for files in directory.rglob(first + suffix)
                               if dir_to_skip not in str(files)])

            for image in images:
                # a same-type pair is only enforced when that input type is specified

                # attempt to pair with a bin extension
                if image.suffix != ".bin" or args_dict["inputs"] == "binary":
                    pair = pathlib.Path(str(image).replace(first, second)).with_suffix(".bin")
                    if pair.exists():
                        inputs.append([str(image.resolve()), str(pair.resolve())])
                        continue

                # attempt to pair with an image extension
                if image.suffix == ".bin" or args_dict["inputs"] == "image":
                    for suffix in IMAGE_EXT:
                        ext = suffix[1:]  # remove the wildcard from the string
                        pair = pathlib.Path(str(image).replace(first, second)).with_suffix(ext)
                        if pair.exists():
                            inputs.append([str(image.resolve()), str(pair.resolve())])
                            break  # stop at the first match so a pair is not added twice
    elif args_dict["inputs"] == "binary":
        inputs = directory.rglob(first + "*.bin")
        inputs = [[str(files.resolve())] for files in inputs if dir_to_skip not in str(files)]
    elif args_dict["inputs"] == "image":
        for image_type in IMAGE_EXT:
            images = [[str(files.resolve())] for files in directory.rglob(first + image_type)
                      if dir_to_skip not in str(files)]
            inputs.extend(images)
    elif args_dict["inputs"] in ["image_txt", "bin_txt"]:
        # image + text file pair
        extension = ".png" if args_dict["inputs"] == "image_txt" else ".bin"
        txts = [files for files in directory.rglob("NIR*.txt") if dir_to_skip not in str(files)]
        for txt in txts:  # the image will be RGB, the txt will be NIR
            txt_name = str(txt).replace(".txt", extension).replace("NIR", "RGB")
            img = pathlib.Path(txt_name)
            if img.exists():
                inputs.append([str(img.resolve()), str(txt.resolve())])

    if args_dict["sort"]:
        inputs = sorted_nicely(inputs)
    if args_dict["resume"]:  # remove previously completed images
        try:
            completed = set(line.strip() for line in open(log))
        except FileNotFoundError:
            completed = set()
        inputs = [image for image in inputs if image[0] not in completed]
    if args_dict["num_images"] > 0:
        inputs = inputs[:args_dict["num_images"]]

    if args_dict["video"][0] != "none":  # drop video frames
        mode, app_fps, video_fps = args_dict["video"]
        # values given on the command line arrive as strings
        inputs = drop_video(inputs, mode, int(app_fps), int(video_fps))

    return inputs


def setup_image_list(args_dict: Mapping, image_list: pathlib.Path,
                     log: str) -> Tuple[List[List[str]], Mapping]:
    """Gets the specified list of images to test and the corresponding image data.

    Arguments:
        args_dict: Mapping of strings to user parsed options.
        image_list: Pathlib Path to the JSON file with all of the input images.
        log: String path to the log of completed images.

    Returns:
        A tuple of size 2 where the first value is a list of lists of string paths to the
        input images and the second value is a mapping of strings to values used for
        binary input images.
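
    Example:
        An illustrative -il JSON entry (values are hypothetical); "channel_format"
        and "img_size" are only read for binary inputs:

            [{"img_path": "val/RGB_001.png", "bin_path": "val/RGB_001.bin",
              "channel_format": "RGB", "img_size": [640, 480]}]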
    """
    with open(image_list) as image_json:
        data = json.load(image_json)

    if args_dict["sort"]:
        data = sorted_nicely(data, dict_key="img_path")
    if args_dict["resume"]:  # remove previously completed images
        try:
            completed = set(line.strip() for line in open(log))
        except FileNotFoundError:
            completed = set()
        if isinstance(data[0]["img_path"], list):
            data = [image_info for image_info in data if image_info["img_path"][0] not in completed]
        else:
            data = [image_info for image_info in data if image_info["img_path"] not in completed]
    if args_dict["num_images"] > 0:
        data = data[:args_dict["num_images"]]

    if args_dict["video"][0] != "none":  # drop video frames
        mode, app_fps, video_fps = args_dict["video"]
        # values given on the command line arrive as strings
        data = drop_video(data, mode, int(app_fps), int(video_fps), dict_key="img_path")

    img_data = {}
    inputs = []
    # newer version where the fusion paths are already paired
    if isinstance(data[0]["img_path"], list):
        if args_dict["inputs"] == "binary" or args_dict["inputs"] == "bin_txt":
            for img_info in data:
                bin_file = img_info["bin_path"]
                inputs.append(bin_file)
                img_data[bin_file[0]] = {  # bin_file is a list here, so key on the first path
                    "format": img_info["channel_format"],
                    "size": img_info["img_size"]
                }
        elif args_dict["inputs"] == "image" or args_dict["inputs"] == "image_txt":
            inputs = [img_info["img_path"] for img_info in data]
    else:  # older version where each image is a separate entry
        if args_dict["fusion"]:  # generate RGB/NIR pairs
            unpaired = {}
            for img_info in data:
                if args_dict["inputs"] == "binary":
                    img_path = img_info["bin_path"]
                else:
                    img_path = img_info["img_path"]

                if "NIR" in img_path:
                    pair_path = img_path.replace("NIR", "RGB")
                    paths = [pair_path, img_path]
                else:
                    pair_path = img_path.replace("RGB", "NIR")
                    paths = [img_path, pair_path]

                if pair_path not in unpaired:
                    if args_dict["inputs"] == "binary":
                        unpaired[img_path] = {
                            "format": img_info["channel_format"],
                            "size": img_info["img_size"]
                        }
                    else:
                        unpaired[img_path] = {}
                else:
                    if args_dict["inputs"] == "binary":
                        image_format = [unpaired[pair_path]["format"], img_info["channel_format"]]
                        image_size = [unpaired[pair_path]["size"], img_info["img_size"]]
                        if "RGB" in img_path:  # keep the data in RGB, NIR order
                            image_format[0], image_format[1] = image_format[1], image_format[0]
                            image_size[0], image_size[1] = image_size[1], image_size[0]

                        img_data[paths[0]] = {
                            "format": image_format,
                            "size": image_size
                        }
                    inputs.append(paths)
                    del unpaired[pair_path]
        elif args_dict["inputs"] == "image_txt":
            unpaired = set()
            for img_info in data:
                img_path = img_info["img_path"]

                if "NIR" in img_path:  # txt file, the pair will be an RGB png
                    pair_path = img_path.replace(".txt", ".png").replace("NIR", "RGB")
                    paths = [pair_path, img_path]
                else:  # png file, the pair will be an NIR txt
                    pair_path = img_path.replace(".png", ".txt").replace("RGB", "NIR")
                    paths = [img_path, pair_path]

                if pair_path not in unpaired:
                    unpaired.add(img_path)
                else:
                    inputs.append(paths)
                    unpaired.remove(pair_path)
        elif args_dict["inputs"] == "bin_txt":
            unpaired = {}
            for img_info in data:
                img_path = img_info["bin_path"]

                if "NIR" in img_path:  # txt file, the pair will be an RGB bin
                    pair_path = img_path.replace(".txt", ".bin").replace("NIR", "RGB")
                    paths = [pair_path, img_path]
                else:  # bin file, the pair will be an NIR txt
                    pair_path = img_path.replace(".bin", ".txt").replace("RGB", "NIR")
                    paths = [img_path, pair_path]

                if pair_path not in unpaired:
                    unpaired[img_path] = {
                        "format": img_info["channel_format"],
                        "size": img_info["img_size"]
                    }
                else:
                    img_data[paths[0]] = unpaired[paths[0]]
                    inputs.append(paths)
                    del unpaired[paths[0]]
        elif args_dict["inputs"] == "binary":
            for img_info in data:
                bin_file = img_info["bin_path"]
                inputs.append([bin_file])
                img_data[bin_file] = {
                    "format": [img_info["channel_format"]],
                    "size": [img_info["img_size"]]
                }
        elif args_dict["inputs"] == "image":
            inputs = [[img_info["img_path"]] for img_info in data]

    return inputs, img_data


def dump_process(queue: mp.Queue, log: str) -> None:
    """Dumps completed images to a file safely when multiple processes are used.

    Arguments:
        queue: Multiprocessing queue that holds the log of completed images.
        log: String path to the log of completed images.
    """
    with open(log, "a") as out_file:
        while True:
            paths = queue.get()
            if paths is None:  # sentinel pushed by run_parallel during cleanup
                break
            for path in paths:
                out_file.write(str(path) + "\n")


def worker_process(args: Mapping):
    """Performs a full test on one input (one worker process).

    Arguments:
        args: Mapping of arguments to be passed to the user's test function.

    Returns:
        A mapping of keys to values equal to the return value of the user defined flow in
        the application.
    """
    # give each worker process a stable internal ID for the dongle
    with dongle_config.lock:
        if os.getpid() not in dongle_config.pId_internalId.keys():
            dongle_config.pId_internalId[os.getpid()] = dongle_config.count.value
            dongle_config.count.value += 1

    user_config = args["user_config"].copy()
    if args["img_data"]:
        user_config["bin_input"] = args["img_data"][args["path"][0]]

    if args["debug"]:
        with hp.no_stdout():
            result = args["user_test"](args["path"], user_config)
    else:
        with hp.HiddenPrints():
            result = args["user_test"](args["path"], user_config)

    any_config = list(user_config.items())[0][1]
    any_config.clear_results(False)

    # dump the result to a JSON file in the output folder
    if result is not None:
        result_json = any_config.config["flow"]["out_folder"] / "result.json"
        utils.dump_json(result, result_json)

    # save a log of completed images so the run can be resumed if needed
    if args["result_queue"] is None:
        with open(args["log"], "a") as out_file:
            for path in args["path"]:
                out_file.write(str(path) + "\n")
    else:
        args["result_queue"].put(args["path"])

    return result


def run_parallel(paths: List[List[str]], user_test: Callable, user_config: config.TestConfig,
                 options: Mapping, img_data: Mapping, log: str) -> Mapping:
    """Runs a test on a specified directory in parallel.

    Arguments:
        paths: List of lists of string paths to the input images.
        user_test: Function in the user's flow.py to test.
        user_config: TestConfig object that holds the user's input config and options.
        options: Mapping of strings to user parsed options.
        img_data: Mapping of string image path to data needed for binary images.
        log: String path to the log of completed images.

    Returns:
        A mapping where each key is a string of one of the input images and each value is
        equal to the return value of the user defined flow in the application for that
        test image.
    """
    # separate process that serializes writes to the completed-image log
    manager = mp.Manager()
    result_queue = manager.Queue()
    result_process = mp.Process(target=dump_process, args=(result_queue, log))
    result_process.start()

    args = []
    for path in paths:
        data = {
            "user_test": user_test,
            "user_config": user_config,
            "debug": options["debug_print"],
            "clear": options["clear"],
            "path": path,
            "log": log,
            "result_queue": result_queue,
            "img_data": img_data
        }
        args.append(data)

    results = {}
    with mp.Pool(options["workers"]) as pool:
        progress = tqdm.tqdm(pool.imap(worker_process, args), total=len(args), file=sys.stdout)
        for index, result in enumerate(progress):
            cur_paths = args[index]["path"]
            progress.set_description("+".join([image.rsplit("/", 1)[-1] for image in cur_paths]))
            results[cur_paths[0]] = result

    # process cleanup: the None sentinel stops dump_process
    result_queue.put(None)
    result_process.join()
    return results


def run_sequential(paths: List[List[str]], user_test: Callable, user_config: config.TestConfig,
                   options: Mapping, img_data: Mapping, log: str) -> Mapping:
    """Runs a test on a specified directory in sequence.

    Arguments:
        paths: List of lists of string paths to the input images.
        user_test: Function in the user's flow.py to test.
        user_config: TestConfig object that holds the user's input config and options.
        options: Mapping of strings to user parsed options.
        img_data: Mapping of string image path to data needed for binary images.
        log: String path to the log of completed images.

    Returns:
        A mapping where each key is a string of one of the input images and each value is
        equal to the return value of the user defined flow in the application for that
        test image.
    """
    data = {
        "user_test": user_test,
        "user_config": user_config,
        "debug": options["debug_print"],
        "clear": options["clear"],
        "log": log,
        "result_queue": None,
        "img_data": img_data
    }
    progress = tqdm.tqdm(paths, file=sys.stdout)
    results = {}
    for path in progress:
        progress.set_description("+".join([image.rsplit("/", 1)[-1] for image in path]))
        data.update({"path": path})
        results[path[0]] = worker_process(data)  # the first image keys the results
    return results


def main(test_args: Optional[List] = None) -> Mapping:
    """Main function to test every image in a given directory.

    If test_args is None, the parser will grab arguments from the command line.

    Arguments:
        test_args: List of values to be used as parser arguments.

    Returns:
        A mapping where each key is a string of one of the input images and each value is
        equal to the return value of the user defined flow in the application for that
        test image.
    """
    args = setup_parser(test_args)
    args_dict = vars(args)

    # if an options JSON was supplied, merge it with the options set on the command line
    if args_dict["json_options"]:
        args_dict.update(parse_json_options(args_dict["json_options"], args_dict))

    app = pathlib.Path(args_dict["app_folder"]).resolve()
    if not app.exists():
        sys.exit(f"App directory {app} does not exist...")
    sys.path.insert(0, str(app))

    # log of completed images, to be used with the -r flag
    app_log = ".".join([str(app).strip("/").replace("/", "_"), "log"])
    log = "".join([LOG_FOLDER, app_log])

    img_data = {}
    input_image_list = args_dict["image_json"] is not None  # -il option

    if args_dict["input_single"] is not None:
        paths = [args_dict["input_single"]]
    elif input_image_list:
        image_json = pathlib.Path(args_dict["image_json"]).resolve()
        if not image_json.exists():
            sys.exit(f"Image JSON file {image_json} does not exist...")
        paths, img_data = setup_image_list(args_dict, image_json, log)
    else:
        directory = pathlib.Path(args_dict["image_directory"]).resolve()
        if not directory.exists():
            sys.exit(f"Image directory {directory} does not exist...")
        paths = setup_images(args_dict, directory, log)

    user_test, user_config = setup_user_flow(app, args_dict, input_image_list)

    for key in user_config.keys():
        try:
            emu_mode = user_config[key].config["emu"]["emu_mode"]
        except (AttributeError, KeyError):  # skip non-TestConfig entries such as "debug"
            continue
        if emu_mode == "dongle":
            import python_flow.dongle.dongle_client as dongle
            import python_flow.dongle.dongle_config as dongle_config
            dongle_config.toolchain_version = args_dict["toolchain"]
            dongle.init_dongle_client(user_config, user_config[key].config["emu"]["platform"],
                                      args_dict["group"], args_dict["num_inference"],
                                      args_dict["workers"])
            dongle_config.dongle_fixed_mode = args_dict["dongle_fixed_mode"]
            dongle_config.dongle_debug_mode = args_dict["dongle_debug_mode"]
            break

    # clear the log if not resuming in the middle of a dataset
    if not args_dict["resume"]:
        with open(log, "w") as _:
            pass

    if args_dict["workers"] > 1:
        return run_parallel(paths, user_test, user_config, args_dict, img_data, log)
    else:
        return run_sequential(paths, user_test, user_config, args_dict, img_data, log)


def run_solution(solution_path: str, run_config: str,
                 input_files: Optional[List[str]] = None) -> Mapping:
    """Function wrapper to call E2E given a solution and a run_config file.

    If input_files is not specified, the default image list will use the algorithm
    golden folder.

    Arguments:
        solution_path: String path to the solution.
        run_config: String path to the JSON with the command line configurations (-j flag).
        input_files: List of string paths to the input image files to use.

    Returns:
        A mapping where each key is a string of one of the input images and each value is
        equal to the return value of the user defined flow in the application for that
        test image.
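
    Example:
        An illustrative call (both paths are hypothetical):

            results = run_solution("solutions/my_app", "solutions/my_app/run_config.json")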
    """
    if input_files is None:
        default_images = solution_path + "/alg/golden"
        return main([solution_path, default_images, "application", "-j", run_config])
    else:
        return main([solution_path, "", "application", "-j", run_config, "-is", *input_files])


if __name__ == "__main__":
    main()