diff --git a/Jay Chou.npy b/Jay Chou.npy
new file mode 100644
index 0000000..90d5766
Binary files /dev/null and b/Jay Chou.npy differ
diff --git a/R34_G369K.nef b/R34_G369K.nef
new file mode 100644
index 0000000..4a6bfc9
Binary files /dev/null and b/R34_G369K.nef differ
diff --git a/alignment_visualization.jpg b/alignment_visualization.jpg
new file mode 100644
index 0000000..72d7d57
Binary files /dev/null and b/alignment_visualization.jpg differ
diff --git a/face_recognition.py b/face_recognition.py
new file mode 100644
index 0000000..d98b23d
--- /dev/null
+++ b/face_recognition.py
@@ -0,0 +1,321 @@
+import os
+import sys
+import argparse
+import numpy as np
+import json
+import glob
+import time
+import cv2
+from mtcnn.mtcnn import MTCNN
+import kp
+
+# Reuse helper functionality from nef_test
+from nef_test import (
+    load_image_safe, landmarks, affine_matrix, extract_vector_data,
+    load_vector, cosine_similarity, SCPU_FW_PATH, NCPU_FW_PATH, visualize_alignment
+)
+
+# Default parameters
+MODEL_FILE_PATH = 'R34_G369K.nef'
+VECTOR_DATABASE_DIR = 'face_vectors'
+SIMILARITY_THRESHOLD = 0.5  # Similarity threshold; adjust as needed
+
+def get_face_vector(device_group, model_nef_descriptor, image_path):
+    """Extract a face embedding vector from an image."""
+
+    # Create the MTCNN detector
+    detector = MTCNN(device="CPU:0")
+
+    # Load the image
+    try:
+        img_rgb, img_bgr = load_image_safe(image_path)
+        print(f" - Image loaded: {image_path}")
+    except Exception as e:
+        print(f"Error: {str(e)}")
+        return None
+
+    # Detect facial landmarks and compute the affine transformation matrix
+    try:
+        lmks = landmarks(detector, img_rgb)
+        mat, size = affine_matrix(lmks)
+        print(" - Face landmarks detected")
+    except Exception as e:
+        print(f"Error: {str(e)}")
+        return None
+
+    # Apply the affine transformation
+    aligned_img = cv2.warpAffine(img_bgr, mat, size)
+
+    # visualize_alignment(img_bgr, lmks, aligned_img)
+
+    # Convert to BGR565 format and resize
+    aligned_img_bgr565 = cv2.cvtColor(aligned_img, cv2.COLOR_BGR2BGR565)
+    img_bgr565 = cv2.resize(aligned_img_bgr565, (112, 112), interpolation=cv2.INTER_LINEAR)
+    print(" - Image aligned and formatted")
+
+    # Prepare the generic image inference input descriptor
+    generic_inference_input_descriptor = kp.GenericImageInferenceDescriptor(
+        model_id=model_nef_descriptor.models[0].id,
+        inference_number=0,
+        input_node_image_list=[
+            kp.GenericInputNodeImage(
+                image=img_bgr565,
+                image_format=kp.ImageFormat.KP_IMAGE_FORMAT_RGB565,
+                resize_mode=kp.ResizeMode.KP_RESIZE_ENABLE,
+                padding_mode=kp.PaddingMode.KP_PADDING_CORNER,
+                normalize_mode=kp.NormalizeMode.KP_NORMALIZE_KNERON
+            )
+        ]
+    )
+
+    # Start the inference work
+    try:
+        kp.inference.generic_image_inference_send(
+            device_group=device_group,
+            generic_inference_input_descriptor=generic_inference_input_descriptor
+        )
+
+        generic_raw_result = kp.inference.generic_image_inference_receive(device_group=device_group)
+        print(" - Inference completed successfully")
+    except kp.ApiKPException as exception:
+        print(f' - Error: inference failed, error = {exception}')
+        return None
+
+    # Retrieve the inference node outputs
+    inf_node_output_list = []
+    for node_idx in range(generic_raw_result.header.num_output_node):
+        inference_float_node_output = kp.inference.generic_inference_retrieve_float_node(
+            node_idx=node_idx,
+            generic_raw_result=generic_raw_result,
+            channels_ordering=kp.ChannelOrdering.KP_CHANNEL_ORDERING_CHW
+        )
+        inf_node_output_list.append(inference_float_node_output)
+
+    # Get the face vector
+    if len(inf_node_output_list) > 0:
+        face_vector = inf_node_output_list[0]
+        # Convert the vector into a standard NumPy array
+        face_vector_np = extract_vector_data(face_vector)
+
+        # Ensure the vector is flattened to 1D
+        face_vector_np = face_vector_np.flatten()
+
+        # Print shape for debugging
+        print(f" - Face vector shape: {face_vector_np.shape}")
+
+        return face_vector_np
+    else:
+        print("Error: no output node found in the inference result")
+        return None
+
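+# A minimal usage sketch for get_face_vector(): with a device already
+# connected and the model loaded (as done in main() below), extract an
+# embedding and compare it against a stored one. The two paths are example
+# files from this repository; any image / saved vector works.
+def _demo_get_face_vector(device_group, model_nef_descriptor):
+    vec = get_face_vector(device_group, model_nef_descriptor, 'test_picture/Chou1.jpg')
+    if vec is not None:
+        stored = load_vector('face_vectors/Jay.npy', format='numpy')
+        print(f"cosine similarity: {cosine_similarity(vec, stored):.4f}")
+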
+def load_face_database(database_dir):
+    """Load the face vector database."""
+    face_db = []
+
+    # Make sure the database directory exists
+    if not os.path.exists(database_dir):
+        print(f"Warning: face database directory '{database_dir}' does not exist; it will be created")
+        os.makedirs(database_dir)
+        return face_db
+
+    # Find all supported vector files
+    vector_files = []
+    vector_files.extend(glob.glob(os.path.join(database_dir, "*.npy")))
+    vector_files.extend(glob.glob(os.path.join(database_dir, "*.json")))
+    vector_files.extend(glob.glob(os.path.join(database_dir, "*.pkl")))
+
+    # Load each vector file
+    for vector_file in vector_files:
+        try:
+            # Determine the format automatically from the file extension
+            file_ext = os.path.splitext(vector_file)[1].lower()
+            if file_ext == '.npy':
+                vector = load_vector(vector_file, format='numpy')
+                # Derive identification info from the file name
+                name = os.path.basename(vector_file).replace('.npy', '')
+                metadata = {'name': name}
+                face_db.append({
+                    'vector': vector,
+                    'metadata': metadata,
+                    'file_path': vector_file
+                })
+            elif file_ext == '.json':
+                vector, metadata = load_vector(vector_file, format='json')
+                face_db.append({
+                    'vector': vector,
+                    'metadata': metadata or {'name': os.path.basename(vector_file).replace('.json', '')},
+                    'file_path': vector_file
+                })
+            elif file_ext == '.pkl':
+                vector = load_vector(vector_file, format='pickle')
+                # Derive identification info from the file name
+                name = os.path.basename(vector_file).replace('.pkl', '')
+                metadata = {'name': name}
+                face_db.append({
+                    'vector': vector,
+                    'metadata': metadata,
+                    'file_path': vector_file
+                })
+
+            print(f"Loaded face vector: {vector_file}")
+        except Exception as e:
+            print(f"Warning: could not load vector file '{vector_file}': {str(e)}")
+
+    print(f"Successfully loaded {len(face_db)} face vectors")
+    return face_db
+
+def recognize_face(new_face_vector, face_database, threshold=SIMILARITY_THRESHOLD):
+    """Identify a face by comparing the new face vector against the database."""
+    if not face_database:
+        return None, 0.0
+
+    max_similarity = 0.0
+    best_match = None
+
+    for face_entry in face_database:
+        stored_vector = face_entry['vector']
+        similarity = cosine_similarity(new_face_vector, stored_vector)
+
+        if similarity > max_similarity:
+            max_similarity = similarity
+            best_match = face_entry
+
+    # Return the match only if the highest similarity reaches the threshold
+    if max_similarity >= threshold:
+        return best_match, max_similarity
+    else:
+        return None, max_similarity
+
+def add_face_to_database(face_vector, database_dir, name=None, image_path=None, format='json'):
+    """Add a new face vector to the database."""
+    if not os.path.exists(database_dir):
+        os.makedirs(database_dir)
+
+    # Create a unique file name
+    timestamp = time.strftime("%Y%m%d_%H%M%S")
+    if name:
+        file_name = f"{name}_{timestamp}"
+    else:
+        file_name = f"unknown_{timestamp}"
+
+    # Create metadata
+    metadata = {
+        'name': name or 'unknown',
+        'timestamp': time.strftime("%Y-%m-%d %H:%M:%S"),
+        'image_path': image_path
+    }
+
+    # Save the vector according to the requested format
+    if format == 'numpy':
+        file_path = os.path.join(database_dir, f"{file_name}.npy")
+        np.save(file_path, face_vector)
+    elif format == 'pickle':
+        import pickle
+        file_path = os.path.join(database_dir, f"{file_name}.pkl")
+        with open(file_path, 'wb') as f:
+            pickle.dump(face_vector, f)
+    else:  # Default to JSON format
+        file_path = os.path.join(database_dir, f"{file_name}.json")
+        data = {
+            'vector': face_vector.tolist(),
+            'metadata': metadata
+        }
+        with open(file_path, 'w') as f:
+            json.dump(data, f)
+
+    print(f"New face vector saved to: {file_path}")
+    return file_path
+
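+# A small sketch of the intended database round trip: recognize_face()
+# returns (entry, similarity) for the best match at or above the threshold,
+# or (None, best_similarity) otherwise, in which case the vector can be
+# enrolled. 'new_person' is a placeholder name.
+def _demo_recognize_and_enroll(new_face_vector):
+    db = load_face_database(VECTOR_DATABASE_DIR)
+    match, similarity = recognize_face(new_face_vector, db)
+    if match is None:
+        add_face_to_database(new_face_vector, VECTOR_DATABASE_DIR, name='new_person')
+    return match, similarity
+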
+def main():
+    parser = argparse.ArgumentParser(description='Face recognition and comparison system')
+    parser.add_argument('-p', '--port_id', help='Connect to the device on the specified port ID', default=28, type=int)
+    parser.add_argument('-m', '--model', help=f'Model file path (.nef) (default: {MODEL_FILE_PATH})',
+                        default=MODEL_FILE_PATH, type=str)
+    parser.add_argument('-i', '--img', help='Path of the image to recognize', required=True, type=str)
+    parser.add_argument('-d', '--database', help=f'Face vector database directory (default: {VECTOR_DATABASE_DIR})',
+                        default=VECTOR_DATABASE_DIR, type=str)
+    parser.add_argument('-t', '--threshold', help=f'Similarity threshold (default: {SIMILARITY_THRESHOLD})',
+                        default=SIMILARITY_THRESHOLD, type=float)
+    parser.add_argument('-a', '--add', help='Add the new face to the database (if not recognized)', action='store_true')
+    parser.add_argument('-n', '--name', help='Name for the new face (used with --add)', default=None, type=str)
+    parser.add_argument('-f', '--format', help='Vector storage format: numpy, pickle, or json (default: json)',
+                        default='json', choices=['numpy', 'pickle', 'json'], type=str)
+    args = parser.parse_args()
+
+    # Connect to the device
+    try:
+        print('[Connect Device]')
+        device_group = kp.core.connect_devices(usb_port_ids=[args.port_id])
+        print(' - Success')
+    except kp.ApiKPException as exception:
+        print(f'Error: connect device failed, port ID = \'{args.port_id}\', error msg: [{str(exception)}]')
+        return
+
+    # Set the USB communication timeout
+    print('[Set Device Timeout]')
+    kp.core.set_timeout(device_group=device_group, milliseconds=5000)
+    print(' - Success')
+
+    # Upload firmware to the device
+    try:
+        print('[Upload Firmware]')
+        kp.core.load_firmware_from_file(device_group=device_group,
+                                        scpu_fw_path=SCPU_FW_PATH,
+                                        ncpu_fw_path=NCPU_FW_PATH)
+        print(' - Success')
+    except kp.ApiKPException as exception:
+        print(f'Error: upload firmware failed, error = \'{str(exception)}\'')
+        return
+
+    # Upload the model to the device
+    try:
+        print('[Upload Model]')
+        model_nef_descriptor = kp.core.load_model_from_file(device_group=device_group,
+                                                            file_path=args.model)
+        print(' - Success')
+    except kp.ApiKPException as exception:
+        print(f'Error: upload model failed, error = \'{str(exception)}\'')
+        return
+
+    # Load the face database
+    print('[Load Face Database]')
+    face_database = load_face_database(args.database)
+
+    # Extract the face vector from the input image
+    print('[Process Input Image]')
+    new_face_vector = get_face_vector(device_group, model_nef_descriptor, args.img)
+
+    if new_face_vector is not None:
+        # Recognize the face
+        print('[Recognize Face]')
+        match, similarity = recognize_face(new_face_vector, face_database, args.threshold)
+
+        if match:
+            metadata = match['metadata']
+            name = metadata.get('name', 'unknown')
+            print(f"[Result] Recognized as: {name}")
+            print(f" - Similarity: {similarity:.4f}")
+            print(f" - Source file: {match['file_path']}")
+            if 'timestamp' in metadata:
+                print(f" - Recorded at: {metadata['timestamp']}")
+        else:
+            print(f"[Result] Face not recognized (highest similarity: {similarity:.4f}, threshold: {args.threshold})")
+
+            # If --add was given, add the new face to the database
+            if args.add:
+                print('[Add New Face to Database]')
+                file_path = add_face_to_database(
+                    new_face_vector,
+                    args.database,
+                    name=args.name,
+                    image_path=args.img,
+                    format=args.format
+                )
+                print(f"[Add Complete] Added new face: {args.name or 'unnamed'}")
+
+    # Clean up
+    kp.core.disconnect_devices(device_group=device_group)
+    print("[Cleanup] Device disconnected")
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file
diff --git a/face_vectors/Jay.npy b/face_vectors/Jay.npy
new file mode 100644
index 0000000..bbbc975
Binary files /dev/null and b/face_vectors/Jay.npy differ
diff --git a/face_vectors/Will Smith.npy b/face_vectors/Will Smith.npy
new file mode 100644
index 0000000..ad3d1b2
Binary files /dev/null and b/face_vectors/Will Smith.npy differ
diff --git a/face_vectors/Zhiyu.npy b/face_vectors/Zhiyu.npy
new file mode 100644
index 0000000..74742f1
Binary files /dev/null and b/face_vectors/Zhiyu.npy differ
diff --git a/face_vectors/山下智久.npy b/face_vectors/山下智久.npy
new file mode 100644
index 0000000..1bf28bb
Binary files /dev/null and b/face_vectors/山下智久.npy differ
diff --git a/fw_info.txt b/fw_info.txt
new file mode 100644
index 0000000..4489d41
--- /dev/null
+++ b/fw_info.txt
@@ -0,0 +1,8 @@
+Total [1] models:
+[input]
+  id: [32769], version: [0x8b28]
+  size: input [0xc400], output [0x2000], buf [0x134000], cmd [0x1c14], weight [0x1839c00], fw_code [0xa0]
+  addr: input [0x60000000], output [0x6000c400], buf [0x6000e400], cmd [0x60142400], weight [0x60144020], fw_code [0x6197dc20]
+dram_addr_end [0x6197dcc0], total bin size: [0x183b8c0]
+checksum: all_models.bin [0xf7c5b2d6]
+  0 [0xf7c5b2d6],
\ No newline at end of file
diff --git a/input.kdp520.decomposed.onnx b/input.kdp520.decomposed.onnx
new file mode 100644
index 0000000..74056c5
Binary files /dev/null and b/input.kdp520.decomposed.onnx differ
diff --git a/input.kdp520.scaled.bie b/input.kdp520.scaled.bie
new file mode 100644
index 0000000..32d99b5
Binary files /dev/null and b/input.kdp520.scaled.bie differ
diff --git a/input.onnx b/input.onnx
new file mode 100644
index 0000000..c340af8
Binary files /dev/null and b/input.onnx differ
diff --git a/model_fx_report.html b/model_fx_report.html
new file mode 100644
index 0000000..74a2661
--- /dev/null
+++ b/model_fx_report.html
@@ -0,0 +1,727 @@

Summary
| info | |
| --- | --- |
| docker_version | kneron/toolchain:v0.29.0 |
| comments | |
| kdp520/input bitwidth | int8 |
| kdp520/output bitwidth | int8 |
| kdp520/cpu bitwidth | int8 |
| kdp520/datapath bitwidth | int8 |
| kdp520/weight bitwidth | int8 |
| kdp520/ip_eval/fps | 13.5583 |
| kdp520/ip_eval/ITC(ms) | 73.7554 ms |
| kdp520/ip_eval/RDMA bandwidth GB/s | 0.8 |
| kdp520/ip_eval/WDMA bandwidth GB/s | 0.8 |
| kdp520/ip_eval/GETW bandwidth GB/s | 0.8 |
| kdp520/ip_eval/cpu_node | N/A |
| kdp520/bie | input.kdp520.scaled.bie |
| kdp520/onnx | input.kdp520.decomposed.onnx |
| kdp520/nef | models_520.nef |
| gen fx model report | model_fx_report.html |
| gen fx model json | model_fx_report.json |
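These same key/value pairs are exported verbatim to model_fx_report.json (included later in this diff), so the evaluation numbers can be pulled out programmatically. A short sketch, using only the standard library:

    import json

    # Load the toolchain's fixed-point report (path assumes the repo root)
    with open('model_fx_report.json') as f:
        report = json.load(f)

    # fps and ITC are stored as strings; the bandwidth figures are floats
    print('fps:', float(report['kdp520/ip_eval/fps']))
    print('inference time:', report['kdp520/ip_eval/ITC(ms)'])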


kdp520
| # | node | node origin | type | bw in | bw out | bw weight | node backend |
| --- | --- | --- | --- | --- | --- | --- | --- |
| 0 | input | None | NPU | N/A | [8] | N/A | None |
| 1 | /conv1/Conv | /conv1/Conv | NPU | [8] | [16] | [8] | npu_fusion_node_/conv1/Conv_/prelu/PRelu |
| 2 | /prelu/PRelu | /prelu/PRelu | NPU | [16] | [8] | [8] | |
| 3 | /layer1/layer1.0/bn1/BatchNormalization_KNOPT_bn_sep_1 | /layer1/layer1.0/bn1/BatchNormalization | NPU | [8] | [8] | [16] | /layer1/layer1.0/bn1/BatchNormalization_KNOPT_bn_sep_1 |
| 4 | /layer1/layer1.0/bn1/BatchNormalization_KNOPT_bn_sep_2 | /layer1/layer1.0/bn1/BatchNormalization | NPU | [8] | [8] | [16] | /layer1/layer1.0/bn1/BatchNormalization_KNOPT_bn_sep_2 |
| 5 | /layer1/layer1.0/conv1/Conv | /layer1/layer1.0/conv1/Conv | NPU | [8] | [16] | [8] | npu_fusion_node_/layer1/layer1.0/conv1/Conv_/layer1/layer1.0/prelu/PRelu |
| 6 | /layer1/layer1.0/prelu/PRelu | /layer1/layer1.0/prelu/PRelu | NPU | [16] | [8] | [8] | |
| 7 | /layer1/layer1.0/conv2/Conv_KNOPT_/layer1/layer1.0/Add_KNOPT_dummy_bn_0 | /layer1/layer1.0/conv2/Conv | NPU | [8] | [8] | [8] | /layer1/layer1.0/conv2/Conv_KNOPT_/layer1/layer1.0/Add_KNOPT_dummy_bn_0 |
| 8 | /layer1/layer1.0/downsample/downsample.0/Conv | /layer1/layer1.0/downsample/downsample.0/Conv | NPU | [8] | [16] | [8] | npu_fusion_node_/layer1/layer1.0/downsample/downsample.0/Conv_/layer1/layer1.0/Add_KNOPT_dummy_bn_1 |
| 9 | /layer1/layer1.0/Add_KNOPT_dummy_bn_1 | /layer1/layer1.0/Add | NPU | [16] | [8] | [16] | |
| 10 | /layer1/layer1.0/Add | /layer1/layer1.0/Add | NPU | [8, 8] | [8] | [8] | /layer1/layer1.0/Add |
| 11 | /layer1/layer1.0/Add_KNOPT_dummy_bn_2 | /layer1/layer1.1/Add | NPU | [8] | [8] | [16] | /layer1/layer1.0/Add_KNOPT_dummy_bn_2 |
| 12 | /layer1/layer1.1/bn1/BatchNormalization | /layer1/layer1.1/bn1/BatchNormalization | NPU | [8] | [8] | [16] | /layer1/layer1.1/bn1/BatchNormalization |
| 13 | /layer1/layer1.1/conv1/Conv | /layer1/layer1.1/conv1/Conv | NPU | [8] | [16] | [8] | npu_fusion_node_/layer1/layer1.1/conv1/Conv_/layer1/layer1.1/prelu/PRelu |
| 14 | /layer1/layer1.1/prelu/PRelu | /layer1/layer1.1/prelu/PRelu | NPU | [16] | [8] | [8] | |
| 15 | /layer1/layer1.1/conv2/Conv_KNOPT_/layer1/layer1.1/Add_KNOPT_dummy_bn_0 | /layer1/layer1.1/conv2/Conv | NPU | [8] | [8] | [8] | /layer1/layer1.1/conv2/Conv_KNOPT_/layer1/layer1.1/Add_KNOPT_dummy_bn_0 |
| 16 | /layer1/layer1.1/Add | /layer1/layer1.1/Add | NPU | [8, 8] | [8] | [8] | /layer1/layer1.1/Add |
| 17 | /layer1/layer1.1/Add_KNOPT_dummy_bn_1 | /layer2/layer2.0/downsample/downsample.0/Conv | NPU | [8] | [8] | [16] | /layer1/layer1.1/Add_KNOPT_dummy_bn_1 |
| 18 | /layer2/layer2.0/downsample/downsample.0/Conv | /layer2/layer2.0/downsample/downsample.0/Conv | NPU | [8] | [16] | [8] | npu_fusion_node_/layer2/layer2.0/downsample/downsample.0/Conv_/layer2/layer2.0/Add_KNOPT_dummy_bn_1 |
| 19 | /layer2/layer2.0/Add_KNOPT_dummy_bn_1 | /layer2/layer2.0/Add | NPU | [16] | [8] | [16] | |
| 20 | /layer2/layer2.0/bn1/BatchNormalization | /layer2/layer2.0/bn1/BatchNormalization | NPU | [8] | [8] | [16] | /layer2/layer2.0/bn1/BatchNormalization |
| 21 | /layer2/layer2.0/conv1/Conv | /layer2/layer2.0/conv1/Conv | NPU | [8] | [16] | [8] | npu_fusion_node_/layer2/layer2.0/conv1/Conv_/layer2/layer2.0/prelu/PRelu |
| 22 | /layer2/layer2.0/prelu/PRelu | /layer2/layer2.0/prelu/PRelu | NPU | [16] | [8] | [8] | |
| 23 | /layer2/layer2.0/conv2/Conv_KNOPT_/layer2/layer2.0/Add_KNOPT_dummy_bn_0 | /layer2/layer2.0/conv2/Conv | NPU | [8] | [8] | [8] | /layer2/layer2.0/conv2/Conv_KNOPT_/layer2/layer2.0/Add_KNOPT_dummy_bn_0 |
| 24 | /layer2/layer2.0/Add | /layer2/layer2.0/Add | NPU | [8, 8] | [8] | [8] | /layer2/layer2.0/Add |
| 25 | /layer2/layer2.0/Add_KNOPT_dummy_bn_2 | /layer2/layer2.1/Add | NPU | [8] | [8] | [16] | /layer2/layer2.0/Add_KNOPT_dummy_bn_2 |
| 26 | /layer2/layer2.1/bn1/BatchNormalization | /layer2/layer2.1/bn1/BatchNormalization | NPU | [8] | [8] | [16] | /layer2/layer2.1/bn1/BatchNormalization |
| 27 | /layer2/layer2.1/conv1/Conv | /layer2/layer2.1/conv1/Conv | NPU | [8] | [16] | [8] | npu_fusion_node_/layer2/layer2.1/conv1/Conv_/layer2/layer2.1/prelu/PRelu |
| 28 | /layer2/layer2.1/prelu/PRelu | /layer2/layer2.1/prelu/PRelu | NPU | [16] | [8] | [8] | |
| 29 | /layer2/layer2.1/conv2/Conv_KNOPT_/layer2/layer2.1/Add_KNOPT_dummy_bn_0 | /layer2/layer2.1/conv2/Conv | NPU | [8] | [8] | [8] | /layer2/layer2.1/conv2/Conv_KNOPT_/layer2/layer2.1/Add_KNOPT_dummy_bn_0 |
| 30 | /layer2/layer2.1/Add | /layer2/layer2.1/Add | NPU | [8, 8] | [8] | [8] | /layer2/layer2.1/Add |
| 31 | /layer2/layer2.1/Add_KNOPT_dummy_bn_1 | /layer3/layer3.0/downsample/downsample.0/Conv | NPU | [8] | [8] | [16] | /layer2/layer2.1/Add_KNOPT_dummy_bn_1 |
| 32 | /layer3/layer3.0/downsample/downsample.0/Conv | /layer3/layer3.0/downsample/downsample.0/Conv | NPU | [8] | [16] | [8] | npu_fusion_node_/layer3/layer3.0/downsample/downsample.0/Conv_/layer3/layer3.0/Add_KNOPT_dummy_bn_1 |
| 33 | /layer3/layer3.0/Add_KNOPT_dummy_bn_1 | /layer3/layer3.0/Add | NPU | [16] | [8] | [16] | |
| 34 | /layer3/layer3.0/bn1/BatchNormalization | /layer3/layer3.0/bn1/BatchNormalization | NPU | [8] | [8] | [16] | /layer3/layer3.0/bn1/BatchNormalization |
| 35 | /layer3/layer3.0/conv1/Conv | /layer3/layer3.0/conv1/Conv | NPU | [8] | [16] | [8] | npu_fusion_node_/layer3/layer3.0/conv1/Conv_/layer3/layer3.0/prelu/PRelu |
| 36 | /layer3/layer3.0/prelu/PRelu | /layer3/layer3.0/prelu/PRelu | NPU | [16] | [8] | [8] | |
| 37 | /layer3/layer3.0/conv2/Conv_KNOPT_/layer3/layer3.0/Add_KNOPT_dummy_bn_0 | /layer3/layer3.0/conv2/Conv | NPU | [8] | [8] | [8] | /layer3/layer3.0/conv2/Conv_KNOPT_/layer3/layer3.0/Add_KNOPT_dummy_bn_0 |
| 38 | /layer3/layer3.0/Add | /layer3/layer3.0/Add | NPU | [8, 8] | [8] | [8] | /layer3/layer3.0/Add |
| 39 | /layer3/layer3.0/Add_KNOPT_dummy_bn_2 | /layer3/layer3.1/Add | NPU | [8] | [8] | [16] | /layer3/layer3.0/Add_KNOPT_dummy_bn_2 |
| 40 | /layer3/layer3.1/bn1/BatchNormalization | /layer3/layer3.1/bn1/BatchNormalization | NPU | [8] | [8] | [16] | /layer3/layer3.1/bn1/BatchNormalization |
| 41 | /layer3/layer3.1/conv1/Conv | /layer3/layer3.1/conv1/Conv | NPU | [8] | [16] | [8] | npu_fusion_node_/layer3/layer3.1/conv1/Conv_/layer3/layer3.1/prelu/PRelu |
| 42 | /layer3/layer3.1/prelu/PRelu | /layer3/layer3.1/prelu/PRelu | NPU | [16] | [8] | [8] | |
| 43 | /layer3/layer3.1/conv2/Conv_KNOPT_/layer3/layer3.1/Add_KNOPT_dummy_bn_0 | /layer3/layer3.1/conv2/Conv | NPU | [8] | [8] | [8] | /layer3/layer3.1/conv2/Conv_KNOPT_/layer3/layer3.1/Add_KNOPT_dummy_bn_0 |
| 44 | /layer3/layer3.1/Add | /layer3/layer3.1/Add | NPU | [8, 8] | [8] | [8] | /layer3/layer3.1/Add |
| 45 | /layer3/layer3.1/Add_KNOPT_dummy_bn_1 | /layer4/layer4.0/downsample/downsample.0/Conv | NPU | [8] | [8] | [16] | /layer3/layer3.1/Add_KNOPT_dummy_bn_1 |
| 46 | /layer4/layer4.0/downsample/downsample.0/Conv | /layer4/layer4.0/downsample/downsample.0/Conv | NPU | [8] | [16] | [8] | npu_fusion_node_/layer4/layer4.0/downsample/downsample.0/Conv_/layer4/layer4.0/Add_KNOPT_dummy_bn_1 |
| 47 | /layer4/layer4.0/Add_KNOPT_dummy_bn_1 | /layer4/layer4.0/Add | NPU | [16] | [8] | [16] | |
| 48 | /layer4/layer4.0/bn1/BatchNormalization | /layer4/layer4.0/bn1/BatchNormalization | NPU | [8] | [8] | [16] | /layer4/layer4.0/bn1/BatchNormalization |
| 49 | /layer4/layer4.0/conv1/Conv | /layer4/layer4.0/conv1/Conv | NPU | [8] | [16] | [8] | npu_fusion_node_/layer4/layer4.0/conv1/Conv_/layer4/layer4.0/prelu/PRelu |
| 50 | /layer4/layer4.0/prelu/PRelu | /layer4/layer4.0/prelu/PRelu | NPU | [16] | [8] | [8] | |
| 51 | /layer4/layer4.0/conv2/Conv_KNOPT_/layer4/layer4.0/Add_KNOPT_dummy_bn_0 | /layer4/layer4.0/conv2/Conv | NPU | [8] | [8] | [8] | /layer4/layer4.0/conv2/Conv_KNOPT_/layer4/layer4.0/Add_KNOPT_dummy_bn_0 |
| 52 | /layer4/layer4.0/Add | /layer4/layer4.0/Add | NPU | [8, 8] | [8] | [8] | /layer4/layer4.0/Add |
| 53 | /layer4/layer4.0/Add_KNOPT_dummy_bn_2 | /layer4/layer4.1/Add | NPU | [8] | [8] | [16] | /layer4/layer4.0/Add_KNOPT_dummy_bn_2 |
| 54 | /layer4/layer4.1/bn1/BatchNormalization | /layer4/layer4.1/bn1/BatchNormalization | NPU | [8] | [8] | [16] | /layer4/layer4.1/bn1/BatchNormalization |
| 55 | /layer4/layer4.1/conv1/Conv | /layer4/layer4.1/conv1/Conv | NPU | [8] | [16] | [8] | npu_fusion_node_/layer4/layer4.1/conv1/Conv_/layer4/layer4.1/prelu/PRelu |
| 56 | /layer4/layer4.1/prelu/PRelu | /layer4/layer4.1/prelu/PRelu | NPU | [16] | [8] | [8] | |
| 57 | /layer4/layer4.1/conv2/Conv_KNOPT_/layer4/layer4.1/Add_KNOPT_dummy_bn_0 | /layer4/layer4.1/conv2/Conv | NPU | [8] | [8] | [8] | /layer4/layer4.1/conv2/Conv_KNOPT_/layer4/layer4.1/Add_KNOPT_dummy_bn_0 |
| 58 | /layer4/layer4.1/Add | /layer4/layer4.1/Add | NPU | [8, 8] | [16] | [8] | npu_fusion_node_/layer4/layer4.1/Add_/bn2/BatchNormalization |
| 59 | /bn2/BatchNormalization | /bn2/BatchNormalization | NPU | [16] | [8] | [16] | |
| 60 | /Flatten | /Flatten | NPU | [8] | [8] | [8] | None |
| 61 | /fc/Gemm | /fc/Gemm | NPU | [8] | [8] | [8] | npu_fusion_node_/layer4/layer4.1/Add_/bn2/BatchNormalization_KNERON_REFORMAT_next_0 |
| 62 | /features/BatchNormalization | /features/BatchNormalization | NPU | [8] | [8] | [16] | /features/BatchNormalization |
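The node names above mirror the ONNX graph of the decomposed model shipped in this diff; if a cross-check is ever needed, a minimal sketch (assuming the onnx package is installed) that walks the same graph:

    import onnx

    # List every node of the decomposed KDP520 model by name and operator type
    model = onnx.load('input.kdp520.decomposed.onnx')
    for node in model.graph.node:
        print(node.name, node.op_type)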


\ No newline at end of file
diff --git a/model_fx_report.json b/model_fx_report.json
new file mode 100644
index 0000000..6652318
--- /dev/null
+++ b/model_fx_report.json
@@ -0,0 +1,20 @@
+{
+    "docker_version": "kneron/toolchain:v0.29.0",
+    "comments": "",
+    "kdp520/input bitwidth": "int8",
+    "kdp520/output bitwidth": "int8",
+    "kdp520/cpu bitwidth": "int8",
+    "kdp520/datapath bitwidth": "int8",
+    "kdp520/weight bitwidth": "int8",
+    "kdp520/ip_eval/fps": "13.5583",
+    "kdp520/ip_eval/ITC(ms)": "73.7554 ms",
+    "kdp520/ip_eval/RDMA bandwidth GB/s": 0.8,
+    "kdp520/ip_eval/WDMA bandwidth GB/s": 0.8,
+    "kdp520/ip_eval/GETW bandwidth GB/s": 0.8,
+    "kdp520/ip_eval/cpu_node": "N/A",
+    "kdp520/bie": "input.kdp520.scaled.bie",
+    "kdp520/onnx": "input.kdp520.decomposed.onnx",
+    "kdp520/nef": "models_520.nef",
+    "gen fx model report": "model_fx_report.html",
+    "gen fx model json": "model_fx_report.json"
+}
\ No newline at end of file
diff --git a/models_520.nef b/models_520.nef
new file mode 100644
index 0000000..3802196
Binary files /dev/null and b/models_520.nef differ
diff --git a/nef_test.py b/nef_test.py
new file mode 100644
index 0000000..c76f03b
--- /dev/null
+++ b/nef_test.py
@@ -0,0 +1,518 @@
+import os
+import sys
+import argparse
+import kp
+import cv2
+import numpy as np
+from mtcnn.mtcnn import MTCNN
+import time
+import pickle
+import json
+
+SCPU_FW_PATH = r"C:\Users\mason\AppData\Local\Kneron_Academy\firmware\KL520\fw_scpu.bin"
+NCPU_FW_PATH = r"C:\Users\mason\AppData\Local\Kneron_Academy\firmware\KL520\fw_ncpu.bin"
+MODEL_FILE_PATH = 'R34_G369K.nef'
+IMAGE_FILE_PATH = 'Chou1.jpg'
+
+def load_image_safe(image_path):
+    if not os.path.isfile(image_path):
+        raise FileNotFoundError(f"[Error] Image file '{image_path}' not found.")
+    img = cv2.imread(image_path)
+    if img is None:
+        raise ValueError(f"[Error] Failed to load image '{image_path}'. Check the file format (must be jpg, png, etc.).")
+    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+    return img_rgb, img
+
+def landmarks(detector, img_rgb):
+    faces = detector.detect_faces(img_rgb)
+    if len(faces) == 0:
+        raise ValueError("[Error] No faces detected in the image.")
+    face = max(faces, key=lambda x: x['confidence'])
+    return face['keypoints']
+
+def affine_matrix(lmks, scale=2.5):
+    nose = np.array(lmks['nose'], dtype=np.float32)
+    left_eye = np.array(lmks['left_eye'], dtype=np.float32)
+    right_eye = np.array(lmks['right_eye'], dtype=np.float32)
+    eye_width = right_eye - left_eye
+    angle = np.arctan2(eye_width[1], eye_width[0])
+    center = nose
+    alpha = np.cos(angle)
+    beta = np.sin(angle)
+    w = np.sqrt(np.sum(eye_width**2)) * scale
+    m = [[alpha, beta, -alpha * center[0] - beta * center[1] + w * 0.5],
+         [-beta, alpha, beta * center[0] - alpha * center[1] + w * 0.5]]
+    return np.array(m), (int(w), int(w))
+
+def extract_vector_data(vector):
+    """
+    Extract data from the inference output object and convert to a standard numpy array.
+    Always returns a flattened 1D array regardless of input shape.
+    """
+    try:
+        # For Kneron InferenceFloatNodeOutput specifically
+        if 'InferenceFloatNodeOutput' in str(type(vector)):
+            # Try to access the data directly
+            if hasattr(vector, 'ndarray'):
+                data = vector.ndarray
+            elif hasattr(vector, 'data'):
+                data = vector.data
+            elif hasattr(vector, 'content'):
+                data = vector.content
+            elif hasattr(vector, 'output'):
+                data = vector.output
+            else:
+                # If no direct data attribute, try to access via shape and indexing
+                if hasattr(vector, 'shape'):
+                    shape = vector.shape
+                    if len(shape) == 4 and shape[0] == 1 and shape[2] == 1 and shape[3] == 1:
+                        # Common shape for CNN feature vectors: [1, features, 1, 1]
+                        # We need to extract each value manually
+                        try:
+                            data = np.array([vector[0, i, 0, 0] for i in range(shape[1])], dtype=np.float32)
+                            return data  # Return early as this is already flat
+                        except Exception as e:
+                            print(f"Manual extraction failed: {e}")
+                            # Continue to other methods
+
+                # Last resort - try numpy conversion
+                data = np.array(vector, dtype=np.float32)
+
+            # Convert to numpy and flatten
+            array_data = np.array(data, dtype=np.float32)
+            return array_data.flatten()  # Ensure 1D output
+
+        # For regular numpy arrays or similar
+        if isinstance(vector, np.ndarray):
+            return vector.flatten()  # Ensure 1D output
+
+        # For list-like objects
+        if hasattr(vector, 'tolist'):
+            return np.array(vector.tolist(), dtype=np.float32).flatten()
+
+        # Generic conversion
+        return np.array(vector, dtype=np.float32).flatten()
+
+    except Exception as e:
+        print(f"Warning: Error converting vector: {e}")
+        print(f"Vector type: {type(vector)}")
+        if hasattr(vector, 'shape'):
+            print(f"Vector shape: {vector.shape}")
+
+        # Return a properly shaped array of zeros as fallback
+        if hasattr(vector, 'shape') and len(vector.shape) > 0:
+            # Find the largest dimension, which is likely the feature dimension
+            max_dim = max(vector.shape)
+            if max_dim > 10:  # Reasonable size for a feature vector
+                return np.zeros(max_dim, dtype=np.float32)
+
+        # Default fallback size
+        return np.zeros(512, dtype=np.float32)
+
+def save_vector(vector, file_path, format='numpy', metadata=None):
+    """Save a face vector to file using the specified format."""
+    directory = os.path.dirname(file_path)
+    if directory and not os.path.exists(directory):
+        os.makedirs(directory)
+
+    # Don't print the entire raw vector - it might be a complex object
+    print(f"Saving vector of type: {type(vector)}")
+
+    # First, try to extract the data into a standard numpy array
+    vector_np = extract_vector_data(vector)
+
+    # Check for an all-zeros vector, which indicates extraction failed
+    if np.all(vector_np == 0):
+        print("WARNING: Extracted vector contains all zeros - extraction likely failed!")
+
+        # Add extra debugging before giving up
+        print("Attempting emergency extraction methods...")
+
+        # Last-ditch effort - try direct attribute access with common names
+        for attr_name in ['data', 'array', 'values', 'tensor', 'vector', 'features']:
+            if hasattr(vector, attr_name):
+                try:
+                    data = getattr(vector, attr_name)
+                    vector_np = np.array(data, dtype=np.float32)
+                    print(f"Emergency extraction via '{attr_name}' attribute succeeded!")
+                    break
+                except Exception:
+                    continue
+
+    # Debug information
+    print(f"Extracted vector type: {type(vector_np)}")
+    print(f"Extracted vector shape: {vector_np.shape}")
+    print(f"Sample values: {vector_np[:5]} ... {vector_np[-5:] if len(vector_np) > 5 else []}")
+
+    # Check if the shape needs to be adjusted
+    if len(vector_np.shape) > 1:
+        vector_np = vector_np.squeeze()
+        print(f"Squeezed vector shape: {vector_np.shape}")
+
+    # Save according to format
+    try:
+        if format == 'numpy':
+            np.save(file_path, vector_np)
+        elif format == 'pickle':
+            with open(file_path, 'wb') as f:
+                pickle.dump(vector_np, f)
+        elif format == 'json':
+            data = {
+                'vector': vector_np.tolist(),
+                'metadata': metadata or {}
+            }
+            with open(file_path, 'w') as f:
+                json.dump(data, f)
+        else:
+            raise ValueError(f"Unsupported format: {format}")
+
+        print(f"Vector saved to {file_path}")
+        return file_path
+    except Exception as e:
+        print(f"Error saving vector: {e}")
+
+        # Alternative save method if the standard methods fail
+        if format == 'numpy' or format == 'pickle':
+            # Try saving as JSON as a fallback
+            try:
+                alt_path = file_path + '.json'
+                data = {
+                    'vector': vector_np.tolist(),
+                    'metadata': metadata or {}
+                }
+                with open(alt_path, 'w') as f:
+                    json.dump(data, f)
+                print(f"Vector saved using alternative method to {alt_path}")
+                return alt_path
+            except Exception as e2:
+                print(f"Alternative save method also failed: {e2}")
+                return None
+        return None
+
+def load_vector(file_path, format='numpy'):
+    """Load a face vector from file using the specified format."""
+    if not os.path.isfile(file_path):
+        raise FileNotFoundError(f"Vector file '{file_path}' not found.")
+
+    if format == 'numpy' or file_path.endswith('.npy'):
+        return np.load(file_path)
+    elif format == 'pickle' or file_path.endswith('.pkl'):
+        with open(file_path, 'rb') as f:
+            return pickle.load(f)
+    elif format == 'json' or file_path.endswith('.json'):
+        with open(file_path, 'r') as f:
+            data = json.load(f)
+        return np.array(data['vector']), data.get('metadata')
+    else:
+        raise ValueError(f"Unsupported format: {format}")
+
+def visualize_alignment(original_img, lmks, aligned_img, save_path='alignment_visualization.jpg'):
+    """
+    Visualize the landmarks detected by MTCNN and the aligned result.
+
+    Args:
+        original_img: original BGR image
+        lmks: landmark dict returned by MTCNN ('left_eye', 'right_eye', 'nose', etc.)
+        aligned_img: the aligned face image
+        save_path: where to save the visualization
+    """
+    import matplotlib.pyplot as plt
+
+    # Make a copy of the original image to draw on
+    img_vis = original_img.copy()
+
+    # Draw the landmarks on the original image
+    cv2.circle(img_vis, tuple(map(int, lmks['left_eye'])), 5, (0, 255, 0), -1)
+    cv2.circle(img_vis, tuple(map(int, lmks['right_eye'])), 5, (0, 255, 0), -1)
+    cv2.circle(img_vis, tuple(map(int, lmks['nose'])), 5, (0, 255, 0), -1)
+    cv2.circle(img_vis, tuple(map(int, lmks['mouth_left'])), 5, (0, 255, 0), -1)
+    cv2.circle(img_vis, tuple(map(int, lmks['mouth_right'])), 5, (0, 255, 0), -1)
+
+    # Draw a line between the eyes to show the alignment reference
+    cv2.line(img_vis,
+             tuple(map(int, lmks['left_eye'])),
+             tuple(map(int, lmks['right_eye'])),
+             (255, 0, 0), 2)
+
+    # Create a figure showing both images
+    plt.figure(figsize=(12, 6))
+
+    # Show the original image with landmarks
+    plt.subplot(1, 2, 1)
+    plt.imshow(cv2.cvtColor(img_vis, cv2.COLOR_BGR2RGB))
+    plt.title('Original image with landmarks')
+    plt.axis('off')
+
+    # Show the aligned image
+    plt.subplot(1, 2, 2)
+    plt.imshow(cv2.cvtColor(aligned_img, cv2.COLOR_BGR2RGB))
+    plt.title('Aligned face')
+    plt.axis('off')
+
+    plt.tight_layout()
+    plt.savefig(save_path)
+    plt.show()
+
+    print(f"Visualization saved to '{save_path}'")
+
+
+def cosine_similarity(vec1, vec2):
+    """
+    Calculate cosine similarity between two vectors.
+    Handles different input shapes by flattening both vectors.
+    """
+    # Ensure both vectors are numpy arrays
+    vec1 = np.array(vec1, dtype=np.float32)
+    vec2 = np.array(vec2, dtype=np.float32)
+
+    # Flatten both vectors to ensure 1D
+    vec1 = vec1.flatten()
+    vec2 = vec2.flatten()
+
+    # Check if the vectors have compatible sizes
+    if vec1.size != vec2.size:
+        print(f"Warning: Vector size mismatch: {vec1.size} vs {vec2.size}")
+        # Truncate the longer vector to match
+        if vec1.size < vec2.size:
+            vec2 = vec2[:vec1.size]
+        else:
+            vec1 = vec1[:vec2.size]
+
+    # Calculate cosine similarity
+    dot_product = np.dot(vec1, vec2)
+    norm_a = np.linalg.norm(vec1)
+    norm_b = np.linalg.norm(vec2)
+
+    # Handle zero division
+    if norm_a < 1e-10 or norm_b < 1e-10:
+        print("Warning: Vector with near-zero magnitude detected")
+        return 0.0
+
+    return dot_product / (norm_a * norm_b)
+
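+# A tiny sanity check for cosine_similarity() with hand-picked values:
+# parallel vectors give ~1.0, orthogonal vectors give 0.0.
+def _demo_cosine_similarity():
+    a = np.array([1.0, 0.0, 2.0])
+    print(cosine_similarity(a, 2 * a))        # ~1.0 (same direction)
+    print(cosine_similarity([1, 0], [0, 1]))  # 0.0 (orthogonal)
+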
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='KL520 ResNet18 model image inference implementation')
+    parser.add_argument('-p',
+                        '--port_id',
+                        help='Using specified port ID for connecting device (Default: port ID of first scanned Kneron device)',
+                        default=28,
+                        type=int)
+    parser.add_argument('-m',
+                        '--model',
+                        help='Model file path (.nef) (Default: {})'.format(MODEL_FILE_PATH),
+                        default=MODEL_FILE_PATH,
+                        type=str)
+    parser.add_argument('-i',
+                        '--img',
+                        help='Image file path (Default: {})'.format(IMAGE_FILE_PATH),
+                        default=IMAGE_FILE_PATH,
+                        type=str)
+    parser.add_argument('-o',
+                        '--output',
+                        help='Output vector file path (Default: output.npy)',
+                        default='face_vectors\\output.npy',
+                        type=str)
+    parser.add_argument('-f',
+                        '--format',
+                        help='Output format: numpy, pickle, or json (Default: numpy)',
+                        default='numpy',
+                        choices=['numpy', 'pickle', 'json'],
+                        type=str)
+    parser.add_argument('-n',
+                        '--name',
+                        help='Person name for the face vector (for metadata)',
+                        default=None,
+                        type=str)
+    args = parser.parse_args()
+
+    usb_port_id = args.port_id
+    MODEL_FILE_PATH = args.model
+    IMAGE_FILE_PATH = args.img
+
+    """
+    connect the device
+    """
+    try:
+        print('[Connect Device]')
+        device_group = kp.core.connect_devices(usb_port_ids=[usb_port_id])
+        print(' - Success')
+    except kp.ApiKPException as exception:
+        print('Error: connect device fail, port ID = \'{}\', error msg: [{}]'.format(usb_port_id,
+                                                                                     str(exception)))
+        sys.exit(1)
+
+    """
+    setting timeout of the usb communication with the device
+    """
+    print('[Set Device Timeout]')
+    kp.core.set_timeout(device_group=device_group, milliseconds=5000)
+    print(' - Success')
+
+    """
+    upload firmware to device
+    """
+    try:
+        print('[Upload Firmware]')
+        kp.core.load_firmware_from_file(device_group=device_group,
+                                        scpu_fw_path=SCPU_FW_PATH,
+                                        ncpu_fw_path=NCPU_FW_PATH)
+        print(' - Success')
+    except kp.ApiKPException as exception:
+        print('Error: upload firmware failed, error = \'{}\''.format(str(exception)))
+        sys.exit(1)
+
+    """
+    upload model to device
+    """
+    try:
+        print('[Upload Model]')
+        model_nef_descriptor = kp.core.load_model_from_file(device_group=device_group,
+                                                            file_path=MODEL_FILE_PATH)
+        print(' - Success')
+    except kp.ApiKPException as exception:
+        print('Error: upload model failed, error = \'{}\''.format(str(exception)))
+        sys.exit(1)
+
+    """
+    MTCNN Part
+    """
+    print('[Process MTCNN]')
+    start = time.time()
+    # Create MTCNN detector
+    detector = MTCNN(device="CPU:0")
+
+    # Load image
+    try:
+        img_rgb, img_bgr = load_image_safe(IMAGE_FILE_PATH)
+        print(f" - Image loaded: {IMAGE_FILE_PATH}")
+    except Exception as e:
+        print(str(e))
+        sys.exit(1)
+
+    # Get landmarks and calculate affine matrix
+    try:
+        lmks = landmarks(detector, img_rgb)
+        mat, size = affine_matrix(lmks)
+        print(" - Face landmarks detected")
+    except Exception as e:
+        print(str(e))
+        sys.exit(1)
+
+    # Apply affine transformation
+    aligned_img = cv2.warpAffine(img_bgr, mat, size)
+
+    end = time.time()
+    print(f" - MTCNN processing time: {end - start:.2f} seconds")
+
+    # Visualize the alignment process
+    visualize_alignment(img_bgr, lmks, aligned_img)
+
+    # Convert aligned_img to BGR565 format and resize
+    aligned_img_bgr565 = cv2.cvtColor(aligned_img, cv2.COLOR_BGR2BGR565)
+    img_bgr565 = cv2.resize(aligned_img_bgr565, (112, 112), interpolation=cv2.INTER_LINEAR)
+    print(" - Image aligned and formatted for inference")
+
+    """
+    prepare generic image inference input descriptor
+    """
+    generic_inference_input_descriptor = kp.GenericImageInferenceDescriptor(
+        model_id=model_nef_descriptor.models[0].id,
+        inference_number=0,
+        input_node_image_list=[
+            kp.GenericInputNodeImage(
+                image=img_bgr565,
+                image_format=kp.ImageFormat.KP_IMAGE_FORMAT_RGB565,
+                resize_mode=kp.ResizeMode.KP_RESIZE_ENABLE,
+                padding_mode=kp.PaddingMode.KP_PADDING_CORNER,
+                normalize_mode=kp.NormalizeMode.KP_NORMALIZE_KNERON
+            )
+        ]
+    )
+
+    """
+    starting inference work
+    """
+    print('[Starting Inference Work]')
+    try:
+        kp.inference.generic_image_inference_send(device_group=device_group,
+                                                  generic_inference_input_descriptor=generic_inference_input_descriptor)
+
+        generic_raw_result = kp.inference.generic_image_inference_receive(device_group=device_group)
+        print(" - Inference completed successfully")
+    except kp.ApiKPException as exception:
+        print(' - Error: inference failed, error = {}'.format(exception))
+        sys.exit(1)
+
+    """
+    retrieve inference node output
+    """
+    print('[Retrieve Inference Node Output]')
+    inf_node_output_list = []
+    for node_idx in range(generic_raw_result.header.num_output_node):
+        inference_float_node_output = kp.inference.generic_inference_retrieve_float_node(
+            node_idx=node_idx,
+            generic_raw_result=generic_raw_result,
+            channels_ordering=kp.ChannelOrdering.KP_CHANNEL_ORDERING_CHW
+        )
+        inf_node_output_list.append(inference_float_node_output)
+    print(' - Success')
+
+    """
+    Process and save the face embedding vector
+    """
+    # For face recognition models, the output is typically a feature vector,
+    # usually found in the first output node
+    output_format = args.format
+    output_path = args.output
+
+    if len(inf_node_output_list) > 0:
+        face_vector = inf_node_output_list[0]
+        # print(face_vector)
+        print(f"[Face Vector] Original type: {type(face_vector)}")
+        print(f"[Face Vector] Shape: {face_vector.shape}")
+
+        # Try to examine the vector object
+        print("[Face Vector] Available attributes:", [attr for attr in dir(face_vector) if not attr.startswith('__')])
+
+        # Create metadata if a name is provided
+        metadata = None
+        if args.name:
+            metadata = {
+                'name': args.name,
+                'image_path': IMAGE_FILE_PATH,
+                'timestamp': time.strftime("%Y-%m-%d %H:%M:%S"),
+                'dimensions': 512,
+            }
+
+        # If saving numpy and the file name doesn't end with .npy, add it
+        if output_format == 'numpy' and not output_path.endswith('.npy'):
+            output_path += '.npy'
+        # Likewise for pickle
+        if output_format == 'pickle' and not (output_path.endswith('.pkl') or output_path.endswith('.pickle')):
+            output_path += '.pkl'
+
+        # Save the vector
+        output_path = save_vector(
+            face_vector,
+            output_path,
+            format=output_format,
+            metadata=metadata
+        )
+
+        if output_path:
+            print(f"[Result] Face embedding vector saved to: {output_path}")
+            print(f" - Format: {output_format}")
+            if metadata:
+                print(f" - Metadata: {metadata}")
+        else:
+            print("[Error] Failed to save the vector")
+    else:
+        print("[Error] No output nodes found in the inference result")
+        output_path = None
+
+    # Clean up
+    kp.core.disconnect_devices(device_group=device_group)
+    print("[Cleanup] Device disconnected")
+
+    if output_format == 'numpy' and output_path and output_path.endswith('.npy'):
+        loaded = np.load(output_path)
+        print("Reload check:", loaded.shape, loaded.dtype)
\ No newline at end of file
diff --git a/output.npy b/output.npy
new file mode 100644
index 0000000..b820e0d
Binary files /dev/null and b/output.npy differ
diff --git a/test_npy.py b/test_npy.py
new file mode 100644
index 0000000..40e31a0
--- /dev/null
+++ b/test_npy.py
@@ -0,0 +1,47 @@
+import numpy as np
+import numpy.lib.format as fmt
+import argparse
+import os
+import sys
+
+def main():
+    parser = argparse.ArgumentParser(description="Read a .npy file and print its contents (diagnostic mode)")
+    parser.add_argument("npy_path", help="Path of the .npy file to read")
+    args = parser.parse_args()
+
+    npy_path = args.npy_path
+    if not os.path.isfile(npy_path):
+        print(f"Error: file does not exist: {npy_path}")
+        sys.exit(1)
+
+    size = os.path.getsize(npy_path)
+    print(f"File size: {size} bytes")
+    if size == 0:
+        print("Error: the file is empty; make sure it was generated and saved without errors.")
+        sys.exit(1)
+
+    # Try reading just the header
+    with open(npy_path, 'rb') as f:
+        try:
+            version = fmt.read_magic(f)
+            header = fmt._read_array_header(f, version)
+            print("Header read successfully:", header)
+        except Exception as e:
+            print("Failed to read header:", e)
+
+    try:
+        data = np.load(npy_path, allow_pickle=True)
+    except Exception as e:
+        print(f"Load failed: {e}")
+        sys.exit(1)
+
+    print(f"Loaded data from: {npy_path}")
+    print(f"Data type: {type(data)}")
+    if isinstance(data, np.ndarray):
+        print(f"Array shape: {data.shape}")
+        print(f"Array dtype: {data.dtype}")
+        print("Contents:")
+        print(data)
+
+if __name__ == "__main__":
+    main()
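+
+# A typical invocation against one of the saved embeddings, with hypothetical
+# output for a 512-element float32 vector (2048 data bytes plus the usual
+# 128-byte .npy header):
+#
+#   $ python test_npy.py face_vectors/Jay.npy
+#   File size: 2176 bytes
+#   Array shape: (512,)
+#   Array dtype: float32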
diff --git a/test_picture/Chou1.jpg b/test_picture/Chou1.jpg
new file mode 100644
index 0000000..9cd8d36
Binary files /dev/null and b/test_picture/Chou1.jpg differ
diff --git a/test_picture/Chou2.jpg b/test_picture/Chou2.jpg
new file mode 100644
index 0000000..5bc034e
Binary files /dev/null and b/test_picture/Chou2.jpg differ
diff --git a/test_picture/Chou3.jpg b/test_picture/Chou3.jpg
new file mode 100644
index 0000000..94bcdd9
Binary files /dev/null and b/test_picture/Chou3.jpg differ
diff --git a/test_picture/Chou4.jpg b/test_picture/Chou4.jpg
new file mode 100644
index 0000000..bc4fdcf
Binary files /dev/null and b/test_picture/Chou4.jpg differ
diff --git a/test_picture/WS1.jpg b/test_picture/WS1.jpg
new file mode 100644
index 0000000..720c248
Binary files /dev/null and b/test_picture/WS1.jpg differ
diff --git a/test_picture/WS2.jpg b/test_picture/WS2.jpg
new file mode 100644
index 0000000..1103d3b
Binary files /dev/null and b/test_picture/WS2.jpg differ
diff --git a/test_picture/Will_Smith.jpg b/test_picture/Will_Smith.jpg
new file mode 100644
index 0000000..949c72f
Binary files /dev/null and b/test_picture/Will_Smith.jpg differ
diff --git a/test_picture/t.jpg b/test_picture/t.jpg
new file mode 100644
index 0000000..8ee3823
Binary files /dev/null and b/test_picture/t.jpg differ
diff --git a/test_picture/t2.jpg b/test_picture/t2.jpg
new file mode 100644
index 0000000..38280eb
Binary files /dev/null and b/test_picture/t2.jpg differ
diff --git a/test_picture/test.jpg b/test_picture/test.jpg
new file mode 100644
index 0000000..3589029
Binary files /dev/null and b/test_picture/test.jpg differ
diff --git a/test_picture/ziyu.jpg b/test_picture/ziyu.jpg
new file mode 100644
index 0000000..f75e27c
Binary files /dev/null and b/test_picture/ziyu.jpg differ
diff --git a/test_picture/ziyu2.jpg b/test_picture/ziyu2.jpg
new file mode 100644
index 0000000..aac7c9f
Binary files /dev/null and b/test_picture/ziyu2.jpg differ
diff --git a/test_picture/ziyu4.jpg b/test_picture/ziyu4.jpg
new file mode 100644
index 0000000..13e9226
Binary files /dev/null and b/test_picture/ziyu4.jpg differ