diff --git a/multi_series_dongle_manager.py b/multi_series_dongle_manager.py
index ce5fdff..cad34a0 100644
--- a/multi_series_dongle_manager.py
+++ b/multi_series_dongle_manager.py
@@ -26,8 +26,8 @@ class InferenceResult:
     timestamp: float
 
 class DongleSeriesSpec:
-    KL520_GOPS = 345
-    KL720_GOPS = 1425
+    KL520_GOPS = 3
+    KL720_GOPS = 28
 
     SERIES_SPECS = {
         0x100: {"name": "KL520", "gops": KL520_GOPS},
@@ -140,10 +140,8 @@ class MultiSeriesDongleManager:
             )
             self.model_descriptors[product_id] = model_descriptor
 
-            # Prepare inference descriptor
-            self.inference_descriptors[product_id] = kp.GenericImageInferenceDescriptor(
-                model_id=model_descriptor.models[0].id
-            )
+            # Keep only the model descriptor here; inference descriptors are
+            # now created per task in the send worker to avoid shared state.
             print(f"[{series_name}] Model upload success")
 
             # Create result queue for this dongle
@@ -244,6 +242,7 @@ class MultiSeriesDongleManager:
         # Process image input
         if isinstance(image, str):
             image_data = cv2.imread(image)
             if image_data is None:
                 raise FileNotFoundError(f"Image file not found: {image}")
+            image_data = cv2.cvtColor(image_data, cv2.COLOR_BGR2BGR565)  # cv2.imread returns BGR
         elif isinstance(image, np.ndarray):
@@ -254,6 +253,8 @@
         # Convert format string to enum
         format_mapping = {
             'BGR565': kp.ImageFormat.KP_IMAGE_FORMAT_RGB565,
+            # 'RGB888': kp.ImageFormat.KP_IMAGE_FORMAT_RGB888,
+            # 'BGR888': kp.ImageFormat.KP_IMAGE_FORMAT_RGB888,  # OpenCV uses BGR by default
             'RGB8888': kp.ImageFormat.KP_IMAGE_FORMAT_RGBA8888,
             'YUYV': kp.ImageFormat.KP_IMAGE_FORMAT_YUYV,
             'RAW8': kp.ImageFormat.KP_IMAGE_FORMAT_RAW8
@@ -341,7 +342,6 @@ class MultiSeriesDongleManager:
         print(f"Send worker started for {series_name}")
 
         device_group = self.dongle_groups[product_id]
-        inference_descriptor = self.inference_descriptors[product_id]
         result_queue = self.result_queues[product_id]
 
         while not self.stop_event.is_set():
@@ -350,22 +350,28 @@
                 if task is None:
                     continue
 
-                # Configure inference descriptor
-                inference_descriptor.inference_number = task.sequence_id
-                inference_descriptor.input_node_image_list = [kp.GenericInputNodeImage(
-                    image=task.image_data,
-                    image_format=task.image_format,
-                    resize_mode=kp.ResizeMode.KP_RESIZE_ENABLE,
-                    padding_mode=kp.PaddingMode.KP_PADDING_CORNER,
-                    normalize_mode=kp.NormalizeMode.KP_NORMALIZE_KNERON
-                )]
+                # Build a fresh inference descriptor for each task so that
+                # workers never mutate a shared, cached descriptor
+                inference_descriptor = kp.GenericImageInferenceDescriptor(
+                    model_id=self.model_descriptors[product_id].models[0].id,
+                )
+                inference_descriptor.inference_number = task.sequence_id
+                inference_descriptor.input_node_image_list = [
+                    kp.GenericInputNodeImage(
+                        image=task.image_data,
+                        image_format=task.image_format,
+                        resize_mode=kp.ResizeMode.KP_RESIZE_ENABLE,
+                        padding_mode=kp.PaddingMode.KP_PADDING_CORNER,
+                        normalize_mode=kp.NormalizeMode.KP_NORMALIZE_KNERON
+                    )
+                ]
 
                 # Send inference
                 kp.inference.generic_image_inference_send(
                     device_group=device_group,
                     generic_inference_input_descriptor=inference_descriptor
                 )
-
+                print(f"Task {task.sequence_id} sent successfully to {series_name}")
                 self.stats['dongle_stats'][product_id]['sent'] += 1
 
             except queue.Empty:
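
The snippet below is a minimal, hypothetical sketch of the per-task descriptor pattern this patch adopts; it is not code from the repository. It assumes an already-connected device_group and an uploaded model whose descriptor is available as model_descriptor (as produced by the manager's model-upload path), and it uses only the kp and cv2 calls that already appear in the diff.

import cv2
import kp


def send_one_image(device_group, model_descriptor, image_path, sequence_id):
    """Hypothetical helper mirroring the new send-worker logic:
    build a fresh GenericImageInferenceDescriptor per request."""
    image_bgr = cv2.imread(image_path)
    if image_bgr is None:
        raise FileNotFoundError(f"Image file not found: {image_path}")
    # OpenCV loads BGR; convert to the 16-bit 565 layout sent to the dongle
    image_565 = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2BGR565)

    descriptor = kp.GenericImageInferenceDescriptor(
        model_id=model_descriptor.models[0].id,
    )
    descriptor.inference_number = sequence_id
    descriptor.input_node_image_list = [
        kp.GenericInputNodeImage(
            image=image_565,
            image_format=kp.ImageFormat.KP_IMAGE_FORMAT_RGB565,
            resize_mode=kp.ResizeMode.KP_RESIZE_ENABLE,
            padding_mode=kp.PaddingMode.KP_PADDING_CORNER,
            normalize_mode=kp.NormalizeMode.KP_NORMALIZE_KNERON,
        )
    ]
    kp.inference.generic_image_inference_send(
        device_group=device_group,
        generic_inference_input_descriptor=descriptor,
    )

Constructing the descriptor inside the call costs a small allocation per frame, but it removes the shared mutable descriptor that the old per-dongle cache exposed to every send worker.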