From fba9ef8743b9c91a02cb822f5d441583bc3deba6 Mon Sep 17 00:00:00 2001
From: bsw215583320 <baoshiwei121@163.com>
Date: Wed, 16 Apr 2025 18:51:18 +0800
Subject: [PATCH] 增加上料机位置识别

---
 config/herb_ai.yaml |    6 +-
 model/hl.onnx       |    0 
 identifier.py       |   27 +++++++++++--
 herb_ai.py          |   48 +++++++++++++++++++++--
 4 files changed, 68 insertions(+), 13 deletions(-)

diff --git a/config/herb_ai.yaml b/config/herb_ai.yaml
index 636b292..ee33167 100644
--- a/config/herb_ai.yaml
+++ b/config/herb_ai.yaml
@@ -10,9 +10,9 @@
   safe: './model/safety_det.onnx'
   cls: './model/herb_identify.onnx'
 cam:
-  cam1: 1
-  cam2: 0
+  cam1: 0
+  cam2: 1
   sleep: 0.1
-  frames: 10
+  frames: 50
   days_threshold: 7
   max_files: 100
diff --git a/herb_ai.py b/herb_ai.py
index 0ff33a3..66be1e8 100644
--- a/herb_ai.py
+++ b/herb_ai.py
@@ -10,7 +10,6 @@
 import win32gui
 import multiprocessing
 from safety_detect import SAFETY_DETECT
-from cam_util import CAM_UTIL
 from identifier import IDENTIFIER
 import os
 from logger_config import logger
@@ -47,6 +46,7 @@
 
 # 璋冪敤鍙︿竴涓暱鐒﹂暅澶达紝鎷嶆憚娓呮櫚鐨勫眬閮ㄨ嵂鏉愬浘鐗�
 def get_image():
+    herb_identifier = IDENTIFIER("model/herb_identify.onnx")
     logger.info("璇嗗埆绾跨▼鍚姩")
     global is_loaded, class_count, class_count_max, class_sum
     camera2_index = config['cam']['cam2']
@@ -139,12 +139,23 @@
     capture.release()
 def send_result():
     global is_loaded,class_count, class_count_max, class_sum
+    # 瀵筩lass_count杩涜鎺掑簭锛屾寜鐓у�间粠澶у埌灏忔帓搴�,杩斿洖鍊兼渶澶х殑鍓嶄簲涓�
+    sorted_class_count = dict(sorted(class_count.items(), key=lambda x: x[1], reverse=True)[:5])
+    # 瀵筩lass_count_max杩涜鎺掑簭锛屾寜鐓у�间粠澶у埌灏忔帓搴�,杩斿洖鍊兼渶澶х殑鍓嶄簲涓�
+    sorted_class_count_max = dict(sorted(class_count_max.items(), key=lambda x: x[1], reverse=True)[:5])
+    # 瀵� class_sum杩涜鎺掑簭锛屾寜鐓у�间粠澶у埌灏忔帓搴�,杩斿洖鍊兼渶澶х殑鍓嶄簲涓�
+    sorted_class_sum = dict(sorted(class_sum.items(), key=lambda x: x[1], reverse=True)[:5])
     # 灏嗕笁绉嶇粺璁$粨鏋滆緭鍑哄埌鏃ュ織涓�
     logger.info("class_count:"+str(class_count))
+    logger.info("sorted_class_count:"+str(sorted_class_count))
     logger.info("class_count_max:"+str(class_count_max))
+    logger.info("sorted_class_count_max:"+str(sorted_class_count_max))
     logger.info("class_sum:"+str(class_sum))
+    logger.info("sorted_class_sum:"+str(sorted_class_sum))
     is_loaded = False
-    l.send_msg("airecognize," + f"{class_count}")
+    count_msg = "airecognize," + f"{sorted_class_count}"
+    logger.info("鍙戦�佽嵂鏉愯瘑鍒粨鏋滐細"+str(count_msg))
+    l.send_msg(count_msg)
     pass
 
 
@@ -181,6 +192,8 @@
     # 涓婃枡鐘舵��
     status = "娌℃湁涓婃枡"
 
+    # 鍒涘缓绐楀彛骞惰缃负鍙皟鏁村ぇ灏�
+    cv2.namedWindow("AICamera", cv2.WINDOW_NORMAL)
 
     # 寰幆璇诲彇鎽勫儚澶寸敾闈�
     while True:
@@ -215,7 +228,10 @@
         logger.info(f"瀹夊叏妫�娴嬭瘑鍒粨鏋�, {det_res}")
         # 濡傛灉cass_ids涓寘鍚�0锛屽垯琛ㄧず鏈夊畨鍏ㄦ娴嬪埌浜轰綋
         if 0 in class_ids:
-            l.send_msg("aidetect," + f"{det_res}")
+            res_ = "aidetect," + f"{det_res}"
+            logger.info("鍙戦�佸畨鍏ㄦ娴嬬粨鏋滐細"+str(res_))
+            l.send_msg(res_)
+
         # 涓婃枡璇嗗埆
         probabilities = load_identifier(frame)
         # 鎵惧埌鏈�澶ф鐜囩殑绫诲埆
@@ -260,6 +276,20 @@
                 logger.info("闀挎椂闂存湭涓婃枡锛岄噸缃鍦ㄤ笂鏂欑姸鎬�")
         # print(status)
 
+        # 涓婃枡鏈轰綅缃瘑鍒�
+        probabilities2 = hoister_position(frame);
+        predicted_class2 = np.argmax(probabilities2, axis=1)[0]
+        max_probability2 = np.max(probabilities2, axis=1)[0]
+        class_2 = hoister_position.class_names[predicted_class2]
+        print(f"-----------{class_2}:{predicted_class2}: {max_probability2}")
+        logger.info(f"-----------{class_2}:{predicted_class2}: {max_probability2}")
+
+        if predicted_class2 == 0:
+            feeder_res = {class_2: max_probability2}
+            class_feeder = "aifeeder," + f"{feeder_res}"
+            print("send_msg", class_feeder)
+            logger.info("鍙戦�佷笂鏂欐満浣嶇疆璇嗗埆缁撴灉锛�"+str(class_feeder))
+            l.send_msg(class_feeder)
         # 璁$畻甯ч�熺巼
         frame_count += 1
         end_time = time.time()
@@ -270,7 +300,14 @@
         cv2.putText(draw_img, f"FPS: {fps:.2f}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2,
                     cv2.LINE_AA)
         # 鏄剧ず鐢婚潰
-        cv2.imshow("AICamera", draw_img)
+        # 鑾峰彇褰撳墠绐楀彛澶у皬
+        width = cv2.getWindowImageRect("AICamera")[2]
+        height = cv2.getWindowImageRect("AICamera")[3]
+
+        # 璋冩暣鍥惧儚澶у皬浠ラ�傚簲绐楀彛
+        resized_frame = cv2.resize(draw_img, (width, height))
+
+        cv2.imshow("AICamera", resized_frame)
         # 妫�娴嬫寜閿紝濡傛灉鎸変笅q閿垯閫�鍑哄惊鐜�
         if cv2.waitKey(1) & 0xFF == ord('q'):
             break
@@ -440,8 +477,9 @@
     # 鏄惁涓婅繃鏂�
     is_loaded = False
     # 鍔犺浇ONNX妯″瀷
-    herb_identifier = IDENTIFIER("model/herb_identify.onnx")
+
     load_identifier = IDENTIFIER("model/loading.onnx")
+    hoister_position = IDENTIFIER("model/hl.onnx")
     safety_detect = SAFETY_DETECT("model/safety_det.onnx")
     config = read_config()
     PCOPYDATASTRUCT = ctypes.POINTER(COPYDATASTRUCT)
diff --git a/identifier.py b/identifier.py
index 41e7b22..293bffe 100644
--- a/identifier.py
+++ b/identifier.py
@@ -21,7 +21,7 @@
         self.get_output_details()
 
     def idengify(self, image):
-        input_tensor, ratio = self.prepare_input(image)
+        input_tensor = self.prepare_input(image)
 
         # Perform inference on the image
         outputs = self.inference(input_tensor)
@@ -36,14 +36,16 @@
         input_img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
 
         # Resize鍥剧墖涓嶈鐩存帴浣跨敤resize锛岄渶瑕佹寜姣斾緥缂╂斁锛岀┖鐧藉尯鍩熷~绌虹函鑹插嵆鍙�
-        input_img, ratio = self.ratioresize(input_img)
+        # input_img = self.ratioresize(input_img)
+        # 鍚敤涓績瑁佸壀
+        input_img = self.center_crop(input_img)
 
         # Scale input pixel values to 0 to 1
         input_img = input_img / 255.0
         input_img = input_img.transpose(2, 0, 1)
         input_tensor = input_img[np.newaxis, :, :, :].astype(np.float32)
 
-        return input_tensor, ratio
+        return input_tensor
 
     def inference(self, input_tensor):
         start = time.perf_counter()
@@ -83,6 +85,21 @@
 
         padded_img[: new_unpad[1], : new_unpad[0]] = im
         padded_img = np.ascontiguousarray(padded_img)
-        return padded_img, 1 / r
+        return padded_img
 
-
+    def center_crop(self, img):
+        # 鏂板涓績瑁佸壀鏂规硶
+        h, w = img.shape[:2]
+        desired_h = self.input_height
+        desired_w = self.input_width
+        
+        # 濡傛灉鍥剧墖灏哄澶т簬鐩爣灏哄锛屽垯杩涜涓績瑁佸壀
+        if h > desired_h and w > desired_w:
+            start_y = (h - desired_h) // 2
+            start_x = (w - desired_w) // 2
+            end_y = start_y + desired_h
+            end_x = start_x + desired_w
+            return img[start_y:end_y, start_x:end_x]
+        else:
+            # 鍚﹀垯杩涜缂╂斁
+            return self.ratioresize(img)
diff --git a/model/hl.onnx b/model/hl.onnx
new file mode 100644
index 0000000..f18eb6c
--- /dev/null
+++ b/model/hl.onnx
Binary files differ

--
Gitblit v1.9.3