From 776a127b8db01cd4338f4db2a84ea567a65bff9f Mon Sep 17 00:00:00 2001
From: baoshiwei <baoshiwei@shlanbao.cn>
Date: Tue, 15 Apr 2025 13:52:04 +0800
Subject: [PATCH] rjuq

---
 detect_onnx.py           |  220 +++++++++++
 shot_onnx.py             |  105 +++++
 save_img.py              |   31 +
 speech/formatmp3.py      |    5 
 replaceLabelNumber.py    |   38 ++
 quchong.py               |   21 
 speech/deepSpeechTest.py |   20 +
 pachong.py               |   36 +
 paherbbaidu.py           |   46 ++
 onnx-test.py             |   50 ++
 speech/whisper2.py       |    1 
 speech/whisperTest.py    |   10 
 onnx_predit.py           |  364 +++++++++++++++++++
 camera_onnx.py           |  101 +++++
 pc.py                    |   35 +
 15 files changed, 1,050 insertions(+), 33 deletions(-)

diff --git a/camera_onnx.py b/camera_onnx.py
new file mode 100644
index 0000000..ea02d93
--- /dev/null
+++ b/camera_onnx.py
@@ -0,0 +1,101 @@
+import cv2
+import time
+import numpy as np
+import onnxruntime
+from scipy.special import softmax
+
+# Load the ONNX model
+session = onnxruntime.InferenceSession("model/classify/s.onnx")
+# Camera index; 0 is usually the first camera
+camera_index = 0
+
+# Open the camera
+cap = cv2.VideoCapture(camera_index, cv2.CAP_DSHOW)
+# Set the capture resolution
+cap.set(cv2.CAP_PROP_FRAME_WIDTH, 3840)   # width
+cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 2160)  # height
+# Check whether the camera opened successfully
+if not cap.isOpened():
+    print("Cannot open camera")
+    exit()
+
+width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
+height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
+print("Camera resolution:", width, "x", height)
+
+# Read the class names from res1-2.json
+with open("res1-2.json", "r") as f:
+    classes = eval(f.read())
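+# NOTE: eval() here assumes res1-2.json holds a Python dict literal; if the
+# file is plain JSON, json.loads(f.read()) would be the safer equivalent.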
+
+
+
+# 鐩爣鍥惧儚灏哄
+target_width = 1024
+target_height = 768
+
+# 璁℃椂鍣�
+start_time = time.time()
+
+# 寰幆璇诲彇鎽勫儚澶寸敾闈�
+while True:
+    ret, frame = cap.read()
+
+    if not ret:
+        print("鏃犳硶璇诲彇鎽勫儚澶寸敾闈�")
+        break
+
+    # 1920*1080鐨勫浘鍍忥紝涓績瑁佸壀640*480鐨勫尯鍩�
+    cropped_frame = frame[int(height / 2 - target_height / 2):int(height / 2 + target_height / 2),
+                    int(width / 2 - target_width / 2):int(width / 2 + target_width / 2)]
+    # 璋冩暣鍥惧儚灏哄
+    resized_frame = cv2.resize(cropped_frame, (target_width, target_height))
+
+    # 鑾峰彇褰撳墠鏃堕棿
+    current_time = time.time()
+
+    #濡傛灉璺濈涓婁竴娆′繚瀛樺凡缁忚繃鍘�1绉掞紝鍒欎繚瀛樺綋鍓嶇敾闈�
+    # if current_time - start_time >= 3.0:
+    #     # 鐢熸垚淇濆瓨鏂囦欢鍚嶏紝浠ュ綋鍓嶆椂闂村懡鍚�
+    #     save_name = time.strftime("%Y%m%d%H%M%S", time.localtime()) + ".jpg"
+    #     # 淇濆瓨璋冩暣灏哄鍚庣殑鍥剧墖
+    #     cv2.imwrite(save_path + save_name, frame)
+    #     print("淇濆瓨鍥剧墖:", save_name)
+    #     # 閲嶇疆璁℃椂鍣�
+    #     start_time = time.time()
+
+    # 棰勫鐞�
+    blob = cv2.dnn.blobFromImage(resized_frame, 1 / 255.0, (640, 640), swapRB=True, crop=False)
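+    # blobFromImage returns a float32 NCHW tensor of shape (1, 3, 640, 640);
+    # with crop=False the 1024x768 frame is stretched, not letterboxed.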
+
+    # Run inference
+    outputs = session.run(None, {session.get_inputs()[0].name: blob})
+
+    # The output is treated as class probabilities (apply scipy's softmax first
+    # if the exported model emits raw logits)
+    probabilities = outputs[0]
+
+    # Class with the highest probability
+    predicted_class = np.argmax(probabilities, axis=1)[0]
+    max_probability = np.max(probabilities, axis=1)[0]
+
+    # Indices of the five highest-probability classes (ascending order)
+    top_five_classes = np.argsort(probabilities, axis=1)[0][-5:]
+
+    # Print the top five classes
+    print("Top 5 Classes:")
+    for i in top_five_classes:
+        print(f"{classes[i]}: {probabilities[0][i]}")
+
+    # Show the frame
+    cv2.imshow("Camera", resized_frame)
+
+    # Quit the loop when 'q' is pressed
+    if cv2.waitKey(1) & 0xFF == ord('q'):
+        break
+
+# Release the camera
+cap.release()
+
+# Close all windows
+cv2.destroyAllWindows()
diff --git a/detect_onnx.py b/detect_onnx.py
new file mode 100644
index 0000000..8d15e9d
--- /dev/null
+++ b/detect_onnx.py
@@ -0,0 +1,220 @@
+import time
+import cv2
+import numpy as np
+import onnxruntime
+
+
+class YOLOv8:
+
+    def __init__(self, path, conf_thres=0.7, iou_thres=0.7):
+        self.conf_threshold = conf_thres
+        self.iou_threshold = iou_thres
+
+        # Initialize model
+        self.initialize_model(path)
+
+    def __call__(self, image):
+        return self.detect_objects(image)
+
+    def initialize_model(self, path):
+        self.session = onnxruntime.InferenceSession(path, providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
+        # Get model info
+        self.get_input_details()
+        self.get_output_details()
+
+    def detect_objects(self, image):
+        input_tensor, ratio = self.prepare_input(image)
+
+        # Perform inference on the image
+        outputs = self.inference(input_tensor)
+
+        self.boxes, self.scores, self.class_ids = self.process_output(outputs, ratio)
+
+        return self.boxes, self.scores, self.class_ids
+
+    def prepare_input(self, image):
+        self.img_height, self.img_width = image.shape[:2]
+
+        input_img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+
+        # Don't resize directly: scale by ratio and pad the blank region with a solid color
+        input_img, ratio = self.ratioresize(input_img)
+
+        # Scale input pixel values to 0 to 1
+        input_img = input_img / 255.0
+        input_img = input_img.transpose(2, 0, 1)
+        input_tensor = input_img[np.newaxis, :, :, :].astype(np.float32)
+
+        return input_tensor, ratio
+
+    def inference(self, input_tensor):
+        start = time.perf_counter()
+        outputs = self.session.run(self.output_names, {self.input_names[0]: input_tensor})
+
+        # print(f"Inference time: {(time.perf_counter() - start)*1000:.2f} ms")
+        return outputs
+
+    def process_output(self, output, ratio):
+        predictions = np.squeeze(output[0]).T
+
+        # Filter out object confidence scores below threshold
+        scores = np.max(predictions[:, 4:], axis=1)
+        predictions = predictions[scores > self.conf_threshold, :]
+        scores = scores[scores > self.conf_threshold]
+
+        if len(scores) == 0:
+            return [], [], []
+
+        # Get the class with the highest confidence
+        class_ids = np.argmax(predictions[:, 4:], axis=1)
+
+        # Get bounding boxes for each object
+        boxes = self.extract_boxes(predictions, ratio)
+
+        # Apply non-maxima suppression to suppress weak, overlapping bounding boxes
+        indices = self.nms(boxes, scores, self.iou_threshold)
+
+        return boxes[indices], scores[indices], class_ids[indices]
+
+    def extract_boxes(self, predictions, ratio):
+        # Extract boxes from predictions
+        boxes = predictions[:, :4]
+
+        # Scale boxes to original image dimensions
+        # boxes = self.rescale_boxes(boxes)
+        boxes *= ratio
+
+        # Convert boxes to xyxy format
+        boxes = self.xywh2xyxy(boxes)
+
+        return boxes
+
+    def rescale_boxes(self, boxes):
+
+        # Rescale boxes to original image dimensions
+
+        input_shape = np.array([self.input_width, self.input_height, self.input_width, self.input_height])
+        boxes = np.divide(boxes, input_shape, dtype=np.float32)
+        boxes *= np.array([self.img_width, self.img_height, self.img_width, self.img_height])
+
+        return boxes
+
+    def get_input_details(self):
+        model_inputs = self.session.get_inputs()
+        self.input_names = [model_inputs[i].name for i in range(len(model_inputs))]
+
+        self.input_shape = model_inputs[0].shape
+        self.input_height = self.input_shape[2]
+        self.input_width = self.input_shape[3]
+
+    def get_output_details(self):
+        model_outputs = self.session.get_outputs()
+        self.output_names = [model_outputs[i].name for i in range(len(model_outputs))]
+
+    # Letterbox: scale the image preserving aspect ratio, padding with a solid color
+    def ratioresize(self, im, color=114):
+        shape = im.shape[:2]
+        new_h, new_w = self.input_height, self.input_width
+        padded_img = np.ones((new_h, new_w, 3), dtype=np.uint8) * color
+
+        # Scale ratio (new / old)
+        r = min(new_h / shape[0], new_w / shape[1])
+
+        # Compute padding
+        new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
+
+        if shape[::-1] != new_unpad:
+            im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
+
+        padded_img[: new_unpad[1], : new_unpad[0]] = im
+        padded_img = np.ascontiguousarray(padded_img)
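+        # Return the inverse scale (1 / r) so the caller can map boxes
+        # predicted on the letterboxed input back to original-image pixels
+        # with a single multiply.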
+        return padded_img, 1 / r
+
+    def nms(self, boxes, scores, iou_threshold):
+        # Sort by score
+        sorted_indices = np.argsort(scores)[::-1]
+
+        keep_boxes = []
+        while sorted_indices.size > 0:
+            # Pick the last box
+            box_id = sorted_indices[0]
+            keep_boxes.append(box_id)
+
+            # Compute IoU of the picked box with the rest
+            ious = self.compute_iou(boxes[box_id, :], boxes[sorted_indices[1:], :])
+
+            # Remove boxes with IoU over the threshold
+            keep_indices = np.where(ious < iou_threshold)[0]
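+            # keep_indices is relative to sorted_indices[1:], hence the
+            # +1 shift applied below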
+
+            # print(keep_indices.shape, sorted_indices.shape)
+            sorted_indices = sorted_indices[keep_indices + 1]
+
+        return keep_boxes
+
+    def compute_iou(self, box, boxes):
+        # Compute xmin, ymin, xmax, ymax for both boxes
+        xmin = np.maximum(box[0], boxes[:, 0])
+        ymin = np.maximum(box[1], boxes[:, 1])
+        xmax = np.minimum(box[2], boxes[:, 2])
+        ymax = np.minimum(box[3], boxes[:, 3])
+
+        # Compute intersection area
+        intersection_area = np.maximum(0, xmax - xmin) * np.maximum(0, ymax - ymin)
+
+        # Compute union area
+        box_area = (box[2] - box[0]) * (box[3] - box[1])
+        boxes_area = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
+        union_area = box_area + boxes_area - intersection_area
+
+        # Compute IoU
+        iou = intersection_area / union_area
+
+        return iou
+
+    def xywh2xyxy(self, x):
+        # Convert bounding box (x, y, w, h) to bounding box (x1, y1, x2, y2)
+        y = np.copy(x)
+        y[..., 0] = x[..., 0] - x[..., 2] / 2
+        y[..., 1] = x[..., 1] - x[..., 3] / 2
+        y[..., 2] = x[..., 0] + x[..., 2] / 2
+        y[..., 3] = x[..., 1] + x[..., 3] / 2
+        return y
+
+
+if __name__ == "__main__":
+    yolov8_detector = YOLOv8('model/detect/best.onnx', conf_thres=0.7, iou_thres=0.7)
+
+
+    # Camera index; 0 is usually the first camera
+    camera_index = 0
+
+    # Open the camera
+    cap = cv2.VideoCapture(camera_index, cv2.CAP_DSHOW)
+    # Set the capture resolution
+    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 3840)   # width
+    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 2160)  # height
+    # Check whether the camera opened successfully
+    if not cap.isOpened():
+        print("Cannot open camera")
+        exit()
+    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
+    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
+    print("Camera resolution:", width, "x", height)
+    # 鐩爣鍥惧儚灏哄
+    target_width = 1024
+    target_height = 768
+    # 寰幆璇诲彇鎽勫儚澶寸敾闈�
+    while True:
+        ret, frame = cap.read()
+
+        if not ret:
+            print("鏃犳硶璇诲彇鎽勫儚澶寸敾闈�")
+            break
+
+        # 1920*1080鐨勫浘鍍忥紝涓績瑁佸壀640*480鐨勫尯鍩�
+        cropped_frame = frame[int(height / 2 - target_height / 2):int(height / 2 + target_height / 2),
+                        int(width / 2 - target_width / 2):int(width / 2 + target_width / 2)]
+        # 璋冩暣鍥惧儚灏哄
+        resized_frame = cv2.resize(cropped_frame, (target_width, target_height))
+        boxes, scores, class_ids = yolov8_detector(resized_frame)
+        print(boxes, scores, class_ids)
\ No newline at end of file
diff --git a/onnx-test.py b/onnx-test.py
new file mode 100644
index 0000000..a654632
--- /dev/null
+++ b/onnx-test.py
@@ -0,0 +1,50 @@
+import cv2
+import numpy as np
+import onnxruntime
+from scipy.special import softmax
+
+# Load the ONNX model
+session = onnxruntime.InferenceSession("model/classify/best.onnx")
+
+# Read the test image
+img = cv2.imread("D:\\temp\\15.jpg")
+
+# Read the class names from res1-2.json
+with open("res1-2.json", "r") as f:
+    classes = eval(f.read())
+
+# Preprocess: BGR->RGB, scale to [0,1], resize to the 640x640 model input
+blob = cv2.dnn.blobFromImage(img, 1/255.0, (640, 640), swapRB=True, crop=False)
+
+# Run inference
+outputs = session.run(None, {session.get_inputs()[0].name: blob})
+
+# The output is treated as class probabilities (apply scipy's softmax first if
+# the exported model emits raw logits)
+probabilities = outputs[0]
+
+# Class with the highest probability
+predicted_class = np.argmax(probabilities, axis=1)[0]
+max_probability = np.max(probabilities, axis=1)[0]
+
+# Indices of the five highest-probability classes (ascending order)
+top_five_classes = np.argsort(probabilities, axis=1)[0][-5:]
+
+# Print the top five classes
+print("Top 5 Classes:")
+for i in top_five_classes:
+    print(f"{classes[i]}: {probabilities[0][i]}")
+
+
+
+# Post-processing sketch for a detection-style output (unused by this classifier):
+# for detection in outputs[0][0]:
+#     confidence = detection[4]
+#     if confidence > 0.5:
+#         class_id = int(detection[5])
+#         x, y, w, h = detection[:4]
+#         cv2.rectangle(img, (int(x), int(y)), (int(x+w), int(y+h)), (0, 255, 0), 2)
+#
+# # Show the result
+# cv2.imshow("YOLOv8 Detection", img)
+# cv2.waitKey(0)
\ No newline at end of file
diff --git a/onnx_predit.py b/onnx_predit.py
new file mode 100644
index 0000000..3168a19
--- /dev/null
+++ b/onnx_predit.py
@@ -0,0 +1,364 @@
+import time
+
+import cv2
+import onnxruntime as ort
+from PIL import Image
+import numpy as np
+
+# Confidence threshold
+confidence_thres = 0.35
+# IoU threshold
+iou_thres = 0.5
+# Class names
+classes = {0: 'herb'}
+# One random color per class
+color_palette = np.random.uniform(100, 255, size=(len(classes), 3))
+
+# Execution providers: try CUDA first, fall back to CPU
+providers = [
+    ('CUDAExecutionProvider', {
+        'device_id': 0,  # GPU device ID, in case several GPUs are available
+    }),
+    'CPUExecutionProvider',  # CPU fallback
+]
+
+def calculate_iou(box, other_boxes):
+    """
+    Compute the Intersection over Union (IoU) between one bounding box and a
+    set of other bounding boxes.
+
+    Parameters:
+    - box: a single box in [x1, y1, width, height] format.
+    - other_boxes: an array of boxes, each also in [x1, y1, width, height] format.
+
+    Returns:
+    - iou: an array with the IoU of the given box against every other box.
+    """
+
+    # Top-left corner of the intersection
+    x1 = np.maximum(box[0], np.array(other_boxes)[:, 0])
+    y1 = np.maximum(box[1], np.array(other_boxes)[:, 1])
+    # Bottom-right corner of the intersection
+    x2 = np.minimum(box[0] + box[2], np.array(other_boxes)[:, 0] + np.array(other_boxes)[:, 2])
+    y2 = np.minimum(box[1] + box[3], np.array(other_boxes)[:, 1] + np.array(other_boxes)[:, 3])
+    # Intersection area (zero when the boxes do not overlap)
+    intersection_area = np.maximum(0, x2 - x1) * np.maximum(0, y2 - y1)
+    # Area of the given box
+    box_area = box[2] * box[3]
+    # Areas of the other boxes
+    other_boxes_area = np.array(other_boxes)[:, 2] * np.array(other_boxes)[:, 3]
+    # IoU = intersection / union
+    iou = intersection_area / (box_area + other_boxes_area - intersection_area)
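+    # Worked example: two 10x10 boxes offset by 5 px in x intersect in a
+    # 5x10 strip, so IoU = 50 / (100 + 100 - 50) = 1/3.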
+    return iou
+
+def custom_NMSBoxes(boxes, scores, confidence_threshold, iou_threshold):
+    # Nothing to do if there are no boxes
+    if len(boxes) == 0:
+        return []
+    # Convert scores and boxes to NumPy arrays
+    scores = np.array(scores)
+    boxes = np.array(boxes)
+    # Filter boxes by the confidence threshold
+    mask = scores > confidence_threshold
+    filtered_boxes = boxes[mask]
+    filtered_scores = scores[mask]
+    # If nothing survives the filter, return an empty list
+    if len(filtered_boxes) == 0:
+        return []
+    # Sort box indices by descending confidence
+    sorted_indices = np.argsort(filtered_scores)[::-1]
+    # Indices of the boxes kept by NMS
+    indices = []
+    # Keep looping while there are unprocessed boxes
+    while len(sorted_indices) > 0:
+        # Take the highest-scoring remaining box
+        current_index = sorted_indices[0]
+        indices.append(current_index)
+        # Stop when only one box is left
+        if len(sorted_indices) == 1:
+            break
+        # Split off the current box from the rest
+        current_box = filtered_boxes[current_index]
+        other_boxes = filtered_boxes[sorted_indices[1:]]
+        # IoU of the current box against all remaining boxes
+        iou = calculate_iou(current_box, other_boxes)
+        # Keep only boxes whose IoU is below the threshold, i.e. boxes that
+        # do not overlap the current box too much
+        non_overlapping_indices = np.where(iou <= iou_threshold)[0]
+        # non_overlapping_indices indexes into sorted_indices[1:], hence the +1
+        sorted_indices = sorted_indices[non_overlapping_indices + 1]
+    # Return the indices of the kept boxes (relative to the filtered arrays)
+    return indices
+
+
+def draw_detections(img, box, score, class_id):
+    """
+    Draw the bounding box and label for one detection on the input image.
+
+    Parameters:
+            img: the image to draw the detection on.
+            box: the detected bounding box [x1, y1, w, h].
+            score: the detection confidence.
+            class_id: the class ID of the detected object.
+
+    Returns:
+            None
+    """
+
+    # Unpack the box coordinates
+    x1, y1, w, h = box
+    # Look up the color for this class
+    color = color_palette[class_id]
+    # Draw the bounding box
+    cv2.rectangle(img, (int(x1), int(y1)), (int(x1 + w), int(y1 + h)), color, 2)
+    # Label text: class name plus score
+    label = f'{classes[class_id]}: {score:.2f}'
+    # Measure the label text
+    (label_width, label_height), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
+    # Place the label above the box, or below it when too close to the top edge
+    label_x = x1
+    label_y = y1 - 10 if y1 - 10 > label_height else y1 + 10
+    # Filled rectangle as the label background
+    cv2.rectangle(img, (label_x, label_y - label_height), (label_x + label_width, label_y + label_height), color, cv2.FILLED)
+    # Draw the label text
+    cv2.putText(img, label, (label_x, label_y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)
+
+
+def preprocess(img, input_width, input_height):
+    """
+    Preprocess the input image before running inference.
+
+    Returns:
+        image_data: the preprocessed image data, ready for inference.
+        img_height, img_width: the original image dimensions.
+    """
+
+    # Original image height and width
+    img_height, img_width = img.shape[:2]
+    # Convert the color space from BGR to RGB
+    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+    # Resize to the model input size
+    img = cv2.resize(img, (input_width, input_height))
+    # Normalize pixel values to [0, 1]
+    image_data = np.array(img) / 255.0
+    # Move the channel dimension first (HWC -> CHW)
+    image_data = np.transpose(image_data, (2, 0, 1))
+    # Add a batch dimension to match the expected input shape (1, 3, H, W)
+    image_data = np.expand_dims(image_data, axis=0).astype(np.float32)
+    # Return the preprocessed data plus the original dimensions
+    return image_data, img_height, img_width
+
+def postprocess(input_image, output, input_width, input_height, img_width, img_height):
+    """
+    Post-process the model output: extract boxes, scores and class IDs, run
+    NMS, and draw the surviving detections.
+
+    Parameters:
+        input_image (numpy.ndarray): the input image.
+        output (numpy.ndarray): the raw model output.
+        input_width (int): model input width.
+        input_height (int): model input height.
+        img_width (int): original image width.
+        img_height (int): original image height.
+
+    Returns:
+        numpy.ndarray: the input image with detections drawn on it.
+    """
+
+    # Squeeze and transpose the output to shape (num_boxes, 4 + num_classes)
+    outputs = np.transpose(np.squeeze(output[0]))
+    # Number of candidate boxes
+    rows = outputs.shape[0]
+    # Lists for the detected boxes, scores and class IDs
+    boxes = []
+    scores = []
+    class_ids = []
+    # Scale factors from model input space back to original image space
+    x_factor = img_width / input_width
+    y_factor = img_height / input_height
+    # Walk over every candidate row
+    for i in range(rows):
+        # Class scores for this candidate
+        classes_scores = outputs[i][4:]
+        # Best class score
+        max_score = np.amax(classes_scores)
+        # Keep the candidate if it clears the confidence threshold
+        if max_score >= confidence_thres:
+            # ID of the best-scoring class
+            class_id = np.argmax(classes_scores)
+            # Box center and size in model input coordinates
+            x, y, w, h = outputs[i][0], outputs[i][1], outputs[i][2], outputs[i][3]
+            # Convert to top-left corner plus size, in original image pixels
+            left = int((x - w / 2) * x_factor)
+            top = int((y - h / 2) * y_factor)
+            width = int(w * x_factor)
+            height = int(h * y_factor)
+            # Record the class ID, score and box
+            class_ids.append(class_id)
+            scores.append(max_score)
+            boxes.append([left, top, width, height])
+    # Non-maximum suppression to drop overlapping boxes
+    indices = custom_NMSBoxes(boxes, scores, confidence_thres, iou_thres)
+    # Draw every detection that survived NMS
+    for i in indices:
+        box = boxes[i]
+        score = scores[i]
+        class_id = class_ids[i]
+        draw_detections(input_image, box, score, class_id)
+    # Return the annotated image
+    return input_image
+
+def init_detect_model(model_path):
+    # Create an inference session from the ONNX model, with the providers above
+    session = ort.InferenceSession(model_path, providers=providers)
+    # Model input metadata
+    model_inputs = session.get_inputs()
+    # Input shape, in NCHW order: (batch, channels, height, width)
+    input_shape = model_inputs[0].shape
+    # Extract the input height and width (shape[2] is H, shape[3] is W)
+    input_height = input_shape[2]
+    input_width = input_shape[3]
+    # Return the session, input metadata, input width and input height
+    return session, model_inputs, input_width, input_height
+
+def detect_object(image, session, model_inputs, input_width, input_height):
+    # Convert PIL images to a NumPy array
+    if isinstance(image, Image.Image):
+        result_image = np.array(image)
+    else:
+        # Otherwise use the input directly (assumed to already be a NumPy array)
+        result_image = image
+    # Preprocess: resize and normalize the image data
+    img_data, img_height, img_width = preprocess(result_image, input_width, input_height)
+    # Run inference on the preprocessed data
+    outputs = session.run(None, {model_inputs[0].name: img_data})
+    # Post-process: decode boxes, filter low-confidence detections, draw results
+    output_image = postprocess(result_image, outputs, input_width, input_height, img_width, img_height)
+    # Return the annotated image
+    return output_image
+if __name__ == '__main__':
+    # Path to the model file
+    model_path = 'model/detect/best.onnx'
+    # Initialize the detector: load the model and read its input width/height
+    session, model_inputs, input_width, input_height = init_detect_model(model_path)
+    # Three modes: 1 = predict on an image and display the result;
+    # 2 = detect from the camera with live FPS; 3 = detect on a video and save the result
+    mode = 2
+    if mode == 1:
+        # Read the image file
+        image_data = cv2.imread("street.jpg")
+        # Run object detection on the image
+        result_image = detect_object(image_data, session, model_inputs, input_width, input_height)
+        # Save the annotated image
+        cv2.imwrite("output_image.jpg", result_image)
+        # Show the annotated image in a window
+        cv2.imshow('Output', result_image)
+        # Wait for a key press, then close the window
+        cv2.waitKey(0)
+    elif mode == 2:
+        # Camera index; 0 is usually the first camera
+        camera_index = 0
+
+        # Open the camera
+        cap = cv2.VideoCapture(camera_index, cv2.CAP_DSHOW)
+        # Set the capture resolution
+        cap.set(cv2.CAP_PROP_FRAME_WIDTH, 3840)   # width
+        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 2160)  # height
+        # Check whether the camera opened successfully
+        if not cap.isOpened():
+            print("Cannot open camera")
+            exit()
+        width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
+        height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
+        print("Camera resolution:", width, "x", height)
+
+        # Frame counter and start time for the FPS estimate
+        frame_count = 0
+        start_time = time.time()
+
+        # Target image size
+        target_width = 1024
+        target_height = 768
+
+        # Loop over the camera stream
+        while True:
+            # Read one frame
+            ret, frame = cap.read()
+            # Check that the frame was read successfully
+            if not ret:
+                print("Error: Could not read frame.")
+                break
+            # Center-crop a target_width x target_height region from the full frame
+            cropped_frame = frame[int(height / 2 - target_height / 2):int(height / 2 + target_height / 2),
+                                int(width / 2 - target_width / 2):int(width / 2 + target_width / 2)]
+            # Resize the image
+            resized_frame = cv2.resize(cropped_frame, (target_width, target_height))
+
+            # Run object detection on the frame
+            output_image = detect_object(resized_frame, session, model_inputs, input_width, input_height)
+            # Frame rate (cumulative average since start, not instantaneous)
+            frame_count += 1
+            end_time = time.time()
+            elapsed_time = end_time - start_time
+            fps = frame_count / elapsed_time
+            print(f"FPS: {fps:.2f}")
+            # Draw the FPS on the image
+            cv2.putText(output_image, f"FPS: {fps:.2f}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
+            # Show the current frame
+            cv2.imshow("Video", output_image)
+            # Quit the loop when 'q' is pressed
+            if cv2.waitKey(1) & 0xFF == ord('q'):
+                break
+        # Release the camera
+        cap.release()
+        # Close the windows
+        cv2.destroyAllWindows()
+    elif mode == 3:
+        # Input video path
+        input_video_path = 'kun.mp4'
+        # Output video path
+        output_video_path = 'kun_det.mp4'
+        # Open the video file
+        cap = cv2.VideoCapture(input_video_path)
+        # Check whether the video opened successfully
+        if not cap.isOpened():
+            print("Error: Could not open video.")
+            exit()
+        # Read the basic video properties
+        frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+        frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+        fps = cap.get(cv2.CAP_PROP_FPS)
+        # Create a VideoWriter with a codec matching the output file extension
+        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+        out = cv2.VideoWriter(output_video_path, fourcc, fps, (frame_width, frame_height))
+        # Frame counter and start time for the FPS estimate
+        frame_count = 0
+        start_time = time.time()
+        while True:
+            ret, frame = cap.read()
+            if not ret:
+                print("Info: End of video file.")
+                break
+            # Run object detection on the frame
+            output_image = detect_object(frame, session, model_inputs, input_width, input_height)
+            # Compute and print the frame rate
+            frame_count += 1
+            end_time = time.time()
+            elapsed_time = end_time - start_time
+            if elapsed_time > 0:
+                fps = frame_count / elapsed_time
+                print(f"FPS: {fps:.2f}")
+            # Write the annotated frame to the output video
+            out.write(output_image)
+            # (Optional) show the annotated frame live
+            cv2.imshow("Output Video", output_image)
+            if cv2.waitKey(1) & 0xFF == ord('q'):
+                break
+        # Release all resources
+        cap.release()
+        out.release()
+        cv2.destroyAllWindows()
+    else:
+        print("Invalid input; please check the value of mode")
diff --git a/pachong.py b/pachong.py
index 4841de4..d82db8f 100644
--- a/pachong.py
+++ b/pachong.py
@@ -1,13 +1,20 @@
+import concurrent.futures
 import os
 import time
 import requests
 import re
 
+
 def imgdata_set(save_path, word, epoch):
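+    # Create the save folder; if it already exists, assume this keyword was
+    # crawled before and skip it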
+    if not os.path.exists(save_path):
+        os.makedirs(save_path)
+    else:
+        return 0
     q = 0     # stop-condition counter for the crawl (Baidu result-page offset)
     a = 0     # running image index, used for filenames
     while(True):
         time.sleep(1)
+        print("Start crawling images")
         url = "https://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word={}&pn={}&ct=&ic=0&lm=-1&width=0&height=0".format(word, q)
         # word = the search keyword
         headers = {
@@ -16,21 +23,28 @@
         response = requests.get(url, headers=headers)  # send the request and get the response
         html = response.text  # HTML body of the response
         urls = re.findall('"objURL":"(.*?)"', html)  # extract the image URLs with a regex
-        for url in urls:
-            try:
-                print(a)  # the image's index
-                response = requests.get(url, headers=headers)  # fetch the image
-                image = response.content  # raw image bytes
-                with open(os.path.join(save_path, "{}.jpg".format(a)), 'wb') as f:  # save to the target path
-                    f.write(image)
-                a = a + 1
-            except Exception as e:
-                pass
-            continue
+        print(len(urls))
+        # Download concurrently with concurrent.futures
+        with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
+            # Submit every download task and collect the future objects
+            futures = [executor.submit(download_image, index, headers, save_path, url) for index, url in enumerate(urls)]
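+            # Leaving the "with" block implicitly waits for every submitted
+            # download to finish before moving on to the next page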
         q = q + 20
         if (q / 20) >= int(epoch):
             break
 
+
+def download_image(a, headers, save_path, url):
+    try:
+        print(a)  # image index, used as the filename
+        response = requests.get(url, headers=headers, timeout=10)  # fetch the image (10 s timeout)
+        image = response.content  # raw image bytes
+        with open(os.path.join(save_path, "{}.jpg".format(a)), 'wb') as f:  # save to the target path
+            f.write(image)
+    except Exception as e:
+        # Skip images that fail to download or time out
+        pass
+
+
 if __name__ == "__main__":
     save_path = input('你想保存的路径：')  # ask the user for the save path
     word = input('你想要下载什么图片？请输入:')  # ask the user what images to download
diff --git a/paherbbaidu.py b/paherbbaidu.py
new file mode 100644
index 0000000..493ed57
--- /dev/null
+++ b/paherbbaidu.py
@@ -0,0 +1,46 @@
+import mysql.connector
+from mysql.connector import Error
+from pachong import imgdata_set
+from quchong import quchongmethod
+import shutil
+
+
+# Connect to MySQL and query the database
+def mysql_connect():
+    try:
+        conn = mysql.connector.connect(host='localhost',
+                                       database='herb',
+                                       user='root',
+                                       password='123456')
+        if conn.is_connected():
+            print('Connected to MySQL database')
+            return conn
+    except Error as e:
+        print(e)
+
+# 鏌ヨdry_herb_info琛�
+def mysql_select(conn):
+    cursor = conn.cursor()
+    sql = "SELECT name, pinyin FROM dry_herb_info where pinyin is not null and pinyin <> ''"
+    cursor.execute(sql)
+    result = cursor.fetchall()
+    return result
+
+
+# main
+if __name__ == '__main__':
+    conn = mysql_connect()
+
+    result = mysql_select(conn)
+
+    for row in result:
+        name = row[0]
+        pinyin = row[1]
+        print(name)
+        print(pinyin)
+        imgdata_set('E:/pachong/'+pinyin, name + '饮片', 2)  # '饮片' ("decoction pieces") narrows the image search
+        quchongmethod('E:/pachong/'+pinyin, 'E:/pachong/2/'+pinyin)
+        # Delete the temporary folder
+        shutil.rmtree('E:/pachong/2/'+pinyin)
+
diff --git a/pc.py b/pc.py
new file mode 100644
index 0000000..75ac312
--- /dev/null
+++ b/pc.py
@@ -0,0 +1,35 @@
+import os
+import requests
+from bs4 import BeautifulSoup
+from PIL import Image
+from io import BytesIO
+
+
+def fetch_images(keyword, save_path):
+    if not os.path.exists(save_path):
+        os.makedirs(save_path)
+
+    url = f"https://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word={keyword}"
+    headers = {
+        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
+    }
+
+    response = requests.get(url, headers=headers)
+    soup = BeautifulSoup(response.text, 'html.parser')
+    img_tags = soup.find_all('ObjURL', class_='obj')
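+    # NOTE: this selector assumes <ObjURL class="obj"> tags carrying a
+    # data-src attribute, which Baidu's flip page does not appear to serve;
+    # the regex on "objURL" in pachong.py is the more reliable way to
+    # extract the image URLs here.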
+
+    for i, img_tag in enumerate(img_tags):
+        try:
+            img_url = img_tag['data-src']
+            img_data = requests.get(img_url).content
+            img = Image.open(BytesIO(img_data))
+            img.save(os.path.join(save_path, f"{keyword}_{i}.jpg"))
+            print(f"Downloaded {keyword}_{i}.jpg")
+        except Exception as e:
+            print(f"Failed to download image: {e}")
+
+
+if __name__ == "__main__":
+    keyword = "人参"  # "ginseng"
+    save_path = "images"
+    fetch_images(keyword, save_path)
\ No newline at end of file
diff --git a/quchong.py b/quchong.py
index 3a27d70..d48414c 100644
--- a/quchong.py
+++ b/quchong.py
@@ -52,12 +52,9 @@
                 result = "两张图相同"
     return result
 
-if __name__ == '__main__':
 
-    load_path = 'E:\yaocai\yinyanghuo'  # folder to deduplicate
-    save_path = 'E:\yaocai\\2\\yinyanghuo'  # empty folder that receives the detected duplicate photos
+def quchongmethod(load_path, save_path):
     os.makedirs(save_path, exist_ok=True)
-
     # Build file_map, a dict of {file path filename : file size image_size}
     file_map = {}
     image_size = 0
@@ -70,13 +67,11 @@
             # print('the full name of the file is %s' % os.path.join(parent, filename))
             image_size = os.path.getsize(os.path.join(parent, filename))
             file_map.setdefault(os.path.join(parent, filename), image_size)
-
     # Sort the collected images by file size (image_size), ascending
     file_map = sorted(file_map.items(), key=lambda d: d[1], reverse=False)
     file_list = []
     for filename, image_size in file_map:
         file_list.append(filename)
-
     # Pick out the duplicated images
     file_repeat = []
     for currIndex, filename in enumerate(file_list):
@@ -93,16 +88,22 @@
         except Exception as e:
             pass
 
-        if(result == "两张图相同"):
+        if (result == "两张图相同"):
             file_repeat.append(file_list[currIndex + 1])
             print("相同的图片：", file_list[currIndex], file_list[currIndex + 1])
         else:
             print('不同的图片：', file_list[currIndex], file_list[currIndex + 1])
         currIndex += 1
-        if currIndex >= (len(file_list)-2):
+        if currIndex >= (len(file_list) - 2):
             break
-
     # Move the duplicates to the new folder, slimming down the source folder
     for image in file_repeat:
         shutil.move(image, save_path)
-        print("正在移除重复照片：", image)
\ No newline at end of file
+        print("正在移除重复照片：", image)
+
+
+if __name__ == '__main__':
+
+    load_path = 'E:\\yaocai\\chenpi'  # folder to deduplicate
+    save_path = 'E:\\yaocai\\2\\chenpi'  # empty folder that receives the detected duplicate photos
+    quchongmethod(load_path, save_path)
\ No newline at end of file
diff --git a/replaceLabelNumber.py b/replaceLabelNumber.py
new file mode 100644
index 0000000..0d9ee10
--- /dev/null
+++ b/replaceLabelNumber.py
@@ -0,0 +1,38 @@
+import os
+
+def process_file(file_path):
+    with open(file_path, 'r') as file:
+        lines = file.readlines()
+
+    with open(file_path, 'w') as file:
+        for line in lines:
+            # Try to parse the first number (the class index) on the line
+            try:
+                number = int(line.split()[0])
+            except ValueError:
+                # If parsing fails, write the line through unchanged
+                file.write(line)
+                continue
+
+            new_number = number + 1  # shift the class index up by one (e.g. 0 -> 1)
+
+            # Replace the first occurrence of that number in the line
+            updated_line = line.replace(str(number), str(new_number), 1)
+
+            # Write the updated line
+            file.write(updated_line)
+
+if __name__ == "__main__":
+    # Target directory
+    target_directory = 'E:\\herb_scan.v1i.yolov8\\valid\\labels'
+
+    # Collect every .txt file in the directory
+    txt_files = [file for file in os.listdir(target_directory) if file.endswith('.txt')]
+
+    # Process each file
+    for txt_file in txt_files:
+        file_path = os.path.join(target_directory, txt_file)
+        process_file(file_path)
+
+    print("Processing complete!")
diff --git a/save_img.py b/save_img.py
index f588ec0..074e83a 100644
--- a/save_img.py
+++ b/save_img.py
@@ -6,12 +6,17 @@
 
 # Open the camera
 cap = cv2.VideoCapture(camera_index)
-
+# Set the capture resolution
+# cap.set(cv2.CAP_PROP_FRAME_WIDTH, 3840)  # width
+# cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 2160)  # height
 # Check whether the camera opened successfully
 if not cap.isOpened():
     print("无法打开摄像头")
     exit()
 
+width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
+height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
+print("Camera resolution:", width, "x", height)
 # Path where captured images are saved
 save_path = "captured_images/"
 
@@ -36,27 +41,29 @@
         print("无法读取摄像头画面")
         break
 
+# Crop the image
+#     cropped_frame = frame[750:1230, 1650:2290]
     # Resize the image
     resized_frame = cv2.resize(frame, (target_width, target_height))
 
     # Get the current time
     current_time = time.time()
 
-    # If more than 1 second has passed since the last save, save the current frame
-    if current_time - start_time >= 1.0:
-        # Name the file after the current timestamp
-        save_name = time.strftime("%Y%m%d%H%M%S", time.localtime()) + ".jpg"
-        # Save the resized image
-        cv2.imwrite(save_path + save_name, resized_frame)
-        print("保存图片:", save_name)
-        # Reset the timer
-        start_time = time.time()
+    # If more than N seconds have passed since the last save, save the current frame
+    # if current_time - start_time >= 3.0:
+    #     # Name the file after the current timestamp
+    #     save_name = time.strftime("%Y%m%d%H%M%S", time.localtime()) + ".jpg"
+    #     # Save the full frame
+    #     cv2.imwrite(save_path + save_name, frame)
+    #     print("保存图片:", save_name)
+    #     # Reset the timer
+    #     start_time = time.time()
 
     # Show the frame
-    cv2.imshow("Camera", resized_frame)
+    cv2.imshow("Camera", frame)
 
     # Quit the loop when 'q' is pressed
-    if cv2.waitKey(1000) & 0xFF == ord('q'):
+    if cv2.waitKey(1) & 0xFF == ord('q'):
         break
 
 # Release the camera
diff --git a/shot_onnx.py b/shot_onnx.py
new file mode 100644
index 0000000..de87634
--- /dev/null
+++ b/shot_onnx.py
@@ -0,0 +1,105 @@
+import cv2
+import time
+import numpy as np
+import onnxruntime
+from scipy.special import softmax
+
+# Load the ONNX model
+session = onnxruntime.InferenceSession("model/detect/best.onnx")
+# Camera index; 0 is usually the first camera
+camera_index = 0
+
+# Open the camera
+cap = cv2.VideoCapture(camera_index, cv2.CAP_DSHOW)
+# Set the capture resolution
+cap.set(cv2.CAP_PROP_FRAME_WIDTH, 3840)   # width
+cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 2160)  # height
+# Check whether the camera opened successfully
+if not cap.isOpened():
+    print("Cannot open camera")
+    exit()
+
+width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
+height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
+print("Camera resolution:", width, "x", height)
+
+# Read the class names from res1-2.json
+# with open("res1-2.json", "r") as f:
+#     classes = eval(f.read())
+
+# Target image size
+target_width = 1024
+target_height = 768
+
+# Timer
+start_time = time.time()
+
+# Loop over camera frames
+while True:
+    ret, frame = cap.read()
+
+    if not ret:
+        print("Cannot read frame from camera")
+        break
+
+    # Center-crop a target_width x target_height region from the full frame
+    cropped_frame = frame[int(height / 2 - target_height / 2):int(height / 2 + target_height / 2),
+                    int(width / 2 - target_width / 2):int(width / 2 + target_width / 2)]
+    # Resize the image
+    resized_frame = cv2.resize(cropped_frame, (target_width, target_height))
+
+    # Get the current time
+    current_time = time.time()
+
+    # If more than N seconds have passed since the last save, save the current frame
+    # if current_time - start_time >= 3.0:
+    #     save_name = time.strftime("%Y%m%d%H%M%S", time.localtime()) + ".jpg"
+    #     cv2.imwrite(save_path + save_name, frame)
+    #     print("Saved image:", save_name)
+    #     start_time = time.time()
+
+    # Preprocess: BGR->RGB, scale to [0,1], resize to the 640x640 model input
+    blob = cv2.dnn.blobFromImage(resized_frame, 1 / 255.0, (640, 640), swapRB=True, crop=False)
+
+    # Run inference
+    outputs = session.run(None, {session.get_inputs()[0].name: blob})
+
+    output = np.transpose(np.squeeze(outputs[0]))
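+    # After squeeze + transpose the output is (num_candidates, 4 + num_classes):
+    # 4 box values (cx, cy, w, h) followed by the per-class scores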
+    rows = output.shape[0]
+    boxes = []
+    scores = []
+    class_ids = []
+    for i in range(rows):
+        classes_scores = output[i][4:]
+        max_score = np.amax(classes_scores)
+        if max_score > 0.5:
+            classid = np.argmax(classes_scores)
+            scores.append(max_score)
+            class_ids.append(classid)
+
+    # Note: only scores and class IDs are collected in this quick test; the
+    # box coordinates are left unparsed
+    print(class_ids)
+    print(scores)
+
+    # Show the frame
+    cv2.imshow("Camera", resized_frame)
+
+    # Quit the loop when 'q' is pressed
+    if cv2.waitKey(1) & 0xFF == ord('q'):
+        break
+
+# Release the camera
+cap.release()
+
+# Close all windows
+cv2.destroyAllWindows()
diff --git a/speech/deepSpeechTest.py b/speech/deepSpeechTest.py
new file mode 100644
index 0000000..be53e5c
--- /dev/null
+++ b/speech/deepSpeechTest.py
@@ -0,0 +1,20 @@
+from deepspeech import Model
+import scipy.io.wavfile as wav
+import numpy as np
+
+# Paths to the model and external scorer files
+model_path = 'model/deepspeech-0.9.3-models-zh-CN.pbmm'
+scorer_path = 'model/deepspeech-0.9.3-models-zh-CN.scorer'
+
+# Initialize the DeepSpeech model
+ds = Model(model_path)
+ds.enableExternalScorer(scorer_path)
+
+# Load the audio file
+audio_path = 'audio/input.wav'
+fs, audio = wav.read(audio_path)
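+# DeepSpeech expects 16-bit, 16 kHz mono PCM; fs should match ds.sampleRate()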
+
+# Run speech-to-text
+text = ds.stt(audio)
+
+print('Transcribed text:', text)
\ No newline at end of file
diff --git a/speech/formatmp3.py b/speech/formatmp3.py
new file mode 100644
index 0000000..2c94476
--- /dev/null
+++ b/speech/formatmp3.py
@@ -0,0 +1,5 @@
+from pydub import AudioSegment
+
+# Load the mp3 file
+mp3_audio = AudioSegment.from_mp3("input.mp3")
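+# A minimal next step (assumed usage, output filename illustrative): pydub's
+# set_frame_rate/set_channels/export can produce the 16 kHz mono WAV that the
+# speech models in this folder expect.
+# mp3_audio.set_frame_rate(16000).set_channels(1).export("input.wav", format="wav")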
+
diff --git a/speech/whisper2.py b/speech/whisper2.py
new file mode 100644
index 0000000..5ae37cf
--- /dev/null
+++ b/speech/whisper2.py
@@ -0,0 +1 @@
+from faster_whisper import WhisperModel
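+
+# Minimal usage sketch (assumes the faster-whisper package; model size and
+# audio path mirror whisperTest.py):
+# model = WhisperModel("medium")
+# segments, info = model.transcribe("audio/input.wav")
+# print("".join(segment.text for segment in segments))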
diff --git a/speech/whisperTest.py b/speech/whisperTest.py
new file mode 100644
index 0000000..7869783
--- /dev/null
+++ b/speech/whisperTest.py
@@ -0,0 +1,10 @@
+import whisper
+model = whisper.load_model("medium")
+result = model.transcribe("audio/input.wav")
+
+print(result["text"])
+
+# Sample Chinese transcripts of the same clip, per model size:
+# tiny   72.1M
+# base   1XXM
+# small  461M
+# medium 1.42G
\ No newline at end of file

--
Gitblit v1.9.3