baoshiwei
2025-04-22 88fc0f9f9b7fd3eb81c958ca41ed822cf3657c47
openvino/identifier.py
copy from identifier.py
copy to openvino/identifier.py
File copied from identifier.py
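This commit swaps the onnxruntime.InferenceSession path for the OpenVINO Runtime API: the IR model (best.xml) is read, compiled for CPU, and run through an infer request. Below is a minimal standalone sketch of that flow, for orientation only; the model path and the random dummy input are illustrative assumptions, not part of the commit.

# Minimal sketch of the OpenVINO flow this commit adopts
# (Core -> read_model -> compile_model -> create_infer_request -> infer).
# The model path and the dummy input are placeholders, not from the commit.
import numpy as np
from openvino.runtime import Core

core = Core()
model = core.read_model(model="openvino/best.xml")        # assumed IR location
compiled = core.compile_model(model=model, device_name="CPU")

input_layer = compiled.input(0)                           # single NCHW input assumed
output_layer = compiled.output(0)
n, c, h, w = input_layer.shape                            # static shape assumed

dummy = np.random.rand(n, c, h, w).astype(np.float32)     # stand-in for a preprocessed image
request = compiled.create_infer_request()
probabilities = request.infer({0: dummy})[output_layer]   # results indexed by output port, as in inference() below
print(probabilities.shape)

Note that the diff's inference() creates a fresh infer request on every call; reusing a single request, as in the sketch, avoids that per-call allocation.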
@@ -1,7 +1,8 @@
import time
import cv2
import numpy as np
import onnxruntime
import yaml
from openvino.runtime import Core
class IDENTIFIER:
@@ -12,13 +13,29 @@
    def __call__(self, image):
        return self.idengify(image)
    def read_config(self, path):
        file_path = path+'/metadata.yaml'
        with open(file_path, 'r', encoding="utf-8") as file:
            config = yaml.safe_load(file)
        return config
    def initialize_model(self, path):
        self.session = onnxruntime.InferenceSession(path, providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
        self.class_names = eval(self.session.get_modelmeta().custom_metadata_map['names'])
        model_path = path + '/best.xml'
        # Initialize OpenVINO Runtime
        self.core = Core()
        # Load the model
        self.model = self.core.read_model(model=model_path)
        # Compile the model
        self.compiled_model = self.core.compile_model(model=self.model, device_name="CPU")
        # Get input and output layers
        self.input_layer = self.compiled_model.input(0)
        N, C, self.input_height, self.input_width = self.input_layer.shape  # NCHW layout
        self.output_layer = self.compiled_model.output(0)
        # Get class names
        self.class_names = self.read_config(path)['names']
        # Get model info
        self.get_input_details()
        self.get_output_details()
        # self.get_input_details()
        # self.get_output_details()
    def idengify(self, image):
        input_tensor = self.prepare_input(image)
@@ -26,9 +43,9 @@
        # Perform inference on the image
        outputs = self.inference(input_tensor)
        self.herb_probabilities = outputs[0]
        return self.herb_probabilities
        return outputs
    def prepare_input(self, image):
        self.img_height, self.img_width = image.shape[:2]
@@ -48,25 +65,25 @@
        return input_tensor
    def inference(self, input_tensor):
        start = time.perf_counter()
        outputs = self.session.run(self.output_names, {self.input_names[0]: input_tensor})
        ir = self.compiled_model.create_infer_request()
        outs = ir.infer(input_tensor)[self.output_layer]
        # print(f"Inference time: {(time.perf_counter() - start)*1000:.2f} ms")
        return outputs
        return outs
    def get_input_details(self):
        model_inputs = self.session.get_inputs()
        self.input_names = [model_inputs[i].name for i in range(len(model_inputs))]
    # def get_input_details(self):
    #     model_inputs = self.session.get_inputs()
    #     self.input_names = [model_inputs[i].name for i in range(len(model_inputs))]
    #
    #     self.input_shape = model_inputs[0].shape
    #     self.input_height = self.input_shape[2]
    #     self.input_width = self.input_shape[3]
        self.input_shape = model_inputs[0].shape
        self.input_height = self.input_shape[2]
        self.input_width = self.input_shape[3]
    def get_output_details(self):
        model_outputs = self.session.get_outputs()
        self.output_names = [model_outputs[i].name for i in range(len(model_outputs))]
    # def get_output_details(self):
    #     model_outputs = self.session.get_outputs()
    #     self.output_names = [model_outputs[i].name for i in range(len(model_outputs))]
    # Resize the image proportionally (keep aspect ratio)
    def ratioresize(self, im, color=114):