"""Inspect a Cambricon-MLU quantized YOLOv7 model: print its layers,
a torchinfo summary, and render the computation graph with torchviz."""
import argparse
import time
from pathlib import Path

import cv2
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
from numpy import random
from torchinfo import summary

from utils.activations import Hardswish, SiLU

debug = True

# Patch torch.nn so checkpoints that reference these activation classes
# deserialize against the project-local implementations.
nn.modules.activation.SiLU = SiLU
nn.modules.activation.Hardswish = Hardswish
nn.SiLU = SiLU
nn.Hardswish = Hardswish

# from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \
    scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path
from utils.plots import plot_one_box
from utils.torch_utils import select_device, load_classifier, time_synchronized, TracedModel


def _dump_model(net, state_dict, header):
    """Print *header*, the checkpoint's layer names, the module tree of *net*,
    and a torchinfo summary (CPU, 1x3x640x640 input)."""
    print(header)
    print('===layer_name:===')
    layer_name = list(state_dict.keys())
    # Inspect the tensor names stored in the checkpoint.
    print(layer_name)
    print('\n\n======The whole model printing:======')
    print(net)
    print('\n\n======The whole model torchinfo:======')
    summary(model=net, input_size=(1, 3, 640, 640), device='cpu')


def view():
    """Load the quantized model described by the module-level ``opt``
    namespace (set in ``__main__``), print its layers/summaries before and
    after ``float().eval()``, and open a torchviz graph of one forward pass.

    Side effects only: prints to stdout and opens the rendered graph viewer.
    """
    print('\n\n\n==========Starting View Now=============\n\n\n')
    source, weights, view_img, save_txt, imgsz, trace = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size, not opt.no_trace
    jit, save, mname, mcore, mlu_det, half_input = opt.jit, opt.save, opt.mname, opt.mcore, opt.mlu_det, opt.half_input

    # Imported lazily: these modules require the Cambricon runtime and are
    # only needed when view() actually runs.
    from mlu.routine.postprocess import MLU_PostProcessYoloV7, PostProcessPytorchYoloV7, draw_image
    from models.yolo import get_empty_model
    model = get_empty_model(opt)

    import torch_mlu
    import torch_mlu.core.mlu_model as ct
    import torch_mlu.core.mlu_quantize as mlu_quantize

    ct.set_core_number(4)
    # Input channel order decides how the first conv pads a 3-channel input;
    # the default is RGBA order:
    # ct.set_input_format(0)
    # Select the MLU core type (e.g. MLU270).
    ct.set_core_version(opt.mcore)
    torch.set_grad_enabled(False)
    device = ct.mlu_device()
    print("run on %s ..." % device)

    # --weights is declared nargs='+' but defaults to a bare string; indexing
    # a string would yield a single character ('y'), so normalize explicitly.
    weight = weights[0] if isinstance(weights, (list, tuple)) else weights

    quantized_net = torch_mlu.core.mlu_quantize.quantize_dynamic_mlu(model)
    print('\nthe quantized model\'s weight:', weight, '\n\n')
    # map_location='cpu' so a checkpoint saved on another device still loads
    # on this host (the model is summarized on CPU below anyway).
    state_dict = torch.load(weight, map_location='cpu')
    quantized_net.load_state_dict(state_dict, strict=True)

    _dump_model(quantized_net, state_dict,
                '=============The model exactly after :============')

    # Switch to inference mode.
    quantized_net = quantized_net.float().eval()
    _dump_model(quantized_net, state_dict,
                '=============The model exactly after float().eval():============')

    # quantized_net.to(device)
    dummy_input = torch.randn(1, 3, 640, 640)
    from torchviz import make_dot
    net_plot = make_dot(quantized_net(dummy_input),
                        params=dict(quantized_net.named_parameters()))
    net_plot.view()


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default='yolov7.pt', help='model.pt path(s)')
    parser.add_argument('--source', type=str, default='inference/images', help='source')  # file/folder, 0 for webcam
    parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--view-img', action='store_true', help='display results')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--update', action='store_true', help='update all models')
    parser.add_argument('--project', default='runs/detect', help='save results to project/name')
    parser.add_argument('--name', default='exp', help='save results to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--no-trace', action='store_true', help='don`t trace model')
    # MLU-specific options added for this tool.
    parser.add_argument('--cfg', type=str, default='yolov7-flow.yaml', help='model.yaml')
    parser.add_argument('--batch-size', type=int, default=1, help='I dont know why we need batchsize')  # NOTE(review): purpose unclear — locally added
    parser.add_argument('--jit', action='store_true', help='declaring model type')
    parser.add_argument('--save', action='store_true', help='whether saving offline model')
    parser.add_argument('--mname', type=str, default='offline-model', help='name to save cambricon offline model')
    parser.add_argument('--mcore', type=str, default='MLU270', help='wdnmd')  # NOTE(review): semantics not certain
    parser.add_argument('--mlu-det', action='store_true', help='declaring wtf?')
    parser.add_argument('--half-input', action='store_true', help='name to save cambricon offline model')
    opt = parser.parse_args()
    print(opt)

    # check_requirements(exclude=('pycocotools', 'thop'))
    view()