# NOTE: this script should probably be renamed rather than kept as plain
# "detect": it does little more than export the quantized model right after
# detecting.
import argparse
import time
from pathlib import Path

import cv2
import torch
import torch.backends.cudnn as cudnn
from numpy import random

# new added
import torch.nn as nn
from utils.activations import Hardswish, SiLU

# Patch torch.nn so that traced/exported graphs resolve these activations
# to the repo's own implementations.
nn.modules.activation.SiLU = SiLU
nn.modules.activation.Hardswish = Hardswish
nn.SiLU = SiLU
nn.Hardswish = Hardswish

from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \
    scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path
from utils.plots import plot_one_box
from utils.torch_utils import select_device, load_classifier, time_synchronized, TracedModel


def detect(save_img=False):
    source, weights, view_img, save_txt, imgsz, trace = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size, not opt.no_trace
    save_img = not opt.nosave and not source.endswith('.txt')  # save inference images
    webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
        ('rtsp://', 'rtmp://', 'http://', 'https://'))

    # Directories
    save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))  # increment run
    (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

    # Initialize
    set_logging()
    print(f'opt.device: {opt.device}')
    device = select_device(opt.device)
    try:
        print(device)
    except Exception:
        print('device type could not be printed')
    half = device.type != 'cpu'  # half precision only supported on CUDA

    # Load model
    # new added
    # model = attempt_load(weights, map_location=device)  # load FP32 model
    from models.yolo import get_model
    model = get_model(opt)
    # print(model)

    # new added-1
    # Configure the quantization parameters. Importing torch_mlu registers
    # Cambricon's MLU backend with PyTorch.
    import torch_mlu
    import torch_mlu.core.mlu_model as ct
    import torch_mlu.core.mlu_quantize as mlu_quantize
    qconfig = {'use_avg': False, 'data_scale': 1.0, 'firstconv': False, 'per_channel': False}
    # Call the quantization interface. With gen_quant=True, the inference runs
    # below collect the activation statistics that calibrate the int8 scales,
    # which are saved together with the weights at the end of this run.
    quantized_net = mlu_quantize.quantize_dynamic_mlu(model, qconfig_spec=qconfig, dtype='int8', gen_quant=True)
    # Switch to inference mode
    quantized_net = quantized_net.eval().float()
    # model = quantized_net

    stride = int(model.stride.max())  # model stride
    imgsz = check_img_size(imgsz, s=stride)  # check img_size

    if trace:
        model = TracedModel(model, device, opt.img_size)

    if half:
        model.half()  # to FP16

    # Second-stage classifier (unused)
    classify = False
    if classify:
        modelc = load_classifier(name='resnet101', n=2)  # initialize
        modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model'])
        modelc.to(device).eval()

    # Set Dataloader
    vid_path, vid_writer = None, None
    if webcam:
        view_img = check_imshow()
        cudnn.benchmark = True  # set True to speed up constant image size inference
        dataset = LoadStreams(source, img_size=imgsz, stride=stride)
    else:
        dataset = LoadImages(source, img_size=imgsz, stride=stride)

    # Get names and colors
    names = model.module.names if hasattr(model, 'module') else model.names
    names = ['bottle']  # hard-coded single-class label set
    colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]

    # Run inference
    if device.type != 'cpu':
        model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))  # run once
    old_img_w = old_img_h = imgsz
    old_img_b = 1

    t0 = time.time()
    # Process images one by one. Tested with a training-set image containing
    # only 6 objects: results were normal (validation-set and test-set images
    # were also tested, all normal).
    for path, img, im0s, vid_cap in dataset:
        img = torch.from_numpy(img).to(device)
        img = img.half() if half else img.float()  # uint8 to fp16/32
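        # Normalize pixel values to [0, 1] and add a batch dimension so the
        # tensor matches the NCHW layout the model expects.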
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)

        # Warmup
        if device.type != 'cpu' and (old_img_b != img.shape[0] or old_img_h != img.shape[2] or old_img_w != img.shape[3]):
            old_img_b = img.shape[0]
            old_img_h = img.shape[2]
            old_img_w = img.shape[3]
            for i in range(3):
                model(img, augment=opt.augment)[0]
                if opt.qua:
                    quantized_net(img, augment=opt.augment)[0]

        # Inference
        t1 = time_synchronized()
        with torch.no_grad():  # Calculating gradients would cause a GPU memory leak
            pred = model(img, augment=opt.augment)[0]
            print('======origin model prediction finished======')
            if opt.qua:
                pred1 = quantized_net(img, augment=opt.augment)[0]
                print(f'pred1 is on: {pred1.device}')
                print('======Qua model prediction finished======')
            else:
                pred1 = pred  # fall back to the FP32 prediction when --qua is not set
        t2 = time_synchronized()

        # Apply NMS
        pred1 = non_max_suppression(pred1, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
        print(f'pred1 is like: {type(pred1)}, {pred1[0].shape}, {pred1[0].dtype}')
        print(pred1[0], end='\n\n\n')
        t3 = time_synchronized()

        # Apply Classifier
        if classify:
            pred = apply_classifier(pred, modelc, img, im0s)

        # Process detections
        for i, det in enumerate(pred1):  # detections per image
            if webcam:  # batch_size >= 1
                p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count
            else:
                p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)

            p = Path(p)  # to Path
            save_path = str(save_dir / p.name)  # img.jpg
            txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # img.txt
            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
            if len(det):
                # Rescale boxes from img_size to im0 size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()

                # Print results
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum().item()  # detections per class
                    s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string

                # Write results
                for *xyxy, conf, cls in reversed(det):
                    if save_txt:  # Write to file
                        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                        line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh)  # label format
                        with open(txt_path + '.txt', 'a') as f:
                            f.write(('%g ' * len(line)).rstrip() % line + '\n')

                    if save_img or view_img:  # Add bbox to image
                        label = f'{names[int(cls)]} {conf:.2f}'
                        plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=1)

            # Print time (inference + NMS)
            print(f'{s}Done. ({(1E3 * (t2 - t1)):.1f}ms) Inference, ({(1E3 * (t3 - t2)):.1f}ms) NMS')
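            # `im0` is the original-resolution frame, with boxes already drawn in
            # place by plot_one_box above; the display and save steps below write
            # it out rather than the letterboxed network input `img`.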
            # Stream results
            if view_img:
                cv2.imshow(str(p), im0)
                cv2.waitKey(1)  # 1 millisecond

            # Save results (image with detections)
            if save_img:
                if dataset.mode == 'image':
                    cv2.imwrite(save_path, im0)
                    print(f"The image with the result is saved in: {save_path}")
                else:  # 'video' or 'stream'
                    if vid_path != save_path:  # new video
                        vid_path = save_path
                        if isinstance(vid_writer, cv2.VideoWriter):
                            vid_writer.release()  # release previous video writer
                        if vid_cap:  # video
                            fps = vid_cap.get(cv2.CAP_PROP_FPS)
                            w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                            h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        else:  # stream
                            fps, w, h = 30, im0.shape[1], im0.shape[0]
                            save_path += '.mp4'
                        vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
                    vid_writer.write(im0)

    if save_txt or save_img:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        # print(f"Results saved to {save_dir}{s}")

    print(f'Done. ({time.time() - t0:.3f}s)')

    # new added-1
    # Save the calibrated int8 state_dict of the quantized model
    qua_weight = opt.qua_weight
    print("SAVE quantize model in:", qua_weight)
    torch.save(quantized_net.state_dict(), qua_weight)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default='yolov7.pt', help='model.pt path(s)')
    parser.add_argument('--source', type=str, default='inference/images', help='source')  # file/folder, 0 for webcam
    parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--view-img', action='store_true', help='display results')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--update', action='store_true', help='update all models')
    parser.add_argument('--project', default='runs/detect', help='save results to project/name')
    parser.add_argument('--name', default='exp', help='save results to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--no-trace', action='store_true', help='don`t trace model')
    # new added
    parser.add_argument('--cfg', type=str, default='yolov7-flow.yaml', help='model.yaml')
    # new added-1
    parser.add_argument('--qua_weight', type=str, default='yolov7_intx.pth', help='path to save the quantized state_dict')
    parser.add_argument('--qua', action='store_true', help='also run the quantized model for inference')
    opt = parser.parse_args()
    print(opt)
    # check_requirements(exclude=('pycocotools', 'thop'))

    with torch.no_grad():
        if opt.update:  # update all models (to fix SourceChangeWarning)
            for opt.weights in ['yolov7.pt']:
                detect()
                strip_optimizer(opt.weights)
        else:
            detect()
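
# --- Usage sketch (assumption, not part of this script's verified flow) ------
# Loading the int8 state_dict saved above for actual MLU inference would look
# roughly like the following with Cambricon's torch_mlu; exact names such as
# ct.mlu_device() can differ between CATCH releases, so treat this as a hedged
# sketch rather than a confirmed API:
#
#   import torch
#   import torch_mlu.core.mlu_model as ct
#   import torch_mlu.core.mlu_quantize as mlu_quantize
#   from models.yolo import get_model
#
#   model = get_model(opt)                                # same FP32 graph
#   quantized = mlu_quantize.quantize_dynamic_mlu(model)  # no gen_quant here
#   quantized.load_state_dict(torch.load('yolov7_intx.pth'))
#   quantized = quantized.eval().to(ct.mlu_device())      # move to the MLU
#   pred = quantized(img.to(ct.mlu_device()))[0]          # int8 inference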