
From-Scratch Object Detection and Keypoint Detection (1): Annotating a Dataset with labelme


  • 1. Visualize the annotations
  • 2. Split the dataset
  • 3. Labelme2COCO: convert the json files to MS COCO format

Preface: I recently used mmdetection and mmpose from mmlab, so I reproduced the full pipeline on a small dataset, from dataset creation to model training and testing. I hope this helps anyone who wants to get started with the mmlab framework. The goal is object detection plus keypoint detection, so both bounding boxes and keypoints are annotated. Annotation convention: each keypoint must lie inside exactly one detection box.

Data link: the dataset, already annotated and converted to MS COCO format
Extraction code: heak

Annotating with labelme

1. Visualize the annotations

View the original image

import cv2
import numpy as np
import json
import matplotlib.pyplot as plt

img_path = 'IMG_4801_JPG_jpg.rf.121fd7907310ecece04dc4020e07e7f2.jpg'
img_bgr = cv2.imread(img_path)  # OpenCV loads images as BGR

plt.imshow(img_bgr[:,:,::-1])  # reverse the channels to RGB for matplotlib
plt.show()

Original image
Print the json file contents

labelme_path = 'IMG_4801_JPG_jpg.rf.121fd7907310ecece04dc4020e07e7f2.json'
with open(labelme_path, 'r', encoding='utf-8') as f:
    labelme = json.load(f)
print(labelme.keys())
# dict_keys(['version', 'flags', 'shapes', 'imagePath', 'imageData', 'imageHeight', 'imageWidth'])
print(labelme['shapes'])
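
For reference, an entry in labelme['shapes'] typically looks like the following (a hypothetical, trimmed example; your labels and coordinates will differ). A rectangle stores its two corner points, while a keypoint stores a single point:

# Hypothetical, trimmed example of the labelme['shapes'] list
[
    {'label': 'glue', 'points': [[100.0, 150.0], [400.0, 600.0]],
     'group_id': None, 'shape_type': 'rectangle', 'flags': {}},
    {'label': 'head', 'points': [[180.0, 200.0]],
     'group_id': None, 'shape_type': 'point', 'flags': {}},
    {'label': 'tail', 'points': [[350.0, 560.0]],
     'group_id': None, 'shape_type': 'point', 'flags': {}}
]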

Visualize the box (rectangle) annotations

# Visualize the box (rectangle) annotations
# Box (rectangle) visualization settings
bbox_color = (255, 129, 0)           # box color
bbox_thickness = 5                   # box line width

# Box label text settings
bbox_labelstr = {
    'font_size': 2,          # font size
    'font_thickness': 4,     # font thickness
    'offset_x': 0,           # X offset of the text, positive = right
    'offset_y': -80,         # Y offset of the text, positive = down
}

for each_ann in labelme['shapes']:  # iterate over all annotations
    if each_ann['shape_type'] == 'rectangle':  # keep only rectangle annotations
        # box class label
        bbox_label = each_ann['label']
        # the two corner points of the box
        bbox_keypoints = each_ann['points']
        bbox_keypoint_A_xy = bbox_keypoints[0]
        bbox_keypoint_B_xy = bbox_keypoints[1]
        # top-left corner
        bbox_top_left_x = int(min(bbox_keypoint_A_xy[0], bbox_keypoint_B_xy[0]))
        bbox_top_left_y = int(min(bbox_keypoint_A_xy[1], bbox_keypoint_B_xy[1]))
        # bottom-right corner
        bbox_bottom_right_x = int(max(bbox_keypoint_A_xy[0], bbox_keypoint_B_xy[0]))
        bbox_bottom_right_y = int(max(bbox_keypoint_A_xy[1], bbox_keypoint_B_xy[1]))
        # draw the rectangle
        img_bgr = cv2.rectangle(img_bgr, (bbox_top_left_x, bbox_top_left_y),
                                (bbox_bottom_right_x, bbox_bottom_right_y),
                                bbox_color, bbox_thickness)
        # draw the box label: image, text, top-left of the text, font, font size, color, thickness
        img_bgr = cv2.putText(img_bgr, bbox_label,
                              (bbox_top_left_x + bbox_labelstr['offset_x'], bbox_top_left_y + bbox_labelstr['offset_y']),
                              cv2.FONT_HERSHEY_SIMPLEX, bbox_labelstr['font_size'], bbox_color,
                              bbox_labelstr['font_thickness'])
plt.imshow(img_bgr[:,:,::-1])
plt.show()

Annotated boxes

Visualize the keypoint annotations

# Visualize the keypoint annotations
# Keypoint color map
kpt_color_map = {
    'head': {'id': 0, 'color': [255, 0, 0], 'radius': 10, 'thickness': -1},
    'tail': {'id': 1, 'color': [0, 255, 0], 'radius': 10, 'thickness': -1}
}

# Keypoint label text settings
kpt_labelstr = {
    'font_size': 1,          # font size
    'font_thickness': 4,     # font thickness
    'offset_x': 30,          # X offset of the text, positive = right
    'offset_y': 100,         # Y offset of the text, positive = down
}

for each_ann in labelme['shapes']:  # iterate over all annotations
    if each_ann['shape_type'] == 'point':  # keep only keypoint annotations
        kpt_label = each_ann['label']  # keypoint class
        # keypoint XY coordinates
        kpt_xy = each_ann['points'][0]
        kpt_x, kpt_y = int(kpt_xy[0]), int(kpt_xy[1])
        # visualization settings for this keypoint class
        kpt_color = kpt_color_map[kpt_label]['color']          # color
        kpt_radius = kpt_color_map[kpt_label]['radius']        # radius
        kpt_thickness = kpt_color_map[kpt_label]['thickness']  # line width (-1 = filled)
        # draw the keypoint as a circle
        img_bgr = cv2.circle(img_bgr, (kpt_x, kpt_y), kpt_radius, kpt_color, kpt_thickness)
        # draw the keypoint label: image, text, top-left of the text, font, font size, color, thickness
        img_bgr = cv2.putText(img_bgr, kpt_label,
                              (kpt_x + kpt_labelstr['offset_x'], kpt_y + kpt_labelstr['offset_y']),
                              cv2.FONT_HERSHEY_SIMPLEX, kpt_labelstr['font_size'], kpt_color,
                              kpt_labelstr['font_thickness'])

plt.imshow(img_bgr[:,:,::-1])
plt.show()

Annotated keypoints
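
Optionally, since the keypoint skeleton defined later in section 3 connects head to tail, you can also draw that connection to eyeball the pairing. This is a small sketch that assumes the labelme dict and img_bgr from the cells above are still in scope and that the image contains a single head/tail pair:

# Sketch: draw the head-tail skeleton line (assumes labelme and img_bgr from above are in scope)
kpts = {}
for each_ann in labelme['shapes']:
    if each_ann['shape_type'] == 'point':
        kpts[each_ann['label']] = (int(each_ann['points'][0][0]), int(each_ann['points'][0][1]))

if 'head' in kpts and 'tail' in kpts:
    img_bgr = cv2.line(img_bgr, kpts['head'], kpts['tail'], (0, 255, 255), 3)  # BGR color, thickness 3

plt.imshow(img_bgr[:,:,::-1])
plt.show()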
Save the visualization

cv2.imwrite('visualize.jpg', img_bgr)

2. Split the dataset

import os
import shutil
import random

from tqdm import tqdm

Dataset_root = '../glue_134_Keypoint'
os.chdir(os.path.join(Dataset_root, 'labelme_jsons'))

print('Found {} labelme json files'.format(len(os.listdir())))

test_frac = 0.2   # fraction of files held out as the test set
random.seed(123)  # random seed, for reproducibility

folder = '.'
img_paths = os.listdir(folder)
random.shuffle(img_paths)  # shuffle randomly

val_number = int(len(img_paths) * test_frac)  # number of test files
train_files = img_paths[val_number:]          # training-set file names
val_files = img_paths[:val_number]            # test-set file names

print('Total number of files', len(img_paths))
print('Number of training files', len(train_files))
print('Number of test files', len(val_files))

# Create a folder for the training-set labelme json files
train_labelme_jsons_folder = 'train_labelme_jsons'
os.mkdir(train_labelme_jsons_folder)

for each in tqdm(train_files):
    src_path = os.path.join(folder, each)
    dst_path = os.path.join(train_labelme_jsons_folder, each)
    shutil.move(src_path, dst_path)

# Create a folder for the validation-set labelme json files
val_labelme_jsons_folder = 'val_labelme_jsons'
os.mkdir(val_labelme_jsons_folder)

for each in tqdm(val_files):
    src_path = os.path.join(folder, each)
    dst_path = os.path.join(val_labelme_jsons_folder, each)
    shutil.move(src_path, dst_path)
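
As a quick sanity check (a minimal sketch, run from the same working directory right after the split), confirm that the two folders together contain every json file:

# Sanity check: the two folders should together hold all of the original json files
n_train = len(os.listdir(train_labelme_jsons_folder))
n_val = len(os.listdir(val_labelme_jsons_folder))
print('train:', n_train, 'val:', n_val, 'total:', n_train + n_val)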


3. Labelme2COCO: convert the json files to MS COCO format

Import the required packages

import os
import json
import numpy as np

Specify the dataset information

Dataset_root = '../glue_134_Keypoint'

class_list = {
    'supercategory': 'glue',
    'id': 1,
    'name': 'glue',
    'keypoints': ['head', 'tail'],
    'skeleton': [[0, 1]]
}
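
This class_list dict becomes the single entry of the COCO 'categories' field. For orientation, the file we are about to build has roughly the following top-level layout (an illustrative sketch with made-up values, not real output):

# Illustrative sketch of the target MS COCO keypoint format (values are made up)
coco_example = {
    'categories': [class_list],        # one category: 'glue', with keypoints ['head', 'tail']
    'images': [                        # one entry per image
        {'file_name': 'xxx.jpg', 'height': 1280, 'width': 960, 'id': 0},
    ],
    'annotations': [                   # one entry per bounding box
        {'category_id': 1, 'segmentation': [], 'iscrowd': 0, 'image_id': 0, 'id': 0,
         'bbox': [100, 150, 300, 450],              # [top-left x, top-left y, width, height]
         'area': 135000,
         'num_keypoints': 2,
         'keypoints': [180, 200, 2, 350, 560, 2]},  # x, y, visibility for head, then for tail
    ],
}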

Function: process a single labelme json file

def process_single_json(labelme, image_id=1):
    '''Take one labelme json dict and return the COCO-format box + keypoint annotations it contains.'''
    global ANN_ID

    coco_annotations = []

    for each_ann in labelme['shapes']:  # iterate over all annotations in this json file
        if each_ann['shape_type'] == 'rectangle':  # keep only the instance boxes

            # box metadata
            bbox_dict = {}
            bbox_dict['category_id'] = 1
            bbox_dict['segmentation'] = []
            bbox_dict['iscrowd'] = 0
            bbox_dict['image_id'] = image_id
            bbox_dict['id'] = ANN_ID
            ANN_ID += 1

            # box coordinates
            bbox_left_top_x = min(int(each_ann['points'][0][0]), int(each_ann['points'][1][0]))
            bbox_left_top_y = min(int(each_ann['points'][0][1]), int(each_ann['points'][1][1]))
            bbox_right_bottom_x = max(int(each_ann['points'][0][0]), int(each_ann['points'][1][0]))
            bbox_right_bottom_y = max(int(each_ann['points'][0][1]), int(each_ann['points'][1][1]))
            bbox_w = bbox_right_bottom_x - bbox_left_top_x
            bbox_h = bbox_right_bottom_y - bbox_left_top_y
            bbox_dict['bbox'] = [bbox_left_top_x, bbox_left_top_y, bbox_w, bbox_h]  # top-left x, top-left y, box w, box h
            bbox_dict['area'] = bbox_w * bbox_h

            # find the segmentation polygons belonging to this box
            for seg_ann in labelme['shapes']:  # iterate over all annotations
                if seg_ann['shape_type'] == 'polygon':  # keep only polygon annotations
                    # coordinates of the first polygon point
                    first_x = seg_ann['points'][0][0]
                    first_y = seg_ann['points'][0][1]
                    if (first_x > bbox_left_top_x) & (first_x < bbox_right_bottom_x) & (first_y < bbox_right_bottom_y) & (first_y > bbox_left_top_y):  # polygon starts inside this box
                        bbox_dict['segmentation'] = list(map(lambda x: list(map(lambda y: round(y, 2), x)), seg_ann['points']))  # keep two decimal places

            # find all keypoints that lie inside this box
            bbox_keypoints_dict = {}
            for kpt_ann in labelme['shapes']:  # iterate over all annotations
                if kpt_ann['shape_type'] == 'point':  # keep only keypoint annotations
                    # keypoint coordinates
                    x = int(kpt_ann['points'][0][0])
                    y = int(kpt_ann['points'][0][1])
                    label = kpt_ann['label']
                    if (x > bbox_left_top_x) & (x < bbox_right_bottom_x) & (y < bbox_right_bottom_y) & (y > bbox_left_top_y):  # keypoint lies inside this box
                        bbox_keypoints_dict[label] = [x, y]

            bbox_dict['num_keypoints'] = len(bbox_keypoints_dict)

            # write the keypoints in the order given by the class list
            bbox_dict['keypoints'] = []
            for each_class in class_list['keypoints']:
                if each_class in bbox_keypoints_dict:
                    bbox_dict['keypoints'].append(bbox_keypoints_dict[each_class][0])
                    bbox_dict['keypoints'].append(bbox_keypoints_dict[each_class][1])
                    bbox_dict['keypoints'].append(2)  # 2 = visible, 1 = occluded, 0 = not labeled
                else:  # missing keypoints are written as zeros
                    bbox_dict['keypoints'].append(0)
                    bbox_dict['keypoints'].append(0)
                    bbox_dict['keypoints'].append(0)

            coco_annotations.append(bbox_dict)

    return coco_annotations
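
To spot-check the function on a single file before converting the whole folder (a minimal usage sketch; the json file is the one from section 1, and ANN_ID must exist beforehand because the function increments it as a global):

# Minimal usage sketch: convert one labelme file and inspect the result
ANN_ID = 0  # global annotation-id counter used by process_single_json
with open('IMG_4801_JPG_jpg.rf.121fd7907310ecece04dc4020e07e7f2.json', 'r', encoding='utf-8') as f:
    one_labelme = json.load(f)
for ann in process_single_json(one_labelme, image_id=0):
    print(ann['bbox'], ann['num_keypoints'], ann['keypoints'])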

Function: convert all labelme json files in the current directory

def process_folder():
    IMG_ID = 0
    ANN_ID = 0

    # iterate over all labelme json files in the current directory
    for labelme_json in os.listdir():
        if labelme_json.split('.')[-1] == 'json':
            with open(labelme_json, 'r', encoding='utf-8') as f:
                labelme = json.load(f)

                # image metadata
                img_dict = {}
                img_dict['file_name'] = labelme['imagePath']
                img_dict['height'] = labelme['imageHeight']
                img_dict['width'] = labelme['imageWidth']
                img_dict['id'] = IMG_ID
                coco['images'].append(img_dict)

                # box and keypoint annotations
                coco_annotations = process_single_json(labelme, image_id=IMG_ID)
                coco['annotations'] += coco_annotations

                IMG_ID += 1

                print(labelme_json, 'processed')
        else:
            pass

Process the training-set json files, save the result, and verify that the file is valid

coco = {}
coco['categories'] = []
coco['categories'].append(class_list)
coco['images'] = []
coco['annotations'] = []

IMG_ID = 0
ANN_ID = 0

path = os.path.join(Dataset_root, 'labelme_jsons', 'train_labelme_jsons')
os.chdir(path)

process_folder()

# save the COCO annotation file
coco_path = '../../train_coco.json'
with open(coco_path, 'w') as f:
    json.dump(coco, f, indent=2)

os.chdir('../../')
os.listdir()

from pycocotools.coco import COCO
my_coco = COCO('train_coco.json')
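
If COCO('train_coco.json') loads without errors, the file is structurally valid. A few extra pycocotools calls (a minimal sketch using the standard COCO API) summarize what was written:

# Quick summary of the converted training annotations via the COCO API
print('images:', len(my_coco.getImgIds()))
print('annotations:', len(my_coco.getAnnIds()))
print('categories:', my_coco.loadCats(my_coco.getCatIds()))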


Process the validation-set json files, save the result, and verify that the file is valid

coco = {}
coco['categories'] = []
coco['categories'].append(class_list)
coco['images'] = []
coco['annotations'] = []

IMG_ID = 0
ANN_ID = 0

path = os.path.join('labelme_jsons', 'val_labelme_jsons')
os.chdir(path)

process_folder()

# save the COCO annotation file
coco_path = '../../val_coco.json'
with open(coco_path, 'w') as f:
    json.dump(coco, f, indent=2)

os.chdir('../../')
my_coco = COCO('val_coco.json')
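
As a final check that boxes and keypoints survived the conversion, you can draw one converted annotation back onto its image. This is a sketch; it assumes the current directory is the dataset root (after the os.chdir above) and that the images sit in an 'images' subfolder, which you may need to adapt to your own layout:

# Sketch: draw one converted annotation back onto its image to verify the COCO file
import cv2
import matplotlib.pyplot as plt

img_id = my_coco.getImgIds()[0]
img_info = my_coco.loadImgs(img_id)[0]
img = cv2.imread(os.path.join('images', img_info['file_name']))  # assumed image folder, adjust if needed

for ann in my_coco.loadAnns(my_coco.getAnnIds(imgIds=img_id)):
    x, y, w, h = [int(v) for v in ann['bbox']]
    img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 129, 0), 5)
    kpts = ann['keypoints']
    for i in range(0, len(kpts), 3):
        if kpts[i + 2] > 0:  # skip keypoints written as 0 (not labeled)
            img = cv2.circle(img, (kpts[i], kpts[i + 1]), 10, (0, 0, 255), -1)

plt.imshow(img[:,:,::-1])
plt.show()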


At this point, the MS COCO format files for the mmdetection framework are ready.

