|
30 | 30 | [170, 0, 255], [255, 0, 255]] |
31 | 31 |
|
32 | 32 |
|
def plot_keypoint(image, coordinates, confidence, keypoint_thresh=0.3):
    """Draw H3.6M skeletons onto `image` in place using cv2.

    Args:
        image: BGR image (ndarray) to draw on.
        coordinates: per-person COCO keypoints; converted to the H3.6M
            layout via `coco_h36m` before drawing.
        confidence: per-joint confidence, shape (persons, joints, 1).
        keypoint_thresh: joints below this confidence are not connected.

    Returns:
        The same image object, with circles on every joint and a colored
        limb segment for each visible H3.6M joint pair.
    """
    visible = confidence[:, :, 0] > keypoint_thresh
    coordinates = coco_h36m(coordinates)

    for person, pts in enumerate(coordinates):
        # White outline circle on every joint, regardless of confidence.
        for x, y in pts:
            cv2.circle(image, (int(x), int(y)), 8, (255, 255, 255), 1)

        # Connect each joint pair only when both endpoints pass the threshold.
        for limb_color, (ja, jb) in zip(colors, h36m_pairs):
            if visible[person, ja] and visible[person, jb]:
                xa, ya = int(pts[ja, 0]), int(pts[ja, 1])
                xb, yb = int(pts[jb, 0]), int(pts[jb, 1])
                cv2.line(image, (xa, ya), (xb, yb), limb_color, 6)
    return image
53 | | - |
54 | | - |
def write(x, img):
    """Draw a detection box with a 'People <id>' label onto `img`.

    Args:
        x: sequence whose first four entries are the box corners
           (x1, y1, x2, y2) and whose last entry is the person id.
        img: BGR image (ndarray) to draw on.

    Returns:
        The same image object with the rectangle, label background,
        and label text rendered.
    """
    vals = [int(v) for v in x]
    top_left = (vals[0], vals[1])
    bottom_right = (vals[2], vals[3])

    box_color = [0, 97, 255]
    label = f'People {vals[-1]}'

    # Detection rectangle.
    cv2.rectangle(img, top_left, bottom_right, box_color, 2)

    # Filled background sized to the rendered text, then the text itself.
    text_w, text_h = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1, 1)[0]
    bg_corner = (top_left[0] + text_w + 3, top_left[1] + text_h + 4)
    cv2.rectangle(img, top_left, bg_corner, [0, 128, 255], -1)
    cv2.putText(img, label, (top_left[0], top_left[1] + text_h + 4),
                cv2.FONT_HERSHEY_PLAIN, 1, [225, 255, 255], 1)
    return img
68 | | - |
69 | | - |
def load_json(file_path):
    """Load skeleton annotations for one video from a JSON file.

    The file is expected to hold `label`, `label_index`, and a `data` list
    of per-frame records, each with a 1-based `frame_index` and a
    `skeleton` list of detections (`pose`: 17 (x, y) joints, `score`:
    17 confidences, `bbox`).

    Args:
        file_path: path to the JSON annotation file.

    Returns:
        Tuple (keypoints, scores, label, label_index) where
        keypoints has shape (M, T, 17, 2) keeping only person tracks with
        at least one detection, and scores has shape (2, T, 17, 1).
    """
    with open(file_path, 'r') as fr:
        video_info = json.load(fr)

    label = video_info['label']
    label_index = video_info['label_index']

    # Bug fix: the original used data[-1]['frame_index'], which assumes the
    # records are sorted by frame; an out-of-order record with a larger
    # index would then write past the array end. Take the true maximum.
    num_frames = max(info['frame_index'] for info in video_info['data'])
    keypoints = np.zeros((2, num_frames, 17, 2), dtype=np.float32)  # (M, T, N, 2)
    scores = np.zeros((2, num_frames, 17), dtype=np.float32)        # (M, T, N)

    for frame_info in video_info['data']:
        frame_index = frame_info['frame_index']

        for index, skeleton_info in enumerate(frame_info['skeleton']):
            # Skip detections without a bbox, and keep at most 2 people.
            if len(skeleton_info['bbox']) == 0 or index >= 2:
                continue

            pose = np.asarray(skeleton_info['pose'], dtype=np.float32)
            score = np.asarray(skeleton_info['score'], dtype=np.float32).reshape(-1)

            # frame_index is 1-based in the file; arrays are 0-based.
            keypoints[index, frame_index - 1] = pose
            scores[index, frame_index - 1] = score

    # Keep only person tracks that received at least one detection.
    new_kpts = np.asarray(
        [kps for kps in keypoints if np.sum(kps) != 0.0], dtype=np.float32)

    # NOTE(review): scores intentionally keeps both person slots (shape
    # (2, T, 17, 1)) even when a track is dropped from new_kpts, matching
    # the original behavior.
    scores = scores[:, :, :, np.newaxis]
    return new_kpts, scores, label, label_index
109 | | - |
110 | 33 |
|
111 | 34 | def box_to_center_scale(box, model_image_width, model_image_height): |
112 | 35 | """convert a box to center,scale information required for pose transformation |
|
0 commit comments