With this I managed to save the video, but I ran into two problems: the colors 
were wrong, and the video length didn't match — the output video tended to be 
shorter and much faster than the input. I fixed the color issue by converting 
the frame back to BGR before writing it:

    # Run AlphaPose estimation on every frame of an input video and write
    # the annotated result to a new video file.
    #
    # NOTE on the original bug: the loop seeked with
    # `cap.set(cv2.CAP_PROP_POS_FRAMES, round(fps * elapsed_seconds))`.
    # Because pose estimation runs slower than real time, that seek skipped
    # frames, so the output video was shorter and played faster than the
    # input. Reading frames sequentially preserves the full length.
    cap = cv2.VideoCapture(folder_raw + video_fname)
    fps = int(cap.get(cv2.CAP_PROP_FPS))

    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    size = (frame_width, frame_height)

    fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
    result = cv2.VideoWriter(folder_result + 'demo_' + video_fname,
                             fourcc, fps, size)

    while True:
        # Sequential read: one input frame -> one output frame.
        ret, frame = cap.read()
        if not ret:
            # End of stream (or read error): stop processing.
            break

        # OpenCV delivers BGR; the detector expects RGB.
        frame = mx.nd.array(cv2.cvtColor(frame,
                                         cv2.COLOR_BGR2RGB)).astype('uint8')
        x, frame = gluoncv.data.transforms.presets.ssd.transform_test(
            frame, short=240)
        x = x.as_in_context(context)

        class_IDs, scores, bounding_boxs = detector(x)
        pose_input, upscale_bbox = detector_to_alpha_pose(frame,
                                                          class_IDs,
                                                          scores,
                                                          bounding_boxs)

        if upscale_bbox is not None:
            predicted_heatmap = estimator(pose_input.as_in_context(context))
            pred_coords, confidence = heatmap_to_coord(predicted_heatmap,
                                                       upscale_bbox)
            img = cv_plot_keypoints(frame, pred_coords, confidence,
                                    class_IDs, bounding_boxs, scores,
                                    box_thresh=0.5, keypoint_thresh=0.1)
        else:
            # No person detected: pass the plain frame through unchanged.
            img = frame

        # VideoWriter expects BGR frames of exactly `size`; frames of any
        # other size are silently dropped by many backends (transform_test
        # resized the frame to short=240), which also shortens the output.
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        img = cv2.resize(img, size)
        result.write(img)

        if cv2.waitKey(1) == 27:  # ESC to stop
            break

    cap.release()
    result.release()

    cv2.destroyAllWindows()
    print("The video was successfully saved.")





---
[Visit 
Topic](https://discuss.mxnet.apache.org/t/how-to-run-pose-estimation-alphapose-with-video-instead-of-cam/6645/5)
 or reply to this email to respond.

You are receiving this because you enabled mailing list mode.

To unsubscribe from these emails, [click 
here](https://discuss.mxnet.apache.org/email/unsubscribe/946538f12410e79361979a5d8f1de0a29e9d41a54162219b42deaa51b12220a7).

Reply via email to