Mirror of https://github.com/RetroDECK/ES-DE.git (synced 2025-04-10 19:15:13 +00:00)
Added proper frame drop support to the FFmpeg video player.
Also made multiple large optimizations.
This commit is contained in:
parent: b4492abccd
commit: c5a1555de3
@@ -16,6 +16,7 @@
 #include "resources/TextureResource.h"
 
 #include <algorithm>
+#include <iomanip>
 
 enum AVHWDeviceType VideoFFmpegComponent::sDeviceType = AV_HWDEVICE_TYPE_NONE;
 enum AVPixelFormat VideoFFmpegComponent::sPixelFormat = AV_PIX_FMT_NONE;
@@ -165,17 +166,22 @@ void VideoFFmpegComponent::render(const glm::mat4& parentTrans)
     mPictureMutex.lock();
 
     if (!mOutputPicture.hasBeenRendered) {
-        // Copy the contents of mOutputPicture to a temporary vector in order to call
+        // Move the contents of mOutputPicture to a temporary vector in order to call
         // initFromPixels() only after the mutex unlock. This significantly reduces the
         // lock waits in outputFrames().
         size_t pictureSize = mOutputPicture.pictureRGBA.size();
-        std::vector<uint8_t> tempPictureRGBA(pictureSize);
+        std::vector<uint8_t> tempPictureRGBA;
        int pictureWidth = 0;
        int pictureHeight = 0;
 
        if (pictureSize > 0) {
-            tempPictureRGBA.insert(tempPictureRGBA.begin(), mOutputPicture.pictureRGBA.begin(),
-                                   mOutputPicture.pictureRGBA.end());
+            tempPictureRGBA.insert(tempPictureRGBA.begin(),
+                                   std::make_move_iterator(mOutputPicture.pictureRGBA.begin()),
+                                   std::make_move_iterator(mOutputPicture.pictureRGBA.end()));
+
+            mOutputPicture.pictureRGBA.erase(mOutputPicture.pictureRGBA.begin(),
+                                             mOutputPicture.pictureRGBA.end());
+
            pictureWidth = mOutputPicture.width;
            pictureHeight = mOutputPicture.height;
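The comment in this hunk explains the optimization: the pixel data is taken out of mOutputPicture while the lock is held, and the expensive initFromPixels() call runs only after the mutex is released. Below is a minimal, self-contained sketch of that pattern; SharedPicture, gPicture and consumeLatestPicture() are illustrative names, not ES-DE code.

// Minimal sketch of the pattern above: take the shared buffer out with move
// iterators while holding the lock, then do the heavy consumer-side work
// (initFromPixels() in the diff) after the unlock.
#include <cstdint>
#include <iterator>
#include <mutex>
#include <vector>

struct SharedPicture {
    std::vector<uint8_t> pixels; // filled by the decoder thread
};

SharedPicture gPicture;
std::mutex gPictureMutex;

void consumeLatestPicture()
{
    std::vector<uint8_t> localPixels;
    {
        std::lock_guard<std::mutex> lock {gPictureMutex};
        localPixels.insert(localPixels.begin(),
                           std::make_move_iterator(gPicture.pixels.begin()),
                           std::make_move_iterator(gPicture.pixels.end()));
        // Empty the source so the producer starts from a clean buffer.
        gPicture.pixels.clear();
    }
    // Heavy processing of localPixels (e.g. uploading to a texture) happens here,
    // outside the critical section.
}

Swapping the vectors or move-assigning would express the same hand-off; the diff uses insert() with std::make_move_iterator followed by an explicit erase().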
@@ -528,7 +534,21 @@ void VideoFFmpegComponent::readFrames()
    if (mVideoFrameQueue.size() > 300 || mAudioFrameQueue.size() > 600)
        return;
 
+    int readLoops = 1;
+
+    // If we can't keep up the audio processing, then drop video frames as it's much worse
+    // to have stuttering audio than a lower video framerate.
+    if (mAudioStreamIndex >= 0 && mAudioFrameCount > mAudioTargetQueueSize / 2) {
+        if (static_cast<int>(mAudioFrameQueue.size()) < mAudioTargetQueueSize / 6)
+            readLoops = 5;
+        else if (static_cast<int>(mAudioFrameQueue.size()) < mAudioTargetQueueSize / 4)
+            readLoops = 3;
+        else if (static_cast<int>(mAudioFrameQueue.size()) < mAudioTargetQueueSize / 2)
+            readLoops = 2;
+    }
+
    if (mVideoCodecContext && mFormatContext) {
+        for (int i = 0; i < readLoops; i++) {
            if (static_cast<int>(mVideoFrameQueue.size()) < mVideoTargetQueueSize ||
                (mAudioStreamIndex >= 0 &&
                 static_cast<int>(mAudioFrameQueue.size()) < mAudioTargetQueueSize)) {
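The new comment states the policy: if audio decoding falls behind it is better to drop video frames than to let the audio stutter, so the emptier the audio queue, the more read/decode iterations run per call. The helper below is a hedged sketch of that selection logic with the same thresholds as the diff; calculateReadLoops() and its parameter names are illustrative and do not exist in ES-DE.

// Sketch of the read-loop throttling decision introduced above.
#include <cstddef>

int calculateReadLoops(bool hasAudioStream, int audioFramesProcessed,
                       std::size_t audioQueueSize, int audioTargetQueueSize)
{
    int readLoops {1};
    // Only throttle after a number of audio frames have already been processed,
    // mirroring the mAudioFrameCount check in the diff.
    if (hasAudioStream && audioFramesProcessed > audioTargetQueueSize / 2) {
        const int queued {static_cast<int>(audioQueueSize)};
        if (queued < audioTargetQueueSize / 6)
            readLoops = 5; // Audio queue nearly empty: decode much more per call.
        else if (queued < audioTargetQueueSize / 4)
            readLoops = 3;
        else if (queued < audioTargetQueueSize / 2)
            readLoops = 2;
    }
    return readLoops;
}

Within the extra iterations only the first decoded video frame (i == 0) is pushed through the filter graph; later ones are counted as dropped, as shown in the following hunks.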
@@ -538,18 +558,29 @@ void VideoFFmpegComponent::readFrames()
                !avcodec_receive_frame(mVideoCodecContext, mVideoFrame)) {
 
                int returnValue = 0;
+               mVideoFrameReadCount++;
 
                if (mSWDecoder) {
+                   // Drop the frame if necessary.
+                   if (i == 0 || mAudioFrameCount == 0) {
                        returnValue = av_buffersrc_add_frame_flags(
-                           mVBufferSrcContext, mVideoFrame, AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT);
+                           mVBufferSrcContext, mVideoFrame,
+                           AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT);
                    }
                    else {
+                       mVideoFrameDroppedCount++;
+                   }
+               }
+               else {
+                   if (i == 0 || mAudioFrameCount == 0) {
                        AVFrame* destFrame = nullptr;
                        destFrame = av_frame_alloc();
 
                        if (mVideoFrame->format == sPixelFormat) {
-                           if (av_hwframe_transfer_data(destFrame, mVideoFrame, 0) < 0) {
-                               LOG(LogError) << "VideoFFmpegComponent::readFrames(): "
+                           if (av_hwframe_transfer_data(destFrame, mVideoFrame, 0) <
+                               0) {
+                               LOG(LogError)
+                                   << "VideoFFmpegComponent::readFrames(): "
                                      "Couldn't transfer decoded video frame to "
                                      "system memory";
                                av_frame_free(&destFrame);
@@ -560,7 +591,8 @@ void VideoFFmpegComponent::readFrames()
                            destFrame->pts = mVideoFrame->pts;
                            destFrame->pkt_dts = mVideoFrame->pkt_dts;
                            destFrame->pict_type = mVideoFrame->pict_type;
-                           destFrame->chroma_location = mVideoFrame->chroma_location;
+                           destFrame->chroma_location =
+                               mVideoFrame->chroma_location;
                            destFrame->pkt_pos = mVideoFrame->pkt_pos;
                            destFrame->pkt_duration = mVideoFrame->pkt_duration;
                            destFrame->pkt_size = mVideoFrame->pkt_size;
@@ -572,9 +604,14 @@ void VideoFFmpegComponent::readFrames()
                            }
 
                            returnValue = av_buffersrc_add_frame_flags(
-                               mVBufferSrcContext, destFrame, AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT);
+                               mVBufferSrcContext, destFrame,
+                               AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT);
                            av_frame_free(&destFrame);
                        }
+                       else {
+                           mVideoFrameDroppedCount++;
+                       }
+                   }
 
                    if (returnValue < 0) {
                        LOG(LogError) << "VideoFFmpegComponent::readFrames(): "
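The hardware-decoder branch shown in the last three hunks downloads each decoded frame from GPU memory to system memory with av_hwframe_transfer_data() before handing it to the filter graph. Below is a hedged, generic sketch of that FFmpeg call, not ES-DE code; the diff copies pts, pkt_dts and the other fields individually, whereas the sketch uses av_frame_copy_props() for brevity.

// Sketch of downloading a hardware-decoded frame into system memory.
extern "C" {
#include <libavutil/frame.h>
#include <libavutil/hwcontext.h>
}

// Returns a software frame owned by the caller (release with av_frame_free()),
// or nullptr if allocation or the transfer fails.
AVFrame* downloadHardwareFrame(AVFrame* hwFrame)
{
    AVFrame* swFrame {av_frame_alloc()};
    if (!swFrame)
        return nullptr;

    // Copies the pixel data from the GPU surface into newly allocated buffers.
    if (av_hwframe_transfer_data(swFrame, hwFrame, 0) < 0) {
        av_frame_free(&swFrame);
        return nullptr;
    }

    // Carry over timestamps and other metadata describing the frame.
    av_frame_copy_props(swFrame, hwFrame);
    return swFrame;
}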
@@ -614,6 +651,11 @@ void VideoFFmpegComponent::readFrames()
                    }
                }
            }
+           else {
+               // The target queue sizes have been reached.
+               break;
+           }
+       }
        }
 
        if (readFrameReturn < 0)
@@ -649,10 +691,11 @@ void VideoFFmpegComponent::getProcessedFrames()
 
        int bufferSize = mVideoFrameResampled->width * mVideoFrameResampled->height * 4;
 
-       currFrame.frameRGBA.insert(currFrame.frameRGBA.begin(), &mVideoFrameResampled->data[0][0],
-                                  &mVideoFrameResampled->data[0][bufferSize]);
+       currFrame.frameRGBA.insert(
+           currFrame.frameRGBA.begin(), std::make_move_iterator(&mVideoFrameResampled->data[0][0]),
+           std::make_move_iterator(&mVideoFrameResampled->data[0][bufferSize]));
 
-       mVideoFrameQueue.push(currFrame);
+       mVideoFrameQueue.push(std::move(currFrame));
        av_frame_unref(mVideoFrameResampled);
    }
 
@@ -680,7 +723,7 @@ void VideoFFmpegComponent::getProcessedFrames()
                                 &mAudioFrameResampled->data[0][0],
                                 &mAudioFrameResampled->data[0][bufferSize]);
 
-           mAudioFrameQueue.push(currFrame);
+           mAudioFrameQueue.push(std::move(currFrame));
            av_frame_unref(mAudioFrameResampled);
        }
    }
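Both getProcessedFrames() hunks replace a copy with a move when the frame is pushed onto its queue. The sketch below illustrates why this matters; DecodedFrame and gVideoFrameQueue are illustrative names, not ES-DE types.

// Minimal sketch of the std::move() change: pushing by value copies the frame's
// pixel buffer, while moving hands the existing heap allocation to the queue element.
#include <cstdint>
#include <queue>
#include <utility>
#include <vector>

struct DecodedFrame {
    std::vector<uint8_t> rgba; // can be several megabytes per frame
    double pts {0.0};
};

std::queue<DecodedFrame> gVideoFrameQueue;

void enqueueFrame(DecodedFrame frame)
{
    // No copy of the RGBA buffer: its allocation is transferred into the queue.
    gVideoFrameQueue.push(std::move(frame));
}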
@@ -751,6 +794,14 @@ void VideoFFmpegComponent::outputFrames()
        LOG(LogDebug) << "Total video frames processed / video frame queue size: "
                      << mVideoFrameCount << " / "
                      << std::to_string(mVideoFrameQueue.size());
+       if (mVideoFrameDroppedCount > 0) {
+           LOG(LogDebug) << "Video frames dropped: " << mVideoFrameDroppedCount << " of "
+                         << mVideoFrameReadCount << " (" << std::setprecision(2)
+                         << (static_cast<float>(mVideoFrameDroppedCount) /
+                             static_cast<float>(mVideoFrameReadCount)) *
+                                100.0f
+                         << "%)";
+       }
    }
 
    mPictureMutex.lock();
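The new debug output reports how many of the read video frames were dropped and the resulting percentage. Below is a small sketch of that calculation using std::ostringstream instead of the ES-DE LOG macro; the sample counts are made up. With the default float format, std::setprecision(2) limits the output to two significant digits.

// Sketch of the drop-rate figure logged above.
#include <iomanip>
#include <iostream>
#include <sstream>

int main()
{
    const int droppedFrames {37};
    const int readFrames {1843};
    const float dropRate {static_cast<float>(droppedFrames) /
                          static_cast<float>(readFrames) * 100.0f};

    std::ostringstream msg;
    msg << "Video frames dropped: " << droppedFrames << " of " << readFrames << " ("
        << std::setprecision(2) << dropRate << "%)";
    std::cout << msg.str() << '\n'; // Prints: Video frames dropped: 37 of 1843 (2%)
}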
@@ -1136,6 +1187,8 @@ void VideoFFmpegComponent::startVideo()
    mEndOfVideo = false;
    mVideoFrameCount = 0;
    mAudioFrameCount = 0;
+   mVideoFrameReadCount = 0;
+   mVideoFrameDroppedCount = 0;
    mOutputPicture = {};
 
    // Get an empty texture for rendering the video.
@@ -158,6 +158,8 @@ private:
 
    int mAudioFrameCount;
    int mVideoFrameCount;
+   int mVideoFrameReadCount;
+   int mVideoFrameDroppedCount;
 
    double mAccumulatedTime;
    bool mStartTimeAccumulation;