Fixed a video/audio synchronization issue in VideoFFmpegComponent.

Leon Styhre 2021-05-10 18:54:17 +02:00
parent d9eda97ca6
commit a1209dfc5d

@@ -200,17 +200,14 @@ void VideoFFmpegComponent::readFrames()
         int allocatedSize = 0;
 
         // The pts value is the presentation time, i.e. the time stamp when
-        // the frame (picture) should be displayed.
-        double pts = 0.0l;
-        // This is needed to avoid a potential divide by zero.
-        if (mVideoFrame->pkt_duration)
-            pts = static_cast<double>(mVideoFrame->pts) * mVideoTimeBase /
-                    static_cast<double>(mVideoFrame->pkt_duration);
-        else
-            pts = static_cast<double>(mVideoFrame->pts) * mVideoTimeBase;
+        // the frame (picture) should be displayed. The packet dts value is
+        // used for the basis of the calculation as per the recommendation
+        // in the FFmpeg documentation for the av_read_frame function.
+        double pts = static_cast<double>(mPacket->dts) *
+                av_q2d(mVideoStream->time_base);
 
         // Conversion using libswscale. Bicubic interpolation gives a good
-        // balance between speed and image quality.
+        // balance between speed and quality.
         struct SwsContext* conversionContext = sws_getContext(
                 mVideoCodecContext->coded_width,
                 mVideoCodecContext->coded_height,
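
For reference, the new calculation simply scales the packet's decoding timestamp by the stream time base to obtain a time in seconds. Below is a minimal, self-contained sketch of that conversion, not the actual ES-DE code: the helper name packetDtsToSeconds and the AV_NOPTS_VALUE guard are illustrative additions, while mPacket->dts, mVideoStream->time_base and av_q2d() come from the diff above.

// Minimal sketch of the timestamp conversion used by the new code.
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/rational.h>
}

// Convert a packet timestamp (expressed in stream time_base units) to seconds.
static double packetDtsToSeconds(const AVPacket* packet, const AVStream* stream)
{
    // Some packets carry no dts at all; a real player would need a proper
    // fallback here, this sketch simply returns zero.
    if (packet->dts == AV_NOPTS_VALUE)
        return 0.0;

    // av_q2d() converts the AVRational time base to a double, so for a
    // 90 kHz stream ({1, 90000}) a dts of 180000 yields 2.0 seconds.
    return static_cast<double>(packet->dts) * av_q2d(stream->time_base);
}

Basing the calculation on the packet dts returned by av_read_frame (as the updated comment notes, per the FFmpeg documentation) also removes the pkt_duration division and its divide-by-zero special case from the old code, which is presumably where the audio/video drift mentioned in the commit message came from.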