// SPDX-License-Identifier: MIT
//
// EmulationStation Desktop Edition
// VideoFFmpegComponent.cpp
//
// Video player based on FFmpeg.
//

#define DEBUG_VIDEO false

#include "components/VideoFFmpegComponent.h"

#include "AudioManager.h"
#include "Settings.h"
#include "Window.h"
#include "resources/TextureResource.h"

#include <SDL2/SDL.h>

#include <algorithm>
#include <iomanip>

#if LIBAVUTIL_VERSION_MAJOR > 57 || (LIBAVUTIL_VERSION_MAJOR == 57 && LIBAVUTIL_VERSION_MINOR >= 28)
// FFmpeg 5.1 and above.
#define CHANNELS ch_layout.nb_channels
#else
#define CHANNELS channels
#endif
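
// Note: FFmpeg 5.1 (libavutil 57.28) replaced the deprecated "channels" member with the
// AVChannelLayout-based "ch_layout", so the CHANNELS macro above selects whichever member
// name the installed FFmpeg version provides.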

VideoFFmpegComponent::VideoFFmpegComponent()
    : mRenderer {Renderer::getInstance()}
    , mRectangleOffset {0.0f, 0.0f}
    , mFrameProcessingThread {nullptr}
    , mFormatContext {nullptr}
    , mVideoStream {nullptr}
    , mAudioStream {nullptr}
    , mVideoCodec {nullptr}
    , mAudioCodec {nullptr}
    , mHardwareCodec {nullptr}
    , mHwContext {nullptr}
    , mVideoCodecContext {nullptr}
    , mAudioCodecContext {nullptr}
    , mVBufferSrcContext {nullptr}
    , mVBufferSinkContext {nullptr}
    , mVFilterGraph {nullptr}
    , mVFilterInputs {nullptr}
    , mVFilterOutputs {nullptr}
    , mABufferSrcContext {nullptr}
    , mABufferSinkContext {nullptr}
    , mAFilterGraph {nullptr}
    , mAFilterInputs {nullptr}
    , mAFilterOutputs {nullptr}
    , mVideoTargetQueueSize {0}
    , mAudioTargetQueueSize {0}
    , mVideoTimeBase {0.0l}
    , mAccumulatedTime {0.0l}
    , mStartTimeAccumulation {false}
    , mDecodedFrame {false}
    , mEndOfVideo {false}
{
}

void VideoFFmpegComponent::setResize(const float width, const float height)
{
    // This resize function is used when stretching videos to full screen in the video screensaver.
    mTargetSize = glm::vec2 {width, height};
    mTargetIsMax = false;
    mStaticImage.setResize(mTargetSize);
    resize();
}

void VideoFFmpegComponent::setMaxSize(float width, float height)
{
    // This resize function is used in most instances, such as non-stretched video screensaver
    // and the gamelist videos.
    mTargetSize = glm::vec2 {width, height};
    mTargetIsMax = true;
    mStaticImage.setMaxSize(width, height);
    resize();
}

void VideoFFmpegComponent::resize()
{
    if (!mTexture)
        return;

    const glm::vec2 textureSize {static_cast<float>(mVideoWidth), static_cast<float>(mVideoHeight)};

    if (textureSize == glm::vec2 {0.0f, 0.0f})
        return;

    if (mTargetIsMax) {
        mSize = textureSize;

        glm::vec2 resizeScale {mTargetSize.x / mSize.x, mTargetSize.y / mSize.y};

        if (resizeScale.x < resizeScale.y) {
            mSize.x *= resizeScale.x;
            mSize.y *= resizeScale.x;
        }
        else {
            mSize.x *= resizeScale.y;
            mSize.y *= resizeScale.y;
        }

        mSize.x = (mSize.y / textureSize.y) * textureSize.x;
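
        // As a worked example, a hypothetical 640x480 texture constrained by setMaxSize(300, 300)
        // scales by min(300 / 640, 300 / 480) = 0.46875 to 300x225, and the width recalculation
        // above then snaps it back to the exact texture aspect ratio.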
    }
    else {
        // If both components are set, we just stretch.
        // If no components are set, we don't resize at all.
        mSize = mTargetSize == glm::vec2 {0.0f, 0.0f} ? textureSize : mTargetSize;

        // If only one component is set, we resize in a way that maintains aspect ratio.
        if (!mTargetSize.x && mTargetSize.y) {
            mSize.y = mTargetSize.y;
            mSize.x = (mSize.y / textureSize.y) * textureSize.x;
        }
        else if (mTargetSize.x && !mTargetSize.y) {
            mSize.y = (mTargetSize.x / textureSize.x) * textureSize.y;
            mSize.x = (mSize.y / textureSize.y) * textureSize.x;
        }
    }

    onSizeChanged();
}

void VideoFFmpegComponent::render(const glm::mat4& parentTrans)
{
    if (!mVisible || mOpacity == 0.0f || mThemeOpacity == 0.0f)
        return;

    if (!mHasVideo && mStaticImagePath == "")
        return;

    glm::mat4 trans {parentTrans * getTransform()};
    GuiComponent::renderChildren(trans);

    if (mIsPlaying && mFormatContext) {
        Renderer::Vertex vertices[4];
        mRenderer->setMatrix(trans);

        unsigned int rectColor {0x000000FF};

        if (!mGeneralFade && mThemeOpacity != 1.0f)
            rectColor = static_cast<int>(mThemeOpacity * 255.0f);
        if (mGeneralFade && (mOpacity != 1.0f || mThemeOpacity != 1.0f))
            rectColor = static_cast<int>(mFadeIn * mOpacity * mThemeOpacity * 255.0f);

        // Render the black rectangle behind the video.
        if (mVideoRectangleCoords.size() == 4) {
            mRenderer->drawRect(mVideoRectangleCoords[0], mVideoRectangleCoords[1],
                                mVideoRectangleCoords[2], mVideoRectangleCoords[3], // Line break.
                                rectColor, rectColor);
        }

        // This is needed to avoid a slight gap before the video starts playing.
        if (!mDecodedFrame)
            return;

        // clang-format off
        vertices[0] = {{0.0f + mRectangleOffset.x,    0.0f + mRectangleOffset.y   }, {0.0f, 0.0f}, 0xFFFFFFFF};
        vertices[1] = {{0.0f + mRectangleOffset.x,    mSize.y + mRectangleOffset.y}, {0.0f, 1.0f}, 0xFFFFFFFF};
        vertices[2] = {{mSize.x + mRectangleOffset.x, 0.0f + mRectangleOffset.y   }, {1.0f, 0.0f}, 0xFFFFFFFF};
        vertices[3] = {{mSize.x + mRectangleOffset.x, mSize.y + mRectangleOffset.y}, {1.0f, 1.0f}, 0xFFFFFFFF};
        // clang-format on

        vertices[0].color = mColorShift;
        vertices[1].color = mColorGradientHorizontal ? mColorShift : mColorShiftEnd;
        vertices[2].color = mColorGradientHorizontal ? mColorShiftEnd : mColorShift;
        vertices[3].color = mColorShiftEnd;

        // Round vertices.
        for (int i = 0; i < 4; ++i)
            vertices[i].position = glm::round(vertices[i].position);

        if (mFadeIn < 1.0f || mThemeOpacity < 1.0f)
            vertices->opacity = mFadeIn * mThemeOpacity;

        vertices->brightness = mBrightness;
        vertices->saturation = mSaturation * mThemeSaturation;
        vertices->dimming = mDimming;

        std::unique_lock<std::mutex> pictureLock {mPictureMutex};

        if (!mOutputPicture.hasBeenRendered) {
            // Move the contents of mOutputPicture to a temporary vector in order to call
            // initFromPixels() only after the mutex unlock. This significantly reduces the
            // lock waits in outputFrames().
            size_t pictureSize {mOutputPicture.pictureRGBA.size()};
            std::vector<uint8_t> tempPictureRGBA;
            int pictureWidth {0};
            int pictureHeight {0};

            if (pictureSize > 0) {
                tempPictureRGBA.insert(tempPictureRGBA.begin(),
                                       std::make_move_iterator(mOutputPicture.pictureRGBA.begin()),
                                       std::make_move_iterator(mOutputPicture.pictureRGBA.end()));

                mOutputPicture.pictureRGBA.erase(mOutputPicture.pictureRGBA.begin(),
                                                 mOutputPicture.pictureRGBA.end());

                pictureWidth = mOutputPicture.width;
                pictureHeight = mOutputPicture.height;

                mOutputPicture.hasBeenRendered = true;
            }

            pictureLock.unlock();

            if (pictureSize > 0) {
                // Build a texture for the video frame.
                mTexture->initFromPixels(&tempPictureRGBA.at(0), pictureWidth, pictureHeight);
            }
        }
        else {
            pictureLock.unlock();
        }

        if (mTexture != nullptr)
            mTexture->bind();

        // Render scanlines if this option is enabled. However, if this is the media viewer
        // or the video screensaver, then skip this as the scanline rendering is then handled
        // in those modules as a post-processing step.
        if (!mScreensaverMode && !mMediaViewerMode) {
            vertices[0].opacity = mFadeIn * mOpacity * mThemeOpacity;
            if ((mLegacyTheme && Settings::getInstance()->getBool("GamelistVideoScanlines")) ||
                (!mLegacyTheme && mRenderScanlines)) {
                vertices[0].shaders = Renderer::Shader::SCANLINES;
            }
        }

        mRenderer->drawTriangleStrips(&vertices[0], 4, Renderer::BlendFactor::SRC_ALPHA,
                                      Renderer::BlendFactor::ONE_MINUS_SRC_ALPHA);
    }
    else {
        if (mVisible)
            VideoComponent::renderSnapshot(parentTrans);
    }
}

void VideoFFmpegComponent::updatePlayer()
{
    if (mPaused || !mFormatContext)
        return;

    // Output any audio that has been added by the processing thread.
    std::unique_lock<std::mutex> audioLock {mAudioMutex};
    if (mOutputAudio.size()) {
        AudioManager::getInstance().processStream(&mOutputAudio.at(0),
                                                  static_cast<unsigned int>(mOutputAudio.size()));
        mOutputAudio.clear();
    }

    if (mIsActuallyPlaying && mStartTimeAccumulation) {
        mAccumulatedTime =
            mAccumulatedTime +
            static_cast<double>(std::chrono::duration_cast<std::chrono::nanoseconds>(
                                    std::chrono::high_resolution_clock::now() - mTimeReference)
                                    .count()) /
                1000000000.0l;
    }

    mTimeReference = std::chrono::high_resolution_clock::now();

    audioLock.unlock();

    if (!mFrameProcessingThread) {
        AudioManager::getInstance().unmuteStream();
        mFrameProcessingThread =
            std::make_unique<std::thread>(&VideoFFmpegComponent::frameProcessing, this);
    }
}
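
// Runs on the dedicated worker thread created in updatePlayer() above; the libavfilter
// graphs are set up here and torn down again when the playback loop exits.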
void VideoFFmpegComponent::frameProcessing()
{
    mWindow->increaseVideoPlayerCount();

    bool videoFilter {false};
    bool audioFilter {false};

    videoFilter = setupVideoFilters();

    if (mAudioCodecContext)
        audioFilter = setupAudioFilters();

    while (mIsPlaying && !mPaused && videoFilter && (!mAudioCodecContext || audioFilter)) {
        readFrames();
        if (!mIsPlaying)
            break;

        getProcessedFrames();
        if (!mIsPlaying)
            break;

        outputFrames();

        // This 1 ms wait makes sure that the thread does not consume all available CPU cycles.
        SDL_Delay(1);
    }

    if (videoFilter) {
        avfilter_inout_free(&mVFilterInputs);
        avfilter_inout_free(&mVFilterOutputs);
        avfilter_free(mVBufferSrcContext);
        avfilter_free(mVBufferSinkContext);
        avfilter_graph_free(&mVFilterGraph);
        mVBufferSrcContext = nullptr;
        mVBufferSinkContext = nullptr;
    }

    if (audioFilter) {
        avfilter_inout_free(&mAFilterInputs);
        avfilter_inout_free(&mAFilterOutputs);
        avfilter_free(mABufferSrcContext);
        avfilter_free(mABufferSinkContext);
        avfilter_graph_free(&mAFilterGraph);
        mABufferSrcContext = nullptr;
        mABufferSinkContext = nullptr;
    }

    mWindow->decreaseVideoPlayerCount();
}

bool VideoFFmpegComponent::setupVideoFilters()
{
    int returnValue {0};
    std::string errorMessage(512, '\0');

    mVFilterInputs = avfilter_inout_alloc();
    mVFilterOutputs = avfilter_inout_alloc();

    if (!(mVFilterGraph = avfilter_graph_alloc())) {
        LOG(LogError) << "VideoFFmpegComponent::setupVideoFilters(): "
                         "Couldn't allocate filter graph";
        return false;
    }

    // Limit the libavfilter video processing to two additional threads.
    // Not sure why the actual thread count is one less than specified.
    mVFilterGraph->nb_threads = 3;

    const AVFilter* bufferSrc {avfilter_get_by_name("buffer")};
    if (!bufferSrc) {
        LOG(LogError) << "VideoFFmpegComponent::setupVideoFilters(): "
                         "Couldn't find \"buffer\" filter";
        return false;
    }

    const AVFilter* bufferSink {avfilter_get_by_name("buffersink")};
    if (!bufferSink) {
        LOG(LogError) << "VideoFFmpegComponent::setupVideoFilters(): "
                         "Couldn't find \"buffersink\" filter";
        return false;
    }

    // Some codecs such as H.264 need the width to be in increments of 16 pixels.
    int width {mVideoCodecContext->width};
    int height {mVideoCodecContext->height};
    int modulo {mVideoCodecContext->width % 16};

    if (modulo > 0)
        width += 16 - modulo;

    std::string filterArguments;
    filterArguments.append("width=")
        .append(std::to_string(width))
        .append(":")
        .append("height=")
        .append(std::to_string(height))
        .append(":pix_fmt=")
        .append(av_get_pix_fmt_name(mVideoCodecContext->pix_fmt))
        .append(":time_base=")
        .append(std::to_string(mVideoStream->time_base.num))
        .append("/")
        .append(std::to_string(mVideoStream->time_base.den))
        .append(":sar=")
        .append(std::to_string(mVideoCodecContext->sample_aspect_ratio.num))
        .append("/")
        .append(std::to_string(mVideoCodecContext->sample_aspect_ratio.den));
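
    // As an illustration, for a hypothetical 640x480 yuv420p stream with a 1/30 time base
    // and square pixels, the arguments built above would read:
    // "width=640:height=480:pix_fmt=yuv420p:time_base=1/30:sar=1/1"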

    returnValue = avfilter_graph_create_filter(&mVBufferSrcContext, bufferSrc, "in",
                                               filterArguments.c_str(), nullptr, mVFilterGraph);

    if (returnValue < 0) {
        LOG(LogError) << "VideoFFmpegComponent::setupVideoFilters(): "
                         "Couldn't create filter instance for buffer source: "
                      << av_make_error_string(&errorMessage[0], errorMessage.size(), returnValue);
        return false;
    }

    returnValue = avfilter_graph_create_filter(&mVBufferSinkContext, bufferSink, "out", nullptr,
                                               nullptr, mVFilterGraph);

    if (returnValue < 0) {
        LOG(LogError) << "VideoFFmpegComponent::setupVideoFilters(): "
                         "Couldn't create filter instance for buffer sink: "
                      << av_make_error_string(&errorMessage[0], errorMessage.size(), returnValue);
        return false;
    }

    // Endpoints for the filter graph.
    mVFilterInputs->name = av_strdup("out");
    mVFilterInputs->filter_ctx = mVBufferSinkContext;
    mVFilterInputs->pad_idx = 0;
    mVFilterInputs->next = nullptr;

    mVFilterOutputs->name = av_strdup("in");
    mVFilterOutputs->filter_ctx = mVBufferSrcContext;
    mVFilterOutputs->pad_idx = 0;
    mVFilterOutputs->next = nullptr;

    std::string filterDescription;

    // Whether to upscale the frame rate to 60 FPS.
    if (Settings::getInstance()->getBool("VideoUpscaleFrameRate")) {

        if (modulo > 0) {
            filterDescription.append("scale=width=")
                .append(std::to_string(width))
                .append(":height=")
                .append(std::to_string(height))
                .append(",fps=fps=60,");
        }
        else {
            filterDescription.append("fps=fps=60,");
        }

        // The "framerate" filter is a more advanced way to upscale the frame rate using
        // interpolation. However I have not been able to get this to work with slice
        // threading so the performance is poor. As such it's disabled for now.
        // if (modulo > 0) {
        //     filterDescription.append("scale=width=")
        //         .append(std::to_string(width))
        //         .append(":height=")
        //         .append(std::to_string(height))
        //         .append(",framerate=fps=60,");
        // }
        // else {
        //     filterDescription.append("framerate=fps=60,");
        // }
    }

    filterDescription.append("format=pix_fmts=")
        .append(std::string(av_get_pix_fmt_name(AV_PIX_FMT_BGRA)));

    returnValue = avfilter_graph_parse_ptr(mVFilterGraph, filterDescription.c_str(),
                                           &mVFilterInputs, &mVFilterOutputs, nullptr);

    if (returnValue < 0) {
        LOG(LogError) << "VideoFFmpegComponent::setupVideoFilters(): "
                         "Couldn't add graph filter: "
                      << av_make_error_string(&errorMessage[0], errorMessage.size(), returnValue);
        return false;
    }

    returnValue = avfilter_graph_config(mVFilterGraph, nullptr);

    if (returnValue < 0) {
        LOG(LogError) << "VideoFFmpegComponent::setupVideoFilters(): "
                         "Couldn't configure graph: "
                      << av_make_error_string(&errorMessage[0], errorMessage.size(), returnValue);
        return false;
    }

    return true;
}

bool VideoFFmpegComponent::setupAudioFilters()
{
    int returnValue {0};
    std::string errorMessage(512, '\0');
    const int outSampleRates[] {AudioManager::getInstance().sAudioFormat.freq, -1};
    const enum AVSampleFormat outSampleFormats[] {AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_NONE};

    mAFilterInputs = avfilter_inout_alloc();
    mAFilterOutputs = avfilter_inout_alloc();

    if (!(mAFilterGraph = avfilter_graph_alloc())) {
        LOG(LogError) << "VideoFFmpegComponent::setupAudioFilters(): "
                         "Couldn't allocate filter graph";
        return false;
    }

    // Limit the libavfilter audio processing to one additional thread.
    // Not sure why the actual thread count is one less than specified.
    mAFilterGraph->nb_threads = 2;

    const AVFilter* bufferSrc {avfilter_get_by_name("abuffer")};
    if (!bufferSrc) {
        LOG(LogError) << "VideoFFmpegComponent::setupAudioFilters(): "
                         "Couldn't find \"abuffer\" filter";
        return false;
    }

    const AVFilter* bufferSink {avfilter_get_by_name("abuffersink")};
    if (!bufferSink) {
        LOG(LogError) << "VideoFFmpegComponent::setupAudioFilters(): "
                         "Couldn't find \"abuffersink\" filter";
        return false;
    }

    std::string channelLayout(128, '\0');

#if LIBAVUTIL_VERSION_MAJOR > 57 || (LIBAVUTIL_VERSION_MAJOR == 57 && LIBAVUTIL_VERSION_MINOR >= 28)
    // FFmpeg 5.1 and above.
    AVChannelLayout chLayout {};
    av_channel_layout_from_mask(&chLayout, mAudioCodecContext->ch_layout.u.mask);
    av_channel_layout_describe(&chLayout, &channelLayout[0], channelLayout.size());
    av_channel_layout_uninit(&chLayout);
#else
    av_get_channel_layout_string(&channelLayout[0], static_cast<int>(channelLayout.size()),
                                 mAudioCodecContext->CHANNELS, mAudioCodecContext->channel_layout);
#endif

    std::string filterArguments;
    filterArguments.append("time_base=")
        .append(std::to_string(mAudioStream->time_base.num))
        .append("/")
        .append(std::to_string(mAudioStream->time_base.den))
        .append(":sample_rate=")
        .append(std::to_string(mAudioCodecContext->sample_rate))
        .append(":sample_fmt=")
        .append(av_get_sample_fmt_name(mAudioCodecContext->sample_fmt))
        .append(":channel_layout=")
        .append(channelLayout.c_str());
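
    // As an illustration, for a hypothetical 48 kHz stereo fltp stream with a 1/48000 time
    // base, the arguments built above would read:
    // "time_base=1/48000:sample_rate=48000:sample_fmt=fltp:channel_layout=stereo"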

    returnValue = avfilter_graph_create_filter(&mABufferSrcContext, bufferSrc, "in",
                                               filterArguments.c_str(), nullptr, mAFilterGraph);

    if (returnValue < 0) {
        LOG(LogError) << "VideoFFmpegComponent::setupAudioFilters(): "
                         "Couldn't create filter instance for buffer source: "
                      << av_make_error_string(&errorMessage[0], errorMessage.size(), returnValue);
        return false;
    }

    returnValue = avfilter_graph_create_filter(&mABufferSinkContext, bufferSink, "out", nullptr,
                                               nullptr, mAFilterGraph);

    if (returnValue < 0) {
        LOG(LogError) << "VideoFFmpegComponent::setupAudioFilters(): "
                         "Couldn't create filter instance for buffer sink: "
                      << av_make_error_string(&errorMessage[0], errorMessage.size(), returnValue);
        return false;
    }

    // Endpoints for the filter graph.
    mAFilterInputs->name = av_strdup("out");
    mAFilterInputs->filter_ctx = mABufferSinkContext;
    mAFilterInputs->pad_idx = 0;
    mAFilterInputs->next = nullptr;

    mAFilterOutputs->name = av_strdup("in");
    mAFilterOutputs->filter_ctx = mABufferSrcContext;
    mAFilterOutputs->pad_idx = 0;
    mAFilterOutputs->next = nullptr;

    std::string filterDescription;
    filterDescription.append("aresample=")
        .append(std::to_string(outSampleRates[0]) + ",")
        .append("aformat=sample_fmts=")
        .append(av_get_sample_fmt_name(outSampleFormats[0]))
        .append(":channel_layouts=stereo,")
        .append("asetnsamples=n=1024:p=0");
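
    // The chain above resamples to the output device rate, converts to packed float stereo,
    // and has asetnsamples emit fixed 1024-sample frames (p=0 leaves the final partial
    // frame unpadded).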

    returnValue = avfilter_graph_parse_ptr(mAFilterGraph, filterDescription.c_str(),
                                           &mAFilterInputs, &mAFilterOutputs, nullptr);

    if (returnValue < 0) {
        LOG(LogError) << "VideoFFmpegComponent::setupAudioFilters(): "
                         "Couldn't add graph filter: "
                      << av_make_error_string(&errorMessage[0], errorMessage.size(), returnValue);
        return false;
    }

    returnValue = avfilter_graph_config(mAFilterGraph, nullptr);

    if (returnValue < 0) {
        LOG(LogError) << "VideoFFmpegComponent::setupAudioFilters(): "
                         "Couldn't configure graph: "
                      << av_make_error_string(&errorMessage[0], errorMessage.size(), returnValue);
        return false;
    }

    return true;
}

void VideoFFmpegComponent::readFrames()
{
    int readFrameReturn {0};

    // It's not clear if this can actually happen in practice, but in theory we could
    // continue to load frames indefinitely and run out of memory if invalid PTS values
    // are presented by FFmpeg.
    if (mVideoFrameQueue.size() > 300 || mAudioFrameQueue.size() > 600)
        return;

    int readLoops {1};

    // If we can't keep up with the audio processing, then drop video frames as it's much
    // worse to have stuttering audio than a lower video framerate.
    if (mAudioStreamIndex >= 0 && mAudioFrameCount > mAudioTargetQueueSize / 2) {
        if (static_cast<int>(mAudioFrameQueue.size()) < mAudioTargetQueueSize / 6)
            readLoops = 5;
        else if (static_cast<int>(mAudioFrameQueue.size()) < mAudioTargetQueueSize / 4)
            readLoops = 3;
        else if (static_cast<int>(mAudioFrameQueue.size()) < mAudioTargetQueueSize / 2)
            readLoops = 2;
    }
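
    // For example, with a hypothetical mAudioTargetQueueSize of 60 frames, once playback is
    // underway a queue drained below 10 frames triggers five read iterations per call, below
    // 15 frames three, and below 30 frames two.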

    if (mVideoCodecContext && mFormatContext) {
        for (int i = 0; i < readLoops; ++i) {
            if (static_cast<int>(mVideoFrameQueue.size()) < mVideoTargetQueueSize ||
                (mAudioStreamIndex >= 0 &&
                 static_cast<int>(mAudioFrameQueue.size()) < mAudioTargetQueueSize)) {
                while ((readFrameReturn = av_read_frame(mFormatContext, mPacket)) >= 0) {
                    if (mPacket->stream_index == mVideoStreamIndex) {
                        if (!avcodec_send_packet(mVideoCodecContext, mPacket) &&
                            !avcodec_receive_frame(mVideoCodecContext, mVideoFrame)) {

                            int returnValue {0};
                            ++mVideoFrameReadCount;

                            if (mSWDecoder) {
                                // Drop the frame if necessary.
                                if (i == 0 || mAudioFrameCount == 0) {
                                    returnValue = av_buffersrc_add_frame_flags(
                                        mVBufferSrcContext, mVideoFrame,
                                        AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT);
                                }
                                else {
                                    ++mVideoFrameDroppedCount;
                                }
                            }
                            else {
                                if (i == 0 || mAudioFrameCount == 0) {
                                    AVFrame* destFrame {nullptr};
                                    destFrame = av_frame_alloc();

                                    if (mVideoFrame->format == sPixelFormat) {
                                        if (av_hwframe_transfer_data(destFrame, mVideoFrame, 0) <
                                            0) {
                                            LOG(LogError)
                                                << "VideoFFmpegComponent::readFrames(): "
                                                   "Couldn't transfer decoded video frame to "
                                                   "system memory";
                                            av_frame_free(&destFrame);
                                            av_packet_unref(mPacket);
                                            break;
                                        }
                                        else {
                                            destFrame->pts = mVideoFrame->pts;
                                            destFrame->pkt_dts = mVideoFrame->pkt_dts;
                                            destFrame->pict_type = mVideoFrame->pict_type;
                                            destFrame->chroma_location =
                                                mVideoFrame->chroma_location;
                                            destFrame->pkt_pos = mVideoFrame->pkt_pos;
                                            destFrame->pkt_duration = mVideoFrame->pkt_duration;
                                            destFrame->pkt_size = mVideoFrame->pkt_size;
                                        }
                                    }
                                    else {
                                        LOG(LogError) << "VideoFFmpegComponent::readFrames(): "
                                                         "Couldn't decode video frame";
                                    }

                                    returnValue = av_buffersrc_add_frame_flags(
                                        mVBufferSrcContext, destFrame,
                                        AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT);
                                    av_frame_free(&destFrame);
                                }
                                else {
                                    ++mVideoFrameDroppedCount;
                                }
                            }

                            if (returnValue < 0) {
                                LOG(LogError) << "VideoFFmpegComponent::readFrames(): "
                                                 "Couldn't add video frame to buffer source";
                            }

                            av_packet_unref(mPacket);
                            break;
                        }
                        else {
                            av_packet_unref(mPacket);
                        }
                    }
                    else if (mPacket->stream_index == mAudioStreamIndex) {
                        if (!avcodec_send_packet(mAudioCodecContext, mPacket) &&
                            !avcodec_receive_frame(mAudioCodecContext, mAudioFrame)) {

                            // We have an audio frame that needs conversion and resampling.
                            int returnValue {av_buffersrc_add_frame_flags(
                                mABufferSrcContext, mAudioFrame, AV_BUFFERSRC_FLAG_KEEP_REF)};

                            if (returnValue < 0) {
                                LOG(LogError) << "VideoFFmpegComponent::readFrames(): "
                                                 "Couldn't add audio frame to buffer source";
                            }

                            av_packet_unref(mPacket);
                            continue;
                        }
                        else {
                            av_packet_unref(mPacket);
                        }
                    }
                    else {
                        // Ignore any stream that is not video or audio.
                        av_packet_unref(mPacket);
                    }
                }
            }
            else {
                // The target queue sizes have been reached.
                break;
            }
        }
    }

    if (readFrameReturn < 0)
        mEndOfVideo = true;
}

void VideoFFmpegComponent::getProcessedFrames()
{
    // Video frames.
    while (av_buffersink_get_frame(mVBufferSinkContext, mVideoFrameResampled) >= 0) {

        // Save frame into the queue for later processing.
        VideoFrame currFrame;

        // This is likely unnecessary as AV_PIX_FMT_RGBA always uses 4 bytes per pixel.
        // const int bytesPerPixel {
        //     av_get_padded_bits_per_pixel(av_pix_fmt_desc_get(AV_PIX_FMT_RGBA)) / 8};
        const int bytesPerPixel {4};
        const int width {mVideoFrameResampled->linesize[0] / bytesPerPixel};

        currFrame.width = width;
        currFrame.height = mVideoFrameResampled->height;

        mVideoFrameResampled->best_effort_timestamp = mVideoFrameResampled->pkt_dts;

        // The PTS value is the presentation time, i.e. the time stamp when the frame
        // (picture) should be displayed. The packet DTS value is used for the basis of
        // the calculation as per the recommendation in the FFmpeg documentation for
        // the av_read_frame function.
        const double pts {static_cast<double>(mVideoFrameResampled->pkt_dts) *
                          av_q2d(mVideoStream->time_base)};
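
        // As a worked example, with a hypothetical 1/90000 time base a pkt_dts of 180000
        // yields a PTS of 180000 * (1 / 90000) = 2.0 seconds.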

        // Needs to be adjusted if changing the rate?
        const double frameDuration {static_cast<double>(mVideoFrameResampled->pkt_duration) *
                                    av_q2d(mVideoStream->time_base)};

        currFrame.pts = pts;
        currFrame.frameDuration = frameDuration;

        const int bufferSize {width * mVideoFrameResampled->height * 4};

        currFrame.frameRGBA.insert(
            currFrame.frameRGBA.begin(), std::make_move_iterator(&mVideoFrameResampled->data[0][0]),
            std::make_move_iterator(&mVideoFrameResampled->data[0][bufferSize]));

        mVideoFrameQueue.emplace(std::move(currFrame));
        av_frame_unref(mVideoFrameResampled);
    }

    // Audio frames.
    // When resampling, we may not always get a frame returned from the sink as there may not
    // have been enough data available to the filter.
    while (mAudioCodecContext &&
           av_buffersink_get_frame(mABufferSinkContext, mAudioFrameResampled) >= 0) {

        AudioFrame currFrame;
        AVRational timeBase;

        mAudioFrameResampled->best_effort_timestamp = mAudioFrameResampled->pts;

        timeBase.num = 1;
        timeBase.den = mAudioFrameResampled->sample_rate;

        double pts {mAudioFrameResampled->pts * av_q2d(timeBase)};
        currFrame.pts = pts;

        int bufferSize {mAudioFrameResampled->nb_samples * mAudioFrameResampled->CHANNELS *
                        av_get_bytes_per_sample(AV_SAMPLE_FMT_FLT)};

        currFrame.resampledData.insert(currFrame.resampledData.begin(),
                                       &mAudioFrameResampled->data[0][0],
                                       &mAudioFrameResampled->data[0][bufferSize]);

        mAudioFrameQueue.emplace(std::move(currFrame));
        av_frame_unref(mAudioFrameResampled);
    }
}

void VideoFFmpegComponent::outputFrames()
{
    // Check if we should start counting the time (i.e. start playing the video).
    // The audio stream controls when the playback and time counting starts, assuming
    // there is an audio track.
    if (!mAudioCodecContext || (mAudioCodecContext && !mAudioFrameQueue.empty())) {
        if (!mStartTimeAccumulation) {
            std::unique_lock<std::mutex> audioLock {mAudioMutex};
            mTimeReference = std::chrono::high_resolution_clock::now();
            mStartTimeAccumulation = true;
            mIsActuallyPlaying = true;
        }
    }

    // Process the audio frames that have a PTS value below mAccumulatedTime (plus a small
    // buffer to avoid underflows).
    while (!mAudioFrameQueue.empty()) {
        // In very rare instances video files are broken and start with a high PTS value for
        // the first frame. In this case set the accumulated time value to this PTS value if
        // the audio frame queue is filled, otherwise the stream will never start playing.
        if (mAudioFrameCount == 0 &&
            mAudioFrameQueue.size() == static_cast<size_t>(mAudioTargetQueueSize) &&
            mAccumulatedTime < mAudioFrameQueue.front().pts) {
            mAccumulatedTime = mAudioFrameQueue.front().pts;
        }
        if (mAudioFrameQueue.front().pts < mAccumulatedTime + AUDIO_BUFFER) {
            // Enable only when needed, as this generates a lot of debug output.
            if (DEBUG_VIDEO) {
                LOG(LogDebug) << "Processing audio frame with PTS: "
                              << mAudioFrameQueue.front().pts;
                LOG(LogDebug) << "Total audio frames processed / audio frame queue size: "
                              << mAudioFrameCount << " / "
                              << std::to_string(mAudioFrameQueue.size());
            }

            bool outputSound {false};

            if ((!mScreensaverMode && !mMediaViewerMode) &&
                Settings::getInstance()->getBool("ViewsVideoAudio"))
                outputSound = true;
            else if (mScreensaverMode && Settings::getInstance()->getBool("ScreensaverVideoAudio"))
                outputSound = true;
            else if (mMediaViewerMode && Settings::getInstance()->getBool("MediaViewerVideoAudio"))
                outputSound = true;

            if (outputSound) {
                // The audio is output to AudioManager from updatePlayer() in the main thread.
                std::unique_lock<std::mutex> audioLock {mAudioMutex};

                mOutputAudio.insert(
                    mOutputAudio.end(),
                    std::make_move_iterator(mAudioFrameQueue.front().resampledData.begin()),
                    std::make_move_iterator(mAudioFrameQueue.front().resampledData.end()));

                audioLock.unlock();
            }
            mAudioFrameQueue.pop();
            ++mAudioFrameCount;
        }
        else {
            break;
        }
    }

    // Process all available video frames that have a PTS value below mAccumulatedTime.
    // But if more than one frame is processed here, it means that the computer can't
    // keep up for some reason.
    while (mIsActuallyPlaying && !mVideoFrameQueue.empty()) {
        // This workaround for broken files with a high PTS value for the first frame is only
        // applied if there are no audio streams available.
        if (!mAudioCodecContext && !mDecodedFrame &&
            mVideoFrameQueue.size() == static_cast<size_t>(mVideoTargetQueueSize) &&
            mAccumulatedTime < mVideoFrameQueue.front().pts) {
            mAccumulatedTime = mVideoFrameQueue.front().pts;
        }

        if (mVideoFrameQueue.front().pts < mAccumulatedTime) {
            // Enable only when needed, as this generates a lot of debug output.
            if (DEBUG_VIDEO) {
                LOG(LogDebug) << "Processing video frame with PTS: "
                              << mVideoFrameQueue.front().pts;
                LOG(LogDebug) << "Total video frames processed / video frame queue size: "
                              << mVideoFrameCount << " / "
                              << std::to_string(mVideoFrameQueue.size());
                if (mVideoFrameDroppedCount > 0) {
                    LOG(LogDebug) << "Video frames dropped: " << mVideoFrameDroppedCount << " of "
                                  << mVideoFrameReadCount << " (" << std::setprecision(2)
                                  << (static_cast<float>(mVideoFrameDroppedCount) /
                                      static_cast<float>(mVideoFrameReadCount)) *
                                         100.0f
                                  << "%)";
                }
            }

            std::unique_lock<std::mutex> pictureLock {mPictureMutex};

            // Give some leeway for frames that have not yet been rendered but that have pts
            // values with a time difference relative to the frame duration that is under a
            // certain threshold. In this case, give the renderer an additional chance to output
            // the frames. If the difference exceeds the threshold though, then skip them as
            // otherwise videos would just slow down instead of skipping frames when the computer
            // can't keep up. This approach primarily decreases stuttering for videos with frame
            // rates close to, or at, the rendering frame rate, for example 59.94 and 60 FPS.
            if (mDecodedFrame && !mOutputPicture.hasBeenRendered) {
                double timeDifference {mAccumulatedTime - mVideoFrameQueue.front().pts -
                                       mVideoFrameQueue.front().frameDuration * 2.0};
                if (timeDifference < mVideoFrameQueue.front().frameDuration) {
                    pictureLock.unlock();
                    break;
                }
            }

            mOutputPicture.pictureRGBA.clear();
            mOutputPicture.pictureRGBA.insert(
                mOutputPicture.pictureRGBA.begin(),
                std::make_move_iterator(mVideoFrameQueue.front().frameRGBA.begin()),
                std::make_move_iterator(mVideoFrameQueue.front().frameRGBA.end()));

            mOutputPicture.width = mVideoFrameQueue.front().width;
            mOutputPicture.height = mVideoFrameQueue.front().height;
            mOutputPicture.hasBeenRendered = false;

            mDecodedFrame = true;

            pictureLock.unlock();

            mVideoFrameQueue.pop();
            ++mVideoFrameCount;
        }
        else {
            break;
        }
    }
}

void VideoFFmpegComponent::calculateBlackRectangle()
{
    // Calculate the position and size for the black rectangle that will be rendered behind
    // videos. If the option to display pillarboxes (and letterboxes) is enabled, then this
    // would extend to the entire video area (if above the threshold as defined below) or
    // otherwise it will exactly match the video size. The reason to add a black rectangle
    // behind videos in this second instance is that the scanline rendering will make the
    // video partially transparent so this may avoid some unforeseen issues with some themes.
    // In general, adding very narrow pillarboxes or letterboxes doesn't look good, so by
    // default this is not done unless the size of the video vs the overall video area is
    // above the threshold defined by mPillarboxThreshold. By default this is set to 0.85
    // for the X axis and 0.90 for the Y axis, but this is theme-controllable via the
    // pillarboxThreshold property.
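
    // For example, with the default 0.85 X-axis threshold, a video filling 800 pixels of a
    // 1000-pixel wide video area (a 0.8 ratio) gets a full-width pillarbox rectangle, while
    // one filling 900 pixels (0.9) only gets a rectangle matching the video itself.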
    if (mVideoAreaPos != glm::vec2 {0.0f, 0.0f} && mVideoAreaSize != glm::vec2 {0.0f, 0.0f}) {
        mVideoRectangleCoords.clear();
        mRectangleOffset = {0.0f, 0.0f};

        if ((mLegacyTheme && Settings::getInstance()->getBool("GamelistVideoPillarbox")) ||
            (!mLegacyTheme && mDrawPillarboxes)) {
            float rectHeight {0.0f};
            float rectWidth {0.0f};
            // Video is in landscape orientation.
            if (mSize.x > mSize.y) {
                // Checking the Y size should not normally be required as landscape format
                // should mean the height can't be higher than the max size defined by the
                // theme. But as the height in mSize is provided by FFmpeg in integer format
                // and then scaled, there could be rounding errors that make the video height
                // slightly higher than allowed. It's only a single pixel or a few pixels, but
                // it's still visible for some videos.
                if (mSize.y < mVideoAreaSize.y &&
                    mSize.y / mVideoAreaSize.y < mPillarboxThreshold.y)
                    rectHeight = mVideoAreaSize.y;
                else
                    rectHeight = mSize.y;
                if (mSize.x < mVideoAreaSize.x &&
                    mSize.x / mVideoAreaSize.x < mPillarboxThreshold.x)
                    rectWidth = mVideoAreaSize.x;
                else
                    rectWidth = mSize.x;
            }
            // Video is in portrait orientation (or completely square).
            else {
                if (mSize.x <= mVideoAreaSize.x &&
                    mSize.x / mVideoAreaSize.x < mPillarboxThreshold.x)
                    rectWidth = mVideoAreaSize.x;
                else
                    rectWidth = mSize.x;
                rectHeight = mSize.y;
            }
            // If an origin value other than 0.5 is used, then create an offset for centering
            // the video correctly.
            if (mOrigin != glm::vec2 {0.5f, 0.5f}) {
                if (rectWidth > mSize.x)
                    mRectangleOffset.x -= (rectWidth - mSize.x) * (mOrigin.x - 0.5f);
                else if (rectHeight > mSize.y)
                    mRectangleOffset.y -= (rectHeight - mSize.y) * (mOrigin.y - 0.5f);
            }

            // Populate the rectangle coordinates to be used in render().
            const float offsetX {rectWidth - mSize.x};
            const float offsetY {rectHeight - mSize.y};
            mVideoRectangleCoords.emplace_back(std::round((-offsetX / 2.0f) + mRectangleOffset.x));
            mVideoRectangleCoords.emplace_back(std::round((-offsetY / 2.0f) + mRectangleOffset.y));
            mVideoRectangleCoords.emplace_back(std::round(rectWidth));
            mVideoRectangleCoords.emplace_back(std::round(rectHeight));
        }
        // If the option to display pillarboxes is disabled, then make the rectangle equivalent
        // to the size of the video.
        else {
            mVideoRectangleCoords.emplace_back(0.0f);
            mVideoRectangleCoords.emplace_back(0.0f);
            mVideoRectangleCoords.emplace_back(std::round(mSize.x));
            mVideoRectangleCoords.emplace_back(std::round(mSize.y));
        }
    }
}

void VideoFFmpegComponent::detectHWDecoder()
{
#if defined(__APPLE__)
    LOG(LogDebug) << "VideoFFmpegComponent::detectHWDecoder(): Using hardware decoder VideoToolbox";
    sDeviceType = AV_HWDEVICE_TYPE_VIDEOTOOLBOX;
    return;
#elif defined(_WIN64)
    bool hasDXVA2 {false};
    bool hasD3D11VA {false};

    AVBufferRef* testContext {nullptr};
    AVHWDeviceType tempDevice {AV_HWDEVICE_TYPE_NONE};

    while ((tempDevice = av_hwdevice_iterate_types(tempDevice)) != AV_HWDEVICE_TYPE_NONE) {
        // The Direct3D 11 decoder detection seems to cause stability issues on some machines
        // so disabling it for now.
        if (tempDevice == AV_HWDEVICE_TYPE_DXVA2) {
            // if (tempDevice == AV_HWDEVICE_TYPE_DXVA2 || tempDevice == AV_HWDEVICE_TYPE_D3D11VA) {
            if (av_hwdevice_ctx_create(&testContext, tempDevice, nullptr, nullptr, 0) >= 0) {
                if (tempDevice == AV_HWDEVICE_TYPE_DXVA2)
                    hasDXVA2 = true;
                else
                    hasD3D11VA = true;
            }
            av_buffer_unref(&testContext);
        }
    }

    // Prioritize DXVA2.
    if (hasDXVA2) {
        LOG(LogDebug) << "VideoFFmpegComponent::detectHWDecoder(): Using hardware decoder DXVA2";
        sDeviceType = AV_HWDEVICE_TYPE_DXVA2;
    }
    else if (hasD3D11VA) {
        LOG(LogDebug) << "VideoFFmpegComponent::detectHWDecoder(): Using hardware decoder D3D11VA";
        sDeviceType = AV_HWDEVICE_TYPE_D3D11VA;
    }
    else {
        LOG(LogWarning) << "VideoFFmpegComponent::detectHWDecoder(): Unable to detect any usable "
                           "hardware decoder";
    }
#else
    // This would mostly be Linux, but possibly also BSD Unix.

    bool hasVAAPI {false};
    bool hasVDPAU {false};

    AVBufferRef* testContext {nullptr};
    AVHWDeviceType tempDevice {AV_HWDEVICE_TYPE_NONE};

    while ((tempDevice = av_hwdevice_iterate_types(tempDevice)) != AV_HWDEVICE_TYPE_NONE) {
        if (tempDevice == AV_HWDEVICE_TYPE_VDPAU || tempDevice == AV_HWDEVICE_TYPE_VAAPI) {
            if (av_hwdevice_ctx_create(&testContext, tempDevice, nullptr, nullptr, 0) >= 0) {
                if (tempDevice == AV_HWDEVICE_TYPE_VAAPI)
                    hasVAAPI = true;
                else
                    hasVDPAU = true;
            }
            av_buffer_unref(&testContext);
        }
    }

    // Prioritize VAAPI.
    if (hasVAAPI) {
        LOG(LogDebug) << "VideoFFmpegComponent::detectHWDecoder(): Using hardware decoder VAAPI";
        sDeviceType = AV_HWDEVICE_TYPE_VAAPI;
    }
    else if (hasVDPAU) {
        LOG(LogDebug) << "VideoFFmpegComponent::detectHWDecoder(): Using hardware decoder VDPAU";
        sDeviceType = AV_HWDEVICE_TYPE_VDPAU;
    }
    else {
        LOG(LogWarning) << "VideoFFmpegComponent::detectHWDecoder(): Unable to detect any "
                           "usable hardware decoder";
    }
#endif
}

bool VideoFFmpegComponent::decoderInitHW()
{
    // This should only be required the first time any video is played.
    if (sDeviceType == AV_HWDEVICE_TYPE_NONE)
        detectHWDecoder();

    // If there is no device, the detection failed.
    if (sDeviceType == AV_HWDEVICE_TYPE_NONE)
        return true;

    // If the hardware decoding of the file was previously unsuccessful during the program
    // session, then don't attempt it again.
    if (std::find(sSWDecodedVideos.begin(), sSWDecodedVideos.end(), mVideoPath) !=
        sSWDecodedVideos.end()) {
        return true;
    }

    // 50 is just an arbitrary number so we don't potentially get stuck in an endless loop.
    for (int i = 0; i < 50; ++i) {
        const AVCodecHWConfig* config {avcodec_get_hw_config(mHardwareCodec, i)};
        if (!config) {
            LOG(LogDebug) << "VideoFFmpegComponent::decoderInitHW(): Hardware decoder \""
                          << av_hwdevice_get_type_name(sDeviceType)
                          << "\" does not seem to support codec \"" << mHardwareCodec->name << "\"";
        }
        else if (config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX &&
                 config->device_type == sDeviceType) {
            sPixelFormat = config->pix_fmt;
            break;
        }
    }

    // If the pixel format is not set properly, then hardware decoding won't work for the file.
    if (sPixelFormat == AV_PIX_FMT_NONE)
        return true;

    if (av_hwdevice_ctx_create(&mHwContext, sDeviceType, nullptr, nullptr, 0) < 0) {
        LOG(LogDebug) << "VideoFFmpegComponent::decoderInitHW(): Unable to open hardware device \""
                      << av_hwdevice_get_type_name(sDeviceType) << "\"";
        av_buffer_unref(&mHwContext);
        return true;
    }

    // Callback function for AVCodecContext.
    // clang-format off
    auto formatFunc =
        [](AVCodecContext* ctx, const enum AVPixelFormat* pix_fmts) -> enum AVPixelFormat {

        const enum AVPixelFormat* pixelFormats;

        for (pixelFormats = pix_fmts; *pixelFormats != -1; ++pixelFormats)
            if (*pixelFormats == sPixelFormat)
                return static_cast<enum AVPixelFormat>(sPixelFormat);

        return AV_PIX_FMT_NONE;
    };
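
    // FFmpeg invokes this get_format callback with the list of pixel formats the decoder can
    // produce; returning sPixelFormat selects the hardware surface format, while returning
    // AV_PIX_FMT_NONE surfaces as a decoding error that the fallback logic below handles.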

    // Check if the video can actually be hardware decoded (unless this has already been done).
    if (std::find(sHWDecodedVideos.begin(), sHWDecodedVideos.end(), mVideoPath) ==
        sHWDecodedVideos.end()) {

        // clang-format on
        AVCodecContext* checkCodecContext {avcodec_alloc_context3(mHardwareCodec)};

        if (avcodec_parameters_to_context(checkCodecContext, mVideoStream->codecpar)) {
            LOG(LogError) << "VideoFFmpegComponent::decoderInitHW(): "
                             "Couldn't fill the video codec context parameters for file \""
                          << mVideoPath << "\"";
            avcodec_free_context(&checkCodecContext);
            return true;
        }
        else {
            bool onlySWDecode {false};

            checkCodecContext->get_format = formatFunc;
            checkCodecContext->hw_device_ctx = av_buffer_ref(mHwContext);

            if (avcodec_open2(checkCodecContext, mHardwareCodec, nullptr)) {
                LOG(LogError) << "VideoFFmpegComponent::decoderInitHW(): "
                                 "Couldn't initialize the video codec context for file \""
                              << mVideoPath << "\"";
            }

            AVPacket* checkPacket {av_packet_alloc()};
            int readFrameReturn {0};

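            // Skip ahead to the first video packet in the file, unreffing any other packets.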
            while ((readFrameReturn = av_read_frame(mFormatContext, checkPacket)) == 0) {
                if (checkPacket->stream_index != mVideoStreamIndex)
                    av_packet_unref(checkPacket);
                else
                    break;
            }

            // Supplying a packet to the decoder will cause an immediate error for some videos
            // while others will require that one or several frame receive attempts are performed
            // before we get a definitive result. On error we fall back to the software decoder.
            if (readFrameReturn == 0 && checkPacket->stream_index == mVideoStreamIndex) {
                if (avcodec_send_packet(checkCodecContext, checkPacket) < 0) {
                    // Save the file path to the list of videos that require software decoding
                    // so we don't have to check it again during the program session.
                    sSWDecodedVideos.emplace_back(mVideoPath);
                    onlySWDecode = true;
                }
                else {
                    AVFrame* checkFrame {av_frame_alloc()};

                    onlySWDecode = true;

                    // For some videos we need to process at least one extra frame to verify
                    // that the hardware decoder can actually be used, otherwise the fallback
                    // to software decoding would take place when it's not necessary.
                    for (int i = 0; i < 3; ++i) {
                        if (avcodec_receive_frame(checkCodecContext, checkFrame) < 0) {
                            av_packet_unref(checkPacket);
                            while (av_read_frame(mFormatContext, checkPacket) == 0) {
                                if (checkPacket->stream_index != mVideoStreamIndex)
                                    av_packet_unref(checkPacket);
                                else
                                    break;
                            }

                            avcodec_send_packet(checkCodecContext, checkPacket);
                            av_packet_unref(checkPacket);

                            if (avcodec_receive_frame(checkCodecContext, checkFrame) == 0) {
                                onlySWDecode = false;
                                break;
                            }
                            else {
                                onlySWDecode = true;
                            }
                        }
                        else {
                            onlySWDecode = false;
                        }
                        av_packet_unref(checkPacket);
                        av_frame_unref(checkFrame);
                    }

                    av_frame_free(&checkFrame);

                    if (onlySWDecode == false) {
                        // Save the file path to the list of videos that work with hardware
                        // decoding so we don't have to check it again during the program session.
                        sHWDecodedVideos.emplace_back(mVideoPath);
                    }
                }

                av_packet_free(&checkPacket);
                avcodec_free_context(&checkCodecContext);

                // Seek back to the start position of the file.
                av_seek_frame(mFormatContext, -1, 0, AVSEEK_FLAG_ANY);

                if (onlySWDecode)
                    return true;
            }
        }
    }

    // The hardware decoding check passed successfully or it was done previously for the file.
    // Now perform the real setup.
    mVideoCodecContext = avcodec_alloc_context3(mHardwareCodec);

    if (!mVideoCodecContext) {
        LOG(LogError) << "VideoFFmpegComponent::decoderInitHW(): "
                         "Couldn't allocate video codec context for file \""
                      << mVideoPath << "\"";
        avcodec_free_context(&mVideoCodecContext);
        return true;
    }

    if (avcodec_parameters_to_context(mVideoCodecContext, mVideoStream->codecpar)) {
        LOG(LogError) << "VideoFFmpegComponent::decoderInitHW(): "
                         "Couldn't fill the video codec context parameters for file \""
                      << mVideoPath << "\"";
        avcodec_free_context(&mVideoCodecContext);
        return true;
    }

    mVideoCodecContext->get_format = formatFunc;
    mVideoCodecContext->hw_device_ctx = av_buffer_ref(mHwContext);

    if (avcodec_open2(mVideoCodecContext, mHardwareCodec, nullptr)) {
        LOG(LogError) << "VideoFFmpegComponent::decoderInitHW(): "
                         "Couldn't initialize the video codec context for file \""
                      << mVideoPath << "\"";
        avcodec_free_context(&mVideoCodecContext);
        return true;
    }

    return false;
}

void VideoFFmpegComponent::startVideoStream()
{
    if (!mVisible || mThemeOpacity == 0.0f)
        return;

    mIsPlaying = true;

    if (!mFormatContext) {
        mHardwareCodec = nullptr;
        mHwContext = nullptr;
        mFrameProcessingThread = nullptr;
        mVideoWidth = 0;
        mVideoHeight = 0;
        mAccumulatedTime = 0;
        mStartTimeAccumulation = false;
        mSWDecoder = true;
        mDecodedFrame = false;
        mEndOfVideo = false;
        mVideoFrameCount = 0;
        mAudioFrameCount = 0;
        mVideoFrameReadCount = 0;
        mVideoFrameDroppedCount = 0;
        mOutputPicture = {};

        // Get an empty texture for rendering the video.
        mTexture = TextureResource::get("");

        // This is used for the audio and video synchronization.
        mTimeReference = std::chrono::high_resolution_clock::now();

        // Clear the video and audio frame queues.
        std::queue<VideoFrame>().swap(mVideoFrameQueue);
        std::queue<AudioFrame>().swap(mAudioFrameQueue);
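        // (Swapping with a default-constructed queue is the usual idiom for clearing a
        // std::queue, as it has no clear() member function.)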

        std::string filePath {"file:" + mVideoPath};
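        // Note: the "file:" prefix explicitly selects FFmpeg's file protocol, presumably so
        // that the path can't be misinterpreted as a URL using some other protocol.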

        // This will disable the FFmpeg logging, so comment this out if debug info is needed.
        av_log_set_callback(nullptr);

        // File operations and basic setup.

        if (avformat_open_input(&mFormatContext, filePath.c_str(), nullptr, nullptr)) {
            LOG(LogError) << "VideoFFmpegComponent::startVideoStream(): "
                             "Couldn't open video file \""
                          << mVideoPath << "\"";
            return;
        }

        if (avformat_find_stream_info(mFormatContext, nullptr)) {
            LOG(LogError) << "VideoFFmpegComponent::startVideoStream(): "
                             "Couldn't read stream information from video file \""
                          << mVideoPath << "\"";
            return;
        }

        mVideoStreamIndex = -1;
        mAudioStreamIndex = -1;

        // Video stream setup.

#if defined(VIDEO_HW_DECODING)
        bool hwDecoding {Settings::getInstance()->getBool("VideoHardwareDecoding")};
#else
        bool hwDecoding {false};
#endif

#if LIBAVUTIL_VERSION_MAJOR > 56
        mVideoStreamIndex = av_find_best_stream(mFormatContext, AVMEDIA_TYPE_VIDEO, -1, -1,
                                                const_cast<const AVCodec**>(&mHardwareCodec), 0);
#else
        mVideoStreamIndex =
            av_find_best_stream(mFormatContext, AVMEDIA_TYPE_VIDEO, -1, -1, &mHardwareCodec, 0);
#endif
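        // Note: the version check above is needed as FFmpeg 5.0 (libavutil major version 57)
        // changed av_find_best_stream() to take a const AVCodec** argument.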

        if (mVideoStreamIndex < 0) {
            LOG(LogError) << "VideoFFmpegComponent::startVideoStream(): "
                             "Couldn't retrieve video stream for file \""
                          << mVideoPath << "\"";
            avformat_close_input(&mFormatContext);
            avformat_free_context(mFormatContext);
            return;
        }

        mVideoStream = mFormatContext->streams[mVideoStreamIndex];
        mVideoWidth = mFormatContext->streams[mVideoStreamIndex]->codecpar->width;
        mVideoHeight = mFormatContext->streams[mVideoStreamIndex]->codecpar->height;

        LOG(LogDebug) << "VideoFFmpegComponent::startVideoStream(): "
                      << "Playing video \"" << mVideoPath << "\" (codec: "
                      << avcodec_get_name(
                             mFormatContext->streams[mVideoStreamIndex]->codecpar->codec_id)
                      << ", decoder: " << (hwDecoding ? "hardware" : "software") << ")";

        if (hwDecoding)
            mSWDecoder = decoderInitHW();
        else
            mSWDecoder = true;

        if (mSWDecoder) {
            // The hardware decoder initialization failed, which can happen for a number of reasons.
            if (hwDecoding) {
                LOG(LogDebug)
                    << "VideoFFmpegComponent::startVideoStream(): Hardware decoding failed, "
                       "falling back to software decoder";
            }

            mVideoCodec =
                const_cast<AVCodec*>(avcodec_find_decoder(mVideoStream->codecpar->codec_id));

            if (!mVideoCodec) {
                LOG(LogError) << "VideoFFmpegComponent::startVideoStream(): "
                                 "Couldn't find a suitable video codec for file \""
                              << mVideoPath << "\"";
                return;
            }

            mVideoCodecContext = avcodec_alloc_context3(mVideoCodec);

            if (!mVideoCodecContext) {
                LOG(LogError) << "VideoFFmpegComponent::startVideoStream(): "
                                 "Couldn't allocate video codec context for file \""
                              << mVideoPath << "\"";
                return;
            }

            if (mVideoCodec->capabilities & AV_CODEC_CAP_TRUNCATED)
                mVideoCodecContext->flags |= AV_CODEC_FLAG_TRUNCATED;

            if (avcodec_parameters_to_context(mVideoCodecContext, mVideoStream->codecpar)) {
                LOG(LogError) << "VideoFFmpegComponent::startVideoStream(): "
                                 "Couldn't fill the video codec context parameters for file \""
                              << mVideoPath << "\"";
                return;
            }

            if (avcodec_open2(mVideoCodecContext, mVideoCodec, nullptr)) {
                LOG(LogError) << "VideoFFmpegComponent::startVideoStream(): "
                                 "Couldn't initialize the video codec context for file \""
                              << mVideoPath << "\"";
                return;
            }
        }

        // Audio stream setup, optional as some videos do not have any audio tracks.
        // Audio can also be disabled per video via the theme configuration.

        if (mPlayAudio) {
            mAudioStreamIndex =
                av_find_best_stream(mFormatContext, AVMEDIA_TYPE_AUDIO, -1, -1, nullptr, 0);

            if (mAudioStreamIndex < 0) {
                LOG(LogDebug) << "VideoFFmpegComponent::startVideoStream(): "
                                 "File does not seem to contain any audio streams";
            }

            if (mAudioStreamIndex >= 0) {
                mAudioStream = mFormatContext->streams[mAudioStreamIndex];
                mAudioCodec =
                    const_cast<AVCodec*>(avcodec_find_decoder(mAudioStream->codecpar->codec_id));

                if (!mAudioCodec) {
                    LOG(LogError) << "Couldn't find a suitable audio codec for file \""
                                  << mVideoPath << "\"";
                    return;
                }

                mAudioCodecContext = avcodec_alloc_context3(mAudioCodec);

                if (mAudioCodec->capabilities & AV_CODEC_CAP_TRUNCATED)
                    mAudioCodecContext->flags |= AV_CODEC_FLAG_TRUNCATED;

                // Some formats want separate stream headers.
                if (mAudioCodecContext->flags & AVFMT_GLOBALHEADER)
                    mAudioCodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

                if (avcodec_parameters_to_context(mAudioCodecContext, mAudioStream->codecpar)) {
                    LOG(LogError) << "VideoFFmpegComponent::startVideoStream(): "
                                     "Couldn't fill the audio codec context parameters for file \""
                                  << mVideoPath << "\"";
                    return;
                }

                if (avcodec_open2(mAudioCodecContext, mAudioCodec, nullptr)) {
                    LOG(LogError) << "VideoFFmpegComponent::startVideoStream(): "
                                     "Couldn't initialize the audio codec context for file \""
                                  << mVideoPath << "\"";
                    return;
                }
            }
        }
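
        // The reciprocal of the average frame rate is the duration of a single frame
        // in seconds.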
        mVideoTimeBase = 1.0l / av_q2d(mVideoStream->avg_frame_rate);

        // Set some reasonable target queue sizes (buffers).
        mVideoTargetQueueSize = static_cast<int>(av_q2d(mVideoStream->avg_frame_rate) / 2.0l);
        if (mAudioStreamIndex >= 0)
            mAudioTargetQueueSize = mAudioStream->codecpar->CHANNELS * 15;
        else
            mAudioTargetQueueSize = 30;
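        // That is, roughly half a second's worth of video frames, and 15 audio frames per
        // channel (or 30 frames when the file has no audio stream).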

        mPacket = av_packet_alloc();
        mVideoFrame = av_frame_alloc();
        mVideoFrameResampled = av_frame_alloc();
        mAudioFrame = av_frame_alloc();
        mAudioFrameResampled = av_frame_alloc();

        // Resize the video surface, which is needed both for the gamelist view and for
        // the video screensaver.
        resize();

        // Calculate pillarbox/letterbox sizes.
        calculateBlackRectangle();

        mFadeIn = 0.0f;
    }
}

void VideoFFmpegComponent::stopVideoPlayer(bool muteAudio)
{
    if (muteAudio)
        muteVideoPlayer();

    mIsPlaying = false;
    mIsActuallyPlaying = false;
    mPaused = false;
    mEndOfVideo = false;
    mTexture.reset();
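
    // Note: the audio stream is presumably only muted when this was the last active video
    // player, so that audio belonging to a video still playing elsewhere isn't cut off.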
    if (mFrameProcessingThread) {
        if (mWindow->getVideoPlayerCount() == 0)
            AudioManager::getInstance().muteStream();
        // Wait for the thread execution to complete.
        mFrameProcessingThread->join();
        mFrameProcessingThread.reset();
        mOutputAudio.clear();
    }

    // Clear the video and audio frame queues.
    std::queue<VideoFrame>().swap(mVideoFrameQueue);
    std::queue<AudioFrame>().swap(mAudioFrameQueue);

    // Clear the audio buffer.
    if (AudioManager::sAudioDevice != 0)
        AudioManager::getInstance().clearStream();

    if (mFormatContext) {
        av_frame_free(&mVideoFrame);
        av_frame_free(&mVideoFrameResampled);
        av_frame_free(&mAudioFrame);
        av_frame_free(&mAudioFrameResampled);
        av_packet_unref(mPacket);
        av_packet_free(&mPacket);
        av_buffer_unref(&mHwContext);
        avcodec_free_context(&mVideoCodecContext);
        avcodec_free_context(&mAudioCodecContext);
        avformat_close_input(&mFormatContext);
        avformat_free_context(mFormatContext);
        mVideoCodecContext = nullptr;
        mAudioCodecContext = nullptr;
        mFormatContext = nullptr;
    }
}

void VideoFFmpegComponent::pauseVideoPlayer()
{
    muteVideoPlayer();
    mPaused = true;
}

void VideoFFmpegComponent::handleLooping()
{
    if (mIsPlaying && mEndOfVideo) {
        // If the screensaver video swap time is set to 0, it means we should
        // skip to the next game when the video has finished playing.
        if (mScreensaverMode &&
            Settings::getInstance()->getInt("ScreensaverSwapVideoTimeout") == 0) {
            mWindow->screensaverTriggerNextGame();
        }
        else {
            stopVideoPlayer();
            startVideoStream();
        }
    }
}

void VideoFFmpegComponent::muteVideoPlayer()
{
    if (AudioManager::sAudioDevice != 0) {
        AudioManager::getInstance().clearStream();
        AudioManager::getInstance().muteStream();
    }
}