Browse Source

Merge pull request #3185

initramfs (26):
      feat(video): redesign and improve VideoFrame class
      fix(video): fix CoreAV and VideoSurface to conform to new VideoFrame
      refactor(video): rename and make the frame alignment property public
      fix(video): fix memory leak caused by unfreed buffers in CoreVideoSource
      fix(video): fix slanted video when video size is not divisible by 8
      refactor(video): use a new ToxAVFrame structure instead of vpx_image
      refactor(video): static cast video dimensions to suppress warnings
      feat(video): adds an ID parameter to the VideoSource class
      refactor(video): internalize frame reference counting
      feat(video): add accessor functions for sourceID and frameID
      refactor(video): make type aliases public
      refactor(video): use generics to simplify VideoFrame conversion functions
      refactor(video): rename ToxAVFrame to ToxYUVFrame and add documentation
      refactor(video): update documentation to match new format (issue #3559)
      refactor(videoframe): correct mistakes in commit documentation format
      fix(video): fix a use-after-free with VideoFrame
      fix(video): added declaration for missing biglock in CameraSource
      docs(video): remove old unnecessary comment pertaining to removed code
      fix(video): fix invalid VideoSource ID allocation
      fix(video): specify color ranges for pixel formats that are not YUV
      fix(video): use a QReadWriteLock to manage camera access
      fix(video): force the use of non-deprecated pixel formats for YUV
      refactor(video): update code and documentation to honour QSize validity
      refactor(videoframe): move all inline/template functions into source
      fix(video): guard storeVideoFrame() against freeing in-use memory
      feat(video): add an isValid() function to ToxYUVFrame
reviewable/pr3602/r1
sudden6 9 years ago
parent
commit
2045585c77
No known key found for this signature in database
GPG Key ID: 279509B499E032B9
  1. 1
      qtox.pro
  2. 14
      src/core/coreav.cpp
  3. 121
      src/video/camerasource.cpp
  4. 6
      src/video/camerasource.h
  5. 22
      src/video/corevideosource.cpp
  6. 890
      src/video/videoframe.cpp
  7. 138
      src/video/videoframe.h
  8. 31
      src/video/videosource.cpp
  9. 39
      src/video/videosource.h
  10. 2
      src/video/videosurface.cpp

1
qtox.pro

@ -415,6 +415,7 @@ SOURCES += \
src/persistence/db/rawdatabase.cpp \ src/persistence/db/rawdatabase.cpp \
src/persistence/history.cpp \ src/persistence/history.cpp \
src/video/videoframe.cpp \ src/video/videoframe.cpp \
src/video/videosource.cpp \
src/video/cameradevice.cpp \ src/video/cameradevice.cpp \
src/video/camerasource.cpp \ src/video/camerasource.cpp \
src/video/corevideosource.cpp \ src/video/corevideosource.cpp \

14
src/core/coreav.cpp

@ -360,12 +360,10 @@ void CoreAV::sendCallVideo(uint32_t callId, std::shared_ptr<VideoFrame> vframe)
call.nullVideoBitrate = false; call.nullVideoBitrate = false;
} }
// This frame shares vframe's buffers, we don't call vpx_img_free but just delete it ToxYUVFrame frame = vframe->toToxYUVFrame();
vpx_image* frame = vframe->toVpxImage();
if (frame->fmt == VPX_IMG_FMT_NONE) if(!frame)
{ {
qWarning() << "Invalid frame";
vpx_img_free(frame);
return; return;
} }
@ -374,8 +372,8 @@ void CoreAV::sendCallVideo(uint32_t callId, std::shared_ptr<VideoFrame> vframe)
TOXAV_ERR_SEND_FRAME err; TOXAV_ERR_SEND_FRAME err;
int retries = 0; int retries = 0;
do { do {
if (!toxav_video_send_frame(toxav, callId, frame->d_w, frame->d_h, if (!toxav_video_send_frame(toxav, callId, frame.width, frame.height,
frame->planes[0], frame->planes[1], frame->planes[2], &err)) frame.y, frame.u, frame.v, &err))
{ {
if (err == TOXAV_ERR_SEND_FRAME_SYNC) if (err == TOXAV_ERR_SEND_FRAME_SYNC)
{ {
@ -390,8 +388,6 @@ void CoreAV::sendCallVideo(uint32_t callId, std::shared_ptr<VideoFrame> vframe)
} while (err == TOXAV_ERR_SEND_FRAME_SYNC && retries < 5); } while (err == TOXAV_ERR_SEND_FRAME_SYNC && retries < 5);
if (err == TOXAV_ERR_SEND_FRAME_SYNC) if (err == TOXAV_ERR_SEND_FRAME_SYNC)
qDebug() << "toxav_video_send_frame error: Lock busy, dropping frame"; qDebug() << "toxav_video_send_frame error: Lock busy, dropping frame";
vpx_img_free(frame);
} }
void CoreAV::micMuteToggle(uint32_t callId) void CoreAV::micMuteToggle(uint32_t callId)

121
src/video/camerasource.cpp

@ -23,7 +23,8 @@ extern "C" {
#include <libavformat/avformat.h> #include <libavformat/avformat.h>
#include <libswscale/swscale.h> #include <libswscale/swscale.h>
} }
#include <QMutexLocker> #include <QWriteLocker>
#include <QReadLocker>
#include <QDebug> #include <QDebug>
#include <QtConcurrent/QtConcurrentRun> #include <QtConcurrent/QtConcurrentRun>
#include <memory> #include <memory>
@ -74,9 +75,6 @@ open but the device closed if there are zero subscribers.
@var QMutex CameraSource::biglock @var QMutex CameraSource::biglock
@brief True when locked. Faster than mutexes for video decoding. @brief True when locked. Faster than mutexes for video decoding.
@var QMutex CameraSource::freelistLock
@brief True when locked. Faster than mutexes for video decoding.
@var std::atomic_bool CameraSource::streamBlocker @var std::atomic_bool CameraSource::streamBlocker
@brief Holds the streaming thread still when true @brief Holds the streaming thread still when true
@ -141,12 +139,10 @@ void CameraSource::open(const QString& deviceName)
void CameraSource::open(const QString& DeviceName, VideoMode Mode) void CameraSource::open(const QString& DeviceName, VideoMode Mode)
{ {
streamBlocker = true; QWriteLocker locker{&streamMutex};
QMutexLocker l{&biglock};
if (DeviceName == deviceName && Mode == mode) if (DeviceName == deviceName && Mode == mode)
{ {
streamBlocker = false;
return; return;
} }
@ -159,8 +155,6 @@ void CameraSource::open(const QString& DeviceName, VideoMode Mode)
if (subscriptions && _isOpen) if (subscriptions && _isOpen)
openDevice(); openDevice();
streamBlocker = false;
} }
/** /**
@ -180,20 +174,15 @@ bool CameraSource::isOpen()
CameraSource::~CameraSource() CameraSource::~CameraSource()
{ {
QMutexLocker l{&biglock}; QWriteLocker locker{&streamMutex};
if (!_isOpen) if (!_isOpen)
{
return; return;
}
// Free all remaining VideoFrame // Free all remaining VideoFrame
// Locking must be done precisely this way to avoid races VideoFrame::untrackFrames(id, true);
for (int i = 0; i < freelist.size(); i++)
{
std::shared_ptr<VideoFrame> vframe = freelist[i].lock();
if (!vframe)
continue;
vframe->releaseFrame();
}
if (cctx) if (cctx)
avcodec_free_context(&cctx); avcodec_free_context(&cctx);
@ -208,9 +197,7 @@ CameraSource::~CameraSource()
device = nullptr; device = nullptr;
} }
// Memfence so the stream thread sees a nullptr device locker.unlock();
std::atomic_thread_fence(std::memory_order_release);
l.unlock();
// Synchronize with our stream thread // Synchronize with our stream thread
while (streamFuture.isRunning()) while (streamFuture.isRunning())
@ -219,7 +206,7 @@ CameraSource::~CameraSource()
bool CameraSource::subscribe() bool CameraSource::subscribe()
{ {
QMutexLocker l{&biglock}; QWriteLocker locker{&streamMutex};
if (!_isOpen) if (!_isOpen)
{ {
@ -238,18 +225,13 @@ bool CameraSource::subscribe()
device = nullptr; device = nullptr;
cctx = cctxOrig = nullptr; cctx = cctxOrig = nullptr;
videoStreamIndex = -1; videoStreamIndex = -1;
// Memfence so the stream thread sees a nullptr device
std::atomic_thread_fence(std::memory_order_release);
return false; return false;
} }
} }
void CameraSource::unsubscribe() void CameraSource::unsubscribe()
{ {
streamBlocker = true; QWriteLocker locker{&streamMutex};
QMutexLocker l{&biglock};
streamBlocker = false;
if (!_isOpen) if (!_isOpen)
{ {
@ -266,11 +248,6 @@ void CameraSource::unsubscribe()
if (subscriptions - 1 == 0) if (subscriptions - 1 == 0)
{ {
closeDevice(); closeDevice();
l.unlock();
// Synchronize with our stream thread
while (streamFuture.isRunning())
QThread::yieldCurrentThread();
} }
else else
{ {
@ -372,20 +349,10 @@ bool CameraSource::openDevice()
*/ */
void CameraSource::closeDevice() void CameraSource::closeDevice()
{ {
qDebug() << "Closing device "<<deviceName; qDebug() << "Closing device " << deviceName;
// Free all remaining VideoFrame // Free all remaining VideoFrame
// Locking must be done precisely this way to avoid races VideoFrame::untrackFrames(id, true);
for (int i = 0; i < freelist.size(); i++)
{
std::shared_ptr<VideoFrame> vframe = freelist[i].lock();
if (!vframe)
continue;
vframe->releaseFrame();
}
freelist.clear();
freelist.squeeze();
// Free our resources and close the device // Free our resources and close the device
videoStreamIndex = -1; videoStreamIndex = -1;
@ -394,8 +361,6 @@ void CameraSource::closeDevice()
cctxOrig = nullptr; cctxOrig = nullptr;
while (device && !device->close()) {} while (device && !device->close()) {}
device = nullptr; device = nullptr;
// Memfence so the stream thread sees a nullptr device
std::atomic_thread_fence(std::memory_order_release);
} }
/** /**
@ -410,8 +375,6 @@ void CameraSource::stream()
if (!frame) if (!frame)
return; return;
frame->opaque = nullptr;
AVPacket packet; AVPacket packet;
if (av_read_frame(device->context, &packet) < 0) if (av_read_frame(device->context, &packet) < 0)
return; return;
@ -425,14 +388,8 @@ void CameraSource::stream()
if (!frameFinished) if (!frameFinished)
return; return;
freelistLock.lock(); VideoFrame* vframe = new VideoFrame(id, frame);
emit frameAvailable(vframe->trackFrame());
int freeFreelistSlot = getFreelistSlotLockless();
auto frameFreeCb = std::bind(&CameraSource::freelistCallback, this, freeFreelistSlot);
std::shared_ptr<VideoFrame> vframe = std::make_shared<VideoFrame>(frame, frameFreeCb);
freelist.append(vframe);
freelistLock.unlock();
emit frameAvailable(vframe);
} }
// Free the packet that was allocated by av_read_frame // Free the packet that was allocated by av_read_frame
@ -441,56 +398,14 @@ void CameraSource::stream()
forever forever
{ {
biglock.lock(); QReadLocker locker{&streamMutex};
// When a thread makes device null, it releases it, so we acquire here // Exit if device is no longer valid
std::atomic_thread_fence(std::memory_order_acquire); if(!device)
if (!device)
{ {
biglock.unlock(); break;
return;
} }
streamLoop(); streamLoop();
// Give a chance to other functions to pick up the lock if needed
biglock.unlock();
while (streamBlocker)
QThread::yieldCurrentThread();
QThread::yieldCurrentThread();
} }
} }
/**
@brief CameraSource::freelistCallback
@param freelistIndex
All VideoFrames must be deleted or released before we can close the device
or the device will forcibly free them, and then ~VideoFrame() will double free.
In theory very careful coding from our users could ensure all VideoFrames
die before unsubscribing, even the ones currently in flight in the metatype system.
But that's just asking for trouble and mysterious crashes, so we'll just
maintain a freelist and have all VideoFrames tell us when they die so we can forget them.
*/
void CameraSource::freelistCallback(int freelistIndex)
{
QMutexLocker l{&freelistLock};
freelist[freelistIndex].reset();
}
/**
@brief Get the index of a free slot in the freelist.
@note Callers must hold the freelistLock.
@return Index of a free slot.
*/
int CameraSource::getFreelistSlotLockless()
{
int size = freelist.size();
for (int i = 0; i < size; ++i)
if (freelist[i].expired())
return i;
freelist.resize(size + (size>>1) + 4); // Arbitrary growth strategy, should work well
return size;
}

6
src/video/camerasource.h

@ -24,6 +24,7 @@
#include <QString> #include <QString>
#include <QFuture> #include <QFuture>
#include <QVector> #include <QVector>
#include <QReadWriteLock>
#include <atomic> #include <atomic>
#include "src/video/videosource.h" #include "src/video/videosource.h"
#include "src/video/videomode.h" #include "src/video/videomode.h"
@ -55,20 +56,17 @@ private:
CameraSource(); CameraSource();
~CameraSource(); ~CameraSource();
void stream(); void stream();
void freelistCallback(int freelistIndex);
int getFreelistSlotLockless();
bool openDevice(); bool openDevice();
void closeDevice(); void closeDevice();
private: private:
QVector<std::weak_ptr<VideoFrame>> freelist;
QFuture<void> streamFuture; QFuture<void> streamFuture;
QString deviceName; QString deviceName;
CameraDevice* device; CameraDevice* device;
VideoMode mode; VideoMode mode;
AVCodecContext* cctx, *cctxOrig; AVCodecContext* cctx, *cctxOrig;
int videoStreamIndex; int videoStreamIndex;
QMutex biglock, freelistLock; QReadWriteLock streamMutex;
std::atomic_bool _isOpen; std::atomic_bool _isOpen;
std::atomic_bool streamBlocker; std::atomic_bool streamBlocker;
std::atomic_int subscriptions; std::atomic_int subscriptions;

22
src/video/corevideosource.cpp

@ -70,22 +70,19 @@ void CoreVideoSource::pushFrame(const vpx_image_t* vpxframe)
AVFrame* avframe = av_frame_alloc(); AVFrame* avframe = av_frame_alloc();
if (!avframe) if (!avframe)
return; return;
avframe->width = width; avframe->width = width;
avframe->height = height; avframe->height = height;
avframe->format = AV_PIX_FMT_YUV420P; avframe->format = AV_PIX_FMT_YUV420P;
int imgBufferSize = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, width, height, 1); int bufSize = av_image_alloc(avframe->data, avframe->linesize,
uint8_t* buf = (uint8_t*)av_malloc(imgBufferSize); width, height,
if (!buf) static_cast<AVPixelFormat>(AV_PIX_FMT_YUV420P), VideoFrame::dataAlignment);
{
if(bufSize < 0){
av_frame_free(&avframe); av_frame_free(&avframe);
return; return;
} }
avframe->opaque = buf;
uint8_t** data = avframe->data;
int* linesize = avframe->linesize;
av_image_fill_arrays(data, linesize, buf, AV_PIX_FMT_YUV420P, width, height, 1);
for (int i = 0; i < 3; i++) for (int i = 0; i < 3; i++)
{ {
@ -96,14 +93,13 @@ void CoreVideoSource::pushFrame(const vpx_image_t* vpxframe)
for (int j = 0; j < size; j++) for (int j = 0; j < size; j++)
{ {
uint8_t *dst = avframe->data[i] + dstStride * j; uint8_t* dst = avframe->data[i] + dstStride * j;
uint8_t *src = vpxframe->planes[i] + srcStride * j; uint8_t* src = vpxframe->planes[i] + srcStride * j;
memcpy(dst, src, minStride); memcpy(dst, src, minStride);
} }
} }
vframe = std::make_shared<VideoFrame>(avframe); vframe = std::make_shared<VideoFrame>(id, avframe, true);
emit frameAvailable(vframe); emit frameAvailable(vframe);
} }

890
src/video/videoframe.cpp

@ -17,294 +17,790 @@
along with qTox. If not, see <http://www.gnu.org/licenses/>. along with qTox. If not, see <http://www.gnu.org/licenses/>.
*/ */
#include <iostream> #include "videoframe.h"
#include <QMutexLocker> extern "C"{
#include <QDebug>
#include <vpx/vpx_image.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/imgutils.h> #include <libavutil/imgutils.h>
#include <libswscale/swscale.h> #include <libswscale/swscale.h>
} }
#include "videoframe.h"
#include "camerasource.h"
/** /**
@class VideoFrame * @struct ToxYUVFrame
* @brief A simple structure to represent a ToxYUV video frame (corresponds to a frame encoded
VideoFrame takes ownership of an AVFrame* and allows fast conversions to other formats * under format: AV_PIX_FMT_YUV420P [FFmpeg] or VPX_IMG_FMT_I420 [WebM]).
Ownership of all video frame buffers is kept by the VideoFrame, even after conversion *
All references to the frame data become invalid when the VideoFrame is deleted * This structure exists for convenience and code clarity when ferrying YUV420 frames from one
We try to avoid pixel format conversions as much as possible, at the cost of some memory * source to another. The buffers pointed to by the struct should not be owned by the struct nor
All methods are thread-safe. If provided freelistCallback will be called by the destructor, * should they be freed from the struct, instead this struct functions only as a simple alias to a
unless releaseFrame was called in between. * more complicated frame container like AVFrame.
*/ *
* The creation of this structure was done to replace existing code which mis-used vpx_image
* structs when passing frame data to toxcore.
*
*
* @class VideoFrame
* @brief An ownership and management class for AVFrames.
*
* VideoFrame takes ownership of an AVFrame* and allows fast conversions to other formats.
* Ownership of all video frame buffers is kept by the VideoFrame, even after conversion. All
* references to the frame data become invalid when the VideoFrame is deleted. We try to avoid
* pixel format conversions as much as possible, at the cost of some memory.
*
* Every function in this class is thread safe apart from concurrent construction and deletion of
* the object.
*
* This class uses the phrase "frame alignment" to specify the property that each frame's width is
* equal to it's maximum linesize. Note: this is NOT "data alignment" which specifies how allocated
* buffers are aligned in memory. Though internally the two are related, unless otherwise specified
* all instances of the term "alignment" exposed from public functions refer to frame alignment.
*
* Frame alignment is an important concept because ToxAV does not support frames with linesizes not
* directly equal to the width.
*
*
* @var VideoFrame::dataAlignment
* @brief Data alignment parameter used to populate AVFrame buffers.
*
* This field is public in effort to standardize the data alignment parameter for all AVFrame
* allocations.
*
* It's currently set to 32-byte alignment for AVX2 support.
*
*
* @class FrameBufferKey
* @brief A class representing a structure that stores frame properties to be used as the key
* value for a std::unordered_map.
*/
// Initialize static fields
VideoFrame::AtomicIDType VideoFrame::frameIDs {0};
std::unordered_map<VideoFrame::IDType, QMutex> VideoFrame::mutexMap {};
std::unordered_map<VideoFrame::IDType, std::unordered_map<VideoFrame::IDType, std::weak_ptr<VideoFrame>>> VideoFrame::refsMap {};
QReadWriteLock VideoFrame::refsLock {};
VideoFrame::VideoFrame(AVFrame* frame, int w, int h, int fmt, std::function<void()> freelistCallback) /**
: freelistCallback{freelistCallback}, * @brief Constructs a new instance of a VideoFrame, sourced by a given AVFrame pointer.
frameOther{nullptr}, frameYUV420{nullptr}, frameRGB24{nullptr}, *
width{w}, height{h}, pixFmt{fmt} * @param sourceID the VideoSource's ID to track the frame under.
* @param sourceFrame the source AVFrame pointer to use, must be valid.
* @param dimensions the dimensions of the AVFrame, obtained from the AVFrame if not given.
* @param pixFmt the pixel format of the AVFrame, obtained from the AVFrame if not given.
* @param freeSourceFrame whether to free the source frame buffers or not.
*/
VideoFrame::VideoFrame(IDType sourceID, AVFrame* sourceFrame, QRect dimensions, int pixFmt, bool freeSourceFrame)
: frameID(frameIDs++),
sourceID(sourceID),
sourceDimensions(dimensions),
sourceFrameKey(getFrameKey(dimensions.size(), pixFmt, sourceFrame->linesize[0])),
freeSourceFrame(freeSourceFrame)
{ {
// Silences pointless swscale warning spam
// See libswscale/utils.c:1153 @ 74f0bd3 // We override the pixel format in the case a deprecated one is used
frame->color_range = AVCOL_RANGE_MPEG; switch(pixFmt)
if (pixFmt == AV_PIX_FMT_YUVJ420P) {
pixFmt = AV_PIX_FMT_YUV420P; case AV_PIX_FMT_YUVJ420P:
else if (pixFmt == AV_PIX_FMT_YUVJ411P) {
pixFmt = AV_PIX_FMT_YUV411P; sourcePixelFormat = AV_PIX_FMT_YUV420P;
else if (pixFmt == AV_PIX_FMT_YUVJ422P) sourceFrame->color_range = AVCOL_RANGE_MPEG;
pixFmt = AV_PIX_FMT_YUV422P; break;
else if (pixFmt == AV_PIX_FMT_YUVJ444P)
pixFmt = AV_PIX_FMT_YUV444P;
else if (pixFmt == AV_PIX_FMT_YUVJ440P)
pixFmt = AV_PIX_FMT_YUV440P;
else
frame->color_range = AVCOL_RANGE_UNSPECIFIED;
if (pixFmt == AV_PIX_FMT_YUV420P) {
frameYUV420 = frame;
} else if (pixFmt == AV_PIX_FMT_RGB24) {
frameRGB24 = frame;
} else {
frameOther = frame;
} }
}
VideoFrame::VideoFrame(AVFrame* frame, std::function<void()> freelistCallback) case AV_PIX_FMT_YUVJ411P:
: VideoFrame{frame, frame->width, frame->height, frame->format, freelistCallback} {
{ sourcePixelFormat = AV_PIX_FMT_YUV411P;
sourceFrame->color_range = AVCOL_RANGE_MPEG;
break;
}
case AV_PIX_FMT_YUVJ422P:
{
sourcePixelFormat = AV_PIX_FMT_YUV422P;
sourceFrame->color_range = AVCOL_RANGE_MPEG;
break;
}
case AV_PIX_FMT_YUVJ444P:
{
sourcePixelFormat = AV_PIX_FMT_YUV444P;
sourceFrame->color_range = AVCOL_RANGE_MPEG;
break;
}
case AV_PIX_FMT_YUVJ440P:
{
sourcePixelFormat = AV_PIX_FMT_YUV440P;
sourceFrame->color_range = AVCOL_RANGE_MPEG;
break;
}
default:{
sourcePixelFormat = pixFmt;
sourceFrame->color_range = AVCOL_RANGE_UNSPECIFIED;
}
}
frameBuffer[sourceFrameKey] = sourceFrame;
} }
VideoFrame::VideoFrame(AVFrame* frame) VideoFrame::VideoFrame(IDType sourceID, AVFrame* sourceFrame, bool freeSourceFrame)
: VideoFrame{frame, frame->width, frame->height, frame->format, nullptr} : VideoFrame(sourceID, sourceFrame, QRect {0, 0, sourceFrame->width, sourceFrame->height}, sourceFrame->format, freeSourceFrame){}
/**
* @brief Destructor for VideoFrame.
*/
VideoFrame::~VideoFrame()
{ {
// Release frame
frameLock.lockForWrite();
deleteFrameBuffer();
frameLock.unlock();
// Delete tracked reference
refsLock.lockForRead();
if(refsMap.count(sourceID) > 0)
{
QMutex& sourceMutex = mutexMap[sourceID];
sourceMutex.lock();
refsMap[sourceID].erase(frameID);
sourceMutex.unlock();
}
refsLock.unlock();
} }
/** /**
@brief VideoFrame constructor. Disable copy. * @brief Returns the validity of this VideoFrame.
@note Use a shared_ptr if you need copies. *
*/ * A VideoFrame is valid if it manages at least one AVFrame. A VideoFrame can be invalidated
VideoFrame::~VideoFrame() * by calling releaseFrame() on it.
*
* @return true if the VideoFrame is valid, false otherwise.
*/
bool VideoFrame::isValid()
{ {
if (freelistCallback) frameLock.lockForRead();
freelistCallback(); bool retValue = frameBuffer.size() > 0;
frameLock.unlock();
releaseFrameLockless(); return retValue;
} }
/** /**
@brief Converts the VideoFrame to a QImage that shares our internal video buffer. * @brief Causes the VideoFrame class to maintain an internal reference for the frame.
@param size Size of resulting image. *
@return Converted image to RGB24 color model. * The internal reference is managed via a std::weak_ptr such that it doesn't inhibit
*/ * destruction of the object once all external references are no longer reachable.
QImage VideoFrame::toQImage(QSize size) *
* @return a std::shared_ptr holding a reference to this frame.
*/
std::shared_ptr<VideoFrame> VideoFrame::trackFrame()
{ {
if (!convertToRGB24(size)) // Add frame to tracked reference list
return QImage(); refsLock.lockForRead();
if(refsMap.count(sourceID) == 0)
{
// We need to add a new source to our reference map, obtain write lock
refsLock.unlock();
refsLock.lockForWrite();
}
QMutex& sourceMutex = mutexMap[sourceID];
sourceMutex.lock();
std::shared_ptr<VideoFrame> ret {this};
refsMap[sourceID][frameID] = ret;
QMutexLocker locker(&biglock); sourceMutex.unlock();
refsLock.unlock();
return QImage(*frameRGB24->data, frameRGB24->width, frameRGB24->height, *frameRGB24->linesize, QImage::Format_RGB888); return ret;
} }
/** /**
@brief Converts the VideoFrame to a vpx_image_t. * @brief Untracks all the frames for the given VideoSource, releasing them if specified.
Converts the VideoFrame to a vpx_image_t that shares our internal video buffer. *
@return Converted image to vpx_image format. * This function causes all internally tracked frames for the given VideoSource to be dropped.
*/ * If the releaseFrames option is set to true, the frames are sequentially released on the
vpx_image *VideoFrame::toVpxImage() * caller's thread in an unspecified order.
*
* @param sourceID the ID of the VideoSource to untrack frames from.
* @param releaseFrames true to release the frames as necessary, false otherwise. Defaults to
* false.
*/
void VideoFrame::untrackFrames(const VideoFrame::IDType& sourceID, bool releaseFrames)
{ {
vpx_image* img = vpx_img_alloc(nullptr, VPX_IMG_FMT_I420, width, height, 0); refsLock.lockForWrite();
if(refsMap.count(sourceID) == 0)
{
// No tracking reference exists for source, simply return
refsLock.unlock();
if (!convertToYUV420()) return;
return img; }
for (int i = 0; i < 3; i++) if(releaseFrames)
{ {
int dstStride = img->stride[i]; QMutex& sourceMutex = mutexMap[sourceID];
int srcStride = frameYUV420->linesize[i];
int minStride = std::min(dstStride, srcStride); sourceMutex.lock();
int size = (i == 0) ? img->d_h : img->d_h / 2;
for (int j = 0; j < size; j++) for(auto& frameIterator : refsMap[sourceID])
{ {
uint8_t *dst = img->planes[i] + dstStride * j; std::shared_ptr<VideoFrame> frame = frameIterator.second.lock();
uint8_t *src = frameYUV420->data[i] + srcStride * j;
memcpy(dst, src, minStride); if(frame)
{
frame->releaseFrame();
}
} }
sourceMutex.unlock();
} }
return img;
refsMap[sourceID].clear();
mutexMap.erase(sourceID);
refsMap.erase(sourceID);
refsLock.unlock();
} }
bool VideoFrame::convertToRGB24(QSize size) /**
* @brief Releases all frames managed by this VideoFrame and invalidates it.
*/
void VideoFrame::releaseFrame()
{ {
QMutexLocker locker(&biglock); frameLock.lockForWrite();
AVFrame* sourceFrame; deleteFrameBuffer();
if (frameOther)
{ frameLock.unlock();
sourceFrame = frameOther; }
}
else if (frameYUV420) /**
* @brief Retrieves an AVFrame derived from the source based on the given parameters.
*
* If a given frame does not exist, this function will perform appropriate conversions to
* return a frame that fulfills the given parameters.
*
* @param frameSize the dimensions of the frame to get. Defaults to source frame size if frameSize
* is invalid.
* @param pixelFormat the desired pixel format of the frame.
* @param requireAligned true if the returned frame must be frame aligned, false if not.
* @return a pointer to a AVFrame with the given parameters or nullptr if the VideoFrame is no
* longer valid.
*/
const AVFrame* VideoFrame::getAVFrame(QSize frameSize, const int pixelFormat, const bool requireAligned)
{
if(!frameSize.isValid())
{ {
sourceFrame = frameYUV420; frameSize = sourceDimensions.size();
} }
else
// Since we are retrieving the AVFrame* directly, we merely need to pass the arguement through
const std::function<AVFrame*(AVFrame* const)> converter = [](AVFrame* const frame)
{ {
qWarning() << "None of the frames are valid! Did someone release us?"; return frame;
return false; };
}
//std::cout << "converting to RGB24" << std::endl; // We need an explicit null pointer holding object to pass to toGenericObject()
AVFrame* nullPointer = nullptr;
if (size.isEmpty()) // Returns std::nullptr case of invalid generation
return toGenericObject(frameSize, pixelFormat, requireAligned, converter, nullPointer);
}
/**
* @brief Converts this VideoFrame to a QImage that shares this VideoFrame's buffer.
*
* The VideoFrame will be scaled into the RGB24 pixel format along with the given
* dimension.
*
* @param frameSize the given frame size of QImage to generate. Defaults to source frame size if
* frameSize is invalid.
* @return a QImage that represents this VideoFrame, sharing it's buffers or a null image if
* this VideoFrame is no longer valid.
*/
QImage VideoFrame::toQImage(QSize frameSize)
{
if(!frameSize.isValid())
{ {
size.setWidth(sourceFrame->width); frameSize = sourceDimensions.size();
size.setHeight(sourceFrame->height);
} }
if (frameRGB24) // Converter function (constructs QImage out of AVFrame*)
const std::function<QImage(AVFrame* const)> converter = [&](AVFrame* const frame)
{ {
if (frameRGB24->width == size.width() && frameRGB24->height == size.height()) return QImage {*(frame->data), frameSize.width(), frameSize.height(), *(frame->linesize), QImage::Format_RGB888};
return true; };
av_free(frameRGB24->opaque); // Returns an empty constructed QImage in case of invalid generation
av_frame_unref(frameRGB24); return toGenericObject(frameSize, AV_PIX_FMT_RGB24, false, converter, QImage {});
av_frame_free(&frameRGB24); }
}
frameRGB24=av_frame_alloc(); /**
if (!frameRGB24) * @brief Converts this VideoFrame to a ToxAVFrame that shares this VideoFrame's buffer.
*
* The given ToxAVFrame will be frame aligned under a pixel format of planar YUV with a chroma
* subsampling format of 4:2:0 (i.e. AV_PIX_FMT_YUV420P).
*
* @param frameSize the given frame size of ToxAVFrame to generate. Defaults to source frame size
* if frameSize is invalid.
* @return a ToxAVFrame structure that represents this VideoFrame, sharing it's buffers or an
* empty structure if this VideoFrame is no longer valid.
*/
ToxYUVFrame VideoFrame::toToxYUVFrame(QSize frameSize)
{
if(!frameSize.isValid())
{ {
qCritical() << "av_frame_alloc failed"; frameSize = sourceDimensions.size();
return false;
} }
int imgBufferSize = av_image_get_buffer_size(AV_PIX_FMT_RGB24, size.width(), size.height(), 1); // Converter function (constructs ToxAVFrame out of AVFrame*)
uint8_t* buf = (uint8_t*)av_malloc(imgBufferSize); const std::function<ToxYUVFrame(AVFrame* const)> converter = [&](AVFrame* const frame)
if (!buf)
{ {
qCritical() << "av_malloc failed"; ToxYUVFrame ret
av_frame_free(&frameRGB24); {
return false; static_cast<std::uint16_t>(frameSize.width()),
} static_cast<std::uint16_t>(frameSize.height()),
frameRGB24->opaque = buf; frame->data[0], frame->data[1], frame->data[2]
};
uint8_t** data = frameRGB24->data; return ret;
int* linesize = frameRGB24->linesize; };
av_image_fill_arrays(data, linesize, buf, AV_PIX_FMT_RGB24, size.width(), size.height(), 1);
frameRGB24->width = size.width();
frameRGB24->height = size.height();
// Bilinear is better for shrinking, bicubic better for upscaling return toGenericObject(frameSize, AV_PIX_FMT_YUV420P, true, converter, ToxYUVFrame {0, 0, nullptr, nullptr, nullptr});
int resizeAlgo = size.width()<=width ? SWS_BILINEAR : SWS_BICUBIC; }
SwsContext *swsCtx = sws_getContext(width, height, (AVPixelFormat)pixFmt, /**
size.width(), size.height(), AV_PIX_FMT_RGB24, * @brief Returns the ID for the given frame.
resizeAlgo, nullptr, nullptr, nullptr); *
sws_scale(swsCtx, (uint8_t const * const *)sourceFrame->data, * Frame IDs are globally unique (with respect to the running instance).
sourceFrame->linesize, 0, height, *
frameRGB24->data, frameRGB24->linesize); * @return an integer representing the ID of this frame.
sws_freeContext(swsCtx); */
VideoFrame::IDType VideoFrame::getFrameID() const
{
return frameID;
}
/**
* @brief Returns the ID for the VideoSource which created this frame.
*
* @return an integer representing the ID of the VideoSource which created this frame.
*/
VideoFrame::IDType VideoFrame::getSourceID() const
{
return sourceID;
}
/**
* @brief Retrieves a copy of the source VideoFrame's dimensions.
*
* @return QRect copy representing the source VideoFrame's dimensions.
*/
QRect VideoFrame::getSourceDimensions() const
{
return sourceDimensions;
}
/**
* @brief Retrieves a copy of the source VideoFormat's pixel format.
*
* @return integer copy representing the source VideoFrame's pixel format.
*/
int VideoFrame::getSourcePixelFormat() const
{
return sourcePixelFormat;
}
/**
* @brief Constructs a new FrameBufferKey with the given attributes.
*
* @param width the width of the frame.
* @param height the height of the frame.
* @param pixFmt the pixel format of the frame.
* @param lineAligned whether the linesize matches the width of the image.
*/
VideoFrame::FrameBufferKey::FrameBufferKey(const int pixFmt, const int width, const int height, const bool lineAligned)
: frameWidth(width),
frameHeight(height),
pixelFormat(pixFmt),
linesizeAligned(lineAligned){}
/**
* @brief Comparison operator for FrameBufferKey.
*
* @param other instance to compare against.
* @return true if instances are equivalent, false otherwise.
*/
bool VideoFrame::FrameBufferKey::operator==(const FrameBufferKey& other) const
{
return pixelFormat == other.pixelFormat &&
frameWidth == other.frameWidth &&
frameHeight == other.frameHeight &&
linesizeAligned == other.linesizeAligned;
}
/**
* @brief Not equal to operator for FrameBufferKey.
*
* @param other instance to compare against
* @return true if instances are not equivalent, false otherwise.
*/
bool VideoFrame::FrameBufferKey::operator!=(const FrameBufferKey& other) const
{
return !operator==(other);
}
/**
 * @brief Hash function for FrameBufferKey.
 *
 * Computes a hash value for use with std::unordered_map.
 *
 * @param key the given instance to compute the hash value of.
 * @return the hash of the given instance.
 */
size_t VideoFrame::FrameBufferKey::hash(const FrameBufferKey& key)
{
    const std::hash<int> intHash;
    const std::hash<bool> boolHash;

    // Java-style field combination: acc = 37 * acc + hash(field)
    // See: https://en.wikipedia.org/wiki/Java_hashCode%28%29#hashCode.28.29_in_general
    size_t acc = 47;

    acc = 37 * acc + intHash(key.frameWidth);
    acc = 37 * acc + intHash(key.frameHeight);
    acc = 37 * acc + intHash(key.pixelFormat);
    acc = 37 * acc + boolHash(key.linesizeAligned);

    return acc;
}
bool VideoFrame::convertToYUV420() /**
* @brief Generates a key object based on given parameters.
*
* @param frameSize the given size of the frame.
* @param pixFmt the pixel format of the frame.
* @param linesize the maximum linesize of the frame, may be larger than the width.
* @return a FrameBufferKey object representing the key for the frameBuffer map.
*/
VideoFrame::FrameBufferKey VideoFrame::getFrameKey(const QSize& frameSize, const int pixFmt, const int linesize)
{ {
QMutexLocker locker(&biglock); return getFrameKey(frameSize, pixFmt, frameSize.width() == linesize);
}
if (frameYUV420) /**
return true; * @brief Generates a key object based on given parameters.
*
* @param frameSize the given size of the frame.
* @param pixFmt the pixel format of the frame.
* @param frameAligned true if the frame is aligned, false otherwise.
* @return a FrameBufferKey object representing the key for the frameBuffer map.
*/
VideoFrame::FrameBufferKey VideoFrame::getFrameKey(const QSize& frameSize, const int pixFmt, const bool frameAligned)
{
return {frameSize.width(), frameSize.height(), pixFmt, frameAligned};
}
/**
 * @brief Retrieves an AVFrame derived from the source based on the given parameters without
 * obtaining a lock.
 *
 * This function is not thread-safe and must be called from a thread-safe context.
 *
 * Note: this function differs from getAVFrame() in that it returns a nullptr if no frame was
 * found.
 *
 * @param dimensions the dimensions of the frame, must be valid.
 * @param pixelFormat the desired pixel format of the frame.
 * @param requireAligned true if the frame must be frame aligned, false otherwise.
 * @return a pointer to a AVFrame with the given parameters or nullptr if no such frame was
 * found.
 */
AVFrame* VideoFrame::retrieveAVFrame(const QSize& dimensions, const int pixelFormat, const bool requireAligned)
{
    if (!requireAligned)
    {
        /*
         * Attempt to obtain an unaligned frame first, because an unaligned linesize corresponds
         * to a data aligned frame.
         */
        const FrameBufferKey unalignedKey = getFrameKey(dimensions, pixelFormat, false);
        const auto unalignedHit = frameBuffer.find(unalignedKey);

        if (unalignedHit != frameBuffer.end())
            return unalignedHit->second;
    }

    const FrameBufferKey alignedKey = getFrameKey(dimensions, pixelFormat, true);
    const auto alignedHit = frameBuffer.find(alignedKey);

    return alignedHit != frameBuffer.end() ? alignedHit->second : nullptr;
}
/**
 * @brief Generates an AVFrame based on the given specifications.
 *
 * This function is not thread-safe and must be called from a thread-safe context.
 *
 * @param dimensions the required dimensions for the frame, must be valid.
 * @param pixelFormat the required pixel format for the frame.
 * @param requireAligned true if the generated frame needs to be frame aligned, false otherwise.
 * @return an AVFrame with the given specifications, or nullptr on allocation failure.
 */
AVFrame* VideoFrame::generateAVFrame(const QSize& dimensions, const int pixelFormat, const bool requireAligned)
{
    AVFrame* ret = av_frame_alloc();

    if (!ret)
        return nullptr;

    // Populate AVFrame fields
    ret->width = dimensions.width();
    ret->height = dimensions.height();
    ret->format = pixelFormat;

    /*
     * Allocate under dataAlignment only when the dimensions allow the frame to be both
     * data aligned and frame aligned, or when the caller doesn't require frame alignment;
     * otherwise fall back to byte alignment.
     */
    const bool dimensionsAllowAlignment =
        (dimensions.width() % 8 == 0) && (dimensions.height() % 8 == 0);
    const int alignment = (!requireAligned || dimensionsAllowAlignment) ? dataAlignment : 1;

    const int bufSize = av_image_alloc(ret->data, ret->linesize,
                                       dimensions.width(), dimensions.height(),
                                       static_cast<AVPixelFormat>(pixelFormat), alignment);

    if (bufSize < 0)
    {
        av_frame_free(&ret);
        return nullptr;
    }

    // Bilinear is better for shrinking, bicubic better for upscaling
    const int resizeAlgo =
        sourceDimensions.width() > dimensions.width() ? SWS_BILINEAR : SWS_BICUBIC;

    SwsContext* swsCtx = sws_getContext(sourceDimensions.width(), sourceDimensions.height(),
                                        static_cast<AVPixelFormat>(sourcePixelFormat),
                                        dimensions.width(), dimensions.height(),
                                        static_cast<AVPixelFormat>(pixelFormat),
                                        resizeAlgo, nullptr, nullptr, nullptr);

    if (!swsCtx)
    {
        // Release both the image buffer and the frame itself
        av_freep(&ret->data[0]);
        av_frame_unref(ret);
        av_frame_free(&ret);
        return nullptr;
    }

    AVFrame* source = frameBuffer[sourceFrameKey];

    sws_scale(swsCtx, source->data, source->linesize, 0, sourceDimensions.height(),
              ret->data, ret->linesize);
    sws_freeContext(swsCtx);

    return ret;
}
/** /**
@brief Frees all frame memory. * @brief Stores a given AVFrame within the frameBuffer map.
*
Frees all internal buffers and frame data, removes the freelistCallback * As protection against duplicate frames, the storage mechanism will only allow one frame of a
This makes all converted objects that shares our internal buffers invalid. * given type to exist in the frame buffer. Should the given frame type already exist in the frame
*/ * buffer, the given frame will be freed and have it's buffers invalidated. In order to ensure
void VideoFrame::releaseFrame() * correct operation, always replace the frame pointer with the one returned by this function.
*
* As an example:
* @code{.cpp}
* AVFrame* frame = // create AVFrame...
*
* frame = storeAVFrame(frame, dimensions, pixelFormat);
* @endcode
*
* This function is not thread-safe and must be called from a thread-safe context.
*
* @param frame the given frame to store.
* @param dimensions the dimensions of the frame, must be valid.
* @param pixelFormat the pixel format of the frame.
* @return The given AVFrame* or a pre-existing AVFrame* that already exists in the frameBuffer.
*/
AVFrame* VideoFrame::storeAVFrame(AVFrame* frame, const QSize& dimensions, const int pixelFormat)
{ {
QMutexLocker locker(&biglock); FrameBufferKey frameKey = getFrameKey(dimensions, pixelFormat, frame->linesize[0]);
freelistCallback = nullptr;
releaseFrameLockless(); // We check the prescence of the frame in case of double-computation
if(frameBuffer.count(frameKey) > 0)
{
AVFrame* old_ret = frameBuffer[frameKey];
// Free new frame
av_freep(&frame->data[0]);
av_frame_unref(frame);
av_frame_free(&frame);
return old_ret;
}
else
{
frameBuffer[frameKey] = frame;
return frame;
}
} }
void VideoFrame::releaseFrameLockless() /**
* @brief Releases all frames within the frame buffer.
*
* This function is not thread-safe and must be called from a thread-safe context.
*/
void VideoFrame::deleteFrameBuffer()
{ {
if (frameOther) // An empty framebuffer represents a frame that's already been freed
if(frameBuffer.empty()){
return;
}
for(const auto& frameIterator : frameBuffer)
{ {
av_free(frameOther->opaque); AVFrame* frame = frameIterator.second;
av_frame_unref(frameOther);
av_frame_free(&frameOther); // Treat source frame and derived frames separately
if(sourceFrameKey == frameIterator.first)
{
if(freeSourceFrame)
{
av_freep(&frame->data[0]);
}
av_frame_unref(frame);
av_frame_free(&frame);
}
else
{
av_freep(&frame->data[0]);
av_frame_unref(frame);
av_frame_free(&frame);
}
} }
if (frameYUV420)
frameBuffer.clear();
}
/**
 * @brief Converts this VideoFrame to a generic type T based on the given parameters and
 * supplied converter functions.
 *
 * This function is used internally to create various toXObject functions that all follow the
 * same generation pattern (where XObject is some existing type like QImage).
 *
 * In order to create such a type, an object constructor function is required that takes the
 * generated AVFrame object and creates type T out of it. This function additionally requires
 * a null object of type T that represents an invalid/null object for when the generation
 * process fails (e.g. when the VideoFrame is no longer valid).
 *
 * @param dimensions the dimensions of the frame, must be valid.
 * @param pixelFormat the pixel format of the frame.
 * @param requireAligned true if the generated frame needs to be frame aligned, false otherwise.
 * @param objectConstructor a std::function that takes the generated AVFrame and converts it
 * to an object of type T.
 * @param nullObject an object of type T that represents the null/invalid object to be used
 * when the generation process fails.
 */
template <typename T>
T VideoFrame::toGenericObject(const QSize& dimensions, const int pixelFormat, const bool requireAligned,
                              const std::function<T(AVFrame* const)> objectConstructor, const T& nullObject)
{
    frameLock.lockForRead();

    // A VideoFrame with an empty frame buffer is no longer valid; hand back the null object
    if (frameBuffer.size() == 0)
    {
        frameLock.unlock();
        return nullObject;
    }

    AVFrame* frame = retrieveAVFrame(dimensions, static_cast<int>(pixelFormat), requireAligned);

    if (frame)
    {
        T ret = objectConstructor(frame);

        frameLock.unlock();
        return ret;
    }

    // No cached AVFrame matches the spec, so generate one here
    frame = generateAVFrame(dimensions, static_cast<int>(pixelFormat), requireAligned);

    /*
     * "Upgrade" the lock to a write lock so we can update the frameBuffer map.
     *
     * It doesn't matter if another thread obtains the write lock before we finish since it is
     * likely writing to somewhere else. Worst-case scenario, we merely perform the generation
     * process twice, and discard the old result.
     */
    frameLock.unlock();
    frameLock.lockForWrite();

    frame = storeAVFrame(frame, dimensions, static_cast<int>(pixelFormat));

    T ret = objectConstructor(frame);

    frameLock.unlock();
    return ret;
}
// Explicitly instantiate VideoFrame::toGenericObject() for the conversion targets used by this
// class (QImage and ToxYUVFrame), so the template definition can remain in this source file.
template QImage VideoFrame::toGenericObject<QImage>(const QSize& dimensions, const int pixelFormat, const bool requireAligned,
    const std::function<QImage(AVFrame* const)> objectConstructor, const QImage& nullObject);

template ToxYUVFrame VideoFrame::toGenericObject<ToxYUVFrame>(const QSize& dimensions, const int pixelFormat, const bool requireAligned,
    const std::function<ToxYUVFrame(AVFrame* const)> objectConstructor, const ToxYUVFrame& nullObject);
/** /**
@brief Return the size of the original frame * @brief Returns whether the given ToxYUVFrame represents a valid frame or not.
@return The size of the original frame *
*/ * Valid frames are frames in which both width and height are greater than zero.
QSize VideoFrame::getSize() *
* @return true if the frame is valid, false otherwise.
*/
bool ToxYUVFrame::isValid() const
{
return width > 0 && height > 0;
}
/**
 * @brief Checks if the given ToxYUVFrame is valid or not, delegates to isValid().
 */
ToxYUVFrame::operator bool() const
{
    return this->isValid();
}

138
src/video/videoframe.h

@ -20,44 +20,140 @@
#ifndef VIDEOFRAME_H #ifndef VIDEOFRAME_H
#define VIDEOFRAME_H #define VIDEOFRAME_H
#include <QMutex>
#include <QImage> #include <QImage>
#include <QMutex>
#include <QReadWriteLock>
#include <QRect>
#include <QSize>
extern "C"{
#include <libavcodec/avcodec.h>
}
#include <atomic>
#include <cstdint>
#include <functional> #include <functional>
#include <memory>
#include <unordered_map>
/**
 * @brief A read-only view of a YUV frame: plane pointers plus dimensions.
 *
 * NOTE(review): plane layout (presumably YUV420) is not visible here — confirm against the
 * producer of these frames.
 */
struct ToxYUVFrame
{
public:
    bool isValid() const;
    explicit operator bool() const;

    const std::uint16_t width;
    const std::uint16_t height;

    const uint8_t* y;
    const uint8_t* u;
    const uint8_t* v;
};
class VideoFrame class VideoFrame
{ {
public: public:
explicit VideoFrame(AVFrame* frame); // Declare type aliases
VideoFrame(AVFrame* frame, std::function<void()> freelistCallback); using IDType = std::uint_fast64_t;
VideoFrame(AVFrame* frame, int w, int h, int fmt, std::function<void()> freelistCallback); using AtomicIDType = std::atomic_uint_fast64_t;
public:
VideoFrame(IDType sourceID, AVFrame* sourceFrame, QRect dimensions, int pixFmt, bool freeSourceFrame = false);
VideoFrame(IDType sourceID, AVFrame* sourceFrame, bool freeSourceFrame = false);
~VideoFrame(); ~VideoFrame();
QSize getSize(); // Copy/Move operations are disabled for the VideoFrame, encapsulate with a std::shared_ptr to manage.
VideoFrame(const VideoFrame& other) = delete;
VideoFrame(VideoFrame&& other) = delete;
const VideoFrame& operator=(const VideoFrame& other) = delete;
const VideoFrame& operator=(VideoFrame&& other) = delete;
bool isValid();
std::shared_ptr<VideoFrame> trackFrame();
static void untrackFrames(const IDType& sourceID, bool releaseFrames = false);
void releaseFrame(); void releaseFrame();
QImage toQImage(QSize size = QSize()); const AVFrame* getAVFrame(QSize frameSize, const int pixelFormat, const bool requireAligned);
vpx_image* toVpxImage(); QImage toQImage(QSize frameSize = {});
ToxYUVFrame toToxYUVFrame(QSize frameSize = {});
IDType getFrameID() const;
IDType getSourceID() const;
QRect getSourceDimensions() const;
int getSourcePixelFormat() const;
protected: static constexpr int dataAlignment = 32;
bool convertToRGB24(QSize size = QSize());
bool convertToYUV420();
void releaseFrameLockless();
private: private:
VideoFrame(const VideoFrame& other)=delete; class FrameBufferKey{
VideoFrame& operator=(const VideoFrame& other)=delete; public:
FrameBufferKey(const int width, const int height, const int pixFmt, const bool lineAligned);
// Explictly state default constructor/destructor
FrameBufferKey(const FrameBufferKey&) = default;
FrameBufferKey(FrameBufferKey&&) = default;
~FrameBufferKey() = default;
// Assignment operators are disabled for the FrameBufferKey
const FrameBufferKey& operator=(const FrameBufferKey&) = delete;
const FrameBufferKey& operator=(FrameBufferKey&&) = delete;
bool operator==(const FrameBufferKey& other) const;
bool operator!=(const FrameBufferKey& other) const;
static size_t hash(const FrameBufferKey& key);
public:
const int frameWidth;
const int frameHeight;
const int pixelFormat;
const bool linesizeAligned;
};
private: private:
std::function<void()> freelistCallback; static FrameBufferKey getFrameKey(const QSize& frameSize, const int pixFmt, const int linesize);
QMutex biglock; static FrameBufferKey getFrameKey(const QSize& frameSize, const int pixFmt, const bool frameAligned);
AVFrame* frameOther, *frameYUV420, *frameRGB24;
int width, height; AVFrame* retrieveAVFrame(const QSize& dimensions, const int pixelFormat, const bool requireAligned);
int pixFmt; AVFrame* generateAVFrame(const QSize& dimensions, const int pixelFormat, const bool requireAligned);
AVFrame* storeAVFrame(AVFrame* frame, const QSize& dimensions, const int pixelFormat);
void deleteFrameBuffer();
template <typename T>
T toGenericObject(const QSize& dimensions, const int pixelFormat, const bool requireAligned,
const std::function<T(AVFrame* const)> objectConstructor, const T& nullObject);
private:
// ID
const IDType frameID;
const IDType sourceID;
// Main framebuffer store
std::unordered_map<FrameBufferKey, AVFrame*, std::function<decltype(FrameBufferKey::hash)>> frameBuffer {3, FrameBufferKey::hash};
// Source frame
const QRect sourceDimensions;
int sourcePixelFormat;
const FrameBufferKey sourceFrameKey;
const bool freeSourceFrame;
// Reference store
static AtomicIDType frameIDs;
static std::unordered_map<IDType, QMutex> mutexMap;
static std::unordered_map<IDType, std::unordered_map<IDType, std::weak_ptr<VideoFrame>>> refsMap;
// Concurrency
QReadWriteLock frameLock {};
static QReadWriteLock refsLock;
}; };
#endif // VIDEOFRAME_H #endif // VIDEOFRAME_H

31
src/video/videosource.cpp

@ -0,0 +1,31 @@
/*
Copyright © 2016 by The qTox Project
This file is part of qTox, a Qt-based graphical interface for Tox.
qTox is libre software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
qTox is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with qTox. If not, see <http://www.gnu.org/licenses/>.
*/
#include "videosource.h"
/**
* @class VideoSource
* @brief An abstract source of video frames
*
* When it has at least one subscriber the source will emit new video frames.
* Subscribing is recursive, multiple users can subscribe to the same VideoSource.
*/
// Definition of the static, process-wide source ID counter; starts at 0
VideoSource::AtomicIDType VideoSource::sourceIDs {0};

39
src/video/videosource.h

@ -21,42 +21,51 @@
#define VIDEOSOURCE_H #define VIDEOSOURCE_H
#include <QObject> #include <QObject>
#include <atomic>
#include <memory> #include <memory>
class VideoFrame; class VideoFrame;
/**
@brief An abstract source of video frames
When it has at least one subscriber the source will emit new video frames
Subscribing is recursive, multiple users can subscribe to the same VideoSource
*/
class VideoSource : public QObject class VideoSource : public QObject
{ {
Q_OBJECT Q_OBJECT
public: public:
// Declare type aliases
using IDType = std::uint_fast64_t;
using AtomicIDType = std::atomic_uint_fast64_t;
public:
VideoSource() : id(sourceIDs++){}
virtual ~VideoSource() = default; virtual ~VideoSource() = default;
/** /**
If subscribe sucessfully opens the source, it will start emitting frameAvailable signals. * @brief If subscribe sucessfully opens the source, it will start emitting frameAvailable signals.
*/ */
virtual bool subscribe() = 0; virtual bool subscribe() = 0;
/** /**
Stop emitting frameAvailable signals, and free associated resources if necessary. * @brief Stop emitting frameAvailable signals, and free associated resources if necessary.
*/ */
virtual void unsubscribe() = 0; virtual void unsubscribe() = 0;
/// ID of this VideoSource
const IDType id;
signals: signals:
/** /**
Emitted when new frame available to use. * @brief Emitted when new frame available to use.
@param frame New frame. * @param frame New frame.
*/ */
void frameAvailable(std::shared_ptr<VideoFrame> frame); void frameAvailable(std::shared_ptr<VideoFrame> frame);
/** /**
Emitted when the source is stopped for an indefinite amount of time, * @brief Emitted when the source is stopped for an indefinite amount of time, but might restart
but might restart sending frames again later * sending frames again later
*/ */
void sourceStopped(); void sourceStopped();
private:
/// Used to manage a global ID for all VideoSources
static AtomicIDType sourceIDs;
}; };
#endif // VIDEOSOURCE_H #endif // VIDEOSOURCE_H

2
src/video/videosurface.cpp

@ -146,7 +146,7 @@ void VideoSurface::onNewFrameAvailable(std::shared_ptr<VideoFrame> newFrame)
lock(); lock();
lastFrame = newFrame; lastFrame = newFrame;
newSize = lastFrame->getSize(); newSize = lastFrame->getSourceDimensions().size();
unlock(); unlock();
float newRatio = getSizeRatio(newSize); float newRatio = getSizeRatio(newSize);

Loading…
Cancel
Save