enc: support hwa using hwcontext api
wang-bin committed Aug 28, 2016
1 parent 30ecf89 commit d888ad7
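For orientation before the diff: the change drives hardware encoders through FFmpeg's hwcontext API — create an AVHWDeviceContext from the encoder name's suffix, attach an AVHWFramesContext to the codec context so pix_fmt can be a hwaccel format, then upload each software frame into a hardware surface right before avcodec_encode_video2(). The sketch below shows that flow in isolation; the VAAPI device type, the h264_vaapi encoder, NV12 input and the 1280x720 size are illustrative assumptions, not values taken from this commit, and error handling is reduced to early returns.

// Minimal sketch of the hwcontext encode path (assumptions: VAAPI device, "h264_vaapi"
// encoder, 1280x720 NV12 input; names and sizes are illustrative, not from this commit).
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/hwcontext.h>
}

static int encode_one_hw_frame()
{
    avcodec_register_all(); // needed on the FFmpeg 3.x era this commit targets

    // 1. Open the hardware device (the commit does this with av_hwdevice_ctx_create() in open()).
    AVBufferRef *device_ref = NULL;
    if (av_hwdevice_ctx_create(&device_ref, AV_HWDEVICE_TYPE_VAAPI, NULL, NULL, 0) < 0)
        return -1;

    // 2. Describe the pool of hardware surfaces that frames will be uploaded into.
    AVBufferRef *frames_ref = av_hwframe_ctx_alloc(device_ref);
    if (!frames_ref)
        return -1;
    AVHWFramesContext *frames = (AVHWFramesContext*)frames_ref->data;
    frames->format    = AV_PIX_FMT_VAAPI; // hwaccel pixel format the encoder consumes
    frames->sw_format = AV_PIX_FMT_NV12;  // layout of the software frames we upload
    frames->width     = 1280;
    frames->height    = 720;
    frames->initial_pool_size = 4;
    if (av_hwframe_ctx_init(frames_ref) < 0)
        return -1;

    // 3. Open the encoder with the hwaccel pix_fmt and the frames context attached.
    AVCodec *codec = avcodec_find_encoder_by_name("h264_vaapi");
    if (!codec)
        return -1;
    AVCodecContext *avctx = avcodec_alloc_context3(codec);
    avctx->width = 1280;
    avctx->height = 720;
    avctx->time_base = av_make_q(1, 25);
    avctx->pix_fmt = AV_PIX_FMT_VAAPI;
    avctx->hw_frames_ctx = av_buffer_ref(frames_ref);
    if (avcodec_open2(avctx, codec, NULL) < 0)
        return -1;

    // 4. Upload one software frame into a hardware surface, then encode it.
    AVFrame *sw = av_frame_alloc();
    sw->format = AV_PIX_FMT_NV12;
    sw->width  = 1280;
    sw->height = 720;
    av_frame_get_buffer(sw, 0);                         // a real encoder would fill this with picture data

    AVFrame *hw = av_frame_alloc();
    if (av_hwframe_get_buffer(frames_ref, hw, 0) < 0 || // take a surface from the pool
        av_hwframe_transfer_data(hw, sw, 0) < 0)        // copy the sw planes to the surface
        return -1;
    hw->pts = 0;

    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;
    int got_packet = 0;
    int ret = avcodec_encode_video2(avctx, &pkt, hw, &got_packet); // same call the commit keeps using

    av_packet_unref(&pkt);
    av_frame_free(&hw);
    av_frame_free(&sw);
    avcodec_free_context(&avctx);
    av_buffer_unref(&frames_ref);
    av_buffer_unref(&device_ref);
    return ret;
}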
Showing 1 changed file with 135 additions and 6 deletions.
141 changes: 135 additions & 6 deletions src/codec/video/VideoEncoderFFmpeg.cpp
@@ -27,6 +27,11 @@
#include "QtAV/version.h"
#include "utils/Logger.h"

#if AV_MODULE_CHECK(LIBAVUTIL, 55, 13, 0, 27, 100)
#define HAVE_AVHWCTX
#include <libavutil/hwcontext.h>
#endif

/*!
* options (properties) are from libavcodec/options_table.h
* enum name here must convert to lower case to fit the names in avcodec. done in AVEncoder.setOptions()
@@ -35,14 +40,39 @@

namespace QtAV {

#ifdef HAVE_AVHWCTX
struct {
AVHWDeviceType type;
const char* name;
} hwdevs[] = {
{AV_HWDEVICE_TYPE_VDPAU, "vdpau"},
{AV_HWDEVICE_TYPE_CUDA, "cuda"},
{AV_HWDEVICE_TYPE_VAAPI, "vaapi"},
{AV_HWDEVICE_TYPE_DXVA2, "dxva2"},
};

AVHWDeviceType fromHWAName(const char* name)
{
for (size_t i = 0; i < sizeof(hwdevs)/sizeof(hwdevs[0]); ++i) {
if (qstrcmp(name, hwdevs[i].name) == 0)
return hwdevs[i].type;
}
return AVHWDeviceType(-1);
}
#endif // HAVE_AVHWCTX
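// Usage illustration (assumption for clarity, not code from this commit): the caller below
// derives the name from the encoder's suffix, so for codec_name "h264_vaapi",
// codec_name.section(QChar('_'), -1) yields "vaapi" and fromHWAName("vaapi") returns
// AV_HWDEVICE_TYPE_VAAPI; an unrecognized suffix maps to AVHWDeviceType(-1).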

class VideoEncoderFFmpegPrivate;
class VideoEncoderFFmpeg Q_DECL_FINAL: public VideoEncoder
{
DPTR_DECLARE_PRIVATE(VideoEncoderFFmpeg)
Q_PROPERTY(QString hwdevice READ hwDevice WRITE setHWDevice)
public:
VideoEncoderFFmpeg();
VideoEncoderId id() const Q_DECL_OVERRIDE;
bool encode(const VideoFrame &frame = VideoFrame()) Q_DECL_OVERRIDE;

void setHWDevice(const QString& name);
QString hwDevice() const;
};

static const VideoEncoderId VideoEncoderId_FFmpeg = mkid::id32base36_6<'F', 'F', 'm', 'p', 'e', 'g'>::value;
@@ -64,6 +94,14 @@ class VideoEncoderFFmpegPrivate Q_DECL_FINAL: public VideoEncoderPrivate

qint64 nb_encoded;
QByteArray buffer;
#ifdef HAVE_AVHWCTX
QString hwdev;
AVBufferRef *hw_device_ctx;

AVBufferRef *hwframes_ref;
AVHWFramesContext *hwframes;
QVector<AVPixelFormat> sw_fmts;
#endif
};

bool VideoEncoderFFmpegPrivate::open()
@@ -95,17 +133,57 @@ bool VideoEncoderFFmpegPrivate::open()
avctx->height = height;
// reset format_used to user defined format. important to update default format if format is invalid
format_used = format.pixelFormat();
AVPixelFormat hwfmt = AVPixelFormat(-1);
if (format.pixelFormat() == VideoFormat::Format_Invalid) {
if (codec->pix_fmts) {
qDebug("use first supported pixel format: %d", codec->pix_fmts[0]);
format_used = VideoFormat::pixelFormatFromFFmpeg((int)codec->pix_fmts[0]);
if (av_pix_fmt_desc_get(codec->pix_fmts[0])->flags & AV_PIX_FMT_FLAG_HWACCEL) {
hwfmt = codec->pix_fmts[0];
}
} else {
qWarning("pixel format and supported pixel format are not set. use yuv420p");
format_used = VideoFormat::Format_YUV420P;
}
}
//avctx->sample_aspect_ratio =
avctx->pix_fmt = (AVPixelFormat)VideoFormat::pixelFormatToFFmpeg(format_used);
if (hwfmt != AVPixelFormat(-1)) {
#ifdef HAVE_AVHWCTX
avctx->pix_fmt = hwfmt;
hw_device_ctx = NULL;
AV_ENSURE(av_hwdevice_ctx_create(&hw_device_ctx, fromHWAName(codec_name.section(QChar('_'), -1).toUtf8().constData()), hwdev.toLatin1().constData(), NULL, 0), false);
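// Note on the call above: the hw device type is taken from the encoder name's suffix via
// fromHWAName(), and 'hwdev' is a backend-specific device string (for example a VAAPI DRM
// render node such as "/dev/dri/renderD128"; the example path is an assumption, not from this
// commit). AV_ENSURE() makes open() return false if the device cannot be created.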
avctx->hw_frames_ctx = av_hwframe_ctx_alloc(hw_device_ctx);
if (!avctx->hw_frames_ctx) {
qWarning("Failed to create hw frame context for '%s'", codec_name.toLatin1().constData());
return false;
}
AVHWFramesContext* hwfs = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
// encoder surface parameters
hwfs->format = hwfmt;
//hwfs->sw_format = ; // default: the first entry in constraints.valid_sw_formats is used

// hw upload parameters. encoder's hwframes is just for parameter checking, will never be initialized, so we allocate an individual one.
hwframes_ref = av_hwframe_ctx_alloc(hw_device_ctx);
if (!hwframes_ref) {
qWarning("Failed to create hw frame context for uploading '%s'", codec_name.toLatin1().constData());
} else {
hwframes = (AVHWFramesContext*)hwframes_ref->data;
hwframes->format = hwfmt;
const void *hwcfg = NULL;
AVHWFramesConstraints *constraints = av_hwdevice_get_hwframe_constraints(hw_device_ctx, hwcfg);
const AVPixelFormat* in_fmts = constraints->valid_sw_formats;
if (in_fmts) {
while (*in_fmts != AVPixelFormat(-1)) {
sw_fmts.append(*in_fmts);
++in_fmts;
}
}
av_hwframe_constraints_free(&constraints);
}
#endif //HAVE_AVHWCTX
} else {
avctx->pix_fmt = (AVPixelFormat)VideoFormat::pixelFormatToFFmpeg(format_used);
}
if (frame_rate > 0)
avctx->time_base = av_d2q(1.0/frame_rate, frame_rate*1001.0+2);
else
@@ -151,13 +229,22 @@ VideoEncoderId VideoEncoderFFmpeg::id() const
return VideoEncoderId_FFmpeg;
}

struct ScopedAVFrameDeleter
{
static inline void cleanup(void *pointer) {
av_frame_free((AVFrame**)&pointer);
}
};
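// RAII deleter for QScopedPointer<AVFrame, ScopedAVFrameDeleter> below: the frame is released
// on every early-return path in encode() without an explicit av_frame_free() call.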

bool VideoEncoderFFmpeg::encode(const VideoFrame &frame)
{
DPTR_D(VideoEncoderFFmpeg);
AVFrame *f = NULL;
QScopedPointer<AVFrame, ScopedAVFrameDeleter> f;
// hwupload
AVPixelFormat pixfmt = AVPixelFormat(frame.pixelFormatFFmpeg());
if (frame.isValid()) {
f = av_frame_alloc();
f->format = frame.format().pixelFormatFFmpeg();
f.reset(av_frame_alloc());
f->format = pixfmt;
f->width = frame.width();
f->height = frame.height();
// f->quality = d.avctx->global_quality;
@@ -171,6 +258,7 @@ bool VideoEncoderFFmpeg::encode(const VideoFrame &frame)
default:
break;
}

// pts is set in muxer
const int nb_planes = frame.planeCount();
for (int i = 0; i < nb_planes; ++i) {
@@ -183,14 +271,55 @@ bool VideoEncoderFFmpeg::encode(const VideoFrame &frame)
if (d.avctx->height <= 0) {
d.avctx->height = frame.height();
}
#ifdef HAVE_AVHWCTX
if (d.avctx->hw_frames_ctx) {
// TODO: try to map to SourceSurface
// check valid sw_formats
if (!d.hwframes_ref) {
qWarning("no hw frame context for uploading");
return false;
}
VideoFrame converted; // declared here so the converted data stays valid until the hw upload below
if (pixfmt != d.hwframes->sw_format) {
// reinit or got an unsupported format. assume parameters will not change, so it's the 1st init
// check constraints
bool init_frames_ctx = d.hwframes->sw_format == AVPixelFormat(-1);
if (d.sw_fmts.contains(pixfmt)) {
init_frames_ctx = true;
} else { // convert to supported sw format
pixfmt = d.sw_fmts[0];
f->format = pixfmt;
converted = frame.to(VideoFormat::pixelFormatFromFFmpeg(pixfmt));
for (int i = 0; i < converted.planeCount(); ++i) {
f->linesize[i] = converted.bytesPerLine(i);
f->data[i] = (uint8_t*)converted.constBits(i);
}
}
if (init_frames_ctx) {
d.hwframes->sw_format = pixfmt;
d.hwframes->width = frame.width();
d.hwframes->height = frame.height();
AV_ENSURE(av_hwframe_ctx_init(d.hwframes_ref), false);
}
}
// upload
QScopedPointer<AVFrame, ScopedAVFrameDeleter> hwf( av_frame_alloc());
AV_ENSURE(av_hwframe_get_buffer(d.hwframes_ref, hwf.data(), 0), false);
//hwf->format = d.hwframes->format; // not necessary
//hwf->width = f->width;
//hwf->height = f->height;
AV_ENSURE(av_hwframe_transfer_data(hwf.data(), f.data(), 0), false);
AV_ENSURE(av_frame_copy_props(hwf.data(), f.data()), false);
av_frame_unref(f.data());
av_frame_move_ref(f.data(), hwf.data());
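// 'f' now refers to the uploaded hardware surface (frame properties were copied over via
// av_frame_copy_props()), so the avcodec_encode_video2() call below sees a frame whose format
// matches the encoder's hwaccel pix_fmt rather than the raw software frame.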
}
#endif //HAVE_AVHWCTX
}
AVPacket pkt;
av_init_packet(&pkt);
pkt.data = (uint8_t*)d.buffer.constData();
pkt.size = d.buffer.size();
int got_packet = 0;
int ret = avcodec_encode_video2(d.avctx, &pkt, f, &got_packet);
av_frame_free(&f);
int ret = avcodec_encode_video2(d.avctx, &pkt, f.data(), &got_packet);
if (ret < 0) {
qWarning("error avcodec_encode_video2: %s" ,av_err2str(ret));
return false; //false
