
Bug 1179094: Use TimeUnit in PlatformDecoderModule. r=cpearce
Jean-Yves Avenard committed Jul 8, 2015
1 parent f41604e commit c7b6fa5
Showing 17 changed files with 128 additions and 92 deletions.
8 changes: 8 additions & 0 deletions dom/media/TimeUnits.h
@@ -119,6 +119,14 @@ class TimeUnit final {
return TimeUnit(INT64_MAX);
}

static TimeUnit Invalid() {
TimeUnit ret;
ret.mValue = CheckedInt64(INT64_MAX);
// Force an overflow to render the CheckedInt invalid.
ret.mValue += 1;
return ret;
}

int64_t ToMicroseconds() const {
return mValue.value();
}
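
The Invalid() helper above works because mozilla::CheckedInt latches into an invalid state on overflow and stays there. A minimal standalone sketch of that latching behavior (an illustration only, not the mfbt implementation; it assumes a GCC/Clang compiler for __builtin_add_overflow):

    #include <cstdint>
    #include <iostream>
    #include <limits>

    // Toy stand-in for mozilla::CheckedInt64: an addition that overflows
    // permanently poisons the value, so isValid() reports false afterwards.
    class CheckedInt64Sketch {
     public:
      explicit CheckedInt64Sketch(int64_t aValue) : mValue(aValue), mValid(true) {}

      CheckedInt64Sketch& operator+=(int64_t aRhs) {
        int64_t result;
        if (__builtin_add_overflow(mValue, aRhs, &result)) {
          mValid = false;  // Latch invalid; later operations cannot clear it.
        } else {
          mValue = result;
        }
        return *this;
      }

      bool isValid() const { return mValid; }

     private:
      int64_t mValue;
      bool mValid;
    };

    int main() {
      CheckedInt64Sketch value(std::numeric_limits<int64_t>::max());
      value += 1;  // The same forced overflow TimeUnit::Invalid() uses.
      std::cout << std::boolalpha << value.isValid() << '\n';  // prints "false"
    }
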
4 changes: 4 additions & 0 deletions dom/media/VideoUtils.cpp
@@ -29,6 +29,10 @@ CheckedInt64 FramesToUsecs(int64_t aFrames, uint32_t aRate) {
return (CheckedInt64(aFrames) * USECS_PER_S) / aRate;
}

media::TimeUnit FramesToTimeUnit(int64_t aFrames, uint32_t aRate) {
return (media::TimeUnit::FromMicroseconds(aFrames) * USECS_PER_S) / aRate;
}

// Converts from microseconds to number of audio frames, given the specified
// audio rate.
CheckedInt64 UsecsToFrames(int64_t aUsecs, uint32_t aRate) {
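
FramesToTimeUnit mirrors FramesToUsecs above: the frame count is scaled to microseconds before dividing by the sample rate, so truncation happens only once, at microsecond precision. A plain-integer sketch of that arithmetic (unchecked, for illustration; the real functions go through CheckedInt64 or TimeUnit to catch overflow):

    #include <cstdint>
    #include <iostream>

    constexpr int64_t USECS_PER_S = 1000000;

    // Same formula as FramesToUsecs/FramesToTimeUnit, minus the overflow checks.
    int64_t FramesToUsecsSketch(int64_t aFrames, uint32_t aRate) {
      return aFrames * USECS_PER_S / aRate;
    }

    int main() {
      // 1024 frames at 48 kHz: 1024 * 1000000 / 48000 = 21333 us
      // (truncated from 21333.33...).
      std::cout << FramesToUsecsSketch(1024, 48000) << " us\n";
    }
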
7 changes: 4 additions & 3 deletions dom/media/VideoUtils.h
@@ -127,10 +127,11 @@ media::TimeIntervals GetEstimatedBufferedTimeRanges(mozilla::MediaResource* aStr
int64_t aDurationUsecs);

// Converts from number of audio frames (aFrames) to microseconds, given
// the specified audio rate (aRate). Stores result in aOutUsecs. Returns true
// if the operation succeeded, or false if there was an integer overflow
while calculating the conversion.
// the specified audio rate (aRate).
CheckedInt64 FramesToUsecs(int64_t aFrames, uint32_t aRate);
// Converts from number of audio frames (aFrames) to a TimeUnit, given
// the specified audio rate (aRate).
media::TimeUnit FramesToTimeUnit(int64_t aFrames, uint32_t aRate);

// Converts from microseconds (aUsecs) to number of audio frames, given the
// specified audio rate (aRate). Stores the result in aOutFrames. Returns
8 changes: 4 additions & 4 deletions dom/media/fmp4/MP4Reader.cpp
@@ -423,7 +423,7 @@ MP4Reader::ReadMetadata(MediaInfo* aInfo,
}

// Get the duration, and report it to the decoder if we have it.
Microseconds duration;
mp4_demuxer::Microseconds duration;
{
MonitorAutoLock lock(mDemuxerMonitor);
duration = mDemuxer->Duration();
@@ -561,7 +561,7 @@ MP4Reader::GetDecoderData(TrackType aTrack)
return mVideo;
}

Microseconds
mp4_demuxer::Microseconds
MP4Reader::GetNextKeyframeTime()
{
MonitorAutoLock mon(mDemuxerMonitor);
@@ -596,7 +596,7 @@ MP4Reader::ShouldSkip(bool aSkipToNextKeyframe, int64_t aTimeThreshold)
// if the time threshold (the current playback position) is after the next
// keyframe in the stream. This means we'll only skip frames that we have
// no hope of ever playing.
Microseconds nextKeyframe = -1;
mp4_demuxer::Microseconds nextKeyframe = -1;
if (!sDemuxSkipToNextKeyframe ||
(nextKeyframe = GetNextKeyframeTime()) == -1) {
return aSkipToNextKeyframe;
@@ -1090,7 +1090,7 @@ MP4Reader::GetBuffered()
nsresult rv = resource->GetCachedRanges(ranges);

if (NS_SUCCEEDED(rv)) {
nsTArray<Interval<Microseconds>> timeRanges;
nsTArray<Interval<mp4_demuxer::Microseconds>> timeRanges;
mDemuxer->ConvertByteRangesToTime(ranges, &timeRanges);
for (size_t i = 0; i < timeRanges.Length(); i++) {
buffered += media::TimeInterval(
2 changes: 1 addition & 1 deletion dom/media/fmp4/MP4Reader.h
@@ -118,7 +118,7 @@ class MP4Reader final : public MediaDecoderReader
bool IsSupportedVideoMimeType(const nsACString& aMimeType);
virtual bool IsWaitingOnCDMResource() override;

Microseconds GetNextKeyframeTime();
mp4_demuxer::Microseconds GetNextKeyframeTime();
bool ShouldSkip(bool aSkipToNextKeyframe, int64_t aTimeThreshold);

size_t SizeOfQueue(TrackType aTrack);
1 change: 0 additions & 1 deletion dom/media/platforms/PlatformDecoderModule.h
@@ -27,7 +27,6 @@ class MediaDataDecoder;
class MediaDataDecoderCallback;
class FlushableMediaTaskQueue;
class CDMProxy;
typedef int64_t Microseconds;

// The PlatformDecoderModule interface is used by the MP4Reader to abstract
// access to the H264 and Audio (AAC/MP3) decoders provided by various platforms.
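
Deleting `typedef int64_t Microseconds;` is the point of the whole patch: a bare int64_t carries no unit and cannot signal overflow, whereas media::TimeUnit wraps a CheckedInt64 and makes every conversion explicit. A rough sketch of the wrapper's shape (names follow dom/media/TimeUnits.h, but this is a simplified illustration, not the real header):

    #include <cstdint>
    #include <iostream>

    // Simplified illustration of media::TimeUnit; the real class stores a
    // mozilla::CheckedInt64 and therefore also offers IsValid() and Invalid().
    class TimeUnitSketch {
     public:
      TimeUnitSketch() : mValue(0) {}

      static TimeUnitSketch FromMicroseconds(int64_t aValue) {
        return TimeUnitSketch(aValue);
      }

      int64_t ToMicroseconds() const { return mValue; }
      double ToSeconds() const { return static_cast<double>(mValue) / 1000000; }

     private:
      explicit TimeUnitSketch(int64_t aValue) : mValue(aValue) {}
      int64_t mValue;  // microseconds
    };

    int main() {
      // The boundary pattern this commit applies everywhere: wrap raw sample
      // fields on the way in, unwrap with ToMicroseconds() on the way out.
      auto duration = TimeUnitSketch::FromMicroseconds(21333);
      std::cout << duration.ToSeconds() << " s\n";  // 0.021333
    }
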
27 changes: 15 additions & 12 deletions dom/media/platforms/agnostic/BlankDecoderModule.cpp
@@ -13,6 +13,7 @@
#include "ImageContainer.h"
#include "MediaInfo.h"
#include "MediaTaskQueue.h"
#include "TimeUnits.h"

namespace mozilla {

@@ -51,9 +52,10 @@ class BlankMediaDataDecoder : public MediaDataDecoder {
}
NS_IMETHOD Run() override
{
nsRefPtr<MediaData> data = mCreator->Create(mSample->mTime,
mSample->mDuration,
mSample->mOffset);
nsRefPtr<MediaData> data =
mCreator->Create(media::TimeUnit::FromMicroseconds(mSample->mTime),
media::TimeUnit::FromMicroseconds(mSample->mDuration),
mSample->mOffset);
mCallback->Output(data);
return NS_OK;
}
@@ -103,7 +105,7 @@ class BlankVideoDataCreator {
}

already_AddRefed<MediaData>
Create(Microseconds aDTS, Microseconds aDuration, int64_t aOffsetInStream)
Create(const media::TimeUnit& aDTS, const media::TimeUnit& aDuration, int64_t aOffsetInStream)
{
// Create a fake YUV buffer in a 420 format. That is, an 8bpp Y plane,
// with a U and V plane that are half the size of the Y plane, i.e. 8 bit,
@@ -141,11 +143,11 @@
mImageContainer,
nullptr,
aOffsetInStream,
aDTS,
aDuration,
aDTS.ToMicroseconds(),
aDuration.ToMicroseconds(),
buffer,
true,
aDTS,
aDTS.ToMicroseconds(),
mPicture);
}
private:
@@ -164,13 +166,14 @@ class BlankAudioDataCreator {
{
}

MediaData* Create(Microseconds aDTS,
Microseconds aDuration,
MediaData* Create(const media::TimeUnit& aDTS,
const media::TimeUnit& aDuration,
int64_t aOffsetInStream)
{
// Convert duration to frames. We add 1 to duration to account for
// rounding errors, so we get a consistent tone.
CheckedInt64 frames = UsecsToFrames(aDuration+1, mSampleRate);
CheckedInt64 frames =
UsecsToFrames(aDuration.ToMicroseconds()+1, mSampleRate);
if (!frames.isValid() ||
!mChannelCount ||
!mSampleRate ||
@@ -189,8 +192,8 @@
mFrameSum++;
}
return new AudioData(aOffsetInStream,
aDTS,
aDuration,
aDTS.ToMicroseconds(),
aDuration.ToMicroseconds(),
uint32_t(frames.value()),
samples,
mChannelCount,
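
The `aDuration.ToMicroseconds() + 1` in BlankAudioDataCreator compensates for truncation: when a frame count was earlier rounded down to whole microseconds, converting straight back would lose a frame and break the tone's continuity. A demonstration using the same integer formulas as FramesToUsecs and UsecsToFrames:

    #include <cstdint>
    #include <iostream>

    constexpr int64_t USECS_PER_S = 1000000;

    int64_t FramesToUsecsSketch(int64_t aFrames, uint32_t aRate) {
      return aFrames * USECS_PER_S / aRate;  // truncates toward zero
    }

    int64_t UsecsToFramesSketch(int64_t aUsecs, uint32_t aRate) {
      return aUsecs * aRate / USECS_PER_S;  // truncates again on the way back
    }

    int main() {
      int64_t usecs = FramesToUsecsSketch(1024, 48000);  // 21333 us
      std::cout << UsecsToFramesSketch(usecs, 48000) << '\n';      // 1023: a frame lost
      std::cout << UsecsToFramesSketch(usecs + 1, 48000) << '\n';  // 1024: the +1 recovers it
    }
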
15 changes: 9 additions & 6 deletions dom/media/platforms/android/AndroidDecoderModule.cpp
@@ -105,7 +105,8 @@ class VideoDataDecoder : public MediaCodecDataDecoder {
return eglImage;
}

virtual nsresult PostOutput(BufferInfo::Param aInfo, MediaFormat::Param aFormat, Microseconds aDuration) override {
virtual nsresult PostOutput(BufferInfo::Param aInfo, MediaFormat::Param aFormat,
const media::TimeUnit& aDuration) override {
if (!EnsureGLContext()) {
return NS_ERROR_FAILURE;
}
@@ -168,7 +169,7 @@ class VideoDataDecoder : public MediaCodecDataDecoder {
mImageContainer,
offset,
presentationTimeUs,
aDuration,
aDuration.ToMicroseconds(),
img,
isSync,
presentationTimeUs,
@@ -213,7 +214,9 @@ class AudioDataDecoder : public MediaCodecDataDecoder {
}
}

nsresult Output(BufferInfo::Param aInfo, void* aBuffer, MediaFormat::Param aFormat, Microseconds aDuration) {
nsresult Output(BufferInfo::Param aInfo, void* aBuffer,
MediaFormat::Param aFormat,
const media::TimeUnit& aDuration) {
// The output on Android is always 16-bit signed

nsresult rv;
@@ -239,7 +242,7 @@
NS_ENSURE_SUCCESS(rv = aInfo->PresentationTimeUs(&presentationTimeUs), rv);

nsRefPtr<AudioData> data = new AudioData(offset, presentationTimeUs,
aDuration,
aDuration.ToMicroseconds(),
numFrames,
audio,
numChannels,
@@ -485,7 +488,7 @@ void MediaCodecDataDecoder::DecoderLoop()
sample->mTime, 0);
HANDLE_DECODER_ERROR();

mDurations.push(sample->mDuration);
mDurations.push(media::TimeUnit::FromMicroseconds(sample->mDuration));
sample = nullptr;
outputDone = false;
}
@@ -543,7 +546,7 @@ void MediaCodecDataDecoder::DecoderLoop()

MOZ_ASSERT(!mDurations.empty(), "Should have had a duration queued");

Microseconds duration = 0;
media::TimeUnit duration;
if (!mDurations.empty()) {
duration = mDurations.front();
mDurations.pop();
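
MediaCodec does not hand input durations back with decoded output, so DecoderLoop queues each sample's duration when the sample is submitted and pops one duration per output buffer, relying on in-order decoding. A condensed sketch of that pairing (SubmitInput/TakeOutputDuration are hypothetical names for illustration; the real logic lives in MediaCodecDataDecoder::DecoderLoop):

    #include <cstdint>
    #include <iostream>
    #include <queue>

    // Stand-in for media::TimeUnit; see the TimeUnits.h hunk above.
    using TimeUnitUs = int64_t;

    class DurationPairingSketch {
     public:
      // Input side: remember each sample's duration in submission order.
      void SubmitInput(TimeUnitUs aDurationUs) { mDurations.push(aDurationUs); }

      // Output side: the codec returns frames in order, so the front of the
      // queue is the duration belonging to this output buffer.
      TimeUnitUs TakeOutputDuration() {
        TimeUnitUs duration = 0;  // matches the decoder's "default if empty" guard
        if (!mDurations.empty()) {
          duration = mDurations.front();
          mDurations.pop();
        }
        return duration;
      }

     private:
      std::queue<TimeUnitUs> mDurations;
    };

    int main() {
      DurationPairingSketch d;
      d.SubmitInput(21333);
      d.SubmitInput(21334);
      std::cout << d.TakeOutputDuration() << ' '
                << d.TakeOutputDuration() << '\n';  // 21333 21334
    }
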
8 changes: 5 additions & 3 deletions dom/media/platforms/android/AndroidDecoderModule.h
@@ -9,6 +9,7 @@
#include "AndroidSurfaceTexture.h"

#include "MediaCodec.h"
#include "TimeUnits.h"
#include "mozilla/Monitor.h"

#include <queue>
@@ -81,12 +82,13 @@ class MediaCodecDataDecoder : public MediaDataDecoder {
bool mStopping;

SampleQueue mQueue;
std::queue<Microseconds> mDurations;
// Durations are stored in microseconds.
std::queue<media::TimeUnit> mDurations;

virtual nsresult InitDecoder(widget::sdk::Surface::Param aSurface);

virtual nsresult Output(widget::sdk::BufferInfo::Param aInfo, void* aBuffer, widget::sdk::MediaFormat::Param aFormat, Microseconds aDuration) { return NS_OK; }
virtual nsresult PostOutput(widget::sdk::BufferInfo::Param aInfo, widget::sdk::MediaFormat::Param aFormat, Microseconds aDuration) { return NS_OK; }
virtual nsresult Output(widget::sdk::BufferInfo::Param aInfo, void* aBuffer, widget::sdk::MediaFormat::Param aFormat, const media::TimeUnit& aDuration) { return NS_OK; }
virtual nsresult PostOutput(widget::sdk::BufferInfo::Param aInfo, widget::sdk::MediaFormat::Param aFormat, const media::TimeUnit& aDuration) { return NS_OK; }
virtual void Cleanup() {};

nsresult ResetInputBuffers();
8 changes: 4 additions & 4 deletions dom/media/platforms/apple/AppleATDecoder.cpp
@@ -261,23 +261,23 @@ AppleATDecoder::DecodeSample(MediaRawData* aSample)

size_t numFrames = outputData.Length() / channels;
int rate = mOutputFormat.mSampleRate;
CheckedInt<Microseconds> duration = FramesToUsecs(numFrames, rate);
if (!duration.isValid()) {
media::TimeUnit duration = FramesToTimeUnit(numFrames, rate);
if (!duration.IsValid()) {
NS_WARNING("Invalid count of accumulated audio samples");
return NS_ERROR_FAILURE;
}

#ifdef LOG_SAMPLE_DECODE
LOG("pushed audio at time %lfs; duration %lfs\n",
(double)aSample->mTime / USECS_PER_S,
(double)duration.value() / USECS_PER_S);
duration.ToSeconds());
#endif

nsAutoArrayPtr<AudioDataValue> data(new AudioDataValue[outputData.Length()]);
PodCopy(data.get(), &outputData[0], outputData.Length());
nsRefPtr<AudioData> audio = new AudioData(aSample->mOffset,
aSample->mTime,
duration.value(),
duration.ToMicroseconds(),
numFrames,
data.forget(),
channels,
30 changes: 16 additions & 14 deletions dom/media/platforms/apple/AppleVDADecoder.cpp
@@ -195,9 +195,9 @@ PlatformCallback(void* decompressionOutputRefCon,
AutoCFRelease<CFNumberRef> kfref =
(CFNumberRef)CFDictionaryGetValue(frameInfo, CFSTR("FRAME_KEYFRAME"));

Microseconds dts;
Microseconds pts;
Microseconds duration;
int64_t dts;
int64_t pts;
int64_t duration;
int64_t byte_offset;
char is_sync_point;

Expand All @@ -208,11 +208,12 @@ PlatformCallback(void* decompressionOutputRefCon,
CFNumberGetValue(kfref, kCFNumberSInt8Type, &is_sync_point);

nsAutoPtr<AppleVDADecoder::AppleFrameRef> frameRef(
new AppleVDADecoder::AppleFrameRef(dts,
pts,
duration,
byte_offset,
is_sync_point == 1));
new AppleVDADecoder::AppleFrameRef(
media::TimeUnit::FromMicroseconds(dts),
media::TimeUnit::FromMicroseconds(pts),
media::TimeUnit::FromMicroseconds(duration),
byte_offset,
is_sync_point == 1));

// Forward the data back to an object method which can access
// the correct MP4Reader callback.
@@ -252,9 +253,9 @@ AppleVDADecoder::OutputFrame(CVPixelBufferRef aImage,

LOG("mp4 output frame %lld dts %lld pts %lld duration %lld us%s",
aFrameRef->byte_offset,
aFrameRef->decode_timestamp,
aFrameRef->composition_timestamp,
aFrameRef->duration,
aFrameRef->decode_timestamp.ToMicroseconds(),
aFrameRef->composition_timestamp.ToMicroseconds(),
aFrameRef->duration.ToMicroseconds(),
aFrameRef->is_sync_point ? " keyframe" : ""
);

Expand All @@ -277,10 +278,11 @@ AppleVDADecoder::OutputFrame(CVPixelBufferRef aImage,
data = VideoData::CreateFromImage(info,
mImageContainer,
aFrameRef->byte_offset,
aFrameRef->composition_timestamp,
aFrameRef->duration, image.forget(),
aFrameRef->composition_timestamp.ToMicroseconds(),
aFrameRef->duration.ToMicroseconds(),
image.forget(),
aFrameRef->is_sync_point,
aFrameRef->decode_timestamp,
aFrameRef->decode_timestamp.ToMicroseconds(),
visible);

if (!data) {
19 changes: 10 additions & 9 deletions dom/media/platforms/apple/AppleVDADecoder.h
@@ -13,6 +13,7 @@
#include "MP4Decoder.h"
#include "nsIThread.h"
#include "ReorderQueue.h"
#include "TimeUnits.h"

#include "VideoDecodeAcceleration/VDADecoder.h"

@@ -28,24 +29,24 @@
public:
class AppleFrameRef {
public:
Microseconds decode_timestamp;
Microseconds composition_timestamp;
Microseconds duration;
media::TimeUnit decode_timestamp;
media::TimeUnit composition_timestamp;
media::TimeUnit duration;
int64_t byte_offset;
bool is_sync_point;

explicit AppleFrameRef(const MediaRawData& aSample)
: decode_timestamp(aSample.mTimecode)
, composition_timestamp(aSample.mTime)
, duration(aSample.mDuration)
: decode_timestamp(media::TimeUnit::FromMicroseconds(aSample.mTimecode))
, composition_timestamp(media::TimeUnit::FromMicroseconds(aSample.mTime))
, duration(media::TimeUnit::FromMicroseconds(aSample.mDuration))
, byte_offset(aSample.mOffset)
, is_sync_point(aSample.mKeyframe)
{
}

AppleFrameRef(Microseconds aDts,
Microseconds aPts,
Microseconds aDuration,
AppleFrameRef(const media::TimeUnit& aDts,
const media::TimeUnit& aPts,
const media::TimeUnit& aDuration,
int64_t aByte_offset,
bool aIs_sync_point)
: decode_timestamp(aDts)