Revert "Add Timestamp accessor methods to the EncodedImage class."
This reverts commit f34d467.

Reason for revert: Seems to break a downstream project.

Original change's description:
> Add Timestamp accessor methods to the EncodedImage class.
> 
> Bug: webrtc:9378
> Change-Id: I59bf14f631f92f0f4e05f60d4af25641a23a53f9
> Reviewed-on: https://webrtc-review.googlesource.com/82100
> Reviewed-by: Stefan Holmer <[email protected]>
> Reviewed-by: Philip Eliasson <[email protected]>
> Reviewed-by: Rasmus Brandt <[email protected]>
> Commit-Queue: Niels Moller <[email protected]>
> Cr-Commit-Position: refs/heads/master@{#23734}

[email protected],[email protected],[email protected],[email protected]

Change-Id: I3aa0c0119426886bc583c918aae862eb7f4b6b63
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: webrtc:9378
Reviewed-on: https://webrtc-review.googlesource.com/85600
Reviewed-by: Björn Terelius <[email protected]>
Commit-Queue: Björn Terelius <[email protected]>
Cr-Commit-Position: refs/heads/master@{#23739}
Björn Terelius authored and Commit Bot committed Jun 26, 2018
1 parent c9ac93f commit 52f53d5
Showing 15 changed files with 82 additions and 70 deletions.
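For orientation before the per-file hunks, here is a minimal sketch of the two accessor styles this revert swaps. The classes below (suffixed Reverted/Restored) are simplified stand-ins for illustration, not the real WebRTC headers: the reverted style put Timestamp()/SetTimestamp() on EncodedImage, while the restored style keeps the legacy VCMEncodedFrame::TimeStamp() reader and a public timestamp member plus virtual accessors on api/video's EncodedFrame.

```cpp
#include <cstdint>

// Simplified stand-ins, not the real WebRTC headers.

// Style being reverted by this commit: EncodedImage carried the accessors.
class EncodedImageReverted {
 public:
  void SetTimestamp(uint32_t timestamp) { _timeStamp = timestamp; }  // 90 kHz units
  uint32_t Timestamp() const { return _timeStamp; }

 protected:
  uint32_t _timeStamp = 0;
};

// Style being restored: VCMEncodedFrame keeps the legacy TimeStamp() reader
// over the inherited field...
class EncodedImageRestored {
 protected:
  uint32_t _timeStamp = 0;
};

class VCMEncodedFrameRestored : protected EncodedImageRestored {
 public:
  uint32_t TimeStamp() const { return _timeStamp; }  // legacy spelling
};

// ...while api/video's EncodedFrame gets a public member plus virtual accessors.
class EncodedFrameRestored {
 public:
  virtual ~EncodedFrameRestored() = default;
  virtual uint32_t Timestamp() const { return timestamp; }
  virtual void SetTimestamp(uint32_t rtp_timestamp) { timestamp = rtp_timestamp; }

  uint32_t timestamp = 0;  // capture time on the 90 kHz RTP clock
};
```

The hunks below are mostly mechanical renames between frame.Timestamp() and frame.TimeStamp() at call sites, plus the member and accessor moves sketched above. In the diff code that follows, lines prefixed with - are removed by this revert and lines prefixed with + are added; unprefixed lines are context.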
8 changes: 8 additions & 0 deletions api/video/encoded_frame.cc
@@ -17,5 +17,13 @@ bool EncodedFrame::delayed_by_retransmission() const {
return 0;
}

+ uint32_t EncodedFrame::Timestamp() const {
+ return timestamp;
+ }
+
+ void EncodedFrame::SetTimestamp(uint32_t rtp_timestamp) {
+ timestamp = rtp_timestamp;
+ }

} // namespace video_coding
} // namespace webrtc
5 changes: 5 additions & 0 deletions api/video/encoded_frame.h
@@ -58,6 +58,10 @@ class EncodedFrame : public webrtc::VCMEncodedFrame {

virtual bool GetBitstream(uint8_t* destination) const = 0;

+ // The capture timestamp of this frame, using the 90 kHz RTP clock.
+ virtual uint32_t Timestamp() const;
+ virtual void SetTimestamp(uint32_t rtp_timestamp);

// When this frame was received.
virtual int64_t ReceivedTime() const = 0;

@@ -74,6 +78,7 @@ class EncodedFrame : public webrtc::VCMEncodedFrame {
bool is_keyframe() const { return num_references == 0; }

VideoLayerFrameId id;
+ uint32_t timestamp = 0;

// TODO(philipel): Add simple modify/access functions to prevent adding too
// many |references|.
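The restored comment above pins the unit: timestamps run on the 90 kHz RTP video clock, so a capture time in milliseconds corresponds to 90 ticks per millisecond (the unit test later in this diff writes frame->timestamp = ts_ms * 90). A small illustrative helper, assuming a millisecond input; this is not part of the WebRTC API.

```cpp
#include <cstdint>

// Hypothetical helper, not part of WebRTC: convert a capture time in
// milliseconds to 90 kHz RTP ticks; the result wraps modulo 2^32 like any
// RTP timestamp.
constexpr uint32_t MsToRtp90kHz(int64_t capture_time_ms) {
  return static_cast<uint32_t>(capture_time_ms * 90);
}

// One frame interval at roughly 30 fps (33 ms) is 2970 RTP ticks.
static_assert(MsToRtp90kHz(33) == 2970, "90 ticks per millisecond");
```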
8 changes: 0 additions & 8 deletions common_video/include/video_frame.h
@@ -37,14 +37,6 @@ class EncodedImage {
EncodedImage(const EncodedImage&);
EncodedImage(uint8_t* buffer, size_t length, size_t size);

- // TODO(nisse): Change style to timestamp(), set_timestamp(), for consistency
- // with the VideoFrame class.
- // Set frame timestamp (90kHz).
- void SetTimestamp(uint32_t timestamp) { _timeStamp = timestamp; }
-
- // Get frame timestamp (90kHz).
- uint32_t Timestamp() const { return _timeStamp; }

void SetEncodeTime(int64_t encode_start_ms, int64_t encode_finish_ms);

uint32_t _encodedWidth = 0;
6 changes: 3 additions & 3 deletions modules/video_coding/decoding_state.cc
@@ -58,7 +58,7 @@ bool VCMDecodingState::IsOldFrame(const VCMFrameBuffer* frame) const {
assert(frame != NULL);
if (in_initial_state_)
return false;
- return !IsNewerTimestamp(frame->Timestamp(), time_stamp_);
+ return !IsNewerTimestamp(frame->TimeStamp(), time_stamp_);
}

bool VCMDecodingState::IsOldPacket(const VCMPacket* packet) const {
@@ -73,7 +73,7 @@ void VCMDecodingState::SetState(const VCMFrameBuffer* frame) {
if (!UsingFlexibleMode(frame))
UpdateSyncState(frame);
sequence_num_ = static_cast<uint16_t>(frame->GetHighSeqNum());
- time_stamp_ = frame->Timestamp();
+ time_stamp_ = frame->TimeStamp();
picture_id_ = frame->PictureId();
temporal_id_ = frame->TemporalId();
tl0_pic_id_ = frame->Tl0PicId();
@@ -143,7 +143,7 @@ bool VCMDecodingState::UpdateEmptyFrame(const VCMFrameBuffer* frame) {
// Continuous empty packets or continuous frames can be dropped if we
// advance the sequence number.
sequence_num_ = frame->GetHighSeqNum();
- time_stamp_ = frame->Timestamp();
+ time_stamp_ = frame->TimeStamp();
return true;
}
return false;
6 changes: 2 additions & 4 deletions modules/video_coding/encoded_frame.h
@@ -65,12 +65,10 @@ class VCMEncodedFrame : protected EncodedImage {
* Get frame length
*/
size_t Length() const { return _length; }

/**
- * Frame RTP timestamp (90kHz)
+ * Get frame timestamp (90kHz)
*/
- using EncodedImage::Timestamp;
- using EncodedImage::SetTimestamp;
+ uint32_t TimeStamp() const { return _timeStamp; }
/**
* Get render time in milliseconds
*/
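The hunk above edits the bridge between the legacy and the new frame types: VCMEncodedFrame inherits protected from EncodedImage, so the original change had re-exported the base-class accessors with using declarations, and the revert reinstates the old TimeStamp() wrapper over the inherited _timeStamp field. A reduced model of that relationship, with names suffixed Model to mark them as illustrations rather than the real classes:

```cpp
#include <cstddef>
#include <cstdint>

// Reduced model of the classes the hunk above touches; the real ones carry
// many more members and methods.
class EncodedImageModel {
 public:
  uint32_t _timeStamp = 0;  // 90 kHz RTP timestamp
  size_t _length = 0;
};

class VCMEncodedFrameModel : protected EncodedImageModel {
 public:
  // Restored old-style reader over the inherited field. The change being
  // reverted had instead re-exposed accessors defined on the base class via
  //   using EncodedImageModel::Timestamp;
  //   using EncodedImageModel::SetTimestamp;
  uint32_t TimeStamp() const { return _timeStamp; }

  // Get frame length, as in the surrounding context of the hunk.
  size_t Length() const { return _length; }
};
```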
19 changes: 9 additions & 10 deletions modules/video_coding/frame_buffer2.cc
@@ -117,8 +117,7 @@ FrameBuffer::ReturnReason FrameBuffer::NextFrame(

next_frame_it_ = frame_it;
if (frame->RenderTime() == -1)
- frame->SetRenderTime(
- timing_->RenderTimeMs(frame->Timestamp(), now_ms));
+ frame->SetRenderTime(timing_->RenderTimeMs(frame->timestamp, now_ms));
wait_ms = timing_->MaxWaitingTime(frame->RenderTime(), now_ms);

// This will cause the frame buffer to prefer high framerate rather
@@ -147,7 +146,7 @@ FrameBuffer::ReturnReason FrameBuffer::NextFrame(
if (!frame->delayed_by_retransmission()) {
int64_t frame_delay;

- if (inter_frame_delay_.CalculateDelay(frame->Timestamp(), &frame_delay,
+ if (inter_frame_delay_.CalculateDelay(frame->timestamp, &frame_delay,
frame->ReceivedTime())) {
jitter_estimator_->UpdateEstimate(frame_delay, frame->size());
}
@@ -164,7 +163,7 @@ FrameBuffer::ReturnReason FrameBuffer::NextFrame(
if (HasBadRenderTiming(*frame, now_ms)) {
jitter_estimator_->Reset();
timing_->Reset();
- frame->SetRenderTime(timing_->RenderTimeMs(frame->Timestamp(), now_ms));
+ frame->SetRenderTime(timing_->RenderTimeMs(frame->timestamp, now_ms));
}

UpdateJitterDelay();
@@ -178,17 +177,17 @@ FrameBuffer::ReturnReason FrameBuffer::NextFrame(
const VideoLayerFrameId& frame_key = next_frame_it_->first;

const bool frame_is_higher_spatial_layer_of_last_decoded_frame =
- last_decoded_frame_timestamp_ == frame->Timestamp() &&
+ last_decoded_frame_timestamp_ == frame->timestamp &&
last_decoded_frame_key.picture_id == frame_key.picture_id &&
last_decoded_frame_key.spatial_layer < frame_key.spatial_layer;

- if (AheadOrAt(last_decoded_frame_timestamp_, frame->Timestamp()) &&
+ if (AheadOrAt(last_decoded_frame_timestamp_, frame->timestamp) &&
!frame_is_higher_spatial_layer_of_last_decoded_frame) {
// TODO(brandtr): Consider clearing the entire buffer when we hit
// these conditions.
RTC_LOG(LS_WARNING)
<< "Frame with (timestamp:picture_id:spatial_id) ("
- << frame->Timestamp() << ":" << frame->id.picture_id << ":"
+ << frame->timestamp << ":" << frame->id.picture_id << ":"
<< static_cast<int>(frame->id.spatial_layer) << ")"
<< " sent to decoder after frame with"
<< " (timestamp:picture_id:spatial_id) ("
@@ -199,7 +198,7 @@ FrameBuffer::ReturnReason FrameBuffer::NextFrame(
}

AdvanceLastDecodedFrame(next_frame_it_);
- last_decoded_frame_timestamp_ = frame->Timestamp();
+ last_decoded_frame_timestamp_ = frame->timestamp;
*frame_out = std::move(frame);
return kFrameFound;
}
@@ -298,7 +297,7 @@ void FrameBuffer::UpdatePlayoutDelays(const EncodedFrame& frame) {
timing_->set_max_playout_delay(playout_delay.max_ms);

if (!frame.delayed_by_retransmission())
- timing_->IncomingTimestamp(frame.Timestamp(), frame.ReceivedTime());
+ timing_->IncomingTimestamp(frame.timestamp, frame.ReceivedTime());
}

int64_t FrameBuffer::InsertFrame(std::unique_ptr<EncodedFrame> frame) {
@@ -344,7 +343,7 @@ int64_t FrameBuffer::InsertFrame(std::unique_ptr<EncodedFrame> frame) {

if (last_decoded_frame_it_ != frames_.end() &&
id <= last_decoded_frame_it_->first) {
- if (AheadOf(frame->Timestamp(), last_decoded_frame_timestamp_) &&
+ if (AheadOf(frame->timestamp, last_decoded_frame_timestamp_) &&
frame->is_keyframe()) {
// If this frame has a newer timestamp but an earlier picture id then we
// assume there has been a jump in the picture id due to some encoder
6 changes: 4 additions & 2 deletions modules/video_coding/frame_buffer2_unittest.cc
@@ -90,6 +90,8 @@ class FrameObjectFake : public EncodedFrame {
public:
bool GetBitstream(uint8_t* destination) const override { return true; }

+ uint32_t Timestamp() const override { return timestamp; }

int64_t ReceivedTime() const override { return 0; }

int64_t RenderTime() const override { return _renderTimeMs; }
@@ -163,7 +165,7 @@ class TestFrameBuffer2 : public ::testing::Test {
std::unique_ptr<FrameObjectFake> frame(new FrameObjectFake());
frame->id.picture_id = picture_id;
frame->id.spatial_layer = spatial_layer;
- frame->SetTimestamp(ts_ms * 90);
+ frame->timestamp = ts_ms * 90;
frame->num_references = references.size();
frame->inter_layer_predicted = inter_layer_predicted;
for (size_t r = 0; r < references.size(); ++r)
@@ -518,7 +520,7 @@ TEST_F(TestFrameBuffer2, StatsCallback) {
frame->SetSize(kFrameSize);
frame->id.picture_id = pid;
frame->id.spatial_layer = 0;
- frame->SetTimestamp(ts);
+ frame->timestamp = ts;
frame->num_references = 0;
frame->inter_layer_predicted = false;

7 changes: 6 additions & 1 deletion modules/video_coding/frame_object.cc
@@ -26,6 +26,7 @@ RtpFrameObject::RtpFrameObject(PacketBuffer* packet_buffer,
: packet_buffer_(packet_buffer),
first_seq_num_(first_seq_num),
last_seq_num_(last_seq_num),
+ timestamp_(0),
received_time_(received_time),
times_nacked_(times_nacked) {
VCMPacket* first_packet = packet_buffer_->GetPacket(first_seq_num);
@@ -68,7 +69,7 @@ RtpFrameObject::RtpFrameObject(PacketBuffer* packet_buffer,
_encodedHeight = first_packet->height;

// EncodedFrame members
- SetTimestamp(first_packet->timestamp);
+ timestamp = first_packet->timestamp;

VCMPacket* last_packet = packet_buffer_->GetPacket(last_seq_num);
RTC_CHECK(last_packet);
@@ -139,6 +140,10 @@ bool RtpFrameObject::GetBitstream(uint8_t* destination) const {
return packet_buffer_->GetBitstream(*this, destination);
}

+ uint32_t RtpFrameObject::Timestamp() const {
+ return timestamp_;
+ }

int64_t RtpFrameObject::ReceivedTime() const {
return received_time_;
}
2 changes: 2 additions & 0 deletions modules/video_coding/frame_object.h
@@ -37,6 +37,7 @@ class RtpFrameObject : public EncodedFrame {
enum FrameType frame_type() const;
VideoCodecType codec_type() const;
bool GetBitstream(uint8_t* destination) const override;
+ uint32_t Timestamp() const override;
int64_t ReceivedTime() const override;
int64_t RenderTime() const override;
bool delayed_by_retransmission() const override;
@@ -48,6 +49,7 @@
VideoCodecType codec_type_;
uint16_t first_seq_num_;
uint16_t last_seq_num_;
+ uint32_t timestamp_;
int64_t received_time_;

// Equal to times nacked of the packet with the highet times nacked
8 changes: 4 additions & 4 deletions modules/video_coding/generic_decoder.cc
@@ -225,7 +225,7 @@ int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame, int64_t nowMs) {
} else {
_frameInfos[_nextFrameInfoIdx].content_type = _last_keyframe_content_type;
}
- _callback->Map(frame.Timestamp(), &_frameInfos[_nextFrameInfoIdx]);
+ _callback->Map(frame.TimeStamp(), &_frameInfos[_nextFrameInfoIdx]);

_nextFrameInfoIdx = (_nextFrameInfoIdx + 1) % kDecoderFrameMemoryLength;
int32_t ret = decoder_->Decode(frame.EncodedImage(), frame.MissingFrame(),
@@ -234,13 +234,13 @@ int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame, int64_t nowMs) {
_callback->OnDecoderImplementationName(decoder_->ImplementationName());
if (ret < WEBRTC_VIDEO_CODEC_OK) {
RTC_LOG(LS_WARNING) << "Failed to decode frame with timestamp "
- << frame.Timestamp() << ", error code: " << ret;
- _callback->Pop(frame.Timestamp());
+ << frame.TimeStamp() << ", error code: " << ret;
+ _callback->Pop(frame.TimeStamp());
return ret;
} else if (ret == WEBRTC_VIDEO_CODEC_NO_OUTPUT ||
ret == WEBRTC_VIDEO_CODEC_REQUEST_SLI) {
// No output
- _callback->Pop(frame.Timestamp());
+ _callback->Pop(frame.TimeStamp());
}
return ret;
}
30 changes: 15 additions & 15 deletions modules/video_coding/jitter_buffer.cc
@@ -54,7 +54,7 @@ bool HasNonEmptyState(FrameListPair pair) {
}

void FrameList::InsertFrame(VCMFrameBuffer* frame) {
- insert(rbegin().base(), FrameListPair(frame->Timestamp(), frame));
+ insert(rbegin().base(), FrameListPair(frame->TimeStamp(), frame));
}

VCMFrameBuffer* FrameList::PopFrame(uint32_t timestamp) {
@@ -110,7 +110,7 @@ void FrameList::CleanUpOldOrEmptyFrames(VCMDecodingState* decoding_state,
}
free_frames->push_back(oldest_frame);
TRACE_EVENT_INSTANT1("webrtc", "JB::OldOrEmptyFrameDropped", "timestamp",
- oldest_frame->Timestamp());
+ oldest_frame->TimeStamp());
erase(begin());
}
}
@@ -206,7 +206,7 @@ void Vp9SsMap::UpdateFrames(FrameList* frames) {
continue;
}
SsMap::iterator ss_it;
- if (Find(frame_it.second->Timestamp(), &ss_it)) {
+ if (Find(frame_it.second->TimeStamp(), &ss_it)) {
if (gof_idx >= ss_it->second.num_frames_in_gof) {
continue; // Assume corresponding SS not yet received.
}
@@ -522,7 +522,7 @@ bool VCMJitterBuffer::NextMaybeIncompleteTimestamp(uint32_t* timestamp) {
}
}

- *timestamp = oldest_frame->Timestamp();
+ *timestamp = oldest_frame->TimeStamp();
return true;
}

@@ -558,7 +558,7 @@ VCMEncodedFrame* VCMJitterBuffer::ExtractAndSetDecode(uint32_t timestamp) {
// Wait for this one to get complete.
waiting_for_completion_.frame_size = frame->Length();
waiting_for_completion_.latest_packet_time = frame->LatestPacketTimeMs();
- waiting_for_completion_.timestamp = frame->Timestamp();
+ waiting_for_completion_.timestamp = frame->TimeStamp();
}
}

@@ -709,8 +709,8 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
frame->InsertPacket(packet, now_ms, decode_error_mode_, frame_data);

if (previous_state != kStateComplete) {
- TRACE_EVENT_ASYNC_BEGIN1("webrtc", "Video", frame->Timestamp(), "timestamp",
- frame->Timestamp());
+ TRACE_EVENT_ASYNC_BEGIN1("webrtc", "Video", frame->TimeStamp(), "timestamp",
+ frame->TimeStamp());
}

if (buffer_state > 0) {
@@ -825,7 +825,7 @@ bool VCMJitterBuffer::IsContinuous(const VCMFrameBuffer& frame) const {
for (FrameList::const_iterator it = decodable_frames_.begin();
it != decodable_frames_.end(); ++it) {
VCMFrameBuffer* decodable_frame = it->second;
- if (IsNewerTimestamp(decodable_frame->Timestamp(), frame.Timestamp())) {
+ if (IsNewerTimestamp(decodable_frame->TimeStamp(), frame.TimeStamp())) {
break;
}
decoding_state.SetState(decodable_frame);
@@ -859,7 +859,7 @@ void VCMJitterBuffer::FindAndInsertContinuousFramesWithState(
it != incomplete_frames_.end();) {
VCMFrameBuffer* frame = it->second;
if (IsNewerTimestamp(original_decoded_state.time_stamp(),
- frame->Timestamp())) {
+ frame->TimeStamp())) {
++it;
continue;
}
@@ -941,11 +941,11 @@ int VCMJitterBuffer::NonContinuousOrIncompleteDuration() {
if (incomplete_frames_.empty()) {
return 0;
}
- uint32_t start_timestamp = incomplete_frames_.Front()->Timestamp();
+ uint32_t start_timestamp = incomplete_frames_.Front()->TimeStamp();
if (!decodable_frames_.empty()) {
- start_timestamp = decodable_frames_.Back()->Timestamp();
+ start_timestamp = decodable_frames_.Back()->TimeStamp();
}
- return incomplete_frames_.Back()->Timestamp() - start_timestamp;
+ return incomplete_frames_.Back()->TimeStamp() - start_timestamp;
}

uint16_t VCMJitterBuffer::EstimatedLowSequenceNumber(
@@ -1178,10 +1178,10 @@ void VCMJitterBuffer::CountFrame(const VCMFrameBuffer& frame) {
incoming_frame_count_++;

if (frame.FrameType() == kVideoFrameKey) {
- TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", frame.Timestamp(),
+ TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", frame.TimeStamp(),
"KeyComplete");
} else {
- TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", frame.Timestamp(),
+ TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", frame.TimeStamp(),
"DeltaComplete");
}

@@ -1257,7 +1257,7 @@ void VCMJitterBuffer::UpdateJitterEstimate(const VCMFrameBuffer& frame,
}
// No retransmitted frames should be a part of the jitter
// estimate.
- UpdateJitterEstimate(frame.LatestPacketTimeMs(), frame.Timestamp(),
+ UpdateJitterEstimate(frame.LatestPacketTimeMs(), frame.TimeStamp(),
frame.Length(), incomplete_frame);
}
