Do not queue empty input buffers.

Follow-up to a comment on
ac8e418f3d
Buffers that are worth passing to the sample/passthrough pipeline
should either contain data or carry the end-of-input flag. Otherwise,
passing these buffers along is unnecessary and may even cause the
decoder to allocate a new input buffer, which is wasteful.

PiperOrigin-RevId: 411060709
This commit is contained in:
hschlueter 2021-11-19 15:53:52 +00:00 committed by Ian Baker
parent 30caac6fc3
commit 51901ad568
7 changed files with 78 additions and 96 deletions

View file

@@ -298,6 +298,12 @@ sample:
size = 1193
isKeyFrame = false
presentationTimeUs = 734083
sample:
trackIndex = 0
dataHashCode = 820561200
size = 1252
isKeyFrame = true
presentationTimeUs = 201521
sample:
trackIndex = 1
dataHashCode = -1554795381

View file

@@ -122,7 +122,7 @@ import java.nio.ByteBuffer;
* Attempts to write a sample to the muxer.
*
* @param trackType The {@link C.TrackType track type} of the sample.
* @param data The sample to write, or {@code null} if the sample is empty.
* @param data The sample to write.
* @param isKeyFrame Whether the sample is a key frame.
* @param presentationTimeUs The presentation time of the sample in microseconds.
* @return Whether the sample was successfully written. This is {@code false} if the muxer hasn't
@@ -133,10 +133,7 @@ import java.nio.ByteBuffer;
* track of the given track type.
*/
public boolean writeSample(
@C.TrackType int trackType,
@Nullable ByteBuffer data,
boolean isKeyFrame,
long presentationTimeUs) {
@C.TrackType int trackType, ByteBuffer data, boolean isKeyFrame, long presentationTimeUs) {
int trackIndex = trackTypeToIndex.get(trackType, /* valueIfKeyNotFound= */ C.INDEX_UNSET);
checkState(
trackIndex != C.INDEX_UNSET,
@@ -144,8 +141,6 @@ import java.nio.ByteBuffer;
if (!canWriteSampleOfType(trackType)) {
return false;
} else if (data == null) {
return true;
}
muxer.writeSampleData(trackIndex, data, isKeyFrame, presentationTimeUs);

View file

@@ -1,31 +0,0 @@
/*
 * Copyright 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package androidx.media3.transformer;
import androidx.media3.decoder.DecoderInputBuffer;
/**
 * A sample transformer for a given track.
 *
 * <p>Implementations mutate the passed buffer in place; there is no return value. Dropping a
 * sample is signaled by clearing {@link DecoderInputBuffer#data} (setting it to {@code null}).
 */
/* package */ interface SampleTransformer {
/**
 * Transforms the data and metadata of the sample contained in {@code buffer}.
 *
 * @param buffer The sample to transform. If the sample {@link DecoderInputBuffer#data data} is
 * {@code null} after the execution of this method, the sample must be discarded.
 */
void transformSample(DecoderInputBuffer buffer);
}

View file

@@ -18,7 +18,6 @@ package androidx.media3.transformer;
import static androidx.media3.common.util.Assertions.checkArgument;
import static androidx.media3.common.util.Assertions.checkState;
import static androidx.media3.common.util.Util.castNonNull;
import static androidx.media3.extractor.NalUnitUtil.NAL_START_CODE;
import static java.lang.Math.min;
@@ -40,7 +39,7 @@ import java.util.List;
import org.checkerframework.checker.nullness.qual.RequiresNonNull;
/**
* {@link SampleTransformer} that flattens SEF slow motion video samples.
* Sample transformer that flattens SEF slow motion video samples.
*
* <p>Such samples follow the ITU-T Recommendation H.264 with temporal SVC.
*
@@ -50,7 +49,7 @@ import org.checkerframework.checker.nullness.qual.RequiresNonNull;
* <p>The mathematical formulas used in this class are explained in [Internal ref:
* http://go/exoplayer-sef-slomo-video-flattening].
*/
/* package */ final class SefSlowMotionVideoSampleTransformer implements SampleTransformer {
/* package */ final class SefSlowMotionFlattener {
/**
* The frame rate of SEF slow motion videos, in fps.
@@ -109,7 +108,7 @@ import org.checkerframework.checker.nullness.qual.RequiresNonNull;
*/
private long frameTimeDeltaUs;
public SefSlowMotionVideoSampleTransformer(Format format) {
public SefSlowMotionFlattener(Format format) {
scratch = new byte[NAL_START_CODE_LENGTH];
MetadataInfo metadataInfo = getMetadataInfo(format.metadata);
slowMotionData = metadataInfo.slowMotionData;
@@ -130,14 +129,20 @@ import org.checkerframework.checker.nullness.qual.RequiresNonNull;
}
}
@Override
public void transformSample(DecoderInputBuffer buffer) {
/**
* Applies slow motion flattening by either indicating that the buffer's data should be dropped or
* transforming it in place.
*
* @return Whether the buffer should be dropped.
*/
@RequiresNonNull("#1.data")
public boolean dropOrTransformSample(DecoderInputBuffer buffer) {
if (slowMotionData == null) {
// The input is not an SEF slow motion video.
return;
return false;
}
ByteBuffer data = castNonNull(buffer.data);
ByteBuffer data = buffer.data;
int originalPosition = data.position();
data.position(originalPosition + NAL_START_CODE_LENGTH);
data.get(scratch, 0, 4); // Read nal_unit_header_svc_extension.
@@ -148,14 +153,14 @@ import org.checkerframework.checker.nullness.qual.RequiresNonNull;
"Missing SVC extension prefix NAL unit.");
int layer = (scratch[3] & 0xFF) >> 5;
boolean shouldKeepFrame = processCurrentFrame(layer, buffer.timeUs);
// Update buffer timestamp regardless of whether the frame is dropped because the buffer might
// still be passed to a decoder if it contains an end of stream flag.
// Update the timestamp regardless of whether the buffer is dropped as the timestamp may be
// reused for the empty end-of-stream buffer.
buffer.timeUs = getCurrentFrameOutputTimeUs(/* inputTimeUs= */ buffer.timeUs);
if (shouldKeepFrame) {
skipToNextNalUnit(data); // Skip over prefix_nal_unit_svc.
} else {
buffer.data = null;
return false;
}
return true;
}
/**

View file

@@ -17,6 +17,7 @@
package androidx.media3.transformer;
import static androidx.media3.common.util.Assertions.checkNotNull;
import static androidx.media3.common.util.Assertions.checkStateNotNull;
import static androidx.media3.exoplayer.source.SampleStream.FLAG_REQUIRE_FORMAT;
import androidx.annotation.Nullable;
@@ -127,7 +128,7 @@ import org.checkerframework.checker.nullness.qual.RequiresNonNull;
}
if (!muxerWrapper.writeSample(
getTrackType(),
samplePipelineOutputBuffer.data,
checkStateNotNull(samplePipelineOutputBuffer.data),
/* isKeyFrame= */ true,
samplePipelineOutputBuffer.timeUs)) {
return false;
@@ -152,11 +153,15 @@ import org.checkerframework.checker.nullness.qual.RequiresNonNull;
int result = readSource(getFormatHolder(), samplePipelineInputBuffer, /* readFlags= */ 0);
switch (result) {
case C.RESULT_BUFFER_READ:
if (samplePipelineInputBuffer.isEndOfStream()) {
samplePipeline.queueInputBuffer();
return false;
}
mediaClock.updateTimeForTrackType(getTrackType(), samplePipelineInputBuffer.timeUs);
samplePipelineInputBuffer.timeUs -= streamOffsetUs;
samplePipelineInputBuffer.flip();
samplePipeline.queueInputBuffer();
return !samplePipelineInputBuffer.isEndOfStream();
return true;
case C.RESULT_FORMAT_READ:
throw new IllegalStateException("Format changes are not supported.");
case C.RESULT_NOTHING_READ:

View file

@@ -17,6 +17,7 @@
package androidx.media3.transformer;
import static androidx.media3.common.util.Assertions.checkNotNull;
import static androidx.media3.common.util.Assertions.checkStateNotNull;
import static androidx.media3.exoplayer.source.SampleStream.FLAG_REQUIRE_FORMAT;
import android.content.Context;
@@ -28,6 +29,7 @@ import androidx.media3.decoder.DecoderInputBuffer;
import androidx.media3.exoplayer.ExoPlaybackException;
import androidx.media3.exoplayer.FormatHolder;
import androidx.media3.exoplayer.source.SampleStream.ReadDataResult;
import java.nio.ByteBuffer;
import org.checkerframework.checker.nullness.qual.EnsuresNonNullIf;
import org.checkerframework.checker.nullness.qual.MonotonicNonNull;
import org.checkerframework.checker.nullness.qual.RequiresNonNull;
@@ -40,7 +42,7 @@ import org.checkerframework.checker.nullness.qual.RequiresNonNull;
private final Context context;
private final DecoderInputBuffer decoderInputBuffer;
private @MonotonicNonNull SampleTransformer slowMotionSampleTransformer;
private @MonotonicNonNull SefSlowMotionFlattener sefSlowMotionFlattener;
private @MonotonicNonNull SamplePipeline samplePipeline;
private boolean muxerWrapperTrackAdded;
private boolean muxerWrapperTrackEnded;
@@ -107,7 +109,7 @@ import org.checkerframework.checker.nullness.qual.RequiresNonNull;
samplePipeline = new PassthroughSamplePipeline(decoderInputFormat);
}
if (transformation.flattenForSlowMotion) {
slowMotionSampleTransformer = new SefSlowMotionVideoSampleTransformer(decoderInputFormat);
sefSlowMotionFlattener = new SefSlowMotionFlattener(decoderInputFormat);
}
return true;
}
@@ -141,7 +143,7 @@ import org.checkerframework.checker.nullness.qual.RequiresNonNull;
if (!muxerWrapper.writeSample(
getTrackType(),
samplePipelineOutputBuffer.data,
checkStateNotNull(samplePipelineOutputBuffer.data),
samplePipelineOutputBuffer.isKeyFrame(),
samplePipelineOutputBuffer.timeUs)) {
return false;
@@ -172,17 +174,24 @@ import org.checkerframework.checker.nullness.qual.RequiresNonNull;
int result = readSource(getFormatHolder(), samplePipelineInputBuffer, /* readFlags= */ 0);
switch (result) {
case C.RESULT_BUFFER_READ:
if (samplePipelineInputBuffer.data != null
&& samplePipelineInputBuffer.data.position() > 0) {
mediaClock.updateTimeForTrackType(getTrackType(), samplePipelineInputBuffer.timeUs);
samplePipelineInputBuffer.timeUs -= streamOffsetUs;
samplePipelineInputBuffer.flip();
if (slowMotionSampleTransformer != null) {
slowMotionSampleTransformer.transformSample(samplePipelineInputBuffer);
if (samplePipelineInputBuffer.isEndOfStream()) {
samplePipeline.queueInputBuffer();
return false;
}
mediaClock.updateTimeForTrackType(getTrackType(), samplePipelineInputBuffer.timeUs);
samplePipelineInputBuffer.timeUs -= streamOffsetUs;
samplePipelineInputBuffer.flip();
if (sefSlowMotionFlattener != null) {
ByteBuffer data = checkStateNotNull(samplePipelineInputBuffer.data);
boolean shouldDropSample =
sefSlowMotionFlattener.dropOrTransformSample(samplePipelineInputBuffer);
if (shouldDropSample) {
data.clear();
return true;
}
}
samplePipeline.queueInputBuffer();
return !samplePipelineInputBuffer.isEndOfStream();
return true;
case C.RESULT_FORMAT_READ:
throw new IllegalStateException("Format changes are not supported.");
case C.RESULT_NOTHING_READ:

View file

@@ -16,7 +16,7 @@
package androidx.media3.transformer;
import static androidx.media3.transformer.SefSlowMotionVideoSampleTransformer.INPUT_FRAME_RATE;
import static androidx.media3.transformer.SefSlowMotionFlattener.INPUT_FRAME_RATE;
import static com.google.common.truth.Truth.assertThat;
import androidx.media3.common.C;
@@ -32,9 +32,9 @@ import java.util.List;
import org.junit.Test;
import org.junit.runner.RunWith;
/** Unit tests for {@link SefSlowMotionVideoSampleTransformer}. */
/** Unit tests for {@link SefSlowMotionFlattener}. */
@RunWith(AndroidJUnit4.class)
public class SefSlowMotionVideoSampleTransformerTest {
public class SefSlowMotionFlattenerTest {
/**
* Sequence of temporal SVC layers in an SEF slow motion video track with a maximum layer of 3.
@@ -56,10 +56,9 @@ public class SefSlowMotionVideoSampleTransformerTest {
createSefSlowMotionFormat(
captureFrameRate, inputMaxLayer, Arrays.asList(segment1, segment2));
SefSlowMotionVideoSampleTransformer sampleTransformer =
new SefSlowMotionVideoSampleTransformer(format);
SefSlowMotionFlattener sefSlowMotionFlattener = new SefSlowMotionFlattener(format);
List<Integer> outputLayers =
getKeptOutputLayers(sampleTransformer, LAYER_SEQUENCE_MAX_LAYER_THREE, frameCount);
getKeptOutputLayers(sefSlowMotionFlattener, LAYER_SEQUENCE_MAX_LAYER_THREE, frameCount);
List<Integer> expectedLayers = Arrays.asList(0, 0, 1, 0, 0, 1, 2, 3, 0, 3, 2, 3, 1, 3, 0);
assertThat(outputLayers).isEqualTo(expectedLayers);
@@ -78,10 +77,9 @@ public class SefSlowMotionVideoSampleTransformerTest {
createSefSlowMotionFormat(
captureFrameRate, inputMaxLayer, Arrays.asList(segment1, segment2));
SefSlowMotionVideoSampleTransformer sampleTransformer =
new SefSlowMotionVideoSampleTransformer(format);
SefSlowMotionFlattener sefSlowMotionFlattener = new SefSlowMotionFlattener(format);
List<Integer> outputLayers =
getKeptOutputLayers(sampleTransformer, LAYER_SEQUENCE_MAX_LAYER_THREE, frameCount);
getKeptOutputLayers(sefSlowMotionFlattener, LAYER_SEQUENCE_MAX_LAYER_THREE, frameCount);
List<Integer> expectedLayers =
Arrays.asList(0, 1, 0, 3, 2, 3, 1, 3, 2, 3, 0, 1, 0, 1, 2, 3, 0, 3, 2, 3, 1, 3, 0, 1);
@@ -101,10 +99,9 @@ public class SefSlowMotionVideoSampleTransformerTest {
createSefSlowMotionFormat(
captureFrameRate, inputMaxLayer, Arrays.asList(segment1, segment2));
SefSlowMotionVideoSampleTransformer sampleTransformer =
new SefSlowMotionVideoSampleTransformer(format);
SefSlowMotionFlattener sefSlowMotionFlattener = new SefSlowMotionFlattener(format);
List<Integer> outputLayers =
getKeptOutputLayers(sampleTransformer, LAYER_SEQUENCE_MAX_LAYER_THREE, frameCount);
getKeptOutputLayers(sefSlowMotionFlattener, LAYER_SEQUENCE_MAX_LAYER_THREE, frameCount);
List<Integer> expectedLayers = Arrays.asList(0, 0, 1, 0, 2, 3, 1, 3, 0);
assertThat(outputLayers).isEqualTo(expectedLayers);
@@ -129,10 +126,9 @@ public class SefSlowMotionVideoSampleTransformerTest {
inputMaxLayer,
Arrays.asList(segmentWithNoFrame1, segmentWithNoFrame2, segmentWithFrame));
SefSlowMotionVideoSampleTransformer sampleTransformer =
new SefSlowMotionVideoSampleTransformer(format);
SefSlowMotionFlattener sefSlowMotionFlattener = new SefSlowMotionFlattener(format);
List<Integer> outputLayers =
getKeptOutputLayers(sampleTransformer, LAYER_SEQUENCE_MAX_LAYER_THREE, frameCount);
getKeptOutputLayers(sefSlowMotionFlattener, LAYER_SEQUENCE_MAX_LAYER_THREE, frameCount);
List<Integer> expectedLayers = Arrays.asList(0, 0, 1);
assertThat(outputLayers).isEqualTo(expectedLayers);
@@ -153,10 +149,9 @@ public class SefSlowMotionVideoSampleTransformerTest {
createSefSlowMotionFormat(
captureFrameRate, inputMaxLayer, Arrays.asList(segment1, segment2));
SefSlowMotionVideoSampleTransformer sampleTransformer =
new SefSlowMotionVideoSampleTransformer(format);
SefSlowMotionFlattener sefSlowMotionFlattener = new SefSlowMotionFlattener(format);
List<Long> outputTimesUs =
getOutputTimesUs(sampleTransformer, LAYER_SEQUENCE_MAX_LAYER_THREE, frameCount);
getOutputTimesUs(sefSlowMotionFlattener, LAYER_SEQUENCE_MAX_LAYER_THREE, frameCount);
// Test frame inside segment.
assertThat(outputTimesUs.get(9))
@@ -181,10 +176,9 @@ public class SefSlowMotionVideoSampleTransformerTest {
createSefSlowMotionFormat(
captureFrameRate, inputMaxLayer, Arrays.asList(segment1, segment2));
SefSlowMotionVideoSampleTransformer sampleTransformer =
new SefSlowMotionVideoSampleTransformer(format);
SefSlowMotionFlattener sefSlowMotionFlattener = new SefSlowMotionFlattener(format);
List<Long> outputTimesUs =
getOutputTimesUs(sampleTransformer, LAYER_SEQUENCE_MAX_LAYER_THREE, frameCount);
getOutputTimesUs(sefSlowMotionFlattener, LAYER_SEQUENCE_MAX_LAYER_THREE, frameCount);
// Test frame inside segment.
assertThat(outputTimesUs.get(9))
@@ -209,10 +203,9 @@ public class SefSlowMotionVideoSampleTransformerTest {
createSefSlowMotionFormat(
captureFrameRate, inputMaxLayer, Arrays.asList(segment1, segment2));
SefSlowMotionVideoSampleTransformer sampleTransformer =
new SefSlowMotionVideoSampleTransformer(format);
SefSlowMotionFlattener sefSlowMotionFlattener = new SefSlowMotionFlattener(format);
List<Long> outputTimesUs =
getOutputTimesUs(sampleTransformer, LAYER_SEQUENCE_MAX_LAYER_THREE, frameCount);
getOutputTimesUs(sefSlowMotionFlattener, LAYER_SEQUENCE_MAX_LAYER_THREE, frameCount);
// Test frame inside second segment.
assertThat(outputTimesUs.get(9)).isEqualTo(136_250);
@@ -249,20 +242,20 @@ public class SefSlowMotionVideoSampleTransformerTest {
/**
* Returns a list containing the temporal SVC layers of the frames that should be kept according
* to {@link SefSlowMotionVideoSampleTransformer#processCurrentFrame(int, long)}.
* to {@link SefSlowMotionFlattener#processCurrentFrame(int, long)}.
*
* @param sampleTransformer The {@link SefSlowMotionVideoSampleTransformer}.
* @param sefSlowMotionFlattener The {@link SefSlowMotionFlattener}.
* @param layerSequence The sequence of layer values in the input.
* @param frameCount The number of video frames in the input.
* @return The output layers.
*/
private static List<Integer> getKeptOutputLayers(
SefSlowMotionVideoSampleTransformer sampleTransformer, int[] layerSequence, int frameCount) {
SefSlowMotionFlattener sefSlowMotionFlattener, int[] layerSequence, int frameCount) {
List<Integer> outputLayers = new ArrayList<>();
for (int i = 0; i < frameCount; i++) {
int layer = layerSequence[i % layerSequence.length];
long timeUs = i * C.MICROS_PER_SECOND / INPUT_FRAME_RATE;
if (sampleTransformer.processCurrentFrame(layer, timeUs)) {
if (sefSlowMotionFlattener.processCurrentFrame(layer, timeUs)) {
outputLayers.add(layer);
}
}
@@ -271,24 +264,24 @@ public class SefSlowMotionVideoSampleTransformerTest {
/**
* Returns a list containing the frame output times obtained using {@link
* SefSlowMotionVideoSampleTransformer#getCurrentFrameOutputTimeUs(long)}.
* SefSlowMotionFlattener#getCurrentFrameOutputTimeUs(long)}.
*
* <p>The output contains the output times for all the input frames, regardless of whether they
* should be kept or not.
*
* @param sampleTransformer The {@link SefSlowMotionVideoSampleTransformer}.
* @param sefSlowMotionFlattener The {@link SefSlowMotionFlattener}.
* @param layerSequence The sequence of layer values in the input.
* @param frameCount The number of video frames in the input.
* @return The frame output times, in microseconds.
*/
private static List<Long> getOutputTimesUs(
SefSlowMotionVideoSampleTransformer sampleTransformer, int[] layerSequence, int frameCount) {
SefSlowMotionFlattener sefSlowMotionFlattener, int[] layerSequence, int frameCount) {
List<Long> outputTimesUs = new ArrayList<>();
for (int i = 0; i < frameCount; i++) {
int layer = layerSequence[i % layerSequence.length];
long inputTimeUs = i * C.MICROS_PER_SECOND / INPUT_FRAME_RATE;
sampleTransformer.processCurrentFrame(layer, inputTimeUs);
outputTimesUs.add(sampleTransformer.getCurrentFrameOutputTimeUs(inputTimeUs));
sefSlowMotionFlattener.processCurrentFrame(layer, inputTimeUs);
outputTimesUs.add(sefSlowMotionFlattener.getCurrentFrameOutputTimeUs(inputTimeUs));
}
return outputTimesUs;
}