Ensure EOS is queued after processing generated silence with effects.

When silence was generated and fed into an AudioProcessingPipeline, the
audio stage never queued EOS downstream.

Relatedly, when silence followed an item with audio, the silence was
added to the SilentAudioGenerator before the media item reconfiguration
occurred. If the silence had effects, the AudioProcessingPipeline (APP)
would be flushed after the silence queued EOS, resetting APP.isEnded
back to false, so the AudioGraph never ended.

Regression tests reproduce failure without fix, but pass with it.

PiperOrigin-RevId: 550853714
This commit is contained in:
samrobinson 2023-07-25 13:22:35 +01:00 committed by Rohit Singh
parent 35bee3299c
commit 357c458028
10 changed files with 14601 additions and 67 deletions

View file

@ -0,0 +1,476 @@
format 0:
id = 1
sampleMimeType = video/avc
codecs = avc1.64001F
maxInputSize = 36722
width = 1080
height = 720
frameRate = 29.970028
metadata = entries=[TSSE: description=null: values=[Lavf56.1.0], xyz: latitude=40.68, longitude=-74.5, Mp4Timestamp: creation time=3547558895, modification time=3547558895, timescale=1000]
initializationData:
data = length 29, hash 4746B5D9
data = length 10, hash 7A0D0F2B
container metadata = entries=[TSSE: description=null: values=[Lavf56.1.0], xyz: latitude=40.68, longitude=-74.5, Mp4Timestamp: creation time=3547558895, modification time=3547558895, timescale=1000]
format 1:
averageBitrate = 131072
sampleMimeType = audio/mp4a-latm
channelCount = 2
sampleRate = 48000
pcmEncoding = 2
sample:
trackIndex = 0
dataHashCode = -770308242
size = 36692
isKeyFrame = true
presentationTimeUs = 0
sample:
trackIndex = 0
dataHashCode = -732087136
size = 5312
isKeyFrame = false
presentationTimeUs = 66733
sample:
trackIndex = 0
dataHashCode = 468156717
size = 599
isKeyFrame = false
presentationTimeUs = 33366
sample:
trackIndex = 0
dataHashCode = 1150349584
size = 7735
isKeyFrame = false
presentationTimeUs = 200200
sample:
trackIndex = 0
dataHashCode = 1443582006
size = 987
isKeyFrame = false
presentationTimeUs = 133466
sample:
trackIndex = 0
dataHashCode = -310585145
size = 673
isKeyFrame = false
presentationTimeUs = 100100
sample:
trackIndex = 0
dataHashCode = 807460688
size = 523
isKeyFrame = false
presentationTimeUs = 166833
sample:
trackIndex = 0
dataHashCode = 1936487090
size = 6061
isKeyFrame = false
presentationTimeUs = 333666
sample:
trackIndex = 0
dataHashCode = -32297181
size = 992
isKeyFrame = false
presentationTimeUs = 266933
sample:
trackIndex = 0
dataHashCode = 1529616406
size = 623
isKeyFrame = false
presentationTimeUs = 233566
sample:
trackIndex = 0
dataHashCode = 1949198785
size = 421
isKeyFrame = false
presentationTimeUs = 300300
sample:
trackIndex = 0
dataHashCode = -147880287
size = 4899
isKeyFrame = false
presentationTimeUs = 433766
sample:
trackIndex = 0
dataHashCode = 1369083472
size = 568
isKeyFrame = false
presentationTimeUs = 400400
sample:
trackIndex = 0
dataHashCode = 965782073
size = 620
isKeyFrame = false
presentationTimeUs = 367033
sample:
trackIndex = 0
dataHashCode = -261176150
size = 5450
isKeyFrame = false
presentationTimeUs = 567233
sample:
trackIndex = 0
dataHashCode = -1830836678
size = 1051
isKeyFrame = false
presentationTimeUs = 500500
sample:
trackIndex = 0
dataHashCode = 1767407540
size = 874
isKeyFrame = false
presentationTimeUs = 467133
sample:
trackIndex = 0
dataHashCode = 918440283
size = 781
isKeyFrame = false
presentationTimeUs = 533866
sample:
trackIndex = 0
dataHashCode = -1408463661
size = 4725
isKeyFrame = false
presentationTimeUs = 700700
sample:
trackIndex = 0
dataHashCode = 1569455924
size = 1022
isKeyFrame = false
presentationTimeUs = 633966
sample:
trackIndex = 0
dataHashCode = -1723778407
size = 790
isKeyFrame = false
presentationTimeUs = 600600
sample:
trackIndex = 0
dataHashCode = 1578275472
size = 610
isKeyFrame = false
presentationTimeUs = 667333
sample:
trackIndex = 0
dataHashCode = 1989768395
size = 2751
isKeyFrame = false
presentationTimeUs = 834166
sample:
trackIndex = 0
dataHashCode = -1215674502
size = 745
isKeyFrame = false
presentationTimeUs = 767433
sample:
trackIndex = 0
dataHashCode = -814473606
size = 621
isKeyFrame = false
presentationTimeUs = 734066
sample:
trackIndex = 0
dataHashCode = 498370894
size = 505
isKeyFrame = false
presentationTimeUs = 800800
sample:
trackIndex = 0
dataHashCode = -1051506468
size = 1268
isKeyFrame = false
presentationTimeUs = 967633
sample:
trackIndex = 0
dataHashCode = -1025604144
size = 880
isKeyFrame = false
presentationTimeUs = 900900
sample:
trackIndex = 0
dataHashCode = -913586520
size = 530
isKeyFrame = false
presentationTimeUs = 867533
sample:
trackIndex = 0
dataHashCode = 1340459242
size = 568
isKeyFrame = false
presentationTimeUs = 934266
sample:
trackIndex = 1
dataHashCode = 864256769
size = 4456
isKeyFrame = true
presentationTimeUs = 0
sample:
trackIndex = 1
dataHashCode = -266855807
size = 4460
isKeyFrame = true
presentationTimeUs = 23208
sample:
trackIndex = 1
dataHashCode = 864256769
size = 4456
isKeyFrame = true
presentationTimeUs = 46437
sample:
trackIndex = 1
dataHashCode = -266855807
size = 4460
isKeyFrame = true
presentationTimeUs = 69645
sample:
trackIndex = 1
dataHashCode = 864256769
size = 4456
isKeyFrame = true
presentationTimeUs = 92875
sample:
trackIndex = 1
dataHashCode = -266855807
size = 4460
isKeyFrame = true
presentationTimeUs = 116083
sample:
trackIndex = 1
dataHashCode = 864256769
size = 4456
isKeyFrame = true
presentationTimeUs = 139312
sample:
trackIndex = 1
dataHashCode = -266855807
size = 4460
isKeyFrame = true
presentationTimeUs = 162520
sample:
trackIndex = 1
dataHashCode = 864256769
size = 4456
isKeyFrame = true
presentationTimeUs = 185750
sample:
trackIndex = 1
dataHashCode = -266855807
size = 4460
isKeyFrame = true
presentationTimeUs = 208958
sample:
trackIndex = 1
dataHashCode = -266855807
size = 4460
isKeyFrame = true
presentationTimeUs = 232187
sample:
trackIndex = 1
dataHashCode = 864256769
size = 4456
isKeyFrame = true
presentationTimeUs = 255416
sample:
trackIndex = 1
dataHashCode = -266855807
size = 4460
isKeyFrame = true
presentationTimeUs = 278625
sample:
trackIndex = 1
dataHashCode = 864256769
size = 4456
isKeyFrame = true
presentationTimeUs = 301854
sample:
trackIndex = 1
dataHashCode = -266855807
size = 4460
isKeyFrame = true
presentationTimeUs = 325062
sample:
trackIndex = 1
dataHashCode = 864256769
size = 4456
isKeyFrame = true
presentationTimeUs = 348291
sample:
trackIndex = 1
dataHashCode = -266855807
size = 4460
isKeyFrame = true
presentationTimeUs = 371500
sample:
trackIndex = 1
dataHashCode = 864256769
size = 4456
isKeyFrame = true
presentationTimeUs = 394729
sample:
trackIndex = 1
dataHashCode = -266855807
size = 4460
isKeyFrame = true
presentationTimeUs = 417937
sample:
trackIndex = 1
dataHashCode = -266855807
size = 4460
isKeyFrame = true
presentationTimeUs = 441166
sample:
trackIndex = 1
dataHashCode = 864256769
size = 4456
isKeyFrame = true
presentationTimeUs = 464395
sample:
trackIndex = 1
dataHashCode = -266855807
size = 4460
isKeyFrame = true
presentationTimeUs = 487604
sample:
trackIndex = 1
dataHashCode = 864256769
size = 4456
isKeyFrame = true
presentationTimeUs = 510833
sample:
trackIndex = 1
dataHashCode = -266855807
size = 4460
isKeyFrame = true
presentationTimeUs = 534041
sample:
trackIndex = 1
dataHashCode = 864256769
size = 4456
isKeyFrame = true
presentationTimeUs = 557270
sample:
trackIndex = 1
dataHashCode = -266855807
size = 4460
isKeyFrame = true
presentationTimeUs = 580479
sample:
trackIndex = 1
dataHashCode = 864256769
size = 4456
isKeyFrame = true
presentationTimeUs = 603708
sample:
trackIndex = 1
dataHashCode = -266855807
size = 4460
isKeyFrame = true
presentationTimeUs = 626916
sample:
trackIndex = 1
dataHashCode = -266855807
size = 4460
isKeyFrame = true
presentationTimeUs = 650145
sample:
trackIndex = 1
dataHashCode = 864256769
size = 4456
isKeyFrame = true
presentationTimeUs = 673375
sample:
trackIndex = 1
dataHashCode = -266855807
size = 4460
isKeyFrame = true
presentationTimeUs = 696583
sample:
trackIndex = 1
dataHashCode = 864256769
size = 4456
isKeyFrame = true
presentationTimeUs = 719812
sample:
trackIndex = 1
dataHashCode = -266855807
size = 4460
isKeyFrame = true
presentationTimeUs = 743020
sample:
trackIndex = 1
dataHashCode = 864256769
size = 4456
isKeyFrame = true
presentationTimeUs = 766250
sample:
trackIndex = 1
dataHashCode = -266855807
size = 4460
isKeyFrame = true
presentationTimeUs = 789458
sample:
trackIndex = 1
dataHashCode = 864256769
size = 4456
isKeyFrame = true
presentationTimeUs = 812687
sample:
trackIndex = 1
dataHashCode = -266855807
size = 4460
isKeyFrame = true
presentationTimeUs = 835895
sample:
trackIndex = 1
dataHashCode = -266855807
size = 4460
isKeyFrame = true
presentationTimeUs = 859125
sample:
trackIndex = 1
dataHashCode = 864256769
size = 4456
isKeyFrame = true
presentationTimeUs = 882354
sample:
trackIndex = 1
dataHashCode = -266855807
size = 4460
isKeyFrame = true
presentationTimeUs = 905562
sample:
trackIndex = 1
dataHashCode = 864256769
size = 4456
isKeyFrame = true
presentationTimeUs = 928791
sample:
trackIndex = 1
dataHashCode = -266855807
size = 4460
isKeyFrame = true
presentationTimeUs = 952000
sample:
trackIndex = 1
dataHashCode = 864256769
size = 4456
isKeyFrame = true
presentationTimeUs = 975229
sample:
trackIndex = 1
dataHashCode = -266855807
size = 4460
isKeyFrame = true
presentationTimeUs = 998437
sample:
trackIndex = 1
dataHashCode = -115707775
size = 444
isKeyFrame = true
presentationTimeUs = 1021666
sample:
trackIndex = 1
dataHashCode = 923521
size = 4
isKeyFrame = true
presentationTimeUs = 1023979
released = true

View file

@ -23,7 +23,6 @@ import static androidx.media3.common.util.Assertions.checkState;
import static androidx.media3.common.util.Assertions.checkStateNotNull;
import static androidx.media3.decoder.DecoderInputBuffer.BUFFER_REPLACEMENT_MODE_DIRECT;
import android.util.Pair;
import androidx.annotation.Nullable;
import androidx.media3.common.C;
import androidx.media3.common.Format;
@ -52,14 +51,13 @@ import java.util.concurrent.atomic.AtomicReference;
private final SilentAudioGenerator silentAudioGenerator;
private final Queue<DecoderInputBuffer> availableInputBuffers;
private final Queue<DecoderInputBuffer> pendingInputBuffers;
private final AtomicReference<@NullableType Pair<EditedMediaItem, @NullableType Format>>
pendingMediaItem;
private final AtomicReference<@NullableType MediaItemChange> pendingMediaItemChange;
@Nullable private DecoderInputBuffer currentInputBufferBeingOutput;
private AudioProcessingPipeline audioProcessingPipeline;
private boolean receivedFirstMediaItemCallback;
private boolean processedFirstMediaItemChange;
private boolean receivedEndOfStreamFromInput;
private volatile boolean queueEndOfStreamAfterSilence;
private boolean queueEndOfStreamAfterSilence;
// TODO(b/260618558): Move silent audio generation upstream of this component.
public AudioGraph(Format firstInputFormat, EditedMediaItem firstEditedMediaItem)
@ -73,7 +71,7 @@ import java.util.concurrent.atomic.AtomicReference;
availableInputBuffers.add(inputBuffer);
}
pendingInputBuffers = new ConcurrentLinkedDeque<>();
pendingMediaItem = new AtomicReference<>();
pendingMediaItemChange = new AtomicReference<>();
AudioFormat inputAudioFormat = new AudioFormat(firstInputFormat);
silentAudioGenerator = new SilentAudioGenerator(inputAudioFormat);
audioProcessingPipeline =
@ -82,6 +80,8 @@ import java.util.concurrent.atomic.AtomicReference;
/* trackFormat= */ firstInputFormat,
/* inputAudioFormat= */ inputAudioFormat,
/* requiredOutputAudioFormat= */ AudioFormat.NOT_SET);
// APP configuration not active until flush called. getOutputAudioFormat based on active config.
audioProcessingPipeline.flush();
outputAudioFormat = audioProcessingPipeline.getOutputAudioFormat();
}
@ -102,9 +102,9 @@ import java.util.concurrent.atomic.AtomicReference;
return outputBuffer;
}
if (!hasDataToOutput() && pendingMediaItem.get() != null) {
if (!hasDataToOutput() && pendingMediaItemChange.get() != null) {
try {
reconfigureProcessingForPendingMediaItem();
configureForPendingMediaItemChange();
} catch (AudioProcessor.UnhandledAudioFormatException e) {
throw ExportException.createForAudioProcessing(e, "AudioGraph reconfiguration");
}
@ -123,26 +123,18 @@ import java.util.concurrent.atomic.AtomicReference;
checkState(
durationUs != C.TIME_UNSET,
"Could not generate silent audio because duration is unknown.");
silentAudioGenerator.addSilence(durationUs);
if (isLast) {
queueEndOfStreamAfterSilence = true;
}
} else {
checkState(MimeTypes.isAudio(trackFormat.sampleMimeType));
checkState(trackFormat.pcmEncoding != Format.NO_VALUE);
}
if (!receivedFirstMediaItemCallback) {
receivedFirstMediaItemCallback = true;
return;
}
pendingMediaItem.set(Pair.create(editedMediaItem, trackFormat));
pendingMediaItemChange.set(
new MediaItemChange(editedMediaItem, durationUs, trackFormat, isLast));
}
@Override
@Nullable
public DecoderInputBuffer getInputBuffer() {
if (shouldGenerateSilence() || pendingMediaItem.get() != null) {
if (silentAudioGenerator.hasRemaining() || pendingMediaItemChange.get() != null) {
return null;
}
return availableInputBuffers.peek();
@ -150,7 +142,7 @@ import java.util.concurrent.atomic.AtomicReference;
@Override
public boolean queueInputBuffer() {
checkState(pendingMediaItem.get() == null);
checkState(pendingMediaItemChange.get() == null);
DecoderInputBuffer inputBuffer = availableInputBuffers.remove();
pendingInputBuffers.add(inputBuffer);
return true;
@ -165,15 +157,17 @@ import java.util.concurrent.atomic.AtomicReference;
if (hasDataToOutput()) {
return false;
}
if (pendingMediaItem.get() != null) {
if (pendingMediaItemChange.get() != null) {
return false;
}
// Only read volatile variable queueEndOfStreamAfterSilence if there is a chance that the
// graph has ended.
return receivedEndOfStreamFromInput || queueEndOfStreamAfterSilence;
}
private ByteBuffer getOutputInternal() {
if (!processedFirstMediaItemChange) {
return EMPTY_BUFFER;
}
if (!audioProcessingPipeline.isOperational()) {
return feedOutputFromInput();
}
@ -183,22 +177,23 @@ import java.util.concurrent.atomic.AtomicReference;
return audioProcessingPipeline.getOutput();
}
/**
* Attempts to feed input data to the {@link AudioProcessingPipeline}.
*
* @return Whether it may be possible to process more data immediately by calling this method
* again.
*/
private boolean feedProcessingPipelineFromInput() {
if (shouldGenerateSilence()) {
if (silentAudioGenerator.hasRemaining()) {
ByteBuffer inputData = silentAudioGenerator.getBuffer();
audioProcessingPipeline.queueInput(inputData);
return !inputData.hasRemaining();
if (inputData.hasRemaining()) {
return false;
}
if (!silentAudioGenerator.hasRemaining()) {
audioProcessingPipeline.queueEndOfStream();
return false;
}
return true;
}
@Nullable DecoderInputBuffer pendingInputBuffer = pendingInputBuffers.peek();
if (pendingInputBuffer == null) {
if (pendingMediaItem.get() != null) {
if (pendingMediaItemChange.get() != null) {
audioProcessingPipeline.queueEndOfStream();
}
return false;
@ -214,16 +209,14 @@ import java.util.concurrent.atomic.AtomicReference;
ByteBuffer inputData = checkNotNull(pendingInputBuffer.data);
audioProcessingPipeline.queueInput(inputData);
if (inputData.hasRemaining()) {
// APP could not consume all input.
return false;
}
// All input consumed, remove from pending and make available.
clearAndAddToAvailableBuffers(pendingInputBuffers.remove());
return true;
}
private ByteBuffer feedOutputFromInput() {
if (shouldGenerateSilence()) {
if (silentAudioGenerator.hasRemaining()) {
return silentAudioGenerator.getBuffer();
}
@ -262,6 +255,10 @@ import java.util.concurrent.atomic.AtomicReference;
}
private boolean hasDataToOutput() {
if (!processedFirstMediaItemChange) {
return false;
}
if (currentInputBufferBeingOutput != null
&& currentInputBufferBeingOutput.data != null
&& currentInputBufferBeingOutput.data.hasRemaining()) {
@ -279,10 +276,6 @@ import java.util.concurrent.atomic.AtomicReference;
return false;
}
private boolean shouldGenerateSilence() {
return silentAudioGenerator.hasRemaining() && pendingInputBuffers.isEmpty();
}
private void clearAndAddToAvailableBuffers(DecoderInputBuffer inputBuffer) {
inputBuffer.clear();
inputBuffer.timeUs = 0;
@ -290,28 +283,38 @@ import java.util.concurrent.atomic.AtomicReference;
}
/**
* Reconfigures audio processing based on the pending {@linkplain #onMediaItemChanged media item
* change}.
* Configures the graph based on the pending {@linkplain #onMediaItemChanged media item change}.
*
* <p>Before reconfiguration, all {@linkplain #hasDataToOutput() pending data} must be consumed
* <p>Before configuration, all {@linkplain #hasDataToOutput() pending data} must be consumed
* through {@link #getOutput()}.
*/
private void reconfigureProcessingForPendingMediaItem() throws UnhandledAudioFormatException {
checkState(!hasDataToOutput());
Pair<EditedMediaItem, @NullableType Format> pendingChange =
checkStateNotNull(pendingMediaItem.get());
AudioFormat pendingAudioFormat =
pendingChange.second != null
? new AudioFormat(pendingChange.second)
: silentAudioGenerator.audioFormat;
audioProcessingPipeline =
configureProcessing(
/* editedMediaItem= */ pendingChange.first,
/* trackFormat= */ pendingChange.second,
/* inputAudioFormat= */ pendingAudioFormat,
/* requiredOutputAudioFormat= */ outputAudioFormat);
pendingMediaItem.set(null);
/**
 * Configures audio processing based on the pending {@linkplain #onMediaItemChanged media item
 * change}.
 *
 * <p>Silence for a format-less change is added to the generator here, while the change is being
 * applied — not when the change is first reported — so a subsequent pipeline flush cannot occur
 * after the silence has queued end-of-stream.
 *
 * @throws UnhandledAudioFormatException If the pipeline cannot be configured for the new format.
 */
private void configureForPendingMediaItemChange() throws UnhandledAudioFormatException {
MediaItemChange pendingChange = checkStateNotNull(pendingMediaItemChange.get());
AudioFormat pendingAudioFormat;
if (pendingChange.format != null) {
// A real input track is present; derive the input audio format from it.
pendingAudioFormat = new AudioFormat(pendingChange.format);
} else { // Generating silence
pendingAudioFormat = silentAudioGenerator.audioFormat;
silentAudioGenerator.addSilence(pendingChange.durationUs);
if (pendingChange.isLast) {
// After the generated silence drains, the graph should signal end-of-stream.
queueEndOfStreamAfterSilence = true;
}
}
if (processedFirstMediaItemChange) {
// APP is configured in constructor for first media item.
audioProcessingPipeline =
configureProcessing(
/* editedMediaItem= */ pendingChange.editedMediaItem,
/* trackFormat= */ pendingChange.format,
/* inputAudioFormat= */ pendingAudioFormat,
/* requiredOutputAudioFormat= */ outputAudioFormat);
}
audioProcessingPipeline.flush();
pendingMediaItemChange.set(null);
// The new item has not yet delivered end-of-stream.
receivedEndOfStreamFromInput = false;
processedFirstMediaItemChange = true;
}
private static AudioProcessingPipeline configureProcessing(
@ -357,7 +360,21 @@ import java.util.concurrent.atomic.AtomicReference;
"Audio format can not be modified to match existing downstream format", inputAudioFormat);
}
audioProcessingPipeline.flush();
return audioProcessingPipeline;
}
/** Immutable value holder describing a pending media item change. */
private static final class MediaItemChange {
/** The item about to be processed. */
public final EditedMediaItem editedMediaItem;
/** Duration of the item, in microseconds. */
public final long durationUs;
/** Track format of the item, or {@code null} when silence should be generated for it. */
@Nullable public final Format format;
/** Whether this item is the last one in the input. */
public final boolean isLast;

public MediaItemChange(
EditedMediaItem editedMediaItem, long durationUs, @Nullable Format format, boolean isLast) {
this.isLast = isLast;
this.format = format;
this.durationUs = durationUs;
this.editedMediaItem = editedMediaItem;
}
}
}

View file

@ -353,6 +353,34 @@ public final class MediaItemExportTest {
getDumpFileName(FILE_AUDIO_VIDEO + ".silentaudio"));
}
@Test
public void start_forceAudioTrackAndRemoveAudioWithEffects_generatesSilentAudio()
throws Exception {
// Regression coverage: generated silence that is routed through an audio effect must still
// complete the export.
Transformer transformer =
createTransformerBuilder(testMuxerHolder, /* enableFallback= */ false).build();
SonicAudioProcessor resampler = new SonicAudioProcessor();
resampler.setOutputSampleRateHz(48000);
EditedMediaItem silentItemWithEffects =
new EditedMediaItem.Builder(MediaItem.fromUri(ASSET_URI_PREFIX + FILE_AUDIO_VIDEO))
.setRemoveAudio(true)
.setEffects(new Effects(ImmutableList.of(resampler), ImmutableList.of()))
.build();
Composition composition =
new Composition.Builder(
ImmutableList.of(
new EditedMediaItemSequence(ImmutableList.of(silentItemWithEffects))))
.experimentalSetForceAudioTrack(true)
.build();

transformer.start(composition, outputPath);
TransformerTestRunner.runLooper(transformer);

DumpFileAsserts.assertOutput(
context,
checkNotNull(testMuxerHolder.testMuxer),
getDumpFileName(FILE_AUDIO_VIDEO + ".silentaudio_48000hz"));
}
@Test
public void start_forceAudioTrackAndRemoveVideo_isIgnored() throws Exception {
Transformer transformer =

View file

@ -177,7 +177,33 @@ public final class SequenceExportTest {
}
@Test
public void start_concatenateSilenceAndAudioWithTransmuxVideo_completesSuccessfully()
public void concatenateAudioAndSilence_withTransmuxVideo_completesSuccessfully()
throws Exception {
Transformer transformer =
createTransformerBuilder(testMuxerHolder, /* enableFallback= */ false).build();
MediaItem mediaItem = MediaItem.fromUri(ASSET_URI_PREFIX + FILE_AUDIO_RAW_VIDEO);
EditedMediaItem audioVideoMediaItem = new EditedMediaItem.Builder(mediaItem).build();
EditedMediaItem videoOnlyMediaItem =
new EditedMediaItem.Builder(mediaItem).setRemoveAudio(true).build();
EditedMediaItemSequence sequence =
new EditedMediaItemSequence(ImmutableList.of(audioVideoMediaItem, videoOnlyMediaItem));
Composition composition =
new Composition.Builder(ImmutableList.of(sequence))
.experimentalSetForceAudioTrack(true)
.setTransmuxVideo(true)
.build();
transformer.start(composition, outputPath);
TransformerTestRunner.runLooper(transformer);
DumpFileAsserts.assertOutput(
context,
checkNotNull(testMuxerHolder.testMuxer),
getDumpFileName(FILE_AUDIO_RAW_VIDEO + ".audio_then_silence"));
}
@Test
public void concatenateSilenceAndAudio_withTransmuxVideo_completesSuccessfully()
throws Exception {
Transformer transformer =
createTransformerBuilder(testMuxerHolder, /* enableFallback= */ false).build();
@ -203,20 +229,28 @@ public final class SequenceExportTest {
}
@Test
public void start_concatenateSilenceAndAudioWithEffectsAndTransmuxVideo_completesSuccessfully()
public void concatenateAudioAndSilence_withEffectsAndTransmuxVideo_completesSuccessfully()
throws Exception {
Transformer transformer =
createTransformerBuilder(testMuxerHolder, /* enableFallback= */ false).build();
MediaItem mediaItem = MediaItem.fromUri(ASSET_URI_PREFIX + FILE_AUDIO_RAW_VIDEO);
SonicAudioProcessor sonicAudioProcessor = createPitchChangingAudioProcessor(/* pitch= */ 2f);
Effects effects =
new Effects(ImmutableList.of(sonicAudioProcessor), /* videoEffects= */ ImmutableList.of());
EditedMediaItem noAudioEditedMediaItem =
new EditedMediaItem.Builder(mediaItem).setRemoveAudio(true).setEffects(effects).build();
EditedMediaItem audioEditedMediaItem =
new EditedMediaItem.Builder(mediaItem).setEffects(effects).build();
new EditedMediaItem.Builder(mediaItem)
.setEffects(
new Effects(
ImmutableList.of(createPitchChangingAudioProcessor(/* pitch= */ 2f)),
/* videoEffects= */ ImmutableList.of()))
.build();
EditedMediaItem noAudioEditedMediaItem =
new EditedMediaItem.Builder(mediaItem)
.setRemoveAudio(true)
.setEffects(
new Effects(
ImmutableList.of(createPitchChangingAudioProcessor(/* pitch= */ 2f)),
/* videoEffects= */ ImmutableList.of()))
.build();
EditedMediaItemSequence sequence =
new EditedMediaItemSequence(ImmutableList.of(noAudioEditedMediaItem, audioEditedMediaItem));
new EditedMediaItemSequence(ImmutableList.of(audioEditedMediaItem, noAudioEditedMediaItem));
Composition composition =
new Composition.Builder(ImmutableList.of(sequence))
.experimentalSetForceAudioTrack(true)
@ -226,12 +260,193 @@ public final class SequenceExportTest {
transformer.start(composition, outputPath);
TransformerTestRunner.runLooper(transformer);
DumpFileAsserts.assertOutput(
context,
checkNotNull(testMuxerHolder.testMuxer),
getDumpFileName(FILE_AUDIO_RAW_VIDEO + ".audio_then_silence_with_effects"));
}
@Test
public void concatenateSilenceAndAudio_withEffectsAndTransmuxVideo_completesSuccessfully()
throws Exception {
Transformer transformer =
createTransformerBuilder(testMuxerHolder, /* enableFallback= */ false).build();
MediaItem mediaItem = MediaItem.fromUri(ASSET_URI_PREFIX + FILE_AUDIO_RAW_VIDEO);
// First item: audio removed, so silence is generated, and routed through a pitch effect.
EditedMediaItem noAudioEditedMediaItem =
new EditedMediaItem.Builder(mediaItem)
.setRemoveAudio(true)
.setEffects(
new Effects(
ImmutableList.of(createPitchChangingAudioProcessor(/* pitch= */ 2f)),
/* videoEffects= */ ImmutableList.of()))
.build();
// Second item: real audio with the same pitch effect.
EditedMediaItem audioEditedMediaItem =
new EditedMediaItem.Builder(mediaItem)
.setEffects(
new Effects(
ImmutableList.of(createPitchChangingAudioProcessor(/* pitch= */ 2f)),
/* videoEffects= */ ImmutableList.of()))
.build();
Composition composition =
new Composition.Builder(
ImmutableList.of(
new EditedMediaItemSequence(
ImmutableList.of(noAudioEditedMediaItem, audioEditedMediaItem))))
.experimentalSetForceAudioTrack(true)
.setTransmuxVideo(true)
.build();
transformer.start(composition, outputPath);
TransformerTestRunner.runLooper(transformer);
// Compare the muxed output against the recorded dump.
DumpFileAsserts.assertOutput(
context,
checkNotNull(testMuxerHolder.testMuxer),
getDumpFileName(FILE_AUDIO_RAW_VIDEO + ".silence_then_audio_with_effects"));
}
@Test
public void concatenateSilenceAndSilence_withTransmuxVideo_completesSuccessfully()
throws Exception {
// Two consecutive items with audio removed: silence is generated for both.
Transformer transformer =
createTransformerBuilder(testMuxerHolder, /* enableFallback= */ false).build();
MediaItem mediaItem = MediaItem.fromUri(ASSET_URI_PREFIX + FILE_AUDIO_RAW_VIDEO);
EditedMediaItem silentItem =
new EditedMediaItem.Builder(mediaItem).setRemoveAudio(true).build();
Composition composition =
new Composition.Builder(
ImmutableList.of(
new EditedMediaItemSequence(ImmutableList.of(silentItem, silentItem))))
.experimentalSetForceAudioTrack(true)
.setTransmuxVideo(true)
.build();

transformer.start(composition, outputPath);
TransformerTestRunner.runLooper(transformer);

DumpFileAsserts.assertOutput(
context,
checkNotNull(testMuxerHolder.testMuxer),
getDumpFileName(FILE_AUDIO_RAW_VIDEO + ".silence_then_silence"));
}
@Test
public void concatenateEditedSilenceAndSilence_withTransmuxVideo_completesSuccessfully()
throws Exception {
Transformer transformer =
createTransformerBuilder(testMuxerHolder, /* enableFallback= */ false).build();
MediaItem mediaItem = MediaItem.fromUri(ASSET_URI_PREFIX + FILE_AUDIO_RAW_VIDEO);
// First item: generated silence routed through a pitch effect.
EditedMediaItem silenceWithEffectsItem =
new EditedMediaItem.Builder(mediaItem)
.setRemoveAudio(true)
.setEffects(
new Effects(
ImmutableList.of(createPitchChangingAudioProcessor(/* pitch= */ 2f)),
/* videoEffects= */ ImmutableList.of()))
.build();
// Second item: plain generated silence with no effects. (Previously this item was built with
// the same effects as the first, which contradicted the test name and the
// ".silence-effects_then_silence" dump, and duplicated
// concatenateSilenceAndSilence_withEffectsAndTransmuxVideo.)
EditedMediaItem silenceItem =
new EditedMediaItem.Builder(mediaItem).setRemoveAudio(true).build();
EditedMediaItemSequence sequence =
new EditedMediaItemSequence(ImmutableList.of(silenceWithEffectsItem, silenceItem));
Composition composition =
new Composition.Builder(ImmutableList.of(sequence))
.experimentalSetForceAudioTrack(true)
.setTransmuxVideo(true)
.build();
transformer.start(composition, outputPath);
TransformerTestRunner.runLooper(transformer);
DumpFileAsserts.assertOutput(
context,
checkNotNull(testMuxerHolder.testMuxer),
getDumpFileName(FILE_AUDIO_RAW_VIDEO + ".silence-effects_then_silence"));
}
@Test
public void concatenateSilenceAndEditedSilence_withTransmuxVideo_completesSuccessfully()
throws Exception {
Transformer transformer =
createTransformerBuilder(testMuxerHolder, /* enableFallback= */ false).build();
MediaItem mediaItem = MediaItem.fromUri(ASSET_URI_PREFIX + FILE_AUDIO_RAW_VIDEO);
// Second item in the sequence: generated silence routed through a pitch effect.
EditedMediaItem silenceWithEffectsItem =
new EditedMediaItem.Builder(mediaItem)
.setRemoveAudio(true)
.setEffects(
new Effects(
ImmutableList.of(createPitchChangingAudioProcessor(/* pitch= */ 2f)),
/* videoEffects= */ ImmutableList.of()))
.build();
// First item in the sequence: plain generated silence with no effects. (Previously this item
// was built with the same effects as silenceWithEffectsItem, which contradicted the test name
// and the ".silence_then_silence-effects" dump, and duplicated
// concatenateSilenceAndSilence_withEffectsAndTransmuxVideo.)
EditedMediaItem silenceItem =
new EditedMediaItem.Builder(mediaItem).setRemoveAudio(true).build();
EditedMediaItemSequence sequence =
new EditedMediaItemSequence(ImmutableList.of(silenceItem, silenceWithEffectsItem));
Composition composition =
new Composition.Builder(ImmutableList.of(sequence))
.experimentalSetForceAudioTrack(true)
.setTransmuxVideo(true)
.build();
transformer.start(composition, outputPath);
TransformerTestRunner.runLooper(transformer);
DumpFileAsserts.assertOutput(
context,
checkNotNull(testMuxerHolder.testMuxer),
getDumpFileName(FILE_AUDIO_RAW_VIDEO + ".silence_then_silence-effects"));
}
@Test
public void concatenateSilenceAndSilence_withEffectsAndTransmuxVideo_completesSuccessfully()
throws Exception {
Transformer transformer =
createTransformerBuilder(testMuxerHolder, /* enableFallback= */ false).build();
MediaItem mediaItem = MediaItem.fromUri(ASSET_URI_PREFIX + FILE_AUDIO_RAW_VIDEO);
// Both items remove audio (silence is generated) and apply a pitch effect, exercising
// back-to-back silence with effects.
EditedMediaItem firstItem =
new EditedMediaItem.Builder(mediaItem)
.setRemoveAudio(true)
.setEffects(
new Effects(
ImmutableList.of(createPitchChangingAudioProcessor(/* pitch= */ 2f)),
/* videoEffects= */ ImmutableList.of()))
.build();
EditedMediaItem secondItem =
new EditedMediaItem.Builder(mediaItem)
.setRemoveAudio(true)
.setEffects(
new Effects(
ImmutableList.of(createPitchChangingAudioProcessor(/* pitch= */ 2f)),
/* videoEffects= */ ImmutableList.of()))
.build();
EditedMediaItemSequence sequence =
new EditedMediaItemSequence(ImmutableList.of(firstItem, secondItem));
Composition composition =
new Composition.Builder(ImmutableList.of(sequence))
.experimentalSetForceAudioTrack(true)
.setTransmuxVideo(true)
.build();
transformer.start(composition, outputPath);
TransformerTestRunner.runLooper(transformer);
// Compare the muxed output against the recorded dump.
DumpFileAsserts.assertOutput(
context,
checkNotNull(testMuxerHolder.testMuxer),
getDumpFileName(FILE_AUDIO_RAW_VIDEO + ".silence_then_silence_with_effects"));
}
@Test
public void concatenateTwoAudioItems_withSameFormat_completesSuccessfully() throws Exception {
Transformer transformer =