mirror of
https://github.com/samsonjs/media.git
synced 2026-03-25 09:25:53 +00:00
Split DefaultAudioSink.writeBuffer into two separate steps
The method currently has two ways of operating with a complicated
contract:
1. outputBuffer == null -> sets a new outputBuffer and starts
draining it as far as possible.
2. outputBuffer != null -> continues draining a previous buffer,
requires that the input parameter is the same as outputBuffer.
This contract can be made cleaner by splitting the logic into
(1) setting a new output buffer and (2) draining the output buffer.
Only one parameter is needed for each method and there is no
requirement to pass in the same argument as before because we already
know what we are draining. This also means the caller of the method
doesn't have to keep track of the actual outputBuffer, allowing
further processing steps in the future that change the drained
instance.
PiperOrigin-RevId: 663316251
This commit is contained in:
parent
7a1f3629ff
commit
b58a1d16f7
1 changed files with 45 additions and 24 deletions
|
|
@ -15,7 +15,6 @@
|
|||
*/
|
||||
package androidx.media3.exoplayer.audio;
|
||||
|
||||
import static androidx.media3.common.audio.AudioProcessor.EMPTY_BUFFER;
|
||||
import static androidx.media3.common.util.Assertions.checkNotNull;
|
||||
import static androidx.media3.common.util.Assertions.checkState;
|
||||
import static androidx.media3.common.util.Util.constrainValue;
|
||||
|
|
@ -1071,28 +1070,40 @@ public final class DefaultAudioSink implements AudioSink {
|
|||
|
||||
/**
|
||||
* Repeatedly drains and feeds the {@link AudioProcessingPipeline} until {@link
|
||||
* #writeBuffer(ByteBuffer, long)} is not accepting any more input or there is no more input to
|
||||
* #drainOutputBuffer(long)} is not able to fully drain the output or there is no more input to
|
||||
* feed into the pipeline.
|
||||
*
|
||||
* <p>If the {@link AudioProcessingPipeline} is not {@linkplain
|
||||
* AudioProcessingPipeline#isOperational() operational}, input buffers are passed straight to
|
||||
* {@link #writeBuffer(ByteBuffer, long)}.
|
||||
* {@link #setOutputBuffer(ByteBuffer)}.
|
||||
*
|
||||
* @param avSyncPresentationTimeUs The tunneling AV sync presentation time for the current buffer,
|
||||
* or {@link C#TIME_END_OF_SOURCE} when draining remaining buffers at the end of the stream.
|
||||
*/
|
||||
private void processBuffers(long avSyncPresentationTimeUs) throws WriteException {
|
||||
// Drain existing buffer first.
|
||||
drainOutputBuffer(avSyncPresentationTimeUs);
|
||||
if (outputBuffer != null) {
|
||||
// The existing output buffer is not fully processed.
|
||||
return;
|
||||
}
|
||||
|
||||
// Obtain new output buffer and start draining.
|
||||
if (!audioProcessingPipeline.isOperational()) {
|
||||
writeBuffer(inputBuffer != null ? inputBuffer : EMPTY_BUFFER, avSyncPresentationTimeUs);
|
||||
if (inputBuffer != null) {
|
||||
setOutputBuffer(inputBuffer);
|
||||
drainOutputBuffer(avSyncPresentationTimeUs);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
while (!audioProcessingPipeline.isEnded()) {
|
||||
ByteBuffer bufferToWrite;
|
||||
while ((bufferToWrite = audioProcessingPipeline.getOutput()).hasRemaining()) {
|
||||
writeBuffer(bufferToWrite, avSyncPresentationTimeUs);
|
||||
if (bufferToWrite.hasRemaining()) {
|
||||
// writeBuffer method is providing back pressure.
|
||||
setOutputBuffer(bufferToWrite);
|
||||
drainOutputBuffer(avSyncPresentationTimeUs);
|
||||
if (outputBuffer != null) {
|
||||
// drainOutputBuffer method is providing back pressure.
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
|
@ -1110,10 +1121,7 @@ public final class DefaultAudioSink implements AudioSink {
|
|||
*/
|
||||
private boolean drainToEndOfStream() throws WriteException {
|
||||
if (!audioProcessingPipeline.isOperational()) {
|
||||
if (outputBuffer == null) {
|
||||
return true;
|
||||
}
|
||||
writeBuffer(outputBuffer, C.TIME_END_OF_SOURCE);
|
||||
drainOutputBuffer(C.TIME_END_OF_SOURCE);
|
||||
return outputBuffer == null;
|
||||
}
|
||||
|
||||
|
|
@ -1124,26 +1132,38 @@ public final class DefaultAudioSink implements AudioSink {
|
|||
}
|
||||
|
||||
/**
|
||||
* Writes the provided buffer to the audio track.
|
||||
* Sets a new output buffer.
|
||||
*
|
||||
* <p>Must only be called if the existing {@link #outputBuffer} is null (i.e. has been fully
|
||||
* drained with {@link #drainOutputBuffer}).
|
||||
*
|
||||
* @param buffer The buffer to set.
|
||||
*/
|
||||
private void setOutputBuffer(ByteBuffer buffer) {
|
||||
checkState(outputBuffer == null);
|
||||
if (!buffer.hasRemaining()) {
|
||||
return;
|
||||
}
|
||||
outputBuffer = buffer;
|
||||
}
|
||||
|
||||
/**
|
||||
* Drains the {@link #outputBuffer} by writing it to the audio track.
|
||||
*
|
||||
* <p>{@link #outputBuffer} will be set to null if it has been fully drained.
|
||||
*
|
||||
* @param buffer The buffer to write.
|
||||
* @param avSyncPresentationTimeUs The tunneling AV sync presentation time for the buffer, or
|
||||
* {@link C#TIME_END_OF_SOURCE} when draining remaining buffers at the end of the stream.
|
||||
*/
|
||||
@SuppressWarnings("ReferenceEquality")
|
||||
private void writeBuffer(ByteBuffer buffer, long avSyncPresentationTimeUs) throws WriteException {
|
||||
if (!buffer.hasRemaining()) {
|
||||
private void drainOutputBuffer(long avSyncPresentationTimeUs) throws WriteException {
|
||||
if (outputBuffer == null) {
|
||||
return;
|
||||
}
|
||||
if (outputBuffer != null) {
|
||||
Assertions.checkArgument(outputBuffer == buffer);
|
||||
} else {
|
||||
outputBuffer = buffer;
|
||||
}
|
||||
if (writeExceptionPendingExceptionHolder.shouldWaitBeforeRetry()) {
|
||||
return;
|
||||
}
|
||||
int bytesRemaining = buffer.remaining();
|
||||
int bytesRemaining = outputBuffer.remaining();
|
||||
int bytesWrittenOrError = 0; // Error if negative
|
||||
if (tunneling) {
|
||||
Assertions.checkState(avSyncPresentationTimeUs != C.TIME_UNSET);
|
||||
|
|
@ -1156,9 +1176,10 @@ public final class DefaultAudioSink implements AudioSink {
|
|||
lastTunnelingAvSyncPresentationTimeUs = avSyncPresentationTimeUs;
|
||||
}
|
||||
bytesWrittenOrError =
|
||||
writeNonBlockingWithAvSync(audioTrack, buffer, bytesRemaining, avSyncPresentationTimeUs);
|
||||
writeNonBlockingWithAvSync(
|
||||
audioTrack, outputBuffer, bytesRemaining, avSyncPresentationTimeUs);
|
||||
} else {
|
||||
bytesWrittenOrError = writeNonBlocking(audioTrack, buffer, bytesRemaining);
|
||||
bytesWrittenOrError = writeNonBlocking(audioTrack, outputBuffer, bytesRemaining);
|
||||
}
|
||||
|
||||
lastFeedElapsedRealtimeMs = SystemClock.elapsedRealtime();
|
||||
|
|
@ -1222,7 +1243,7 @@ public final class DefaultAudioSink implements AudioSink {
|
|||
if (configuration.outputMode != OUTPUT_MODE_PCM) {
|
||||
// When playing non-PCM, the inputBuffer is never processed, thus the last inputBuffer
|
||||
// must be the current input buffer.
|
||||
Assertions.checkState(buffer == inputBuffer);
|
||||
Assertions.checkState(outputBuffer == inputBuffer);
|
||||
writtenEncodedFrames += (long) framesPerEncodedSample * inputBufferAccessUnitCount;
|
||||
}
|
||||
outputBuffer = null;
|
||||
|
|
|
|||
Loading…
Reference in a new issue