diff --git a/RELEASENOTES.md b/RELEASENOTES.md index a52c51efd4..8bdc2462fe 100644 --- a/RELEASENOTES.md +++ b/RELEASENOTES.md @@ -26,6 +26,9 @@ Release notes * Audio: * Use the compressed audio format bitrate to calculate the min buffer size for `AudioTrack` in direct playbacks (passthrough). + * Fix bug where some playbacks fail when tunneling is enabled and + `AudioProcessors` are active, e.g. for gapless trimming + ([#10847](https://github.com/google/ExoPlayer/issues/10847)). * Text: * Fix `TextRenderer` passing an invalid (negative) index to `Subtitle.getEventTime` if a subtitle file contains no cues. diff --git a/libraries/exoplayer/src/main/java/androidx/media3/exoplayer/audio/DefaultAudioSink.java b/libraries/exoplayer/src/main/java/androidx/media3/exoplayer/audio/DefaultAudioSink.java index d7bdb9f675..7da81dfba1 100644 --- a/libraries/exoplayer/src/main/java/androidx/media3/exoplayer/audio/DefaultAudioSink.java +++ b/libraries/exoplayer/src/main/java/androidx/media3/exoplayer/audio/DefaultAudioSink.java @@ -522,6 +522,7 @@ public final class DefaultAudioSink implements AudioSink { private AuxEffectInfo auxEffectInfo; @Nullable private AudioDeviceInfoApi23 preferredDevice; private boolean tunneling; + private long lastTunnelingAvSyncPresentationTimeUs; private long lastFeedElapsedRealtimeMs; private boolean offloadDisabledUntilNextConfiguration; private boolean isWaitingForOffloadEndOfStreamHandled; @@ -1012,6 +1013,9 @@ public final class DefaultAudioSink implements AudioSink { *
If the {@link AudioProcessingPipeline} is not {@linkplain * AudioProcessingPipeline#isOperational() operational}, input buffers are passed straight to * {@link #writeBuffer(ByteBuffer, long)}. + * + * @param avSyncPresentationTimeUs The tunneling AV sync presentation time for the current buffer, + * or {@link C#TIME_END_OF_SOURCE} when draining remaining buffers at the end of the stream. */ private void processBuffers(long avSyncPresentationTimeUs) throws WriteException { if (!audioProcessingPipeline.isOperational()) { @@ -1045,17 +1049,24 @@ if (outputBuffer == null) { return true; } - writeBuffer(outputBuffer, C.TIME_UNSET); + writeBuffer(outputBuffer, C.TIME_END_OF_SOURCE); return outputBuffer == null; } audioProcessingPipeline.queueEndOfStream(); - processBuffers(C.TIME_UNSET); + processBuffers(C.TIME_END_OF_SOURCE); return audioProcessingPipeline.isEnded() && (outputBuffer == null || !outputBuffer.hasRemaining()); } + /** + * Writes the provided buffer to the audio track. + * + * @param buffer The buffer to write. + * @param avSyncPresentationTimeUs The tunneling AV sync presentation time for the buffer, or + * {@link C#TIME_END_OF_SOURCE} when draining remaining buffers at the end of the stream. + */ @SuppressWarnings("ReferenceEquality") private void writeBuffer(ByteBuffer buffer, long avSyncPresentationTimeUs) throws WriteException { if (!buffer.hasRemaining()) { return; } @@ -1091,6 +1102,14 @@ } } else if (tunneling) { Assertions.checkState(avSyncPresentationTimeUs != C.TIME_UNSET); + if (avSyncPresentationTimeUs == C.TIME_END_OF_SOURCE) { + // Audio processors during tunneling are required to produce buffers immediately when + // queuing, so we can assume the timestamp during draining at the end of the stream is the + // same as the timestamp of the last sample we processed. 
+ avSyncPresentationTimeUs = lastTunnelingAvSyncPresentationTimeUs; + } else { + lastTunnelingAvSyncPresentationTimeUs = avSyncPresentationTimeUs; + } bytesWrittenOrError = writeNonBlockingWithAvSyncV21( audioTrack, buffer, bytesRemaining, avSyncPresentationTimeUs);