Pass AudioFormat to ASP#computeNextEncoderInputBufferTimeUs.

This method uses sampleRate, channelCount and pcmEncoding, so passing
the whole AudioFormat is simpler than passing the fields individually.

This prepares for a future change that builds the
encoderInputAudioFormat from encoder.getConfigurationFormat().

PiperOrigin-RevId: 510956177
This commit is contained in:
samrobinson 2023-02-20 11:43:09 +00:00 committed by Andrew Lewis
parent ae37d33b18
commit 363b85226e

View file

@@ -326,9 +326,7 @@ import org.checkerframework.dataflow.qual.Pure;
     encoderInputBufferData.put(inputBuffer);
     encoderInputBuffer.timeUs = nextEncoderInputBufferTimeUs;
     computeNextEncoderInputBufferTimeUs(
-        /* bytesWritten= */ encoderInputBufferData.position(),
-        encoderInputAudioFormat.bytesPerFrame,
-        encoderInputAudioFormat.sampleRate);
+        /* bytesWritten= */ encoderInputBufferData.position(), encoderInputAudioFormat);
     encoderInputBuffer.setFlags(0);
     encoderInputBuffer.flip();
     inputBuffer.limit(bufferLimit);
@@ -355,15 +353,14 @@ import org.checkerframework.dataflow.qual.Pure;
     return transformationRequest.buildUpon().setAudioMimeType(actualFormat.sampleMimeType).build();
   }

-  private void computeNextEncoderInputBufferTimeUs(
-      long bytesWritten, int bytesPerFrame, int sampleRate) {
+  private void computeNextEncoderInputBufferTimeUs(long bytesWritten, AudioFormat audioFormat) {
     // The calculation below accounts for remainders and rounding. Without that it corresponds to
     // the following:
     //   bufferDurationUs = numberOfFramesInBuffer * sampleDurationUs
     //   where numberOfFramesInBuffer = bytesWritten / bytesPerFrame
     //   and sampleDurationUs = C.MICROS_PER_SECOND / sampleRate
     long numerator = bytesWritten * C.MICROS_PER_SECOND + encoderBufferDurationRemainder;
-    long denominator = (long) bytesPerFrame * sampleRate;
+    long denominator = (long) audioFormat.bytesPerFrame * audioFormat.sampleRate;
     long bufferDurationUs = numerator / denominator;
     encoderBufferDurationRemainder = numerator - bufferDurationUs * denominator;
     if (encoderBufferDurationRemainder > 0) { // Ceil division result.