I am trying to implement https://android.googlesource.com/platform/cts/+/jb-mr2-release/tests/tests/media/src/android/media/cts/DecodeEditEncodeTest.java
but modifying the source to read from an MP4 video file instead of generating one. The MIME type is video/avc, bitrate 288 kbps, I-frame interval 100, width 176, height 144. The file size is 6 MB.
When I decode the video and render the frames to the output surface, I can save a frame to a bitmap and it looks fine. But at the end, after encoding (with the same parameters as the original video), I get a 700 KB file that no player will open, so it may be corrupted.
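One thing I am not sure about: I write the encoder's output buffers straight to a FileStream (see below), so as far as I understand the result is a bare H.264 elementary stream rather than a real MP4 container. For reference, here is a minimal sketch of how I think the output could be muxed instead. This is my assumption, not code from the CTS test; it uses Xamarin's Android.Media.MediaMuxer bindings (API 18+), and outputPath, videoTrack and muxerStarted are placeholder names of mine:

// Sketch only (assumption): wrap the encoder output in an MP4 container.
MediaMuxer muxer = new MediaMuxer(outputPath, MuxerOutputType.Mpeg4);
int videoTrack = -1;
bool muxerStarted = false;

// In the encoder's OutputFormatChanged branch of the drain loop, once the
// encoder reports its real output format:
videoTrack = muxer.AddTrack(encoder.OutputFormat);
muxer.Start();
muxerStarted = true;

// In place of fStream.Write(...). Codec-config buffers (SPS/PPS) are already
// carried in the track format, so they must not be written as samples:
if (muxerStarted && info.Size != 0 &&
        (info.Flags & MediaCodecBufferFlags.CodecConfig) == 0)
{
    muxer.WriteSampleData(videoTrack, encodedData, info);
}

// After the loop:
muxer.Stop();
muxer.Release();

I have not wired this in yet; I mention it because the missing container may be the reason the 700 KB file will not play.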
extractor = new MediaExtractor();
extractor.SetDataSource(filePath);
for (int i = 0; i < extractor.TrackCount; i++)
{
    inputFormat = extractor.GetTrackFormat(i);
    string mime = inputFormat.GetString(MediaFormat.KeyMime);
    if (mime.StartsWith("video/"))
    {
        extractor.SelectTrack(i);
        mimeType = mime;
        break;
    }
}
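// Note: if the file has no video track, inputFormat stays null and the
// GetInteger calls below will throw; a guard here would make that explicit.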
mWidth = inputFormat.GetInteger(MediaFormat.KeyWidth);
mHeight = inputFormat.GetInteger(MediaFormat.KeyHeight);
// Create an encoder format that matches the input format. (Might be able to
// just re-use the extractor's track format, since we want the output to have
// the same parameters.)
MediaFormat outputFormat = MediaFormat.CreateVideoFormat(mimeType, mWidth, mHeight);
outputFormat.SetInteger(MediaFormat.KeyColorFormat,
        (int)MediaCodecCapabilities.Formatsurface);
outputFormat.SetInteger(MediaFormat.KeyBitRate, 288000);
outputFormat.SetInteger(MediaFormat.KeyFrameRate,
        inputFormat.GetInteger(MediaFormat.KeyFrameRate));
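// Caution: KEY_FRAME_RATE is optional in extractor output, and GetInteger
// throws if the key is missing, so a guarded lookup with a sensible default
// would be safer here.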
outputFormat.SetInteger(MediaFormat.KeyIFrameInterval, 100);
outputData.setMediaFormat(outputFormat);
encoder = MediaCodec.CreateEncoderByType(mimeType);
encoder.Configure(outputFormat, null, null, MediaCodecConfigFlags.Encode);
inputSurface = new InputSurface(encoder.CreateInputSurface());
inputSurface.makeCurrent();
encoder.Start();
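// Note: the ordering above matters -- CreateInputSurface() is only valid
// between Configure() and Start() on the encoder.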
// OutputSurface uses the EGL context created by InputSurface.
decoder = MediaCodec.CreateDecoderByType(mimeType);
outputSurface = new OutputSurface();
outputSurface.changeFragmentShader(FRAGMENT_SHADER);
decoder.Configure(inputFormat, outputSurface.getSurface(), null, 0);
decoder.Start();
editVideoData2(extractor, decoder, outputSurface, inputSurface, encoder, outputData);
And here is the decode-edit-encode loop:
while (!outputDone)
{
    if (VERBOSE) Log.Debug(TAG, "edit loop");
    // Feed more data to the decoder.
    if (!inputDone)
    {
        int inputBufIndex = decoder.DequeueInputBuffer(TIMEOUT_USEC);
        if (inputBufIndex >= 0)
        {
            ByteBuffer buffer = decoderInputBuffers[inputBufIndex];
            buffer.Clear();
            int sampleSize = extractor.ReadSampleData(buffer, 0);
            if (sampleSize < 0)
            {
                inputChunk++;
                // End of stream -- send empty frame with EOS flag set.
                decoder.QueueInputBuffer(inputBufIndex, 0, 0, 0L,
                        MediaCodecBufferFlags.EndOfStream);
                inputDone = true;
                if (VERBOSE) Log.Debug(TAG, "sent input EOS (with zero-length frame)");
            }
            else
            {
                // Copy a chunk of input to the decoder. No CODEC_CONFIG flag is
                // needed here: the extractor delivers the codec-specific data
                // (csd-0/csd-1) inside the MediaFormat passed to Configure().
                decoder.QueueInputBuffer(inputBufIndex, 0, sampleSize,
                        extractor.SampleTime, 0);
                extractor.Advance();
                inputChunk++;
            }
        }
        else
        {
            if (VERBOSE) Log.Debug(TAG, "input buffer not available");
        }
    }
    // Assume output is available. Loop until both assumptions are false.
    bool decoderOutputAvailable = !decoderDone;
    bool encoderOutputAvailable = true;
    while (decoderOutputAvailable || encoderOutputAvailable)
    {
        // Start by draining any pending output from the encoder. It's important to
        // do this before we try to stuff any more data in.
        int encoderStatus = encoder.DequeueOutputBuffer(info, TIMEOUT_USEC);
        if (encoderStatus == (int)MediaCodecInfoState.TryAgainLater)
        {
            // no output available yet
            if (VERBOSE) Log.Debug(TAG, "no output from encoder available");
            encoderOutputAvailable = false;
        }
        else if (encoderStatus == (int)MediaCodecInfoState.OutputBuffersChanged)
        {
            encoderOutputBuffers = encoder.GetOutputBuffers();
            if (VERBOSE) Log.Debug(TAG, "encoder output buffers changed");
        }
        else if (encoderStatus == (int)MediaCodecInfoState.OutputFormatChanged)
        {
            MediaFormat newFormat = encoder.OutputFormat;
            if (VERBOSE) Log.Debug(TAG, "encoder output format changed: " + newFormat);
        }
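        // (If the output were muxed, the OutputFormatChanged branch above is
        // where the track would be added and the muxer started -- see the
        // MediaMuxer sketch near the top.)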
        else if (encoderStatus < 0)
        {
            Log.Error(TAG, "unexpected result from encoder.DequeueOutputBuffer: " + encoderStatus);
        }
        else // encoderStatus >= 0
        {
            ByteBuffer encodedData = encoderOutputBuffers[encoderStatus];
            if (encodedData == null)
            {
                // Bail out rather than dereferencing a null buffer below.
                throw new InvalidOperationException("encoderOutputBuffer " + encoderStatus + " was null");
            }
            // Write the data to the output "file".
            if (info.Size != 0)
            {
                encodedData.Position(info.Offset);
                encodedData.Limit(info.Offset + info.Size);
                byte[] data = new byte[encodedData.Remaining()];
                encodedData.Get(data);
                fStream.Write(data, 0, data.Length);
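                // NOTE: this writes bare H.264 access units (including the
                // CodecConfig buffer with SPS/PPS) to the file; without an MP4
                // container most players will not recognize the result.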
                // outputData.addChunk(encodedData, (int)info.Flags, info.PresentationTimeUs);
                outputCount++;
                if (VERBOSE) Log.Debug(TAG, "encoder output " + info.Size + " bytes");
            }
            outputDone = (info.Flags & MediaCodecBufferFlags.EndOfStream) != 0;
            encoder.ReleaseOutputBuffer(encoderStatus, false);
        }
        if (encoderStatus != (int)MediaCodecInfoState.TryAgainLater)
        {
            // Continue attempts to drain output.
            continue;
        }
        // Encoder is drained, check to see if we've got a new frame of output from
        // the decoder. (The output is going to a Surface, rather than a ByteBuffer,
        // but we still get information through BufferInfo.)
        if (!decoderDone)
        {
            int decoderStatus = decoder.DequeueOutputBuffer(info, TIMEOUT_USEC);
            if (decoderStatus == (int)MediaCodecInfoState.TryAgainLater)
            {
                // no output available yet
                if (VERBOSE) Log.Debug(TAG, "no output from decoder available");
                decoderOutputAvailable = false;
            }
            else if (decoderStatus == (int)MediaCodecInfoState.OutputBuffersChanged)
            {
                //decoderOutputBuffers = decoder.GetOutputBuffers();
                if (VERBOSE) Log.Debug(TAG, "decoder output buffers changed (we don't care)");
            }
            else if (decoderStatus == (int)MediaCodecInfoState.OutputFormatChanged)
            {
                // expected before the first buffer of data
                MediaFormat newFormat = decoder.OutputFormat;
                if (VERBOSE) Log.Debug(TAG, "decoder output format changed: " + newFormat);
            }
            else if (decoderStatus < 0)
            {
                Log.Error(TAG, "unexpected result from decoder.DequeueOutputBuffer: " + decoderStatus);
            }
            else // decoderStatus >= 0
            {
                if (VERBOSE) Log.Debug(TAG, "surface decoder given buffer "
                        + decoderStatus + " (size=" + info.Size + ")");
                // The ByteBuffers are null references, but we still get a nonzero
                // size for the decoded data.
                bool doRender = (info.Size != 0);
                // As soon as we call ReleaseOutputBuffer, the buffer will be forwarded
                // to SurfaceTexture to convert to a texture. The API doesn't
                // guarantee that the texture will be available before the call
                // returns, so we need to wait for the onFrameAvailable callback to
                // fire. If we don't wait, we risk rendering from the previous frame.
                decoder.ReleaseOutputBuffer(decoderStatus, doRender);
                if (doRender)
                {
                    // This waits for the image and renders it after it arrives.
                    if (VERBOSE) Log.Debug(TAG, "awaiting frame");
                    outputSurface.awaitNewImage();
                    outputSurface.drawImage();
                    outputSurface.saveFrame(Android.OS.Environment.ExternalStorageDirectory + "/test.jpg", mWidth, mHeight);
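                    // Debug aid: saveFrame() does a glReadPixels and a JPEG
                    // compress for every frame and overwrites the same file,
                    // which slows the whole pipeline down considerably.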
                    // Send it to the encoder.
                    inputSurface.setPresentationTime(info.PresentationTimeUs * 1000);
                    if (VERBOSE) Log.Debug(TAG, "swapBuffers");
                    inputSurface.swapBuffers();
                }
                if ((info.Flags & MediaCodecBufferFlags.EndOfStream) != 0)
                {
                    // forward the decoder EOS to the encoder
                    if (VERBOSE) Log.Debug(TAG, "signaling input EOS");