/* Show how to use the libavformat and libavcodec API to demux and decode audio
 * and video data. Write the output as raw audio and video files to be played by
 * ffplay. */
/*
* Copyright (c) 2012 Stefano Sabatini
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* @file libavformat and libavcodec demuxing and decoding API usage example
* @example demux_decode.c
*
* Show how to use the libavformat and libavcodec API to demux and decode audio
* and video data. Write the output as raw audio and video files to be played by
* ffplay.
*/
{
/* NOTE(review): fragment of the output_video_frame() helper from FFmpeg's
 * demux_decode.c example. The function signature, the width/height/pix_fmt
 * comparison that guards this error path, the fprintf argument list, the
 * frame-copy call, and the fwrite to the rawvideo file are all missing from
 * this view — confirm against the upstream example; this cannot compile
 * as shown. */
/* To handle this change, one could call av_image_alloc again and
 * decode the following frames into another rawvideo file. */
/* Error path: a rawvideo output has no per-frame headers, so the decoded
 * frame geometry/format must never change mid-stream. The fprintf below is
 * missing its variadic arguments (old/new width, height, format names). */
fprintf(stderr, "Error: Width, height and pixel format have to be "
"constant in a rawvideo file, but the width, height or "
"pixel format of the input video changed:\n"
"old: width = %d, height = %d, format = %s\n"
"new: width = %d, height = %d, format = %s\n",
/* Signal the caller to stop demuxing; upstream uses -1 here. */
return -1;
}
/* copy decoded frame to destination buffer:
 * this is required since rawvideo expects non aligned data */
/* write to rawvideo file */
/* NOTE(review): the av_image_copy2() and fwrite() statements the two
 * comments above describe are absent from this fragment. */
return 0;
}
{
/* NOTE(review): fragment of the output_audio_frame() helper from FFmpeg's
 * demux_decode.c example. The function signature, the unpadded-linesize
 * computation, the printf argument list (frame counter, nb_samples, pts
 * via av_ts2timestr), and the fwrite of frame->extended_data[0] are all
 * missing from this view — cannot compile as shown. */
printf(
/* Missing variadic arguments: frame index, frame->nb_samples, and the
 * stringified pts — TODO confirm against upstream. */
"audio_frame n:%d nb_samples:%d pts:%s\n",
/* Write the raw audio data samples of the first plane. This works
 * fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However,
 * most audio decoders output planar audio, which uses a separate
 * plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P).
 * In other words, this code will write only the first audio channel
 * in these cases.
 * You should use libswresample or libavfilter to convert the frame
 * to packed data. */
return 0;
}
{
/* NOTE(review): fragment of the decode_packet() helper from FFmpeg's
 * demux_decode.c example. Missing from this view: the function signature,
 * the avcodec_send_packet() call whose failure the first fprintf reports,
 * the avcodec_receive_frame() loop, the AVERROR(EAGAIN)/AVERROR_EOF check
 * preceding the early `return 0`, the video-vs-audio dispatch that the
 * dangling `else` at the bottom belongs to, and av_frame_unref(). The
 * fragment cannot compile as shown. */
// submit the packet to the decoder
fprintf(stderr,
"Error submitting a packet for decoding (%s)\n",
av_err2str(
ret));
}
// get all the available frames from the decoder
// those two return values are special and mean there is no output
// frame available, but there were no errors during decoding
return 0;
/* Any other negative return from avcodec_receive_frame() is a real
 * decode error — presumably; verify against the upstream loop. */
fprintf(stderr,
"Error during decoding (%s)\n",
av_err2str(
ret));
}
// write the frame data to output file
/* NOTE(review): dangling `else` — the if-branch (video vs. audio output
 * call) it pairs with is missing from this fragment. */
else
}
}
{
/* NOTE(review): fragment of the open_codec_context() helper from FFmpeg's
 * demux_decode.c example. Missing from this view: the function signature,
 * the av_find_best_stream() call, every if-condition guarding the error
 * messages below, their variadic arguments (media type string, filename),
 * the `return` statements inside each error branch, and the output-file
 * fopen. Cannot compile as shown. */
fprintf(stderr, "Could not find %s stream in input file '%s'\n",
} else {
/* find decoder for the stream */
if (!dec) {
fprintf(stderr, "Failed to find %s codec\n",
}
/* Allocate a codec context for the decoder */
fprintf(stderr, "Failed to allocate the %s codec context\n",
}
/* Copy codec parameters from input stream to output codec context */
fprintf(stderr, "Failed to copy %s codec parameters to decoder context\n",
}
/* Init the decoders */
fprintf(stderr, "Failed to open %s codec\n",
}
/* On success, report the chosen stream index back to the caller. */
*stream_idx = stream_index;
}
return 0;
}
{
/* NOTE(review): fragment of get_format_from_sample_fmt() from FFmpeg's
 * demux_decode.c example — maps an AVSampleFormat to the ffplay `-f`
 * format-name string. Missing from this view: the function signature,
 * the sample_fmt_entry struct members (sample_fmt plus little/big-endian
 * format names), the table initializer entries, the for-loop header over
 * the table, the assignment of *fmt on a match, and the fprintf argument.
 * Cannot compile as shown. */
struct sample_fmt_entry {
} sample_fmt_entries[] = {
};
struct sample_fmt_entry *
entry = &sample_fmt_entries[
i];
if (sample_fmt ==
entry->sample_fmt) {
/* Match found: upstream sets *fmt to the endian-appropriate name here. */
return 0;
}
}
/* No table entry matched the decoder's output format. */
fprintf(stderr,
"sample format %s is not supported as output format\n",
return -1;
}
/* NOTE(review): gutted main() of FFmpeg's demux_decode.c example. Missing
 * from this view: all local/global variable references' declarations
 * (src_filename, fmt_ctx, frame, pkt, stream indices, output FILEs, etc.),
 * the avformat_open_input / avformat_find_stream_info calls, both
 * open_codec_context() invocations, av_image_alloc, av_dump_format, the
 * av_read_frame demux loop, the flush calls, the planar-audio sample-format
 * handling around get_format_from_sample_fmt(), and the entire `end:`
 * cleanup sequence. Cannot compile as shown; the comments below annotate
 * only what survives. */
int main (
int argc,
char **argv)
{
/* Exactly three arguments required: input, video output, audio output. */
if (argc != 4) {
fprintf(stderr, "usage: %s input_file video_output_file audio_output_file\n"
"API example program to show how to read frames from an input file.\n"
"This program reads frames from a file, decodes them, and writes decoded\n"
"video frames to a rawvideo file named video_output_file, and decoded\n"
"audio frames to a rawaudio file named audio_output_file.\n",
argv[0]);
exit(1);
}
/* open input file, and allocate format context */
fprintf(stderr,
"Could not open source file %s\n",
src_filename);
exit(1);
}
/* retrieve stream information */
fprintf(stderr, "Could not find stream information\n");
exit(1);
}
goto end;
}
/* allocate image where the decoded image will be put */
fprintf(stderr, "Could not allocate raw video buffer\n");
goto end;
}
}
goto end;
}
}
/* dump input information to stderr */
fprintf(stderr, "Could not find audio or video stream in the input, aborting\n");
goto end;
}
fprintf(stderr, "Could not allocate frame\n");
goto end;
}
fprintf(stderr, "Could not allocate packet\n");
goto end;
}
/* read frames from the file */
// check if the packet belongs to a stream we are interested in, otherwise
// skip it
break;
}
/* flush the decoders */
printf(
"Demuxing succeeded.\n");
/* Tell the user how to play the raw video output (arguments missing:
 * pix_fmt name, width, height, filename — TODO confirm upstream). */
printf(
"Play the output video file with the command:\n"
"ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
}
const char *fmt;
/* Planar decoder output: only the first channel was written (see
 * output_audio_frame), so warn and force the playback channel count to 1. */
printf(
"Warning: the sample format the decoder produced is planar "
"(%s). This example will output the first channel only.\n",
packed ? packed : "?");
n_channels = 1;
}
goto end;
/* Playback hint for the raw audio output (arguments missing: format name,
 * channel count, sample rate, filename). */
printf(
"Play the output audio file with the command:\n"
"ffplay -f %s -ac %d -ar %d %s\n",
}
/* Common cleanup label — the avcodec_free_context / avformat_close_input /
 * fclose / av_frame_free / av_packet_free / av_free calls that upstream
 * places here are missing from this fragment. */
end:
}