/* Generate a synthetic audio and video signal and mux them to a media file in any supported libavformat format. The default codecs are used. */
/*
* Copyright (c) 2003 Fabrice Bellard
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* @file libavformat muxing API usage example
* @example mux.c
*
* Generate a synthetic audio and video signal and mux them to a media file in
* any supported libavformat format. The default codecs are used.
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#define STREAM_DURATION 10.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
#define SCALE_FLAGS SWS_BICUBIC
// a wrapper around a single output AVStream
/* pts of the next frame that will be generated */
/* NOTE(review): fragment only — the enclosing function signature and the
 * printf() argument list are missing from this extraction. Judging by the
 * format string, this is the body of the example's log_packet() helper,
 * which prints an AVPacket's pts/dts/duration (raw and rescaled to the
 * stream time base) plus its stream index — TODO: restore the missing
 * signature and arguments from the upstream example before compiling. */
{
printf(
"pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
}
/* NOTE(review): fragment of the example's write_frame() — the function
 * signature, the avcodec_send_frame()/avcodec_receive_packet() calls, the
 * surrounding loop/condition lines, and the av_packet_rescale_ts() /
 * av_interleaved_write_frame() calls are all missing from this extraction.
 * Only the error-reporting arms and comments survive. TODO: restore from
 * the upstream example; do not compile as-is. */
{
// send the frame to the encoder
/* presumably guarded by a failed avcodec_send_frame() check — the `if`
 * line is missing here */
fprintf(stderr, "Error sending a frame to the encoder: %s\n",
exit(1);
}
break;
/* error arm for the receive/encode step; the guarding condition is missing */
fprintf(stderr,
"Error encoding a frame: %s\n",
av_err2str(
ret));
exit(1);
}
/* rescale output packet timestamp values from codec to stream timebase */
/* Write the compressed frame to the media file. */
/* pkt is now blank (av_interleaved_write_frame() takes ownership of
 * its contents and resets pkt), so that no unreferencing is necessary.
 * This would be different if one used av_write_frame(). */
/* error arm for the write step; the guarding condition is missing */
fprintf(stderr,
"Error while writing output packet: %s\n",
av_err2str(
ret));
exit(1);
}
}
}
/* Add an output stream. */
/* NOTE(review): fragment of the example's add_stream() — the signature,
 * the avcodec_find_encoder() / av_packet_alloc() / avformat_new_stream() /
 * avcodec_alloc_context3() calls whose failures are reported below, the
 * AVMEDIA_TYPE_AUDIO / AVMEDIA_TYPE_VIDEO case labels, and most of the
 * codec-context parameter assignments are missing from this extraction.
 * TODO: restore from the upstream example before use. */
{
/* find the encoder */
if (!(*codec)) {
fprintf(stderr, "Could not find encoder for '%s'\n",
exit(1);
}
/* failure arm — presumably for av_packet_alloc(); guard line missing */
fprintf(stderr, "Could not allocate AVPacket\n");
exit(1);
}
/* failure arm — presumably for avformat_new_stream(); guard line missing */
fprintf(stderr, "Could not allocate stream\n");
exit(1);
}
/* failure arm — presumably for avcodec_alloc_context3(); guard line missing */
fprintf(stderr, "Could not alloc an encoding context\n");
exit(1);
}
switch ((*codec)->type) {
/* audio branch: pick the encoder's first supported sample format, and
 * prefer 44100 Hz if the encoder lists supported sample rates */
c->sample_fmt = (*codec)->sample_fmts ?
if ((*codec)->supported_samplerates) {
c->sample_rate = (*codec)->supported_samplerates[0];
for (
i = 0; (*codec)->supported_samplerates[
i];
i++) {
if ((*codec)->supported_samplerates[
i] == 44100)
}
}
break;
/* video branch (case label missing from extraction) */
/* Resolution must be a multiple of two. */
/* timebase: This is the fundamental unit of time (in seconds) in terms
 * of which frame timestamps are represented. For fixed-fps content,
 * timebase should be 1/framerate and timestamp increments should be
 * identical to 1. */
c->gop_size = 12;
/* emit one intra frame every twelve frames at most */
/* just for testing, we also add B-frames */
}
/* Needed to avoid using macroblocks in which some coeffs overflow.
 * This does not happen with normal video, it just happens here as
 * the motion of the chroma plane does not match the luma plane. */
}
break;
default:
break;
}
/* Some formats want stream headers to be separate. */
}
/**************************************************************/
/* audio output */
/* NOTE(review): fragment of the example's alloc_audio_frame() — only the
 * tail of the parameter list is visible; the return type, leading
 * parameters (sample format, channel layout), the av_frame_alloc() call,
 * the av_frame_get_buffer() call, and the final return are missing from
 * this extraction. What remains allocates an AVFrame and fills in its
 * audio properties. TODO: restore from the upstream example. */
int sample_rate, int nb_samples)
{
/* failure arm — presumably for av_frame_alloc(); guard line missing */
fprintf(stderr, "Error allocating an audio frame\n");
exit(1);
}
frame->format = sample_fmt;
frame->sample_rate = sample_rate;
frame->nb_samples = nb_samples;
if (nb_samples) {
/* failure arm — presumably for av_frame_get_buffer(); guard line missing */
fprintf(stderr, "Error allocating an audio buffer\n");
exit(1);
}
}
}
/* NOTE(review): fragment of the example's open_audio() — the signature,
 * the avcodec_open2() call, the channel-layout setup, the frame-size
 * condition that selects between 10000 samples and c->frame_size, the
 * alloc_audio_frame() call sites (only their trailing argument lines at
 * the two `c->sample_rate, nb_samples);` lines survive), and the
 * swr_alloc()/av_opt_set_*()/swr_init() calls are missing from this
 * extraction. TODO: restore from the upstream example. */
{
int nb_samples;
/* open it */
fprintf(stderr,
"Could not open audio codec: %s\n",
av_err2str(
ret));
exit(1);
}
/* init signal generator */
ost->tincr = 2 *
M_PI * 110.0 /
c->sample_rate;
/* increment frequency by 110 Hz per second */
ost->tincr2 = 2 *
M_PI * 110.0 /
c->sample_rate /
c->sample_rate;
/* the `if` selecting variable frame size is missing here */
nb_samples = 10000;
else
nb_samples =
c->frame_size;
/* trailing arguments of two alloc_audio_frame() calls; the call heads
 * (frame and tmp_frame allocation) are missing */
c->sample_rate, nb_samples);
c->sample_rate, nb_samples);
/* copy the stream parameters to the muxer */
fprintf(stderr, "Could not copy the stream parameters\n");
exit(1);
}
/* create resampler context */
fprintf(stderr, "Could not allocate resampler context\n");
exit(1);
}
/* set options */
/* initialize the resampling context */
fprintf(stderr, "Failed to initialize the resampling context\n");
exit(1);
}
}
/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
 * 'nb_channels' channels. */
/* NOTE(review): fragment of the example's get_audio_frame() — the
 * signature, the local declarations (j, i, v), the end-of-stream check
 * against STREAM_DURATION, the ost->t/ost->tincr updates, and the final
 * pts bookkeeping/return are missing from this extraction. The surviving
 * loop fills every channel of each sample with a sine value. */
{
int16_t *q = (int16_t*)
frame->data[0];
/* check if we want to generate more frames */
for (j = 0; j <
frame->nb_samples; j++) {
v = (int)(sin(
ost->t) * 10000);
for (
i = 0;
i <
ost->enc->ch_layout.nb_channels;
i++)
*q++ = v;
}
}
/*
 * encode one audio frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
/* NOTE(review): fragment of the example's write_audio_frame() — the
 * signature, the get_audio_frame() call, the swr_get_delay()-based
 * dst_nb_samples computation, the av_frame_make_writable() guard, the
 * swr_convert() call head (only its argument lines survive below), the
 * pts rescale, and the final write_frame() return are missing from this
 * extraction. TODO: restore from the upstream example. */
{
int dst_nb_samples;
/* convert samples from native format to destination codec format, using the resampler */
/* compute destination number of samples */
/* when we pass a frame to the encoder, it may keep a reference to it
 * internally;
 * make sure we do not overwrite it here
 */
exit(1);
/* convert to destination format */
/* argument lines of a swr_convert() call; the call head and the `ret`
 * assignment are missing */
ost->frame->data, dst_nb_samples,
(
const uint8_t **)
frame->data,
frame->nb_samples);
fprintf(stderr, "Error while converting\n");
exit(1);
}
ost->samples_count += dst_nb_samples;
}
}
/**************************************************************/
/* video output */
/* NOTE(review): fragment of the example's alloc_frame() — the signature,
 * the av_frame_alloc() call, the width/height/format assignments, the
 * av_frame_get_buffer() call whose failure is reported below, and the
 * return are missing from this extraction. */
{
/* allocate the buffers for the frame data */
fprintf(stderr, "Could not allocate frame data.\n");
exit(1);
}
}
/* NOTE(review): fragment of the example's open_video() — the signature,
 * the avcodec_open2() call, the alloc_frame() call sites for ost->frame
 * and the temporary YUV420P frame, the pixel-format comparison guarding
 * the tmp_frame allocation, and the avcodec_parameters_from_context()
 * call are missing from this extraction. Only the error arms and
 * comments survive. TODO: restore from the upstream example. */
{
/* open the codec */
fprintf(stderr,
"Could not open video codec: %s\n",
av_err2str(
ret));
exit(1);
}
/* allocate and init a reusable frame */
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
/* If the output format is not YUV420P, then a temporary YUV420P
 * picture is needed too. It is then converted to the required
 * output format. */
fprintf(stderr, "Could not allocate temporary video frame\n");
exit(1);
}
}
/* copy the stream parameters to the muxer */
fprintf(stderr, "Could not copy the stream parameters\n");
exit(1);
}
}
/* Prepare a dummy image. */
/* NOTE(review): fragment of the example's fill_yuv_image() — the
 * signature, the outer `for (y ...)` loops, and the per-pixel writes
 * into pict->data[0..2] (the synthetic Y/Cb/Cr pattern) are missing
 * from this extraction; only the inner x-loops' headers survive. */
{
/* Y */
for (x = 0; x <
width; x++)
/* Cb and Cr */
for (x = 0; x <
width / 2; x++) {
}
}
}
/* NOTE(review): fragment of the example's get_video_frame() — the
 * signature, the STREAM_DURATION end-of-stream check, the
 * av_frame_make_writable() guard, the sws_getContext() call whose
 * failure is reported below, the sws_scale() call head (only trailing
 * argument lines survive), the fill_yuv_image() calls, and the return
 * are missing from this extraction. TODO: restore from the upstream
 * example. */
{
/* check if we want to generate more frames */
/* when we pass a frame to the encoder, it may keep a reference to it
 * internally; make sure we do not overwrite it here */
exit(1);
/* as we only generate a YUV420P picture, we must convert it
 * to the codec pixel format if needed */
fprintf(stderr,
"Could not initialize the conversion context\n");
exit(1);
}
}
/* trailing argument lines of a sws_scale() call; the call head is missing */
ost->tmp_frame->linesize, 0,
c->height,
ost->frame->data,
} else {
}
ost->frame->pts =
ost->next_pts++;
}
/*
* encode one video frame and send it to the muxer
* return 1 when encoding is finished, 0 otherwise
*/
/* NOTE(review): empty brace pair — by position (after the "encode one
 * video frame" comment) this is the example's write_video_frame(), whose
 * signature and entire body (a write_frame() call on get_video_frame())
 * are missing from this extraction. */
{
}
/* NOTE(review): empty brace pair — by position this is the example's
 * close_stream(), whose signature and body (freeing the codec context,
 * frames, packet, and sws/swr contexts) are missing from this
 * extraction. */
{
}
/**************************************************************/
/* media file output */
/*
 * Program entry point: parse the output filename (and optional
 * "-flags"/"-fflags" option pairs), set up the muxer with default audio
 * and video streams, encode synthetic signals, and write the file.
 *
 * NOTE(review): heavily gutted by this extraction — the declarations of
 * oc / ost structs / opt / i / ret, the avformat_alloc_output_context2()
 * calls, the add_stream() / open_audio() / open_video() calls, the
 * avio_open() / avformat_write_header() calls whose error arms survive
 * below, the encode-loop stream selection, and the trailing
 * av_write_trailer() / close_stream() / avio_closep() /
 * avformat_free_context() calls are all missing. The required FFmpeg
 * headers (libavformat/avformat.h etc.) are also absent from the include
 * block above. TODO: restore from the upstream example before compiling.
 */
int main(
int argc,
char **argv)
{
const char *filename;
const AVCodec *audio_codec, *video_codec;
int have_video = 0, have_audio = 0;
int encode_video = 0, encode_audio = 0;
if (argc < 2) {
printf(
"usage: %s output_file\n"
"API example program to output a media file with libavformat.\n"
"This program generates a synthetic audio and video stream, encodes and\n"
"muxes them into a file named output_file.\n"
"The output format is automatically guessed according to the file extension.\n"
"Raw images can also be output by using '%%d' in the filename.\n"
"\n", argv[0]);
return 1;
}
filename = argv[1];
/* consume option/value pairs after the filename; the av_dict_set() body
 * of this loop is missing from the extraction */
for (
i = 2;
i+1 < argc;
i+=2) {
if (!strcmp(argv[
i],
"-flags") || !strcmp(argv[
i],
"-fflags"))
}
/* allocate the output media context */
if (!oc) {
printf(
"Could not deduce output format from file extension: using MPEG.\n");
}
if (!oc)
return 1;
/* Add the audio and video streams using the default format codecs
 * and initialize the codecs. */
have_video = 1;
encode_video = 1;
}
have_audio = 1;
encode_audio = 1;
}
/* Now that all the parameters are set, we can open the audio and
 * video codecs and allocate the necessary encode buffers. */
if (have_video)
if (have_audio)
/* open the output file, if needed */
fprintf(stderr, "Could not open '%s': %s\n", filename,
return 1;
}
}
/* Write the stream header, if any. */
fprintf(stderr, "Error occurred when opening output file: %s\n",
return 1;
}
while (encode_video || encode_audio) {
/* select the stream to encode */
if (encode_video &&
} else {
}
}
/* Close each codec. */
if (have_video)
if (have_audio)
/* Close the output file. */
/* free the stream */
return 0;
}