/*
 * Copyright (c) 2011 Nicolas George <nicolas.george@normalesup.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Stream (de)synchronization filter
 */
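
/*
 * Overview (added for clarity; derived from the code below): the filter
 * copies two audio streams unchanged from its two inputs to its two
 * outputs, but decides at each step which queued frame to forward next by
 * evaluating the "expr" option: a negative result selects the first
 * stream, a result >= 0 selects the second. With the default "t1-t2",
 * the stream whose timestamp lags behind is forwarded first.
 */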

#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "audio.h"
#include "internal.h"

#define QUEUE_SIZE 16

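/*
 * Variables available to the selection expression; each pair is updated in
 * send_out() as frames leave the corresponding output:
 *   b1, b2 -- number of frames forwarded so far on each stream
 *   s1, s2 -- number of samples forwarded so far on each stream
 *   t1, t2 -- end timestamp, in seconds, of the last frame forwarded
 */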
static const char * const var_names[] = {
    "b1", "b2",
    "s1", "s2",
    "t1", "t2",
    NULL
};

enum var_name {
    VAR_B1, VAR_B2,
    VAR_S1, VAR_S2,
    VAR_T1, VAR_T2,
    VAR_NB
};

typedef struct {
    const AVClass *class;
    AVExpr *expr;
    char *expr_str;
    double var_values[VAR_NB];
    struct buf_queue {
        AVFrame *buf[QUEUE_SIZE];
        unsigned tail, nb;
        /* buf[tail] is the oldest,
           buf[(tail + nb) % QUEUE_SIZE] is where the next is added */
    } queue[2];
    int req[2];   /* frames requested and not yet delivered on each output */
    int next_out; /* index of the stream to forward next */
    int eof;      /* bitmask, one bit for each stream */
} AStreamSyncContext;

#define OFFSET(x) offsetof(AStreamSyncContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
static const AVOption astreamsync_options[] = {
    { "expr", "set stream selection expression", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "t1-t2" }, .flags = FLAGS },
    { "e",    "set stream selection expression", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "t1-t2" }, .flags = FLAGS },
    { NULL }
};
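
/*
 * Illustrative usage (file names and pad labels are hypothetical, not part
 * of the original source): forward whichever input is behind so the two
 * outputs stay roughly in step.
 *
 *     ffmpeg -i a.wav -i b.wav -filter_complex \
 *         '[0:a][1:a] astreamsync=expr=t1-t2 [o1][o2]' \
 *         -map '[o1]' out1.wav -map '[o2]' out2.wav
 */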

AVFILTER_DEFINE_CLASS(astreamsync);

static av_cold int init(AVFilterContext *ctx)
{
    AStreamSyncContext *as = ctx->priv;
    int r, i;

    r = av_expr_parse(&as->expr, as->expr_str, var_names,
                      NULL, NULL, NULL, NULL, 0, ctx);
    if (r < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error in expression \"%s\"\n", as->expr_str);
        return r;
    }
    for (i = 0; i < 42; i++)
        av_expr_eval(as->expr, as->var_values, NULL); /* exercise the prng */
    return 0;
}

static int query_formats(AVFilterContext *ctx)
{
    int i;
    AVFilterFormats *formats, *rates;
    AVFilterChannelLayouts *layouts;

    for (i = 0; i < 2; i++) {
        formats = ctx->inputs[i]->in_formats;
        ff_formats_ref(formats, &ctx->inputs[i]->out_formats);
        ff_formats_ref(formats, &ctx->outputs[i]->in_formats);
        rates = ff_all_samplerates();
        ff_formats_ref(rates, &ctx->inputs[i]->out_samplerates);
        ff_formats_ref(rates, &ctx->outputs[i]->in_samplerates);
        layouts = ctx->inputs[i]->in_channel_layouts;
        ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts);
        ff_channel_layouts_ref(layouts, &ctx->outputs[i]->in_channel_layouts);
    }
    return 0;
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    int id = outlink == ctx->outputs[1];

    outlink->sample_rate = ctx->inputs[id]->sample_rate;
    outlink->time_base   = ctx->inputs[id]->time_base;
    return 0;
}

static int send_out(AVFilterContext *ctx, int out_id)
{
    AStreamSyncContext *as = ctx->priv;
    struct buf_queue *queue = &as->queue[out_id];
    AVFrame *buf = queue->buf[queue->tail];
    int ret;

    queue->buf[queue->tail] = NULL;
    /* update the per-stream counters seen by the selection expression:
       frames sent, samples sent, and end timestamp of this frame */
    as->var_values[VAR_B1 + out_id]++;
    as->var_values[VAR_S1 + out_id] += buf->nb_samples;
    if (buf->pts != AV_NOPTS_VALUE)
        as->var_values[VAR_T1 + out_id] =
            av_q2d(ctx->outputs[out_id]->time_base) * buf->pts;
    as->var_values[VAR_T1 + out_id] += buf->nb_samples /
                                       (double)ctx->inputs[out_id]->sample_rate;
    ret = ff_filter_frame(ctx->outputs[out_id], buf);
    queue->nb--;
    queue->tail = (queue->tail + 1) % QUEUE_SIZE;
    if (as->req[out_id])
        as->req[out_id]--;
    return ret;
}

static void send_next(AVFilterContext *ctx)
{
    AStreamSyncContext *as = ctx->priv;
    int i;

    /* forward frames from the currently selected stream for as long as its
       queue is non-empty, re-evaluating the expression after each frame */
    while (1) {
        if (!as->queue[as->next_out].nb)
            break;
        send_out(ctx, as->next_out);
        if (!as->eof)
            as->next_out = av_expr_eval(as->expr, as->var_values, NULL) >= 0;
    }
    /* if a queue is full, flush one frame from it so input can keep flowing */
    for (i = 0; i < 2; i++)
        if (as->queue[i].nb == QUEUE_SIZE)
            send_out(ctx, i);
}

static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AStreamSyncContext *as = ctx->priv;
    int id = outlink == ctx->outputs[1];

    as->req[id]++;
    while (as->req[id] && !(as->eof & (1 << id))) {
        if (as->queue[as->next_out].nb) {
            send_next(ctx);
        } else {
            /* tentatively mark the selected input as at EOF; filter_frame()
               clears the bit if a frame actually arrives, otherwise switch
               to the other stream */
            as->eof |= 1 << as->next_out;
            ff_request_frame(ctx->inputs[as->next_out]);
            if (as->eof & (1 << as->next_out))
                as->next_out = !as->next_out;
        }
    }
    return 0;
}

static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    AStreamSyncContext *as = ctx->priv;
    int id = inlink == ctx->inputs[1];

    as->queue[id].buf[(as->queue[id].tail + as->queue[id].nb++) % QUEUE_SIZE] =
        insamples;
    as->eof &= ~(1 << id);
    send_next(ctx);
    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    AStreamSyncContext *as = ctx->priv;

    av_expr_free(as->expr);
    as->expr = NULL;
}

static const AVFilterPad astreamsync_inputs[] = {
    {
        .name         = "in1",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },{
        .name         = "in2",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad astreamsync_outputs[] = {
    {
        .name          = "out1",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },{
        .name          = "out2",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_af_astreamsync = {
    .name          = "astreamsync",
    .description   = NULL_IF_CONFIG_SMALL("Copy two streams of audio data "
                                          "in a configurable order."),
    .priv_size     = sizeof(AStreamSyncContext),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = astreamsync_inputs,
    .outputs       = astreamsync_outputs,
    .priv_class    = &astreamsync_class,
};
