vf_geq.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at>
3  * Copyright (C) 2012 Clément Bœsch <ubitux@gmail.com>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License along
18  * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
19  * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20  */
21 
22 /**
23  * @file
24  * Generic equation change filter
25  * Originally written by Michael Niedermayer for the MPlayer project, and
26  * ported by Clément Bœsch for FFmpeg.
27  */
28 
29 #include "libavutil/avstring.h"
30 #include "libavutil/eval.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/pixdesc.h"
33 #include "internal.h"
34 
typedef struct {
    const AVClass *class;   ///< AVOptions class pointer (set via priv_class)
    AVExpr *e[4];           ///< parsed expressions, one per plane (Y, Cb, Cr, alpha)
    char *expr_str[4];      ///< expression strings for each plane (owned/freed by the AVOption system)
    int framenum;           ///< frame counter, exported to the expressions as "N"
    AVFrame *picref;        ///< current input buffer, sampled by the lum()/cb()/cr()/alpha() callbacks
    int hsub, vsub;         ///< chroma subsampling shifts (log2), cached from the input pix fmt descriptor
    int planes;             ///< number of components of the input pixel format
} GEQContext;
44 
#define OFFSET(x) offsetof(GEQContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* One expression string per plane. Only lum_expr is mandatory; a missing
 * chroma expression falls back on the other chroma expression (or on luma
 * when both are unset) and a missing alpha expression defaults to "255" --
 * see geq_init(). */
static const AVOption geq_options[] = {
    { "lum_expr",   "set luminance expression",   OFFSET(expr_str[0]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "cb_expr",    "set chroma blue expression", OFFSET(expr_str[1]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "cr_expr",    "set chroma red expression",  OFFSET(expr_str[2]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "alpha_expr", "set alpha expression",       OFFSET(expr_str[3]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    {NULL},
};
55 
57 
58 static inline double getpix(void *priv, double x, double y, int plane)
59 {
60  int xi, yi;
61  GEQContext *geq = priv;
62  AVFrame *picref = geq->picref;
63  const uint8_t *src = picref->data[plane];
64  const int linesize = picref->linesize[plane];
65  const int w = picref->width >> ((plane == 1 || plane == 2) ? geq->hsub : 0);
66  const int h = picref->height >> ((plane == 1 || plane == 2) ? geq->vsub : 0);
67 
68  if (!src)
69  return 0;
70 
71  xi = x = av_clipf(x, 0, w - 2);
72  yi = y = av_clipf(y, 0, h - 2);
73 
74  x -= xi;
75  y -= yi;
76 
77  return (1-y)*((1-x)*src[xi + yi * linesize] + x*src[xi + 1 + yi * linesize])
78  + y *((1-x)*src[xi + (yi+1) * linesize] + x*src[xi + 1 + (yi+1) * linesize]);
79 }
80 
//TODO: cubic interpolate
//TODO: keep the last few frames

/* Thin wrappers exposed to the expression evaluator: each one samples a
 * fixed plane of the current input frame through getpix(). */
static double lum(void *priv, double x, double y)
{
    return getpix(priv, x, y, 0);
}

static double cb(void *priv, double x, double y)
{
    return getpix(priv, x, y, 1);
}

static double cr(void *priv, double x, double y)
{
    return getpix(priv, x, y, 2);
}

static double alpha(void *priv, double x, double y)
{
    return getpix(priv, x, y, 3);
}
87 
88 static const char *const var_names[] = { "X", "Y", "W", "H", "N", "SW", "SH", "T", NULL };
90 
92 {
93  GEQContext *geq = ctx->priv;
94  int plane, ret = 0;
95 
96  if (!geq->expr_str[0]) {
97  av_log(ctx, AV_LOG_ERROR, "Luminance expression is mandatory\n");
98  ret = AVERROR(EINVAL);
99  goto end;
100  }
101 
102  if (!geq->expr_str[1] && !geq->expr_str[2]) {
103  /* No chroma at all: fallback on luma */
104  geq->expr_str[1] = av_strdup(geq->expr_str[0]);
105  geq->expr_str[2] = av_strdup(geq->expr_str[0]);
106  } else {
107  /* One chroma unspecified, fallback on the other */
108  if (!geq->expr_str[1]) geq->expr_str[1] = av_strdup(geq->expr_str[2]);
109  if (!geq->expr_str[2]) geq->expr_str[2] = av_strdup(geq->expr_str[1]);
110  }
111 
112  if (!geq->expr_str[3])
113  geq->expr_str[3] = av_strdup("255");
114 
115  if (!geq->expr_str[1] || !geq->expr_str[2] || !geq->expr_str[3]) {
116  ret = AVERROR(ENOMEM);
117  goto end;
118  }
119 
120  for (plane = 0; plane < 4; plane++) {
121  static double (*p[])(void *, double, double) = { lum, cb, cr, alpha };
122  static const char *const func2_names[] = { "lum", "cb", "cr", "alpha", "p", NULL };
123  double (*func2[])(void *, double, double) = { lum, cb, cr, alpha, p[plane], NULL };
124 
125  ret = av_expr_parse(&geq->e[plane], geq->expr_str[plane], var_names,
126  NULL, NULL, func2_names, func2, 0, ctx);
127  if (ret < 0)
128  break;
129  }
130 
131 end:
132  return ret;
133 }
134 
136 {
137  static const enum PixelFormat pix_fmts[] = {
143  };
145  return 0;
146 }
147 
148 static int geq_config_props(AVFilterLink *inlink)
149 {
150  GEQContext *geq = inlink->dst->priv;
151  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
152 
153  geq->hsub = desc->log2_chroma_w;
154  geq->vsub = desc->log2_chroma_h;
155  geq->planes = desc->nb_components;
156  return 0;
157 }
158 
160 {
161  int plane;
162  GEQContext *geq = inlink->dst->priv;
163  AVFilterLink *outlink = inlink->dst->outputs[0];
164  AVFrame *out;
165  double values[VAR_VARS_NB] = {
166  [VAR_N] = geq->framenum++,
167  [VAR_T] = in->pts == AV_NOPTS_VALUE ? NAN : in->pts * av_q2d(inlink->time_base),
168  };
169 
170  geq->picref = in;
171  out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
172  if (!out) {
173  av_frame_free(&in);
174  return AVERROR(ENOMEM);
175  }
176  av_frame_copy_props(out, in);
177 
178  for (plane = 0; plane < geq->planes && out->data[plane]; plane++) {
179  int x, y;
180  uint8_t *dst = out->data[plane];
181  const int linesize = out->linesize[plane];
182  const int w = inlink->w >> ((plane == 1 || plane == 2) ? geq->hsub : 0);
183  const int h = inlink->h >> ((plane == 1 || plane == 2) ? geq->vsub : 0);
184 
185  values[VAR_W] = w;
186  values[VAR_H] = h;
187  values[VAR_SW] = w / (double)inlink->w;
188  values[VAR_SH] = h / (double)inlink->h;
189 
190  for (y = 0; y < h; y++) {
191  values[VAR_Y] = y;
192  for (x = 0; x < w; x++) {
193  values[VAR_X] = x;
194  dst[x] = av_expr_eval(geq->e[plane], values, geq);
195  }
196  dst += linesize;
197  }
198  }
199 
200  av_frame_free(&geq->picref);
201  return ff_filter_frame(outlink, out);
202 }
203 
205 {
206  int i;
207  GEQContext *geq = ctx->priv;
208 
209  for (i = 0; i < FF_ARRAY_ELEMS(geq->e); i++)
210  av_expr_free(geq->e[i]);
211 }
212 
/* Single video input; all processing happens in the input pad callbacks. */
static const AVFilterPad geq_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = geq_config_props,  /* cache subsampling / plane count */
        .filter_frame = geq_filter_frame,  /* evaluate the expressions per pixel */
    },
    { NULL }
};
222 
/* Single video output, fed directly from geq_filter_frame(). */
static const AVFilterPad geq_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};
230 
232  .name = "geq",
233  .description = NULL_IF_CONFIG_SMALL("Apply generic equation to each pixel."),
234  .priv_size = sizeof(GEQContext),
235  .init = geq_init,
236  .uninit = geq_uninit,
238  .inputs = geq_inputs,
239  .outputs = geq_outputs,
240  .priv_class = &geq_class,
241 };
Definition: vf_geq.c:89
static const AVFilterPad geq_inputs[]
Definition: vf_geq.c:213
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:424
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:1778
This structure describes decoded (raw) audio or video data.
Definition: frame.h:76
AVOption.
Definition: opt.h:251
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:73
static const AVFilterPad outputs[]
Definition: af_ashowinfo.c:117
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
static av_cold void geq_uninit(AVFilterContext *ctx)
Definition: vf_geq.c:204
static double getpix(void *priv, double x, double y, int plane)
Definition: vf_geq.c:58
static const char *const var_names[]
Definition: vf_geq.c:88
About Git write you should know how to use GIT properly Luckily Git comes with excellent documentation git help man git shows you the available git< command > help man git< command > shows information about the subcommand< command > The most comprehensive manual is the website Git Reference visit they are quite exhaustive You do not need a special username or password All you need is to provide a ssh public key to the Git server admin What follows now is a basic introduction to Git and some FFmpeg specific guidelines Read it at least if you are granted commit privileges to the FFmpeg project you are expected to be familiar with these rules I if not You can get git from etc no matter how small Every one of them has been saved from looking like a fool by this many times It s very easy for stray debug output or cosmetic modifications to slip in
Definition: git-howto.txt:5
Definition: vf_geq.c:89
static const AVFilterPad geq_outputs[]
Definition: vf_geq.c:223
#define FF_ARRAY_ELEMS(a)
static int query_formats(AVFilterContext *ctx)
Definition: af_aconvert.c:73
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:640
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:143
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:66
output residual component w
AVFrame * picref
current input buffer
Definition: vf_geq.c:40
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:308
const char * name
Pad name.
static int geq_query_formats(AVFilterContext *ctx)
Definition: vf_geq.c:135
static double cb(void *priv, double x, double y)
Definition: vf_geq.c:84
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:105
uint8_t
it can be given away to ff_start_frame *A reference passed to ff_filter_frame(or the deprecated ff_start_frame) is given away and must no longer be used.*A reference created with avfilter_ref_buffer belongs to the code that created it.*A reference obtained with ff_get_video_buffer or ff_get_audio_buffer belongs to the code that requested it.*A reference given as return value by the get_video_buffer or get_audio_buffer method is given away and must no longer be used.Link reference fields---------------------The AVFilterLink structure has a few AVFilterBufferRef fields.The cur_buf and out_buf were used with the deprecated start_frame/draw_slice/end_frame API and should no longer be used.src_buf
#define av_cold
Definition: attributes.h:78
static av_cold int uninit(AVCodecContext *avctx)
Definition: crystalhd.c:334
AVOptions.
end end
#define NAN
Definition: math.h:7
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:159
Definition: eval.c:140
static double av_q2d(AVRational a)
Convert rational to double.
Definition: rational.h:69
void ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:545
A filter pad used for either input or output.
Discrete Time axis x
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
Definition: pixfmt.h:219
static double alpha(void *priv, double x, double y)
Definition: vf_geq.c:86
int width
width and height of the video frame
Definition: frame.h:122
int planes
number of planes
Definition: vf_geq.c:42
AVExpr * e[4]
expressions for each plane
Definition: vf_geq.c:37
static int geq_config_props(AVFilterLink *inlink)
Definition: vf_geq.c:148
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:75
#define OFFSET(x)
Definition: vf_geq.c:45
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
void * priv
private data for use by the filter
Definition: avfilter.h:545
void av_log(void *avcl, int level, const char *fmt,...)
Definition: log.c:246
static const AVOption geq_options[]
Definition: vf_geq.c:48
char * expr_str[4]
expression strings for each plane
Definition: vf_geq.c:38
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:72
uint8_t nb_components
The number of components each pixel has, (1-4)
Definition: pixdesc.h:57
ret
Definition: avfilter.c:821
Definition: vf_geq.c:89
NULL
Definition: eval.c:55
Definition: vf_geq.c:89
AVS_Value src
Definition: avisynth_c.h:523
Definition: vf_geq.c:89
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:302
char * av_strdup(const char *s)
Duplicate the string s.
Definition: mem.c:220
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:101
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:218
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:55
int hsub
Definition: vf_geq.c:41
#define FLAGS
Definition: vf_geq.c:46
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:148
static av_cold int geq_init(AVFilterContext *ctx)
Definition: vf_geq.c:91
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:74
Describe the class of an AVClass context structure.
Definition: log.h:50
Filter definition.
Definition: avfilter.h:436
synthesis window for stochastic i
AVFilter avfilter_vf_geq
Definition: vf_geq.c:231
Definition: vf_geq.c:89
const char * name
filter name
Definition: avfilter.h:437
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFilterBuffer structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Buffer references ownership and permissions
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:539
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:87
static double lum(void *priv, double x, double y)
Definition: vf_geq.c:83
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:68
Y , 8bpp.
Definition: pixfmt.h:76
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:108
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:75
function y
Definition: D.m:1
Definition: vf_geq.c:89
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:691
else dst[i][x+y *dst_stride[i]]
Definition: vf_mcdeint.c:160
static int geq_filter_frame(AVFilterLink *inlink, AVFrame *in)
Definition: vf_geq.c:159
int framenum
frame counter
Definition: vf_geq.c:39
An instance of a filter.
Definition: avfilter.h:524
int height
Definition: frame.h:122
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:103
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=av_sample_fmt_is_planar(in_fmt);out_planar=av_sample_fmt_is_planar(out_fmt);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_dlog(ac->avr,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> out
internal API functions
static double cr(void *priv, double x, double y)
Definition: vf_geq.c:85
Definition: vf_geq.c:89
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
int vsub
chroma subsampling
Definition: vf_geq.c:41
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:190
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame This method is called when a frame is wanted on an output For an it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return values
simple arithmetic expression evaluator
AVFILTER_DEFINE_CLASS(geq)