// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2012-2016 Mentor Graphics Inc.
 *
 * Queued image conversion support, with tiling and rotation.
 */

#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/math.h>

#include <video/imx-ipu-image-convert.h>

#include "ipu-prv.h"
15
/*
 * The IC Resizer has a restriction that the output frame from the
 * resizer must be 1024 or less in both width (pixels) and height
 * (lines).
 *
 * The image converter attempts to split up a conversion when
 * the desired output (converted) frame resolution exceeds the
 * IC resizer limit of 1024 in either dimension.
 *
 * If either dimension of the output frame exceeds the limit, the
 * dimension is split into 1, 2, or 4 equal stripes, for a maximum
 * of 4*4 or 16 tiles. A conversion is then carried out for each
 * tile (but taking care to pass the full frame stride length to
 * the DMA channel's parameter memory!). IDMA double-buffering is used
 * to convert each tile back-to-back when possible (see note below
 * when double_buffering boolean is set).
 *
 * Note that the input frame must be split up into the same number
 * of tiles as the output frame:
 *
 *                       +---------+-----+
 *   +-----+---+         |  A      | B   |
 *   | A   | B |         |         |     |
 *   +-----+---+   -->   +---------+-----+
 *   | C   | D |         |  C      | D   |
 *   +-----+---+         |         |     |
 *                       +---------+-----+
 *
 * Clockwise 90° rotations are handled by first rescaling into a
 * reusable temporary tile buffer and then rotating with the 8x8
 * block rotator, writing to the correct destination:
 *
 *                                     +-----+-----+
 *                                     |     |     |
 *   +-----+---+       +---------+     | C   | A   |
 *   | A   | B |       | A,B, |  |     |     |     |
 *   +-----+---+  -->  | C,D  |  | --> |     |     |
 *   | C   | D |       +---------+     +-----+-----+
 *   +-----+---+                       | D   | B   |
 *                                     |     |     |
 *                                     +-----+-----+
 *
 * If the 8x8 block rotator is used, horizontal or vertical flipping
 * is done during the rotation step, otherwise flipping is done
 * during the scaling step.
 * With rotation or flipping, tile order changes between input and
 * output image. Tiles are numbered row major from top left to bottom
 * right for both input and output image.
 */
65
66 #define MAX_STRIPES_W 4
67 #define MAX_STRIPES_H 4
68 #define MAX_TILES (MAX_STRIPES_W * MAX_STRIPES_H)
69
70 #define MIN_W 16
71 #define MIN_H 8
72 #define MAX_W 4096
73 #define MAX_H 4096
74
75 enum ipu_image_convert_type {
76 IMAGE_CONVERT_IN = 0,
77 IMAGE_CONVERT_OUT,
78 };
79
80 struct ipu_image_convert_dma_buf {
81 void *virt;
82 dma_addr_t phys;
83 unsigned long len;
84 };
85
86 struct ipu_image_convert_dma_chan {
87 int in;
88 int out;
89 int rot_in;
90 int rot_out;
91 int vdi_in_p;
92 int vdi_in;
93 int vdi_in_n;
94 };
95
96 /* dimensions of one tile */
97 struct ipu_image_tile {
98 u32 width;
99 u32 height;
100 u32 left;
101 u32 top;
102 /* size and strides are in bytes */
103 u32 size;
104 u32 stride;
105 u32 rot_stride;
106 /* start Y or packed offset of this tile */
107 u32 offset;
108 /* offset from start to tile in U plane, for planar formats */
109 u32 u_off;
110 /* offset from start to tile in V plane, for planar formats */
111 u32 v_off;
112 };
113
114 struct ipu_image_convert_image {
115 struct ipu_image base;
116 enum ipu_image_convert_type type;
117
118 const struct ipu_image_pixfmt *fmt;
119 unsigned int stride;
120
121 /* # of rows (horizontal stripes) if dest height is > 1024 */
122 unsigned int num_rows;
123 /* # of columns (vertical stripes) if dest width is > 1024 */
124 unsigned int num_cols;
125
126 struct ipu_image_tile tile[MAX_TILES];
127 };
128
129 struct ipu_image_pixfmt {
130 u32 fourcc; /* V4L2 fourcc */
131 int bpp; /* total bpp */
132 int uv_width_dec; /* decimation in width for U/V planes */
133 int uv_height_dec; /* decimation in height for U/V planes */
134 bool planar; /* planar format */
135 bool uv_swapped; /* U and V planes are swapped */
136 bool uv_packed; /* partial planar (U and V in same plane) */
137 };
138
139 struct ipu_image_convert_ctx;
140 struct ipu_image_convert_chan;
141 struct ipu_image_convert_priv;
142
143 enum eof_irq_mask {
144 EOF_IRQ_IN = BIT(0),
145 EOF_IRQ_ROT_IN = BIT(1),
146 EOF_IRQ_OUT = BIT(2),
147 EOF_IRQ_ROT_OUT = BIT(3),
148 };
149
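/*
 * A tile conversion is complete once EOF has been received on both the
 * input and output channels, plus the rotator input and output channels
 * when the 8x8 block rotator (IRT) is in use.
 */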
150 #define EOF_IRQ_COMPLETE (EOF_IRQ_IN | EOF_IRQ_OUT)
151 #define EOF_IRQ_ROT_COMPLETE (EOF_IRQ_IN | EOF_IRQ_OUT | \
152 EOF_IRQ_ROT_IN | EOF_IRQ_ROT_OUT)
153
154 struct ipu_image_convert_ctx {
155 struct ipu_image_convert_chan *chan;
156
157 ipu_image_convert_cb_t complete;
158 void *complete_context;
159
160 /* Source/destination image data and rotation mode */
161 struct ipu_image_convert_image in;
162 struct ipu_image_convert_image out;
163 struct ipu_ic_csc csc;
164 enum ipu_rotate_mode rot_mode;
165 u32 downsize_coeff_h;
166 u32 downsize_coeff_v;
167 u32 image_resize_coeff_h;
168 u32 image_resize_coeff_v;
169 u32 resize_coeffs_h[MAX_STRIPES_W];
170 u32 resize_coeffs_v[MAX_STRIPES_H];
171
172 /* intermediate buffer for rotation */
173 struct ipu_image_convert_dma_buf rot_intermediate[2];
174
175 /* current buffer number for double buffering */
176 int cur_buf_num;
177
178 bool aborting;
179 struct completion aborted;
180
181 /* can we use double-buffering for this conversion operation? */
182 bool double_buffering;
183 /* num_rows * num_cols */
184 unsigned int num_tiles;
185 /* next tile to process */
186 unsigned int next_tile;
187 /* where to place converted tile in dest image */
188 unsigned int out_tile_map[MAX_TILES];
189
190 /* mask of completed EOF irqs at every tile conversion */
191 enum eof_irq_mask eof_mask;
192
193 struct list_head list;
194 };
195
196 struct ipu_image_convert_chan {
197 struct ipu_image_convert_priv *priv;
198
199 enum ipu_ic_task ic_task;
200 const struct ipu_image_convert_dma_chan *dma_ch;
201
202 struct ipu_ic *ic;
203 struct ipuv3_channel *in_chan;
204 struct ipuv3_channel *out_chan;
205 struct ipuv3_channel *rotation_in_chan;
206 struct ipuv3_channel *rotation_out_chan;
207
208 /* the IPU end-of-frame irqs */
209 int in_eof_irq;
210 int rot_in_eof_irq;
211 int out_eof_irq;
212 int rot_out_eof_irq;
213
214 spinlock_t irqlock;
215
216 /* list of convert contexts */
217 struct list_head ctx_list;
218 /* queue of conversion runs */
219 struct list_head pending_q;
220 /* queue of completed runs */
221 struct list_head done_q;
222
223 /* the current conversion run */
224 struct ipu_image_convert_run *current_run;
225 };
226
227 struct ipu_image_convert_priv {
228 struct ipu_image_convert_chan chan[IC_NUM_TASKS];
229 struct ipu_soc *ipu;
230 };
231
232 static const struct ipu_image_convert_dma_chan
233 image_convert_dma_chan[IC_NUM_TASKS] = {
234 [IC_TASK_VIEWFINDER] = {
235 .in = IPUV3_CHANNEL_MEM_IC_PRP_VF,
236 .out = IPUV3_CHANNEL_IC_PRP_VF_MEM,
237 .rot_in = IPUV3_CHANNEL_MEM_ROT_VF,
238 .rot_out = IPUV3_CHANNEL_ROT_VF_MEM,
239 .vdi_in_p = IPUV3_CHANNEL_MEM_VDI_PREV,
240 .vdi_in = IPUV3_CHANNEL_MEM_VDI_CUR,
241 .vdi_in_n = IPUV3_CHANNEL_MEM_VDI_NEXT,
242 },
243 [IC_TASK_POST_PROCESSOR] = {
244 .in = IPUV3_CHANNEL_MEM_IC_PP,
245 .out = IPUV3_CHANNEL_IC_PP_MEM,
246 .rot_in = IPUV3_CHANNEL_MEM_ROT_PP,
247 .rot_out = IPUV3_CHANNEL_ROT_PP_MEM,
248 },
249 };
250
251 static const struct ipu_image_pixfmt image_convert_formats[] = {
252 {
253 .fourcc = V4L2_PIX_FMT_RGB565,
254 .bpp = 16,
255 }, {
256 .fourcc = V4L2_PIX_FMT_RGB24,
257 .bpp = 24,
258 }, {
259 .fourcc = V4L2_PIX_FMT_BGR24,
260 .bpp = 24,
261 }, {
262 .fourcc = V4L2_PIX_FMT_RGB32,
263 .bpp = 32,
264 }, {
265 .fourcc = V4L2_PIX_FMT_BGR32,
266 .bpp = 32,
267 }, {
268 .fourcc = V4L2_PIX_FMT_XRGB32,
269 .bpp = 32,
270 }, {
271 .fourcc = V4L2_PIX_FMT_XBGR32,
272 .bpp = 32,
273 }, {
274 .fourcc = V4L2_PIX_FMT_BGRX32,
275 .bpp = 32,
276 }, {
277 .fourcc = V4L2_PIX_FMT_RGBX32,
278 .bpp = 32,
279 }, {
280 .fourcc = V4L2_PIX_FMT_YUYV,
281 .bpp = 16,
282 .uv_width_dec = 2,
283 .uv_height_dec = 1,
284 }, {
285 .fourcc = V4L2_PIX_FMT_UYVY,
286 .bpp = 16,
287 .uv_width_dec = 2,
288 .uv_height_dec = 1,
289 }, {
290 .fourcc = V4L2_PIX_FMT_YUV420,
291 .bpp = 12,
292 .planar = true,
293 .uv_width_dec = 2,
294 .uv_height_dec = 2,
295 }, {
296 .fourcc = V4L2_PIX_FMT_YVU420,
297 .bpp = 12,
298 .planar = true,
299 .uv_width_dec = 2,
300 .uv_height_dec = 2,
301 .uv_swapped = true,
302 }, {
303 .fourcc = V4L2_PIX_FMT_NV12,
304 .bpp = 12,
305 .planar = true,
306 .uv_width_dec = 2,
307 .uv_height_dec = 2,
308 .uv_packed = true,
309 }, {
310 .fourcc = V4L2_PIX_FMT_YUV422P,
311 .bpp = 16,
312 .planar = true,
313 .uv_width_dec = 2,
314 .uv_height_dec = 1,
315 }, {
316 .fourcc = V4L2_PIX_FMT_NV16,
317 .bpp = 16,
318 .planar = true,
319 .uv_width_dec = 2,
320 .uv_height_dec = 1,
321 .uv_packed = true,
322 },
323 };
324
static const struct ipu_image_pixfmt *get_format(u32 fourcc)
326 {
327 const struct ipu_image_pixfmt *ret = NULL;
328 unsigned int i;
329
330 for (i = 0; i < ARRAY_SIZE(image_convert_formats); i++) {
331 if (image_convert_formats[i].fourcc == fourcc) {
332 ret = &image_convert_formats[i];
333 break;
334 }
335 }
336
337 return ret;
338 }
339
static void dump_format(struct ipu_image_convert_ctx *ctx,
341 struct ipu_image_convert_image *ic_image)
342 {
343 struct ipu_image_convert_chan *chan = ctx->chan;
344 struct ipu_image_convert_priv *priv = chan->priv;
345
346 dev_dbg(priv->ipu->dev,
347 "task %u: ctx %p: %s format: %dx%d (%dx%d tiles), %c%c%c%c\n",
348 chan->ic_task, ctx,
349 ic_image->type == IMAGE_CONVERT_OUT ? "Output" : "Input",
350 ic_image->base.pix.width, ic_image->base.pix.height,
351 ic_image->num_cols, ic_image->num_rows,
352 ic_image->fmt->fourcc & 0xff,
353 (ic_image->fmt->fourcc >> 8) & 0xff,
354 (ic_image->fmt->fourcc >> 16) & 0xff,
355 (ic_image->fmt->fourcc >> 24) & 0xff);
356 }
357
int ipu_image_convert_enum_format(int index, u32 *fourcc)
359 {
360 const struct ipu_image_pixfmt *fmt;
361
362 if (index >= (int)ARRAY_SIZE(image_convert_formats))
363 return -EINVAL;
364
365 /* Format found */
366 fmt = &image_convert_formats[index];
367 *fourcc = fmt->fourcc;
368 return 0;
369 }
370 EXPORT_SYMBOL_GPL(ipu_image_convert_enum_format);
371
static void free_dma_buf(struct ipu_image_convert_priv *priv,
373 struct ipu_image_convert_dma_buf *buf)
374 {
375 if (buf->virt)
376 dma_free_coherent(priv->ipu->dev,
377 buf->len, buf->virt, buf->phys);
378 buf->virt = NULL;
379 buf->phys = 0;
380 }
381
static int alloc_dma_buf(struct ipu_image_convert_priv *priv,
383 struct ipu_image_convert_dma_buf *buf,
384 int size)
385 {
386 buf->len = PAGE_ALIGN(size);
387 buf->virt = dma_alloc_coherent(priv->ipu->dev, buf->len, &buf->phys,
388 GFP_DMA | GFP_KERNEL);
389 if (!buf->virt) {
390 dev_err(priv->ipu->dev, "failed to alloc dma buffer\n");
391 return -ENOMEM;
392 }
393
394 return 0;
395 }
396
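/*
 * Number of 1024-pixel stripes needed to cover a dimension, e.g.
 * 1024 pixels -> 1 stripe, 1920 -> 2, 2049 -> 3.
 */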
static inline int num_stripes(int dim)
398 {
399 return (dim - 1) / 1024 + 1;
400 }
401
402 /*
403 * Calculate downsizing coefficients, which are the same for all tiles,
404 * and initial bilinear resizing coefficients, which are used to find the
405 * best seam positions.
406 * Also determine the number of tiles necessary to guarantee that no tile
407 * is larger than 1024 pixels in either dimension at the output and between
408 * IC downsizing and main processing sections.
409 */
static int calc_image_resize_coefficients(struct ipu_image_convert_ctx *ctx,
411 struct ipu_image *in,
412 struct ipu_image *out)
413 {
414 u32 downsized_width = in->rect.width;
415 u32 downsized_height = in->rect.height;
416 u32 downsize_coeff_v = 0;
417 u32 downsize_coeff_h = 0;
418 u32 resized_width = out->rect.width;
419 u32 resized_height = out->rect.height;
420 u32 resize_coeff_h;
421 u32 resize_coeff_v;
422 u32 cols;
423 u32 rows;
424
425 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
426 resized_width = out->rect.height;
427 resized_height = out->rect.width;
428 }
429
430 /* Do not let invalid input lead to an endless loop below */
431 if (WARN_ON(resized_width == 0 || resized_height == 0))
432 return -EINVAL;
433
434 while (downsized_width >= resized_width * 2) {
435 downsized_width >>= 1;
436 downsize_coeff_h++;
437 }
438
439 while (downsized_height >= resized_height * 2) {
440 downsized_height >>= 1;
441 downsize_coeff_v++;
442 }
443
444 /*
445 * Calculate the bilinear resizing coefficients that could be used if
446 * we were converting with a single tile. The bottom right output pixel
447 * should sample as close as possible to the bottom right input pixel
448 * out of the decimator, but not overshoot it:
449 */
450 resize_coeff_h = 8192 * (downsized_width - 1) / (resized_width - 1);
451 resize_coeff_v = 8192 * (downsized_height - 1) / (resized_height - 1);
452
453 /*
454 * Both the output of the IC downsizing section before being passed to
455 * the IC main processing section and the final output of the IC main
456 * processing section must be <= 1024 pixels in both dimensions.
457 */
458 cols = num_stripes(max_t(u32, downsized_width, resized_width));
459 rows = num_stripes(max_t(u32, downsized_height, resized_height));
460
461 dev_dbg(ctx->chan->priv->ipu->dev,
462 "%s: hscale: >>%u, *8192/%u vscale: >>%u, *8192/%u, %ux%u tiles\n",
463 __func__, downsize_coeff_h, resize_coeff_h, downsize_coeff_v,
464 resize_coeff_v, cols, rows);
465
466 if (downsize_coeff_h > 2 || downsize_coeff_v > 2 ||
467 resize_coeff_h > 0x3fff || resize_coeff_v > 0x3fff)
468 return -EINVAL;
469
470 ctx->downsize_coeff_h = downsize_coeff_h;
471 ctx->downsize_coeff_v = downsize_coeff_v;
472 ctx->image_resize_coeff_h = resize_coeff_h;
473 ctx->image_resize_coeff_v = resize_coeff_v;
474 ctx->in.num_cols = cols;
475 ctx->in.num_rows = rows;
476
477 return 0;
478 }
479
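/*
 * Round to the closest multiple of y, e.g. round_closest(13, 8) =
 * round_down(17, 8) = 16 and round_closest(11, 8) = round_down(15, 8) = 8.
 */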
480 #define round_closest(x, y) round_down((x) + (y)/2, (y))
481
482 /*
483 * Find the best aligned seam position for the given column / row index.
484 * Rotation and image offsets are out of scope.
485 *
486 * @index: column / row index, used to calculate valid interval
487 * @in_edge: input right / bottom edge
488 * @out_edge: output right / bottom edge
489 * @in_align: input alignment, either horizontal 8-byte line start address
490 * alignment, or pixel alignment due to image format
491 * @out_align: output alignment, either horizontal 8-byte line start address
492 * alignment, or pixel alignment due to image format or rotator
493 * block size
494 * @in_burst: horizontal input burst size in case of horizontal flip
495 * @out_burst: horizontal output burst size or rotator block size
496 * @downsize_coeff: downsizing section coefficient
497 * @resize_coeff: main processing section resizing coefficient
498 * @_in_seam: aligned input seam position return value
499 * @_out_seam: aligned output seam position return value
500 */
static void find_best_seam(struct ipu_image_convert_ctx *ctx,
502 unsigned int index,
503 unsigned int in_edge,
504 unsigned int out_edge,
505 unsigned int in_align,
506 unsigned int out_align,
507 unsigned int in_burst,
508 unsigned int out_burst,
509 unsigned int downsize_coeff,
510 unsigned int resize_coeff,
511 u32 *_in_seam,
512 u32 *_out_seam)
513 {
514 struct device *dev = ctx->chan->priv->ipu->dev;
515 unsigned int out_pos;
516 /* Input / output seam position candidates */
517 unsigned int out_seam = 0;
518 unsigned int in_seam = 0;
519 unsigned int min_diff = UINT_MAX;
520 unsigned int out_start;
521 unsigned int out_end;
522 unsigned int in_start;
523 unsigned int in_end;
524
525 /* Start within 1024 pixels of the right / bottom edge */
526 out_start = max_t(int, index * out_align, out_edge - 1024);
527 /* End before having to add more columns to the left / rows above */
528 out_end = min_t(unsigned int, out_edge, index * 1024 + 1);
529
530 /*
531 * Limit input seam position to make sure that the downsized input tile
532 * to the right or bottom does not exceed 1024 pixels.
533 */
534 in_start = max_t(int, index * in_align,
535 in_edge - (1024 << downsize_coeff));
536 in_end = min_t(unsigned int, in_edge,
537 index * (1024 << downsize_coeff) + 1);
538
539 /*
540 * Output tiles must start at a multiple of 8 bytes horizontally and
541 * possibly at an even line horizontally depending on the pixel format.
542 * Only consider output aligned positions for the seam.
543 */
544 out_start = round_up(out_start, out_align);
545 for (out_pos = out_start; out_pos < out_end; out_pos += out_align) {
546 unsigned int in_pos;
547 unsigned int in_pos_aligned;
548 unsigned int in_pos_rounded;
549 unsigned int diff;
550
551 /*
552 * Tiles in the right row / bottom column may not be allowed to
553 * overshoot horizontally / vertically. out_burst may be the
554 * actual DMA burst size, or the rotator block size.
555 */
556 if ((out_burst > 1) && (out_edge - out_pos) % out_burst)
557 continue;
558
559 /*
560 * Input sample position, corresponding to out_pos, 19.13 fixed
561 * point.
562 */
563 in_pos = (out_pos * resize_coeff) << downsize_coeff;
564 /*
565 * The closest input sample position that we could actually
566 * start the input tile at, 19.13 fixed point.
567 */
568 in_pos_aligned = round_closest(in_pos, 8192U * in_align);
569 /* Convert 19.13 fixed point to integer */
570 in_pos_rounded = in_pos_aligned / 8192U;
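		/*
		 * For example, out_pos = 100 with resize_coeff = 4096 (2x
		 * upscaling in the main section) and downsize_coeff = 1 (2x
		 * downsizing) gives in_pos = 819200, i.e. input position
		 * 100.0 for a net 1:1 scale.
		 */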
571
572 if (in_pos_rounded < in_start)
573 continue;
574 if (in_pos_rounded >= in_end)
575 break;
576
577 if ((in_burst > 1) &&
578 (in_edge - in_pos_rounded) % in_burst)
579 continue;
580
581 diff = abs_diff(in_pos, in_pos_aligned);
582 if (diff < min_diff) {
583 in_seam = in_pos_rounded;
584 out_seam = out_pos;
585 min_diff = diff;
586 }
587 }
588
589 *_out_seam = out_seam;
590 *_in_seam = in_seam;
591
592 dev_dbg(dev, "%s: out_seam %u(%u) in [%u, %u], in_seam %u(%u) in [%u, %u] diff %u.%03u\n",
593 __func__, out_seam, out_align, out_start, out_end,
594 in_seam, in_align, in_start, in_end, min_diff / 8192,
595 DIV_ROUND_CLOSEST(min_diff % 8192 * 1000, 8192));
596 }
597
598 /*
599 * Tile left edges are required to be aligned to multiples of 8 bytes
600 * by the IDMAC.
601 */
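/*
 * The returned alignment is in pixels: at 32, 16, or 8 bits per pixel,
 * 8 bytes correspond to 2, 4, or 8 pixels. For non-chroma-packed planar
 * formats the luma alignment is multiplied by the chroma width decimation
 * so that chroma line start addresses stay 8-byte aligned as well.
 */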
static inline u32 tile_left_align(const struct ipu_image_pixfmt *fmt)
603 {
604 if (fmt->planar)
605 return fmt->uv_packed ? 8 : 8 * fmt->uv_width_dec;
606 else
607 return fmt->bpp == 32 ? 2 : fmt->bpp == 16 ? 4 : 8;
608 }
609
610 /*
611 * Tile top edge alignment is only limited by chroma subsampling.
612 */
static inline u32 tile_top_align(const struct ipu_image_pixfmt *fmt)
614 {
615 return fmt->uv_height_dec > 1 ? 2 : 1;
616 }
617
static inline u32 tile_width_align(enum ipu_image_convert_type type,
619 const struct ipu_image_pixfmt *fmt,
620 enum ipu_rotate_mode rot_mode)
621 {
622 if (type == IMAGE_CONVERT_IN) {
623 /*
624 * The IC burst reads 8 pixels at a time. Reading beyond the
625 * end of the line is usually acceptable. Those pixels are
626 * ignored, unless the IC has to write the scaled line in
627 * reverse.
628 */
629 return (!ipu_rot_mode_is_irt(rot_mode) &&
630 (rot_mode & IPU_ROT_BIT_HFLIP)) ? 8 : 2;
631 }
632
633 /*
634 * Align to 16x16 pixel blocks for planar 4:2:0 chroma subsampled
635 * formats to guarantee 8-byte aligned line start addresses in the
636 * chroma planes when IRT is used. Align to 8x8 pixel IRT block size
637 * for all other formats.
638 */
639 return (ipu_rot_mode_is_irt(rot_mode) &&
640 fmt->planar && !fmt->uv_packed) ?
641 8 * fmt->uv_width_dec : 8;
642 }
643
static inline u32 tile_height_align(enum ipu_image_convert_type type,
645 const struct ipu_image_pixfmt *fmt,
646 enum ipu_rotate_mode rot_mode)
647 {
648 if (type == IMAGE_CONVERT_IN || !ipu_rot_mode_is_irt(rot_mode))
649 return 2;
650
651 /*
652 * Align to 16x16 pixel blocks for planar 4:2:0 chroma subsampled
653 * formats to guarantee 8-byte aligned line start addresses in the
654 * chroma planes when IRT is used. Align to 8x8 pixel IRT block size
655 * for all other formats.
656 */
657 return (fmt->planar && !fmt->uv_packed) ? 8 * fmt->uv_width_dec : 8;
658 }
659
/*
 * Fill in left position and width for all tiles in an input column, and
 * for all corresponding output tiles. If the 90° rotator is used, the output
 * tiles are in a row, and output tile top position and height are set.
 */
static void fill_tile_column(struct ipu_image_convert_ctx *ctx,
666 unsigned int col,
667 struct ipu_image_convert_image *in,
668 unsigned int in_left, unsigned int in_width,
669 struct ipu_image_convert_image *out,
670 unsigned int out_left, unsigned int out_width)
671 {
672 unsigned int row, tile_idx;
673 struct ipu_image_tile *in_tile, *out_tile;
674
675 for (row = 0; row < in->num_rows; row++) {
676 tile_idx = in->num_cols * row + col;
677 in_tile = &in->tile[tile_idx];
678 out_tile = &out->tile[ctx->out_tile_map[tile_idx]];
679
680 in_tile->left = in_left;
681 in_tile->width = in_width;
682
683 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
684 out_tile->top = out_left;
685 out_tile->height = out_width;
686 } else {
687 out_tile->left = out_left;
688 out_tile->width = out_width;
689 }
690 }
691 }
692
/*
 * Fill in top position and height for all tiles in an input row, and
 * for all corresponding output tiles. If the 90° rotator is used, the output
 * tiles are in a column, and output tile left position and width are set.
 */
static void fill_tile_row(struct ipu_image_convert_ctx *ctx, unsigned int row,
699 struct ipu_image_convert_image *in,
700 unsigned int in_top, unsigned int in_height,
701 struct ipu_image_convert_image *out,
702 unsigned int out_top, unsigned int out_height)
703 {
704 unsigned int col, tile_idx;
705 struct ipu_image_tile *in_tile, *out_tile;
706
707 for (col = 0; col < in->num_cols; col++) {
708 tile_idx = in->num_cols * row + col;
709 in_tile = &in->tile[tile_idx];
710 out_tile = &out->tile[ctx->out_tile_map[tile_idx]];
711
712 in_tile->top = in_top;
713 in_tile->height = in_height;
714
715 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
716 out_tile->left = out_top;
717 out_tile->width = out_height;
718 } else {
719 out_tile->top = out_top;
720 out_tile->height = out_height;
721 }
722 }
723 }
724
725 /*
726 * Find the best horizontal and vertical seam positions to split into tiles.
727 * Minimize the fractional part of the input sampling position for the
728 * top / left pixels of each tile.
729 */
static void find_seams(struct ipu_image_convert_ctx *ctx,
731 struct ipu_image_convert_image *in,
732 struct ipu_image_convert_image *out)
733 {
734 struct device *dev = ctx->chan->priv->ipu->dev;
735 unsigned int resized_width = out->base.rect.width;
736 unsigned int resized_height = out->base.rect.height;
737 unsigned int col;
738 unsigned int row;
739 unsigned int in_left_align = tile_left_align(in->fmt);
740 unsigned int in_top_align = tile_top_align(in->fmt);
741 unsigned int out_left_align = tile_left_align(out->fmt);
742 unsigned int out_top_align = tile_top_align(out->fmt);
743 unsigned int out_width_align = tile_width_align(out->type, out->fmt,
744 ctx->rot_mode);
745 unsigned int out_height_align = tile_height_align(out->type, out->fmt,
746 ctx->rot_mode);
747 unsigned int in_right = in->base.rect.width;
748 unsigned int in_bottom = in->base.rect.height;
749 unsigned int out_right = out->base.rect.width;
750 unsigned int out_bottom = out->base.rect.height;
751 unsigned int flipped_out_left;
752 unsigned int flipped_out_top;
753
754 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
755 /* Switch width/height and align top left to IRT block size */
756 resized_width = out->base.rect.height;
757 resized_height = out->base.rect.width;
758 out_left_align = out_height_align;
759 out_top_align = out_width_align;
760 out_width_align = out_left_align;
761 out_height_align = out_top_align;
762 out_right = out->base.rect.height;
763 out_bottom = out->base.rect.width;
764 }
765
766 for (col = in->num_cols - 1; col > 0; col--) {
767 bool allow_in_overshoot = ipu_rot_mode_is_irt(ctx->rot_mode) ||
768 !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
769 bool allow_out_overshoot = (col < in->num_cols - 1) &&
770 !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
771 unsigned int in_left;
772 unsigned int out_left;
773
774 /*
775 * Align input width to burst length if the scaling step flips
776 * horizontally.
777 */
778
779 find_best_seam(ctx, col,
780 in_right, out_right,
781 in_left_align, out_left_align,
782 allow_in_overshoot ? 1 : 8 /* burst length */,
783 allow_out_overshoot ? 1 : out_width_align,
784 ctx->downsize_coeff_h, ctx->image_resize_coeff_h,
785 &in_left, &out_left);
786
787 if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
788 flipped_out_left = resized_width - out_right;
789 else
790 flipped_out_left = out_left;
791
792 fill_tile_column(ctx, col, in, in_left, in_right - in_left,
793 out, flipped_out_left, out_right - out_left);
794
795 dev_dbg(dev, "%s: col %u: %u, %u -> %u, %u\n", __func__, col,
796 in_left, in_right - in_left,
797 flipped_out_left, out_right - out_left);
798
799 in_right = in_left;
800 out_right = out_left;
801 }
802
803 flipped_out_left = (ctx->rot_mode & IPU_ROT_BIT_HFLIP) ?
804 resized_width - out_right : 0;
805
806 fill_tile_column(ctx, 0, in, 0, in_right,
807 out, flipped_out_left, out_right);
808
809 dev_dbg(dev, "%s: col 0: 0, %u -> %u, %u\n", __func__,
810 in_right, flipped_out_left, out_right);
811
812 for (row = in->num_rows - 1; row > 0; row--) {
813 bool allow_overshoot = row < in->num_rows - 1;
814 unsigned int in_top;
815 unsigned int out_top;
816
817 find_best_seam(ctx, row,
818 in_bottom, out_bottom,
819 in_top_align, out_top_align,
820 1, allow_overshoot ? 1 : out_height_align,
821 ctx->downsize_coeff_v, ctx->image_resize_coeff_v,
822 &in_top, &out_top);
823
824 if ((ctx->rot_mode & IPU_ROT_BIT_VFLIP) ^
825 ipu_rot_mode_is_irt(ctx->rot_mode))
826 flipped_out_top = resized_height - out_bottom;
827 else
828 flipped_out_top = out_top;
829
830 fill_tile_row(ctx, row, in, in_top, in_bottom - in_top,
831 out, flipped_out_top, out_bottom - out_top);
832
833 dev_dbg(dev, "%s: row %u: %u, %u -> %u, %u\n", __func__, row,
834 in_top, in_bottom - in_top,
835 flipped_out_top, out_bottom - out_top);
836
837 in_bottom = in_top;
838 out_bottom = out_top;
839 }
840
841 if ((ctx->rot_mode & IPU_ROT_BIT_VFLIP) ^
842 ipu_rot_mode_is_irt(ctx->rot_mode))
843 flipped_out_top = resized_height - out_bottom;
844 else
845 flipped_out_top = 0;
846
847 fill_tile_row(ctx, 0, in, 0, in_bottom,
848 out, flipped_out_top, out_bottom);
849
850 dev_dbg(dev, "%s: row 0: 0, %u -> %u, %u\n", __func__,
851 in_bottom, flipped_out_top, out_bottom);
852 }
853
static int calc_tile_dimensions(struct ipu_image_convert_ctx *ctx,
855 struct ipu_image_convert_image *image)
856 {
857 struct ipu_image_convert_chan *chan = ctx->chan;
858 struct ipu_image_convert_priv *priv = chan->priv;
859 unsigned int max_width = 1024;
860 unsigned int max_height = 1024;
861 unsigned int i;
862
863 if (image->type == IMAGE_CONVERT_IN) {
864 /* Up to 4096x4096 input tile size */
865 max_width <<= ctx->downsize_coeff_h;
866 max_height <<= ctx->downsize_coeff_v;
867 }
868
869 for (i = 0; i < ctx->num_tiles; i++) {
870 struct ipu_image_tile *tile;
871 const unsigned int row = i / image->num_cols;
872 const unsigned int col = i % image->num_cols;
873
874 if (image->type == IMAGE_CONVERT_OUT)
875 tile = &image->tile[ctx->out_tile_map[i]];
876 else
877 tile = &image->tile[i];
878
879 tile->size = ((tile->height * image->fmt->bpp) >> 3) *
880 tile->width;
881
882 if (image->fmt->planar) {
883 tile->stride = tile->width;
884 tile->rot_stride = tile->height;
885 } else {
886 tile->stride =
887 (image->fmt->bpp * tile->width) >> 3;
888 tile->rot_stride =
889 (image->fmt->bpp * tile->height) >> 3;
890 }
891
892 dev_dbg(priv->ipu->dev,
893 "task %u: ctx %p: %s@[%u,%u]: %ux%u@%u,%u\n",
894 chan->ic_task, ctx,
895 image->type == IMAGE_CONVERT_IN ? "Input" : "Output",
896 row, col,
897 tile->width, tile->height, tile->left, tile->top);
898
899 if (!tile->width || tile->width > max_width ||
900 !tile->height || tile->height > max_height) {
901 dev_err(priv->ipu->dev, "invalid %s tile size: %ux%u\n",
902 image->type == IMAGE_CONVERT_IN ? "input" :
903 "output", tile->width, tile->height);
904 return -EINVAL;
905 }
906 }
907
908 return 0;
909 }
910
911 /*
912 * Use the rotation transformation to find the tile coordinates
913 * (row, col) of a tile in the destination frame that corresponds
914 * to the given tile coordinates of a source frame. The destination
915 * coordinate is then converted to a tile index.
916 */
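/*
 * For example, with a 2x2 grid and a clockwise 90° rotation, source
 * tile B at (row 0, col 1) maps to destination (row 1, col 1), i.e.
 * destination tile index 3, matching the diagram at the top of this file.
 */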
static int transform_tile_index(struct ipu_image_convert_ctx *ctx,
918 int src_row, int src_col)
919 {
920 struct ipu_image_convert_chan *chan = ctx->chan;
921 struct ipu_image_convert_priv *priv = chan->priv;
922 struct ipu_image_convert_image *s_image = &ctx->in;
923 struct ipu_image_convert_image *d_image = &ctx->out;
924 int dst_row, dst_col;
925
926 /* with no rotation it's a 1:1 mapping */
927 if (ctx->rot_mode == IPU_ROTATE_NONE)
928 return src_row * s_image->num_cols + src_col;
929
930 /*
931 * before doing the transform, first we have to translate
932 * source row,col for an origin in the center of s_image
933 */
934 src_row = src_row * 2 - (s_image->num_rows - 1);
935 src_col = src_col * 2 - (s_image->num_cols - 1);
936
937 /* do the rotation transform */
938 if (ctx->rot_mode & IPU_ROT_BIT_90) {
939 dst_col = -src_row;
940 dst_row = src_col;
941 } else {
942 dst_col = src_col;
943 dst_row = src_row;
944 }
945
946 /* apply flip */
947 if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
948 dst_col = -dst_col;
949 if (ctx->rot_mode & IPU_ROT_BIT_VFLIP)
950 dst_row = -dst_row;
951
952 dev_dbg(priv->ipu->dev, "task %u: ctx %p: [%d,%d] --> [%d,%d]\n",
953 chan->ic_task, ctx, src_col, src_row, dst_col, dst_row);
954
955 /*
956 * finally translate dest row,col using an origin in upper
957 * left of d_image
958 */
959 dst_row += d_image->num_rows - 1;
960 dst_col += d_image->num_cols - 1;
961 dst_row /= 2;
962 dst_col /= 2;
963
964 return dst_row * d_image->num_cols + dst_col;
965 }
966
/*
 * Fill the out_tile_map[] with transformed destination tile indices.
 */
static void calc_out_tile_map(struct ipu_image_convert_ctx *ctx)
971 {
972 struct ipu_image_convert_image *s_image = &ctx->in;
973 unsigned int row, col, tile = 0;
974
975 for (row = 0; row < s_image->num_rows; row++) {
976 for (col = 0; col < s_image->num_cols; col++) {
977 ctx->out_tile_map[tile] =
978 transform_tile_index(ctx, row, col);
979 tile++;
980 }
981 }
982 }
983
static int calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx,
985 struct ipu_image_convert_image *image)
986 {
987 struct ipu_image_convert_chan *chan = ctx->chan;
988 struct ipu_image_convert_priv *priv = chan->priv;
989 const struct ipu_image_pixfmt *fmt = image->fmt;
990 unsigned int row, col, tile = 0;
991 u32 H, top, y_stride, uv_stride;
992 u32 uv_row_off, uv_col_off, uv_off, u_off, v_off;
993 u32 y_row_off, y_col_off, y_off;
994 u32 y_size, uv_size;
995
996 /* setup some convenience vars */
997 H = image->base.pix.height;
998
999 y_stride = image->stride;
1000 uv_stride = y_stride / fmt->uv_width_dec;
1001 if (fmt->uv_packed)
1002 uv_stride *= 2;
1003
1004 y_size = H * y_stride;
1005 uv_size = y_size / (fmt->uv_width_dec * fmt->uv_height_dec);
1006
1007 for (row = 0; row < image->num_rows; row++) {
1008 top = image->tile[tile].top;
1009 y_row_off = top * y_stride;
1010 uv_row_off = (top * uv_stride) / fmt->uv_height_dec;
1011
1012 for (col = 0; col < image->num_cols; col++) {
1013 y_col_off = image->tile[tile].left;
1014 uv_col_off = y_col_off / fmt->uv_width_dec;
1015 if (fmt->uv_packed)
1016 uv_col_off *= 2;
1017
1018 y_off = y_row_off + y_col_off;
1019 uv_off = uv_row_off + uv_col_off;
1020
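			/*
			 * u_off and v_off are programmed relative to the
			 * tile start address (frame start + y_off), so
			 * subtract y_off to get back to the frame start
			 * before adding the chroma plane base and the tile's
			 * chroma offset.
			 */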
1021 u_off = y_size - y_off + uv_off;
1022 v_off = (fmt->uv_packed) ? 0 : u_off + uv_size;
1023 if (fmt->uv_swapped)
1024 swap(u_off, v_off);
1025
1026 image->tile[tile].offset = y_off;
1027 image->tile[tile].u_off = u_off;
1028 image->tile[tile++].v_off = v_off;
1029
1030 if ((y_off & 0x7) || (u_off & 0x7) || (v_off & 0x7)) {
1031 dev_err(priv->ipu->dev,
1032 "task %u: ctx %p: %s@[%d,%d]: "
1033 "y_off %08x, u_off %08x, v_off %08x\n",
1034 chan->ic_task, ctx,
1035 image->type == IMAGE_CONVERT_IN ?
1036 "Input" : "Output", row, col,
1037 y_off, u_off, v_off);
1038 return -EINVAL;
1039 }
1040 }
1041 }
1042
1043 return 0;
1044 }
1045
static int calc_tile_offsets_packed(struct ipu_image_convert_ctx *ctx,
1047 struct ipu_image_convert_image *image)
1048 {
1049 struct ipu_image_convert_chan *chan = ctx->chan;
1050 struct ipu_image_convert_priv *priv = chan->priv;
1051 const struct ipu_image_pixfmt *fmt = image->fmt;
1052 unsigned int row, col, tile = 0;
1053 u32 bpp, stride, offset;
1054 u32 row_off, col_off;
1055
1056 /* setup some convenience vars */
1057 stride = image->stride;
1058 bpp = fmt->bpp;
1059
1060 for (row = 0; row < image->num_rows; row++) {
1061 row_off = image->tile[tile].top * stride;
1062
1063 for (col = 0; col < image->num_cols; col++) {
1064 col_off = (image->tile[tile].left * bpp) >> 3;
1065
1066 offset = row_off + col_off;
1067
1068 image->tile[tile].offset = offset;
1069 image->tile[tile].u_off = 0;
1070 image->tile[tile++].v_off = 0;
1071
1072 if (offset & 0x7) {
1073 dev_err(priv->ipu->dev,
1074 "task %u: ctx %p: %s@[%d,%d]: "
1075 "phys %08x\n",
1076 chan->ic_task, ctx,
1077 image->type == IMAGE_CONVERT_IN ?
1078 "Input" : "Output", row, col,
1079 row_off + col_off);
1080 return -EINVAL;
1081 }
1082 }
1083 }
1084
1085 return 0;
1086 }
1087
static int calc_tile_offsets(struct ipu_image_convert_ctx *ctx,
1089 struct ipu_image_convert_image *image)
1090 {
1091 if (image->fmt->planar)
1092 return calc_tile_offsets_planar(ctx, image);
1093
1094 return calc_tile_offsets_packed(ctx, image);
1095 }
1096
1097 /*
1098 * Calculate the resizing ratio for the IC main processing section given input
1099 * size, fixed downsizing coefficient, and output size.
1100 * Either round to closest for the next tile's first pixel to minimize seams
1101 * and distortion (for all but right column / bottom row), or round down to
1102 * avoid sampling beyond the edges of the input image for this tile's last
1103 * pixel.
1104 * Returns the resizing coefficient, resizing ratio is 8192.0 / resize_coeff.
1105 */
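/*
 * For example, resizing a downsized span of 480 pixels to 960 output
 * pixels gives DIV_ROUND_CLOSEST(8192 * 480, 960) = 4096 when overshoot
 * is allowed, or 8192 * 479 / 959 = 4091 when it is not.
 */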
static u32 calc_resize_coeff(u32 input_size, u32 downsize_coeff,
1107 u32 output_size, bool allow_overshoot)
1108 {
1109 u32 downsized = input_size >> downsize_coeff;
1110
1111 if (allow_overshoot)
1112 return DIV_ROUND_CLOSEST(8192 * downsized, output_size);
1113 else
1114 return 8192 * (downsized - 1) / (output_size - 1);
1115 }
1116
1117 /*
1118 * Slightly modify resize coefficients per tile to hide the bilinear
1119 * interpolator reset at tile borders, shifting the right / bottom edge
1120 * by up to a half input pixel. This removes noticeable seams between
1121 * tiles at higher upscaling factors.
1122 */
static void calc_tile_resize_coefficients(struct ipu_image_convert_ctx *ctx)
1124 {
1125 struct ipu_image_convert_chan *chan = ctx->chan;
1126 struct ipu_image_convert_priv *priv = chan->priv;
1127 struct ipu_image_tile *in_tile, *out_tile;
1128 unsigned int col, row, tile_idx;
1129 unsigned int last_output;
1130
1131 for (col = 0; col < ctx->in.num_cols; col++) {
1132 bool closest = (col < ctx->in.num_cols - 1) &&
1133 !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
1134 u32 resized_width;
1135 u32 resize_coeff_h;
1136 u32 in_width;
1137
1138 tile_idx = col;
1139 in_tile = &ctx->in.tile[tile_idx];
1140 out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
1141
1142 if (ipu_rot_mode_is_irt(ctx->rot_mode))
1143 resized_width = out_tile->height;
1144 else
1145 resized_width = out_tile->width;
1146
1147 resize_coeff_h = calc_resize_coeff(in_tile->width,
1148 ctx->downsize_coeff_h,
1149 resized_width, closest);
1150
1151 dev_dbg(priv->ipu->dev, "%s: column %u hscale: *8192/%u\n",
1152 __func__, col, resize_coeff_h);
1153
1154 /*
1155 * With the horizontal scaling factor known, round up resized
1156 * width (output width or height) to burst size.
1157 */
1158 resized_width = round_up(resized_width, 8);
1159
1160 /*
1161 * Calculate input width from the last accessed input pixel
1162 * given resized width and scaling coefficients. Round up to
1163 * burst size.
1164 */
1165 last_output = resized_width - 1;
1166 if (closest && ((last_output * resize_coeff_h) % 8192))
1167 last_output++;
1168 in_width = round_up(
1169 (DIV_ROUND_UP(last_output * resize_coeff_h, 8192) + 1)
1170 << ctx->downsize_coeff_h, 8);
1171
1172 for (row = 0; row < ctx->in.num_rows; row++) {
1173 tile_idx = row * ctx->in.num_cols + col;
1174 in_tile = &ctx->in.tile[tile_idx];
1175 out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
1176
1177 if (ipu_rot_mode_is_irt(ctx->rot_mode))
1178 out_tile->height = resized_width;
1179 else
1180 out_tile->width = resized_width;
1181
1182 in_tile->width = in_width;
1183 }
1184
1185 ctx->resize_coeffs_h[col] = resize_coeff_h;
1186 }
1187
1188 for (row = 0; row < ctx->in.num_rows; row++) {
1189 bool closest = (row < ctx->in.num_rows - 1) &&
1190 !(ctx->rot_mode & IPU_ROT_BIT_VFLIP);
1191 u32 resized_height;
1192 u32 resize_coeff_v;
1193 u32 in_height;
1194
1195 tile_idx = row * ctx->in.num_cols;
1196 in_tile = &ctx->in.tile[tile_idx];
1197 out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
1198
1199 if (ipu_rot_mode_is_irt(ctx->rot_mode))
1200 resized_height = out_tile->width;
1201 else
1202 resized_height = out_tile->height;
1203
1204 resize_coeff_v = calc_resize_coeff(in_tile->height,
1205 ctx->downsize_coeff_v,
1206 resized_height, closest);
1207
1208 dev_dbg(priv->ipu->dev, "%s: row %u vscale: *8192/%u\n",
1209 __func__, row, resize_coeff_v);
1210
1211 /*
1212 * With the vertical scaling factor known, round up resized
1213 * height (output width or height) to IDMAC limitations.
1214 */
1215 resized_height = round_up(resized_height, 2);
1216
		/*
		 * Calculate input height from the last accessed input pixel
		 * given resized height and scaling coefficients. Align to
		 * IDMAC restrictions.
		 */
1222 last_output = resized_height - 1;
1223 if (closest && ((last_output * resize_coeff_v) % 8192))
1224 last_output++;
1225 in_height = round_up(
1226 (DIV_ROUND_UP(last_output * resize_coeff_v, 8192) + 1)
1227 << ctx->downsize_coeff_v, 2);
1228
1229 for (col = 0; col < ctx->in.num_cols; col++) {
1230 tile_idx = row * ctx->in.num_cols + col;
1231 in_tile = &ctx->in.tile[tile_idx];
1232 out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
1233
1234 if (ipu_rot_mode_is_irt(ctx->rot_mode))
1235 out_tile->width = resized_height;
1236 else
1237 out_tile->height = resized_height;
1238
1239 in_tile->height = in_height;
1240 }
1241
1242 ctx->resize_coeffs_v[row] = resize_coeff_v;
1243 }
1244 }
1245
1246 /*
1247 * return the number of runs in given queue (pending_q or done_q)
1248 * for this context. hold irqlock when calling.
1249 */
static int get_run_count(struct ipu_image_convert_ctx *ctx,
1251 struct list_head *q)
1252 {
1253 struct ipu_image_convert_run *run;
1254 int count = 0;
1255
1256 lockdep_assert_held(&ctx->chan->irqlock);
1257
1258 list_for_each_entry(run, q, list) {
1259 if (run->ctx == ctx)
1260 count++;
1261 }
1262
1263 return count;
1264 }
1265
static void convert_stop(struct ipu_image_convert_run *run)
1267 {
1268 struct ipu_image_convert_ctx *ctx = run->ctx;
1269 struct ipu_image_convert_chan *chan = ctx->chan;
1270 struct ipu_image_convert_priv *priv = chan->priv;
1271
1272 dev_dbg(priv->ipu->dev, "%s: task %u: stopping ctx %p run %p\n",
1273 __func__, chan->ic_task, ctx, run);
1274
1275 /* disable IC tasks and the channels */
1276 ipu_ic_task_disable(chan->ic);
1277 ipu_idmac_disable_channel(chan->in_chan);
1278 ipu_idmac_disable_channel(chan->out_chan);
1279
1280 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
1281 ipu_idmac_disable_channel(chan->rotation_in_chan);
1282 ipu_idmac_disable_channel(chan->rotation_out_chan);
1283 ipu_idmac_unlink(chan->out_chan, chan->rotation_in_chan);
1284 }
1285
1286 ipu_ic_disable(chan->ic);
1287 }
1288
static void init_idmac_channel(struct ipu_image_convert_ctx *ctx,
1290 struct ipuv3_channel *channel,
1291 struct ipu_image_convert_image *image,
1292 enum ipu_rotate_mode rot_mode,
1293 bool rot_swap_width_height,
1294 unsigned int tile)
1295 {
1296 struct ipu_image_convert_chan *chan = ctx->chan;
1297 unsigned int burst_size;
1298 u32 width, height, stride;
1299 dma_addr_t addr0, addr1 = 0;
1300 struct ipu_image tile_image;
1301 unsigned int tile_idx[2];
1302
1303 if (image->type == IMAGE_CONVERT_OUT) {
1304 tile_idx[0] = ctx->out_tile_map[tile];
1305 tile_idx[1] = ctx->out_tile_map[1];
1306 } else {
1307 tile_idx[0] = tile;
1308 tile_idx[1] = 1;
1309 }
1310
1311 if (rot_swap_width_height) {
1312 width = image->tile[tile_idx[0]].height;
1313 height = image->tile[tile_idx[0]].width;
1314 stride = image->tile[tile_idx[0]].rot_stride;
1315 addr0 = ctx->rot_intermediate[0].phys;
1316 if (ctx->double_buffering)
1317 addr1 = ctx->rot_intermediate[1].phys;
1318 } else {
1319 width = image->tile[tile_idx[0]].width;
1320 height = image->tile[tile_idx[0]].height;
1321 stride = image->stride;
1322 addr0 = image->base.phys0 +
1323 image->tile[tile_idx[0]].offset;
1324 if (ctx->double_buffering)
1325 addr1 = image->base.phys0 +
1326 image->tile[tile_idx[1]].offset;
1327 }
1328
1329 ipu_cpmem_zero(channel);
1330
1331 memset(&tile_image, 0, sizeof(tile_image));
1332 tile_image.pix.width = tile_image.rect.width = width;
1333 tile_image.pix.height = tile_image.rect.height = height;
1334 tile_image.pix.bytesperline = stride;
1335 tile_image.pix.pixelformat = image->fmt->fourcc;
1336 tile_image.phys0 = addr0;
1337 tile_image.phys1 = addr1;
1338 if (image->fmt->planar && !rot_swap_width_height) {
1339 tile_image.u_offset = image->tile[tile_idx[0]].u_off;
1340 tile_image.v_offset = image->tile[tile_idx[0]].v_off;
1341 }
1342
1343 ipu_cpmem_set_image(channel, &tile_image);
1344
1345 if (rot_mode)
1346 ipu_cpmem_set_rotation(channel, rot_mode);
1347
1348 /*
1349 * Skip writing U and V components to odd rows in the output
1350 * channels for planar 4:2:0.
1351 */
1352 if ((channel == chan->out_chan ||
1353 channel == chan->rotation_out_chan) &&
1354 image->fmt->planar && image->fmt->uv_height_dec == 2)
1355 ipu_cpmem_skip_odd_chroma_rows(channel);
1356
1357 if (channel == chan->rotation_in_chan ||
1358 channel == chan->rotation_out_chan) {
1359 burst_size = 8;
1360 ipu_cpmem_set_block_mode(channel);
1361 } else
1362 burst_size = (width % 16) ? 8 : 16;
1363
1364 ipu_cpmem_set_burstsize(channel, burst_size);
1365
1366 ipu_ic_task_idma_init(chan->ic, channel, width, height,
1367 burst_size, rot_mode);
1368
1369 /*
1370 * Setting a non-zero AXI ID collides with the PRG AXI snooping, so
1371 * only do this when there is no PRG present.
1372 */
1373 if (!channel->ipu->prg_priv)
1374 ipu_cpmem_set_axi_id(channel, 1);
1375
1376 ipu_idmac_set_double_buffer(channel, ctx->double_buffering);
1377 }
1378
static int convert_start(struct ipu_image_convert_run *run, unsigned int tile)
1380 {
1381 struct ipu_image_convert_ctx *ctx = run->ctx;
1382 struct ipu_image_convert_chan *chan = ctx->chan;
1383 struct ipu_image_convert_priv *priv = chan->priv;
1384 struct ipu_image_convert_image *s_image = &ctx->in;
1385 struct ipu_image_convert_image *d_image = &ctx->out;
1386 unsigned int dst_tile = ctx->out_tile_map[tile];
1387 unsigned int dest_width, dest_height;
1388 unsigned int col, row;
1389 u32 rsc;
1390 int ret;
1391
1392 dev_dbg(priv->ipu->dev, "%s: task %u: starting ctx %p run %p tile %u -> %u\n",
1393 __func__, chan->ic_task, ctx, run, tile, dst_tile);
1394
1395 /* clear EOF irq mask */
1396 ctx->eof_mask = 0;
1397
1398 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
1399 /* swap width/height for resizer */
1400 dest_width = d_image->tile[dst_tile].height;
1401 dest_height = d_image->tile[dst_tile].width;
1402 } else {
1403 dest_width = d_image->tile[dst_tile].width;
1404 dest_height = d_image->tile[dst_tile].height;
1405 }
1406
1407 row = tile / s_image->num_cols;
1408 col = tile % s_image->num_cols;
1409
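	/*
	 * Pack the per-tile scaling parameters into the IC task RSC value:
	 * vertical downsizing coefficient in bits 31:30, vertical resizing
	 * coefficient in bits 29:16, horizontal downsizing coefficient in
	 * bits 15:14, horizontal resizing coefficient in bits 13:0.
	 */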
1410 rsc = (ctx->downsize_coeff_v << 30) |
1411 (ctx->resize_coeffs_v[row] << 16) |
1412 (ctx->downsize_coeff_h << 14) |
1413 (ctx->resize_coeffs_h[col]);
1414
1415 dev_dbg(priv->ipu->dev, "%s: %ux%u -> %ux%u (rsc = 0x%x)\n",
1416 __func__, s_image->tile[tile].width,
1417 s_image->tile[tile].height, dest_width, dest_height, rsc);
1418
1419 /* setup the IC resizer and CSC */
1420 ret = ipu_ic_task_init_rsc(chan->ic, &ctx->csc,
1421 s_image->tile[tile].width,
1422 s_image->tile[tile].height,
1423 dest_width,
1424 dest_height,
1425 rsc);
1426 if (ret) {
1427 dev_err(priv->ipu->dev, "ipu_ic_task_init failed, %d\n", ret);
1428 return ret;
1429 }
1430
1431 /* init the source MEM-->IC PP IDMAC channel */
1432 init_idmac_channel(ctx, chan->in_chan, s_image,
1433 IPU_ROTATE_NONE, false, tile);
1434
1435 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
1436 /* init the IC PP-->MEM IDMAC channel */
1437 init_idmac_channel(ctx, chan->out_chan, d_image,
1438 IPU_ROTATE_NONE, true, tile);
1439
1440 /* init the MEM-->IC PP ROT IDMAC channel */
1441 init_idmac_channel(ctx, chan->rotation_in_chan, d_image,
1442 ctx->rot_mode, true, tile);
1443
1444 /* init the destination IC PP ROT-->MEM IDMAC channel */
1445 init_idmac_channel(ctx, chan->rotation_out_chan, d_image,
1446 IPU_ROTATE_NONE, false, tile);
1447
1448 /* now link IC PP-->MEM to MEM-->IC PP ROT */
1449 ipu_idmac_link(chan->out_chan, chan->rotation_in_chan);
1450 } else {
1451 /* init the destination IC PP-->MEM IDMAC channel */
1452 init_idmac_channel(ctx, chan->out_chan, d_image,
1453 ctx->rot_mode, false, tile);
1454 }
1455
1456 /* enable the IC */
1457 ipu_ic_enable(chan->ic);
1458
1459 /* set buffers ready */
1460 ipu_idmac_select_buffer(chan->in_chan, 0);
1461 ipu_idmac_select_buffer(chan->out_chan, 0);
1462 if (ipu_rot_mode_is_irt(ctx->rot_mode))
1463 ipu_idmac_select_buffer(chan->rotation_out_chan, 0);
1464 if (ctx->double_buffering) {
1465 ipu_idmac_select_buffer(chan->in_chan, 1);
1466 ipu_idmac_select_buffer(chan->out_chan, 1);
1467 if (ipu_rot_mode_is_irt(ctx->rot_mode))
1468 ipu_idmac_select_buffer(chan->rotation_out_chan, 1);
1469 }
1470
1471 /* enable the channels! */
1472 ipu_idmac_enable_channel(chan->in_chan);
1473 ipu_idmac_enable_channel(chan->out_chan);
1474 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
1475 ipu_idmac_enable_channel(chan->rotation_in_chan);
1476 ipu_idmac_enable_channel(chan->rotation_out_chan);
1477 }
1478
1479 ipu_ic_task_enable(chan->ic);
1480
1481 ipu_cpmem_dump(chan->in_chan);
1482 ipu_cpmem_dump(chan->out_chan);
1483 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
1484 ipu_cpmem_dump(chan->rotation_in_chan);
1485 ipu_cpmem_dump(chan->rotation_out_chan);
1486 }
1487
1488 ipu_dump(priv->ipu);
1489
1490 return 0;
1491 }
1492
1493 /* hold irqlock when calling */
static int do_run(struct ipu_image_convert_run *run)
1495 {
1496 struct ipu_image_convert_ctx *ctx = run->ctx;
1497 struct ipu_image_convert_chan *chan = ctx->chan;
1498
1499 lockdep_assert_held(&chan->irqlock);
1500
1501 ctx->in.base.phys0 = run->in_phys;
1502 ctx->out.base.phys0 = run->out_phys;
1503
1504 ctx->cur_buf_num = 0;
1505 ctx->next_tile = 1;
1506
1507 /* remove run from pending_q and set as current */
1508 list_del(&run->list);
1509 chan->current_run = run;
1510
1511 return convert_start(run, 0);
1512 }
1513
1514 /* hold irqlock when calling */
static void run_next(struct ipu_image_convert_chan *chan)
1516 {
1517 struct ipu_image_convert_priv *priv = chan->priv;
1518 struct ipu_image_convert_run *run, *tmp;
1519 int ret;
1520
1521 lockdep_assert_held(&chan->irqlock);
1522
1523 list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
1524 /* skip contexts that are aborting */
1525 if (run->ctx->aborting) {
1526 dev_dbg(priv->ipu->dev,
1527 "%s: task %u: skipping aborting ctx %p run %p\n",
1528 __func__, chan->ic_task, run->ctx, run);
1529 continue;
1530 }
1531
1532 ret = do_run(run);
1533 if (!ret)
1534 break;
1535
1536 /*
1537 * something went wrong with start, add the run
1538 * to done q and continue to the next run in the
1539 * pending q.
1540 */
1541 run->status = ret;
1542 list_add_tail(&run->list, &chan->done_q);
1543 chan->current_run = NULL;
1544 }
1545 }
1546
static void empty_done_q(struct ipu_image_convert_chan *chan)
1548 {
1549 struct ipu_image_convert_priv *priv = chan->priv;
1550 struct ipu_image_convert_run *run;
1551 unsigned long flags;
1552
1553 spin_lock_irqsave(&chan->irqlock, flags);
1554
1555 while (!list_empty(&chan->done_q)) {
1556 run = list_entry(chan->done_q.next,
1557 struct ipu_image_convert_run,
1558 list);
1559
1560 list_del(&run->list);
1561
1562 dev_dbg(priv->ipu->dev,
1563 "%s: task %u: completing ctx %p run %p with %d\n",
1564 __func__, chan->ic_task, run->ctx, run, run->status);
1565
1566 /* call the completion callback and free the run */
1567 spin_unlock_irqrestore(&chan->irqlock, flags);
1568 run->ctx->complete(run, run->ctx->complete_context);
1569 spin_lock_irqsave(&chan->irqlock, flags);
1570 }
1571
1572 spin_unlock_irqrestore(&chan->irqlock, flags);
1573 }
1574
1575 /*
1576 * the bottom half thread clears out the done_q, calling the
1577 * completion handler for each.
1578 */
static irqreturn_t do_bh(int irq, void *dev_id)
1580 {
1581 struct ipu_image_convert_chan *chan = dev_id;
1582 struct ipu_image_convert_priv *priv = chan->priv;
1583 struct ipu_image_convert_ctx *ctx;
1584 unsigned long flags;
1585
1586 dev_dbg(priv->ipu->dev, "%s: task %u: enter\n", __func__,
1587 chan->ic_task);
1588
1589 empty_done_q(chan);
1590
1591 spin_lock_irqsave(&chan->irqlock, flags);
1592
1593 /*
1594 * the done_q is cleared out, signal any contexts
1595 * that are aborting that abort can complete.
1596 */
1597 list_for_each_entry(ctx, &chan->ctx_list, list) {
1598 if (ctx->aborting) {
1599 dev_dbg(priv->ipu->dev,
1600 "%s: task %u: signaling abort for ctx %p\n",
1601 __func__, chan->ic_task, ctx);
1602 complete_all(&ctx->aborted);
1603 }
1604 }
1605
1606 spin_unlock_irqrestore(&chan->irqlock, flags);
1607
1608 dev_dbg(priv->ipu->dev, "%s: task %u: exit\n", __func__,
1609 chan->ic_task);
1610
1611 return IRQ_HANDLED;
1612 }
1613
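/*
 * Two consecutive tiles can be converted back-to-back without
 * reprogramming the IC only if their scaling coefficients and tile
 * dimensions are identical; otherwise the IC task must be stopped and
 * restarted with the next tile's settings.
 */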
static bool ic_settings_changed(struct ipu_image_convert_ctx *ctx)
1615 {
1616 unsigned int cur_tile = ctx->next_tile - 1;
1617 unsigned int next_tile = ctx->next_tile;
1618
1619 if (ctx->resize_coeffs_h[cur_tile % ctx->in.num_cols] !=
1620 ctx->resize_coeffs_h[next_tile % ctx->in.num_cols] ||
1621 ctx->resize_coeffs_v[cur_tile / ctx->in.num_cols] !=
1622 ctx->resize_coeffs_v[next_tile / ctx->in.num_cols] ||
1623 ctx->in.tile[cur_tile].width != ctx->in.tile[next_tile].width ||
1624 ctx->in.tile[cur_tile].height != ctx->in.tile[next_tile].height ||
1625 ctx->out.tile[cur_tile].width != ctx->out.tile[next_tile].width ||
1626 ctx->out.tile[cur_tile].height != ctx->out.tile[next_tile].height)
1627 return true;
1628
1629 return false;
1630 }
1631
1632 /* hold irqlock when calling */
static irqreturn_t do_tile_complete(struct ipu_image_convert_run *run)
1634 {
1635 struct ipu_image_convert_ctx *ctx = run->ctx;
1636 struct ipu_image_convert_chan *chan = ctx->chan;
1637 struct ipu_image_tile *src_tile, *dst_tile;
1638 struct ipu_image_convert_image *s_image = &ctx->in;
1639 struct ipu_image_convert_image *d_image = &ctx->out;
1640 struct ipuv3_channel *outch;
1641 unsigned int dst_idx;
1642
1643 lockdep_assert_held(&chan->irqlock);
1644
1645 outch = ipu_rot_mode_is_irt(ctx->rot_mode) ?
1646 chan->rotation_out_chan : chan->out_chan;
1647
1648 /*
1649 * It is difficult to stop the channel DMA before the channels
1650 * enter the paused state. Without double-buffering the channels
1651 * are always in a paused state when the EOF irq occurs, so it
1652 * is safe to stop the channels now. For double-buffering we
1653 * just ignore the abort until the operation completes, when it
1654 * is safe to shut down.
1655 */
1656 if (ctx->aborting && !ctx->double_buffering) {
1657 convert_stop(run);
1658 run->status = -EIO;
1659 goto done;
1660 }
1661
1662 if (ctx->next_tile == ctx->num_tiles) {
1663 /*
1664 * the conversion is complete
1665 */
1666 convert_stop(run);
1667 run->status = 0;
1668 goto done;
1669 }
1670
1671 /*
1672 * not done, place the next tile buffers.
1673 */
1674 if (!ctx->double_buffering) {
1675 if (ic_settings_changed(ctx)) {
1676 convert_stop(run);
1677 convert_start(run, ctx->next_tile);
1678 } else {
1679 src_tile = &s_image->tile[ctx->next_tile];
1680 dst_idx = ctx->out_tile_map[ctx->next_tile];
1681 dst_tile = &d_image->tile[dst_idx];
1682
1683 ipu_cpmem_set_buffer(chan->in_chan, 0,
1684 s_image->base.phys0 +
1685 src_tile->offset);
1686 ipu_cpmem_set_buffer(outch, 0,
1687 d_image->base.phys0 +
1688 dst_tile->offset);
1689 if (s_image->fmt->planar)
1690 ipu_cpmem_set_uv_offset(chan->in_chan,
1691 src_tile->u_off,
1692 src_tile->v_off);
1693 if (d_image->fmt->planar)
1694 ipu_cpmem_set_uv_offset(outch,
1695 dst_tile->u_off,
1696 dst_tile->v_off);
1697
1698 ipu_idmac_select_buffer(chan->in_chan, 0);
1699 ipu_idmac_select_buffer(outch, 0);
1700 }
1701 } else if (ctx->next_tile < ctx->num_tiles - 1) {
1702
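		/*
		 * With double-buffering, the next tile was already queued in
		 * the other IDMAC buffer, so load the tile after that into
		 * the buffer that just completed.
		 */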
1703 src_tile = &s_image->tile[ctx->next_tile + 1];
1704 dst_idx = ctx->out_tile_map[ctx->next_tile + 1];
1705 dst_tile = &d_image->tile[dst_idx];
1706
1707 ipu_cpmem_set_buffer(chan->in_chan, ctx->cur_buf_num,
1708 s_image->base.phys0 + src_tile->offset);
1709 ipu_cpmem_set_buffer(outch, ctx->cur_buf_num,
1710 d_image->base.phys0 + dst_tile->offset);
1711
1712 ipu_idmac_select_buffer(chan->in_chan, ctx->cur_buf_num);
1713 ipu_idmac_select_buffer(outch, ctx->cur_buf_num);
1714
1715 ctx->cur_buf_num ^= 1;
1716 }
1717
1718 ctx->eof_mask = 0; /* clear EOF irq mask for next tile */
1719 ctx->next_tile++;
1720 return IRQ_HANDLED;
1721 done:
1722 list_add_tail(&run->list, &chan->done_q);
1723 chan->current_run = NULL;
1724 run_next(chan);
1725 return IRQ_WAKE_THREAD;
1726 }
1727
static irqreturn_t eof_irq(int irq, void *data)
1729 {
1730 struct ipu_image_convert_chan *chan = data;
1731 struct ipu_image_convert_priv *priv = chan->priv;
1732 struct ipu_image_convert_ctx *ctx;
1733 struct ipu_image_convert_run *run;
1734 irqreturn_t ret = IRQ_HANDLED;
1735 bool tile_complete = false;
1736 unsigned long flags;
1737
1738 spin_lock_irqsave(&chan->irqlock, flags);
1739
1740 /* get current run and its context */
1741 run = chan->current_run;
1742 if (!run) {
1743 ret = IRQ_NONE;
1744 goto out;
1745 }
1746
1747 ctx = run->ctx;
1748
1749 if (irq == chan->in_eof_irq) {
1750 ctx->eof_mask |= EOF_IRQ_IN;
1751 } else if (irq == chan->out_eof_irq) {
1752 ctx->eof_mask |= EOF_IRQ_OUT;
1753 } else if (irq == chan->rot_in_eof_irq ||
1754 irq == chan->rot_out_eof_irq) {
1755 if (!ipu_rot_mode_is_irt(ctx->rot_mode)) {
1756 /* this was NOT a rotation op, shouldn't happen */
1757 dev_err(priv->ipu->dev,
1758 "Unexpected rotation interrupt\n");
1759 goto out;
1760 }
1761 ctx->eof_mask |= (irq == chan->rot_in_eof_irq) ?
1762 EOF_IRQ_ROT_IN : EOF_IRQ_ROT_OUT;
1763 } else {
1764 dev_err(priv->ipu->dev, "Received unknown irq %d\n", irq);
1765 ret = IRQ_NONE;
1766 goto out;
1767 }
1768
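/*
 * A tile is only complete once every channel in the pipeline has raised
 * EOF: input and output for a plain IC pass, plus the rotator input and
 * output channels when the IRT is used.
 */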
1769 if (ipu_rot_mode_is_irt(ctx->rot_mode))
1770 tile_complete = (ctx->eof_mask == EOF_IRQ_ROT_COMPLETE);
1771 else
1772 tile_complete = (ctx->eof_mask == EOF_IRQ_COMPLETE);
1773
1774 if (tile_complete)
1775 ret = do_tile_complete(run);
1776 out:
1777 spin_unlock_irqrestore(&chan->irqlock, flags);
1778 return ret;
1779 }
1780
1781 /*
1782 * try to force the completion of runs for this ctx. Called when
1783 * abort wait times out in ipu_image_convert_abort().
1784 */
1785 static void force_abort(struct ipu_image_convert_ctx *ctx)
1786 {
1787 struct ipu_image_convert_chan *chan = ctx->chan;
1788 struct ipu_image_convert_run *run;
1789 unsigned long flags;
1790
1791 spin_lock_irqsave(&chan->irqlock, flags);
1792
1793 run = chan->current_run;
1794 if (run && run->ctx == ctx) {
1795 convert_stop(run);
1796 run->status = -EIO;
1797 list_add_tail(&run->list, &chan->done_q);
1798 chan->current_run = NULL;
1799 run_next(chan);
1800 }
1801
1802 spin_unlock_irqrestore(&chan->irqlock, flags);
1803
1804 empty_done_q(chan);
1805 }
1806
1807 static void release_ipu_resources(struct ipu_image_convert_chan *chan)
1808 {
1809 if (chan->in_eof_irq >= 0)
1810 free_irq(chan->in_eof_irq, chan);
1811 if (chan->rot_in_eof_irq >= 0)
1812 free_irq(chan->rot_in_eof_irq, chan);
1813 if (chan->out_eof_irq >= 0)
1814 free_irq(chan->out_eof_irq, chan);
1815 if (chan->rot_out_eof_irq >= 0)
1816 free_irq(chan->rot_out_eof_irq, chan);
1817
1818 if (!IS_ERR_OR_NULL(chan->in_chan))
1819 ipu_idmac_put(chan->in_chan);
1820 if (!IS_ERR_OR_NULL(chan->out_chan))
1821 ipu_idmac_put(chan->out_chan);
1822 if (!IS_ERR_OR_NULL(chan->rotation_in_chan))
1823 ipu_idmac_put(chan->rotation_in_chan);
1824 if (!IS_ERR_OR_NULL(chan->rotation_out_chan))
1825 ipu_idmac_put(chan->rotation_out_chan);
1826 if (!IS_ERR_OR_NULL(chan->ic))
1827 ipu_ic_put(chan->ic);
1828
1829 chan->in_chan = chan->out_chan = chan->rotation_in_chan =
1830 chan->rotation_out_chan = NULL;
1831 chan->in_eof_irq = -1;
1832 chan->rot_in_eof_irq = -1;
1833 chan->out_eof_irq = -1;
1834 chan->rot_out_eof_irq = -1;
1835 }
1836
1837 static int get_eof_irq(struct ipu_image_convert_chan *chan,
1838 struct ipuv3_channel *channel)
1839 {
1840 struct ipu_image_convert_priv *priv = chan->priv;
1841 int ret, irq;
1842
1843 irq = ipu_idmac_channel_irq(priv->ipu, channel, IPU_IRQ_EOF);
1844
1845 ret = request_threaded_irq(irq, eof_irq, do_bh, 0, "ipu-ic", chan);
1846 if (ret < 0) {
1847 dev_err(priv->ipu->dev, "could not acquire irq %d\n", irq);
1848 return ret;
1849 }
1850
1851 return irq;
1852 }
1853
1854 static int get_ipu_resources(struct ipu_image_convert_chan *chan)
1855 {
1856 const struct ipu_image_convert_dma_chan *dma = chan->dma_ch;
1857 struct ipu_image_convert_priv *priv = chan->priv;
1858 int ret;
1859
1860 /* get IC */
1861 chan->ic = ipu_ic_get(priv->ipu, chan->ic_task);
1862 if (IS_ERR(chan->ic)) {
1863 dev_err(priv->ipu->dev, "could not acquire IC\n");
1864 ret = PTR_ERR(chan->ic);
1865 goto err;
1866 }
1867
1868 /* get IDMAC channels */
1869 chan->in_chan = ipu_idmac_get(priv->ipu, dma->in);
1870 chan->out_chan = ipu_idmac_get(priv->ipu, dma->out);
1871 if (IS_ERR(chan->in_chan) || IS_ERR(chan->out_chan)) {
1872 dev_err(priv->ipu->dev, "could not acquire idmac channels\n");
1873 ret = -EBUSY;
1874 goto err;
1875 }
1876
1877 chan->rotation_in_chan = ipu_idmac_get(priv->ipu, dma->rot_in);
1878 chan->rotation_out_chan = ipu_idmac_get(priv->ipu, dma->rot_out);
1879 if (IS_ERR(chan->rotation_in_chan) || IS_ERR(chan->rotation_out_chan)) {
1880 dev_err(priv->ipu->dev,
1881 "could not acquire idmac rotation channels\n");
1882 ret = -EBUSY;
1883 goto err;
1884 }
1885
1886 /* acquire the EOF interrupts */
1887 ret = get_eof_irq(chan, chan->in_chan);
1888 if (ret < 0) {
1889 chan->in_eof_irq = -1;
1890 goto err;
1891 }
1892 chan->in_eof_irq = ret;
1893
1894 ret = get_eof_irq(chan, chan->rotation_in_chan);
1895 if (ret < 0) {
1896 chan->rot_in_eof_irq = -1;
1897 goto err;
1898 }
1899 chan->rot_in_eof_irq = ret;
1900
1901 ret = get_eof_irq(chan, chan->out_chan);
1902 if (ret < 0) {
1903 chan->out_eof_irq = -1;
1904 goto err;
1905 }
1906 chan->out_eof_irq = ret;
1907
1908 ret = get_eof_irq(chan, chan->rotation_out_chan);
1909 if (ret < 0) {
1910 chan->rot_out_eof_irq = -1;
1911 goto err;
1912 }
1913 chan->rot_out_eof_irq = ret;
1914
1915 return 0;
1916 err:
1917 release_ipu_resources(chan);
1918 return ret;
1919 }
1920
1921 static int fill_image(struct ipu_image_convert_ctx *ctx,
1922 struct ipu_image_convert_image *ic_image,
1923 struct ipu_image *image,
1924 enum ipu_image_convert_type type)
1925 {
1926 struct ipu_image_convert_priv *priv = ctx->chan->priv;
1927
1928 ic_image->base = *image;
1929 ic_image->type = type;
1930
1931 ic_image->fmt = get_format(image->pix.pixelformat);
1932 if (!ic_image->fmt) {
1933 dev_err(priv->ipu->dev, "pixelformat not supported for %s\n",
1934 type == IMAGE_CONVERT_OUT ? "Output" : "Input");
1935 return -EINVAL;
1936 }
1937
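/*
 * For the planar YUV formats the programmed stride is the luma plane
 * stride; since the Y plane is 8 bits per pixel this is simply the
 * width in pixels. Interleaved formats take the stride from
 * bytesperline directly.
 */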
1938 if (ic_image->fmt->planar)
1939 ic_image->stride = ic_image->base.pix.width;
1940 else
1941 ic_image->stride = ic_image->base.pix.bytesperline;
1942
1943 return 0;
1944 }
1945
1946 /* borrowed from drivers/media/v4l2-core/v4l2-common.c */
1947 static unsigned int clamp_align(unsigned int x, unsigned int min,
1948 unsigned int max, unsigned int align)
1949 {
1950 /* Bits that must be zero to be aligned */
1951 unsigned int mask = ~((1 << align) - 1);
1952
1953 /* Clamp to aligned min and max */
1954 x = clamp(x, (min + ~mask) & mask, max & mask);
1955
1956 /* Round to nearest aligned value */
1957 if (align)
1958 x = (x + (1 << (align - 1))) & mask;
1959
1960 return x;
1961 }
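/*
 * Worked example (illustrative): clamp_align(100, 16, 4096, 3) clamps 100
 * into the range [16, 4096] and then rounds it to the nearest multiple of
 * 2^3 = 8, returning 104.
 */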
1962
1963 /* Adjusts input/output images to IPU restrictions */
1964 void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out,
1965 enum ipu_rotate_mode rot_mode)
1966 {
1967 const struct ipu_image_pixfmt *infmt, *outfmt;
1968 u32 w_align_out, h_align_out;
1969 u32 w_align_in, h_align_in;
1970
1971 infmt = get_format(in->pix.pixelformat);
1972 outfmt = get_format(out->pix.pixelformat);
1973
1974 /* set some default pixel formats if needed */
1975 if (!infmt) {
1976 in->pix.pixelformat = V4L2_PIX_FMT_RGB24;
1977 infmt = get_format(V4L2_PIX_FMT_RGB24);
1978 }
1979 if (!outfmt) {
1980 out->pix.pixelformat = V4L2_PIX_FMT_RGB24;
1981 outfmt = get_format(V4L2_PIX_FMT_RGB24);
1982 }
1983
1984 /* image converter does not handle fields */
1985 in->pix.field = out->pix.field = V4L2_FIELD_NONE;
1986
1987 /* resizer cannot downsize more than 4:1 */
1988 if (ipu_rot_mode_is_irt(rot_mode)) {
1989 out->pix.height = max_t(__u32, out->pix.height,
1990 in->pix.width / 4);
1991 out->pix.width = max_t(__u32, out->pix.width,
1992 in->pix.height / 4);
1993 } else {
1994 out->pix.width = max_t(__u32, out->pix.width,
1995 in->pix.width / 4);
1996 out->pix.height = max_t(__u32, out->pix.height,
1997 in->pix.height / 4);
1998 }
1999
2000 /* align input width/height */
2001 w_align_in = ilog2(tile_width_align(IMAGE_CONVERT_IN, infmt,
2002 rot_mode));
2003 h_align_in = ilog2(tile_height_align(IMAGE_CONVERT_IN, infmt,
2004 rot_mode));
2005 in->pix.width = clamp_align(in->pix.width, MIN_W, MAX_W,
2006 w_align_in);
2007 in->pix.height = clamp_align(in->pix.height, MIN_H, MAX_H,
2008 h_align_in);
2009
2010 /* align output width/height */
2011 w_align_out = ilog2(tile_width_align(IMAGE_CONVERT_OUT, outfmt,
2012 rot_mode));
2013 h_align_out = ilog2(tile_height_align(IMAGE_CONVERT_OUT, outfmt,
2014 rot_mode));
2015 out->pix.width = clamp_align(out->pix.width, MIN_W, MAX_W,
2016 w_align_out);
2017 out->pix.height = clamp_align(out->pix.height, MIN_H, MAX_H,
2018 h_align_out);
2019
2020 /* set input/output strides and image sizes */
2021 in->pix.bytesperline = infmt->planar ?
2022 clamp_align(in->pix.width, 2 << w_align_in, MAX_W,
2023 w_align_in) :
2024 clamp_align((in->pix.width * infmt->bpp) >> 3,
2025 ((2 << w_align_in) * infmt->bpp) >> 3,
2026 (MAX_W * infmt->bpp) >> 3,
2027 w_align_in);
2028 in->pix.sizeimage = infmt->planar ?
2029 (in->pix.height * in->pix.bytesperline * infmt->bpp) >> 3 :
2030 in->pix.height * in->pix.bytesperline;
2031 out->pix.bytesperline = outfmt->planar ? out->pix.width :
2032 (out->pix.width * outfmt->bpp) >> 3;
2033 out->pix.sizeimage = outfmt->planar ?
2034 (out->pix.height * out->pix.bytesperline * outfmt->bpp) >> 3 :
2035 out->pix.height * out->pix.bytesperline;
2036 }
2037 EXPORT_SYMBOL_GPL(ipu_image_convert_adjust);
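/*
 * Example of the 4:1 downscale restriction above (illustrative): a
 * 1920x1080 input combined with a 90 degree rotation cannot be adjusted
 * below roughly a 270x480 output (before the width/height alignment is
 * applied), because the output limits are derived from the swapped input
 * dimensions: 1080 / 4 = 270 wide, 1920 / 4 = 480 high.
 */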
2038
2039 /*
2040  * this is used by ipu_image_convert_prepare() to verify that the given
2041  * input and output images are valid before starting the conversion.
2042  * Clients can also call it before calling ipu_image_convert_prepare().
2043 */
2044 int ipu_image_convert_verify(struct ipu_image *in, struct ipu_image *out,
2045 enum ipu_rotate_mode rot_mode)
2046 {
2047 struct ipu_image testin, testout;
2048
2049 testin = *in;
2050 testout = *out;
2051
2052 ipu_image_convert_adjust(&testin, &testout, rot_mode);
2053
2054 if (testin.pix.width != in->pix.width ||
2055 testin.pix.height != in->pix.height ||
2056 testout.pix.width != out->pix.width ||
2057 testout.pix.height != out->pix.height)
2058 return -EINVAL;
2059
2060 return 0;
2061 }
2062 EXPORT_SYMBOL_GPL(ipu_image_convert_verify);
2063
2064 /*
2065 * Call ipu_image_convert_prepare() to prepare for the conversion of
2066 * given images and rotation mode. Returns a new conversion context.
2067 */
2068 struct ipu_image_convert_ctx *
2069 ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
2070 struct ipu_image *in, struct ipu_image *out,
2071 enum ipu_rotate_mode rot_mode,
2072 ipu_image_convert_cb_t complete,
2073 void *complete_context)
2074 {
2075 struct ipu_image_convert_priv *priv = ipu->image_convert_priv;
2076 struct ipu_image_convert_image *s_image, *d_image;
2077 struct ipu_image_convert_chan *chan;
2078 struct ipu_image_convert_ctx *ctx;
2079 unsigned long flags;
2080 unsigned int i;
2081 bool get_res;
2082 int ret;
2083
2084 if (!in || !out || !complete ||
2085 (ic_task != IC_TASK_VIEWFINDER &&
2086 ic_task != IC_TASK_POST_PROCESSOR))
2087 return ERR_PTR(-EINVAL);
2088
2089 /* verify the in/out images before continuing */
2090 ret = ipu_image_convert_verify(in, out, rot_mode);
2091 if (ret) {
2092 dev_err(priv->ipu->dev, "%s: in/out formats invalid\n",
2093 __func__);
2094 return ERR_PTR(ret);
2095 }
2096
2097 chan = &priv->chan[ic_task];
2098
2099 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2100 if (!ctx)
2101 return ERR_PTR(-ENOMEM);
2102
2103 dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p\n", __func__,
2104 chan->ic_task, ctx);
2105
2106 ctx->chan = chan;
2107 init_completion(&ctx->aborted);
2108
2109 ctx->rot_mode = rot_mode;
2110
2111 /* Sets ctx->in.num_rows/cols as well */
2112 ret = calc_image_resize_coefficients(ctx, in, out);
2113 if (ret)
2114 goto out_free;
2115
2116 s_image = &ctx->in;
2117 d_image = &ctx->out;
2118
2119 /* set tiling and rotation */
2120 if (ipu_rot_mode_is_irt(rot_mode)) {
2121 d_image->num_rows = s_image->num_cols;
2122 d_image->num_cols = s_image->num_rows;
2123 } else {
2124 d_image->num_rows = s_image->num_rows;
2125 d_image->num_cols = s_image->num_cols;
2126 }
2127
2128 ctx->num_tiles = d_image->num_cols * d_image->num_rows;
2129
2130 ret = fill_image(ctx, s_image, in, IMAGE_CONVERT_IN);
2131 if (ret)
2132 goto out_free;
2133 ret = fill_image(ctx, d_image, out, IMAGE_CONVERT_OUT);
2134 if (ret)
2135 goto out_free;
2136
2137 calc_out_tile_map(ctx);
2138
2139 find_seams(ctx, s_image, d_image);
2140
2141 ret = calc_tile_dimensions(ctx, s_image);
2142 if (ret)
2143 goto out_free;
2144
2145 ret = calc_tile_offsets(ctx, s_image);
2146 if (ret)
2147 goto out_free;
2148
2149 calc_tile_dimensions(ctx, d_image);
2150 ret = calc_tile_offsets(ctx, d_image);
2151 if (ret)
2152 goto out_free;
2153
2154 calc_tile_resize_coefficients(ctx);
2155
2156 ret = ipu_ic_calc_csc(&ctx->csc,
2157 s_image->base.pix.ycbcr_enc,
2158 s_image->base.pix.quantization,
2159 ipu_pixelformat_to_colorspace(s_image->fmt->fourcc),
2160 d_image->base.pix.ycbcr_enc,
2161 d_image->base.pix.quantization,
2162 ipu_pixelformat_to_colorspace(d_image->fmt->fourcc));
2163 if (ret)
2164 goto out_free;
2165
2166 dump_format(ctx, s_image);
2167 dump_format(ctx, d_image);
2168
2169 ctx->complete = complete;
2170 ctx->complete_context = complete_context;
2171
2172 /*
2173 * Can we use double-buffering for this operation? If there is
2174 * only one tile (the whole image can be converted in a single
2175 * operation) there's no point in using double-buffering. Also,
2176 * the IPU's IDMAC channels allow only a single U and V plane
2177 * offset shared between both buffers, but these offsets change
2178 * for every tile, and therefore would have to be updated for
2179 * each buffer which is not possible. So double-buffering is
2180 * impossible when either the source or destination images are
2181 * a planar format (YUV420, YUV422P, etc.). Further, differently
2182 * sized tiles or different resizing coefficients per tile
2183 * prevent double-buffering as well.
2184 */
2185 ctx->double_buffering = (ctx->num_tiles > 1 &&
2186 !s_image->fmt->planar &&
2187 !d_image->fmt->planar);
2188 for (i = 1; i < ctx->num_tiles; i++) {
2189 if (ctx->in.tile[i].width != ctx->in.tile[0].width ||
2190 ctx->in.tile[i].height != ctx->in.tile[0].height ||
2191 ctx->out.tile[i].width != ctx->out.tile[0].width ||
2192 ctx->out.tile[i].height != ctx->out.tile[0].height) {
2193 ctx->double_buffering = false;
2194 break;
2195 }
2196 }
2197 for (i = 1; i < ctx->in.num_cols; i++) {
2198 if (ctx->resize_coeffs_h[i] != ctx->resize_coeffs_h[0]) {
2199 ctx->double_buffering = false;
2200 break;
2201 }
2202 }
2203 for (i = 1; i < ctx->in.num_rows; i++) {
2204 if (ctx->resize_coeffs_v[i] != ctx->resize_coeffs_v[0]) {
2205 ctx->double_buffering = false;
2206 break;
2207 }
2208 }
2209
2210 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
2211 unsigned long intermediate_size = d_image->tile[0].size;
2212
2213 for (i = 1; i < ctx->num_tiles; i++) {
2214 if (d_image->tile[i].size > intermediate_size)
2215 intermediate_size = d_image->tile[i].size;
2216 }
2217
2218 ret = alloc_dma_buf(priv, &ctx->rot_intermediate[0],
2219 intermediate_size);
2220 if (ret)
2221 goto out_free;
2222 if (ctx->double_buffering) {
2223 ret = alloc_dma_buf(priv,
2224 &ctx->rot_intermediate[1],
2225 intermediate_size);
2226 if (ret)
2227 goto out_free_dmabuf0;
2228 }
2229 }
2230
2231 spin_lock_irqsave(&chan->irqlock, flags);
2232
2233 get_res = list_empty(&chan->ctx_list);
2234
2235 list_add_tail(&ctx->list, &chan->ctx_list);
2236
2237 spin_unlock_irqrestore(&chan->irqlock, flags);
2238
2239 if (get_res) {
2240 ret = get_ipu_resources(chan);
2241 if (ret)
2242 goto out_free_dmabuf1;
2243 }
2244
2245 return ctx;
2246
2247 out_free_dmabuf1:
2248 free_dma_buf(priv, &ctx->rot_intermediate[1]);
2249 spin_lock_irqsave(&chan->irqlock, flags);
2250 list_del(&ctx->list);
2251 spin_unlock_irqrestore(&chan->irqlock, flags);
2252 out_free_dmabuf0:
2253 free_dma_buf(priv, &ctx->rot_intermediate[0]);
2254 out_free:
2255 kfree(ctx);
2256 return ERR_PTR(ret);
2257 }
2258 EXPORT_SYMBOL_GPL(ipu_image_convert_prepare);
2259
2260 /*
2261  * Carry out a single image conversion run. Only the physical addresses of
2262  * the input and output image buffers are needed. The conversion context must
2263  * have been created previously with ipu_image_convert_prepare().
2264 */
2265 int ipu_image_convert_queue(struct ipu_image_convert_run *run)
2266 {
2267 struct ipu_image_convert_chan *chan;
2268 struct ipu_image_convert_priv *priv;
2269 struct ipu_image_convert_ctx *ctx;
2270 unsigned long flags;
2271 int ret = 0;
2272
2273 if (!run || !run->ctx || !run->in_phys || !run->out_phys)
2274 return -EINVAL;
2275
2276 ctx = run->ctx;
2277 chan = ctx->chan;
2278 priv = chan->priv;
2279
2280 dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p run %p\n", __func__,
2281 chan->ic_task, ctx, run);
2282
2283 INIT_LIST_HEAD(&run->list);
2284
2285 spin_lock_irqsave(&chan->irqlock, flags);
2286
2287 if (ctx->aborting) {
2288 ret = -EIO;
2289 goto unlock;
2290 }
2291
2292 list_add_tail(&run->list, &chan->pending_q);
2293
2294 if (!chan->current_run) {
2295 ret = do_run(run);
2296 if (ret)
2297 chan->current_run = NULL;
2298 }
2299 unlock:
2300 spin_unlock_irqrestore(&chan->irqlock, flags);
2301 return ret;
2302 }
2303 EXPORT_SYMBOL_GPL(ipu_image_convert_queue);
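/*
 * Minimal caller-side sketch of the prepare/queue flow (illustrative only,
 * error handling omitted; ipu, in, out, done_cb and cb_priv are hypothetical
 * caller variables):
 *
 *	ctx = ipu_image_convert_prepare(ipu, IC_TASK_POST_PROCESSOR, &in, &out,
 *					IPU_ROTATE_NONE, done_cb, cb_priv);
 *	run = kzalloc(sizeof(*run), GFP_KERNEL);
 *	run->ctx = ctx;
 *	run->in_phys = in.phys0;
 *	run->out_phys = out.phys0;
 *	ipu_image_convert_queue(run);
 *	... done_cb() is called when the conversion finishes ...
 *	ipu_image_convert_unprepare(ctx);
 *	kfree(run);
 */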
2304
2305 /* Abort any active or pending conversions for this context */
2306 static void __ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
2307 {
2308 struct ipu_image_convert_chan *chan = ctx->chan;
2309 struct ipu_image_convert_priv *priv = chan->priv;
2310 struct ipu_image_convert_run *run, *active_run, *tmp;
2311 unsigned long flags;
2312 int run_count, ret;
2313
2314 spin_lock_irqsave(&chan->irqlock, flags);
2315
2316 /* move all remaining pending runs in this context to done_q */
2317 list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
2318 if (run->ctx != ctx)
2319 continue;
2320 run->status = -EIO;
2321 list_move_tail(&run->list, &chan->done_q);
2322 }
2323
2324 run_count = get_run_count(ctx, &chan->done_q);
2325 active_run = (chan->current_run && chan->current_run->ctx == ctx) ?
2326 chan->current_run : NULL;
2327
2328 if (active_run)
2329 reinit_completion(&ctx->aborted);
2330
2331 ctx->aborting = true;
2332
2333 spin_unlock_irqrestore(&chan->irqlock, flags);
2334
2335 if (!run_count && !active_run) {
2336 dev_dbg(priv->ipu->dev,
2337 "%s: task %u: no abort needed for ctx %p\n",
2338 __func__, chan->ic_task, ctx);
2339 return;
2340 }
2341
2342 if (!active_run) {
2343 empty_done_q(chan);
2344 return;
2345 }
2346
2347 dev_dbg(priv->ipu->dev,
2348 "%s: task %u: wait for completion: %d runs\n",
2349 __func__, chan->ic_task, run_count);
2350
2351 ret = wait_for_completion_timeout(&ctx->aborted,
2352 msecs_to_jiffies(10000));
2353 if (ret == 0) {
2354 dev_warn(priv->ipu->dev, "%s: timeout\n", __func__);
2355 force_abort(ctx);
2356 }
2357 }
2358
2359 void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
2360 {
2361 __ipu_image_convert_abort(ctx);
2362 ctx->aborting = false;
2363 }
2364 EXPORT_SYMBOL_GPL(ipu_image_convert_abort);
2365
2366 /* Unprepare image conversion context */
2367 void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx)
2368 {
2369 struct ipu_image_convert_chan *chan = ctx->chan;
2370 struct ipu_image_convert_priv *priv = chan->priv;
2371 unsigned long flags;
2372 bool put_res;
2373
2374 /* make sure no runs are hanging around */
2375 __ipu_image_convert_abort(ctx);
2376
2377 dev_dbg(priv->ipu->dev, "%s: task %u: removing ctx %p\n", __func__,
2378 chan->ic_task, ctx);
2379
2380 spin_lock_irqsave(&chan->irqlock, flags);
2381
2382 list_del(&ctx->list);
2383
2384 put_res = list_empty(&chan->ctx_list);
2385
2386 spin_unlock_irqrestore(&chan->irqlock, flags);
2387
2388 if (put_res)
2389 release_ipu_resources(chan);
2390
2391 free_dma_buf(priv, &ctx->rot_intermediate[1]);
2392 free_dma_buf(priv, &ctx->rot_intermediate[0]);
2393
2394 kfree(ctx);
2395 }
2396 EXPORT_SYMBOL_GPL(ipu_image_convert_unprepare);
2397
2398 /*
2399 * "Canned" asynchronous single image conversion. Allocates and returns
2400 * a new conversion run. On successful return the caller must free the
2401 * run and call ipu_image_convert_unprepare() after conversion completes.
2402 */
2403 struct ipu_image_convert_run *
2404 ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
2405 struct ipu_image *in, struct ipu_image *out,
2406 enum ipu_rotate_mode rot_mode,
2407 ipu_image_convert_cb_t complete,
2408 void *complete_context)
2409 {
2410 struct ipu_image_convert_ctx *ctx;
2411 struct ipu_image_convert_run *run;
2412 int ret;
2413
2414 ctx = ipu_image_convert_prepare(ipu, ic_task, in, out, rot_mode,
2415 complete, complete_context);
2416 if (IS_ERR(ctx))
2417 return ERR_CAST(ctx);
2418
2419 run = kzalloc(sizeof(*run), GFP_KERNEL);
2420 if (!run) {
2421 ipu_image_convert_unprepare(ctx);
2422 return ERR_PTR(-ENOMEM);
2423 }
2424
2425 run->ctx = ctx;
2426 run->in_phys = in->phys0;
2427 run->out_phys = out->phys0;
2428
2429 ret = ipu_image_convert_queue(run);
2430 if (ret) {
2431 ipu_image_convert_unprepare(ctx);
2432 kfree(run);
2433 return ERR_PTR(ret);
2434 }
2435
2436 return run;
2437 }
2438 EXPORT_SYMBOL_GPL(ipu_image_convert);
2439
2440 /* "Canned" synchronous single image conversion */
2441 static void image_convert_sync_complete(struct ipu_image_convert_run *run,
2442 void *data)
2443 {
2444 struct completion *comp = data;
2445
2446 complete(comp);
2447 }
2448
2449 int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
2450 struct ipu_image *in, struct ipu_image *out,
2451 enum ipu_rotate_mode rot_mode)
2452 {
2453 struct ipu_image_convert_run *run;
2454 struct completion comp;
2455 int ret;
2456
2457 init_completion(&comp);
2458
2459 run = ipu_image_convert(ipu, ic_task, in, out, rot_mode,
2460 image_convert_sync_complete, &comp);
2461 if (IS_ERR(run))
2462 return PTR_ERR(run);
2463
2464 ret = wait_for_completion_timeout(&comp, msecs_to_jiffies(10000));
2465 ret = (ret == 0) ? -ETIMEDOUT : 0;
2466
2467 ipu_image_convert_unprepare(run->ctx);
2468 kfree(run);
2469
2470 return ret;
2471 }
2472 EXPORT_SYMBOL_GPL(ipu_image_convert_sync);
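/*
 * Illustrative blocking call (ipu, in and out are hypothetical caller
 * variables):
 *
 *	ret = ipu_image_convert_sync(ipu, IC_TASK_VIEWFINDER, &in, &out,
 *				     IPU_ROTATE_90_RIGHT);
 *
 * The call returns 0 on success, -ETIMEDOUT if the conversion does not
 * finish within the 10 second timeout above, or a negative error code if
 * the run could not be set up.
 */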
2473
2474 int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev)
2475 {
2476 struct ipu_image_convert_priv *priv;
2477 int i;
2478
2479 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
2480 if (!priv)
2481 return -ENOMEM;
2482
2483 ipu->image_convert_priv = priv;
2484 priv->ipu = ipu;
2485
2486 for (i = 0; i < IC_NUM_TASKS; i++) {
2487 struct ipu_image_convert_chan *chan = &priv->chan[i];
2488
2489 chan->ic_task = i;
2490 chan->priv = priv;
2491 chan->dma_ch = &image_convert_dma_chan[i];
2492 chan->in_eof_irq = -1;
2493 chan->rot_in_eof_irq = -1;
2494 chan->out_eof_irq = -1;
2495 chan->rot_out_eof_irq = -1;
2496
2497 spin_lock_init(&chan->irqlock);
2498 INIT_LIST_HEAD(&chan->ctx_list);
2499 INIT_LIST_HEAD(&chan->pending_q);
2500 INIT_LIST_HEAD(&chan->done_q);
2501 }
2502
2503 return 0;
2504 }
2505
2506 void ipu_image_convert_exit(struct ipu_soc *ipu)
2507 {
2508 }
2509