// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2012-2016 Mentor Graphics Inc.
 *
 * Queued image conversion support, with tiling and rotation.
 */

#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <video/imx-ipu-image-convert.h>
#include "ipu-prv.h"

/*
 * The IC Resizer has a restriction that the output frame from the
 * resizer must be 1024 or less in both width (pixels) and height
 * (lines).
 *
 * The image converter attempts to split up a conversion when
 * the desired output (converted) frame resolution exceeds the
 * IC resizer limit of 1024 in either dimension.
 *
 * If either dimension of the output frame exceeds the limit, the
 * dimension is split into 1 to 4 equal stripes, for a maximum
 * of 4*4 or 16 tiles. A conversion is then carried out for each
 * tile (but taking care to pass the full frame stride length to
 * the DMA channel's parameter memory!). IDMA double-buffering is used
 * to convert each tile back-to-back when possible (see note below
 * when double_buffering boolean is set).
 *
 * Note that the input frame must be split up into the same number
 * of tiles as the output frame:
 *
 *                      +---------+-----+
 *   +-----+---+        |  A      | B   |
 *   | A   | B |        |         |     |
 *   +-----+---+  -->   +---------+-----+
 *   | C   | D |        |  C      | D   |
 *   +-----+---+        |         |     |
 *                      +---------+-----+
 *
 * Clockwise 90° rotations are handled by first rescaling into a
 * reusable temporary tile buffer and then rotating with the 8x8
 * block rotator, writing to the correct destination:
 *
 *                                        +-----+-----+
 *                                        |     |     |
 *   +-----+---+        +---------+       | C   | A   |
 *   | A   | B |        | A,B, |  |       |     |     |
 *   +-----+---+  -->   | C,D  |  |  -->  |     |     |
 *   | C   | D |        +---------+       +-----+-----+
 *   +-----+---+                          |     |     |
 *                                        | D   | B   |
 *                                        |     |     |
 *                                        +-----+-----+
 *
 * If the 8x8 block rotator is used, horizontal or vertical flipping
 * is done during the rotation step, otherwise flipping is done
 * during the scaling step.
 * With rotation or flipping, tile order changes between input and
 * output image. Tiles are numbered row major from top left to bottom
 * right for both input and output image.
 */
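
/*
 * For example (illustrative numbers): a 1920x1080 output exceeds the
 * 1024-pixel limit in both dimensions, so it is split into 2 columns
 * and 2 rows, i.e. 4 tiles of roughly 960x540 each, while a
 * 4096-pixel-wide output needs the full 4 columns.
 */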

#define MAX_STRIPES_W	4
#define MAX_STRIPES_H	4
#define MAX_TILES (MAX_STRIPES_W * MAX_STRIPES_H)

#define MIN_W		16
#define MIN_H		8
#define MAX_W		4096
#define MAX_H		4096

enum ipu_image_convert_type {
	IMAGE_CONVERT_IN = 0,
	IMAGE_CONVERT_OUT,
};

struct ipu_image_convert_dma_buf {
	void		*virt;
	dma_addr_t	phys;
	unsigned long	len;
};

struct ipu_image_convert_dma_chan {
	int in;
	int out;
	int rot_in;
	int rot_out;
	int vdi_in_p;
	int vdi_in;
	int vdi_in_n;
};

/* dimensions of one tile */
struct ipu_image_tile {
	u32 width;
	u32 height;
	u32 left;
	u32 top;
	/* size and strides are in bytes */
	u32 size;
	u32 stride;
	u32 rot_stride;
	/* start Y or packed offset of this tile */
	u32 offset;
	/* offset from start to tile in U plane, for planar formats */
	u32 u_off;
	/* offset from start to tile in V plane, for planar formats */
	u32 v_off;
};

struct ipu_image_convert_image {
	struct ipu_image base;
	enum ipu_image_convert_type type;

	const struct ipu_image_pixfmt *fmt;
	unsigned int stride;

	/* # of rows (horizontal stripes) if dest height is > 1024 */
	unsigned int num_rows;
	/* # of columns (vertical stripes) if dest width is > 1024 */
	unsigned int num_cols;

	struct ipu_image_tile tile[MAX_TILES];
};

struct ipu_image_pixfmt {
	u32	fourcc;		/* V4L2 fourcc */
	int	bpp;		/* total bpp */
	int	uv_width_dec;	/* decimation in width for U/V planes */
	int	uv_height_dec;	/* decimation in height for U/V planes */
	bool	planar;		/* planar format */
	bool	uv_swapped;	/* U and V planes are swapped */
	bool	uv_packed;	/* partial planar (U and V in same plane) */
};

struct ipu_image_convert_ctx;
struct ipu_image_convert_chan;
struct ipu_image_convert_priv;

enum eof_irq_mask {
	EOF_IRQ_IN      = BIT(0),
	EOF_IRQ_ROT_IN  = BIT(1),
	EOF_IRQ_OUT     = BIT(2),
	EOF_IRQ_ROT_OUT = BIT(3),
};

#define EOF_IRQ_COMPLETE (EOF_IRQ_IN | EOF_IRQ_OUT)
#define EOF_IRQ_ROT_COMPLETE (EOF_IRQ_IN | EOF_IRQ_OUT |	\
			      EOF_IRQ_ROT_IN | EOF_IRQ_ROT_OUT)

struct ipu_image_convert_ctx {
	struct ipu_image_convert_chan *chan;

	ipu_image_convert_cb_t complete;
	void *complete_context;

	/* Source/destination image data and rotation mode */
	struct ipu_image_convert_image in;
	struct ipu_image_convert_image out;
	struct ipu_ic_csc csc;
	enum ipu_rotate_mode rot_mode;
	u32 downsize_coeff_h;
	u32 downsize_coeff_v;
	u32 image_resize_coeff_h;
	u32 image_resize_coeff_v;
	u32 resize_coeffs_h[MAX_STRIPES_W];
	u32 resize_coeffs_v[MAX_STRIPES_H];

	/* intermediate buffer for rotation */
	struct ipu_image_convert_dma_buf rot_intermediate[2];

	/* current buffer number for double buffering */
	int cur_buf_num;

	bool aborting;
	struct completion aborted;

	/* can we use double-buffering for this conversion operation? */
	bool double_buffering;
	/* num_rows * num_cols */
	unsigned int num_tiles;
	/* next tile to process */
	unsigned int next_tile;
	/* where to place converted tile in dest image */
	unsigned int out_tile_map[MAX_TILES];

	/* mask of completed EOF irqs at every tile conversion */
	enum eof_irq_mask eof_mask;

	struct list_head list;
};

struct ipu_image_convert_chan {
	struct ipu_image_convert_priv *priv;

	enum ipu_ic_task ic_task;
	const struct ipu_image_convert_dma_chan *dma_ch;

	struct ipu_ic *ic;
	struct ipuv3_channel *in_chan;
	struct ipuv3_channel *out_chan;
	struct ipuv3_channel *rotation_in_chan;
	struct ipuv3_channel *rotation_out_chan;

	/* the IPU end-of-frame irqs */
	int in_eof_irq;
	int rot_in_eof_irq;
	int out_eof_irq;
	int rot_out_eof_irq;

	spinlock_t irqlock;

	/* list of convert contexts */
	struct list_head ctx_list;
	/* queue of conversion runs */
	struct list_head pending_q;
	/* queue of completed runs */
	struct list_head done_q;

	/* the current conversion run */
	struct ipu_image_convert_run *current_run;
};

struct ipu_image_convert_priv {
	struct ipu_image_convert_chan chan[IC_NUM_TASKS];
	struct ipu_soc *ipu;
};

static const struct ipu_image_convert_dma_chan
image_convert_dma_chan[IC_NUM_TASKS] = {
	[IC_TASK_VIEWFINDER] = {
		.in = IPUV3_CHANNEL_MEM_IC_PRP_VF,
		.out = IPUV3_CHANNEL_IC_PRP_VF_MEM,
		.rot_in = IPUV3_CHANNEL_MEM_ROT_VF,
		.rot_out = IPUV3_CHANNEL_ROT_VF_MEM,
		.vdi_in_p = IPUV3_CHANNEL_MEM_VDI_PREV,
		.vdi_in = IPUV3_CHANNEL_MEM_VDI_CUR,
		.vdi_in_n = IPUV3_CHANNEL_MEM_VDI_NEXT,
	},
	[IC_TASK_POST_PROCESSOR] = {
		.in = IPUV3_CHANNEL_MEM_IC_PP,
		.out = IPUV3_CHANNEL_IC_PP_MEM,
		.rot_in = IPUV3_CHANNEL_MEM_ROT_PP,
		.rot_out = IPUV3_CHANNEL_ROT_PP_MEM,
	},
};

static const struct ipu_image_pixfmt image_convert_formats[] = {
	{
		.fourcc	= V4L2_PIX_FMT_RGB565,
		.bpp    = 16,
	}, {
		.fourcc	= V4L2_PIX_FMT_RGB24,
		.bpp    = 24,
	}, {
		.fourcc	= V4L2_PIX_FMT_BGR24,
		.bpp    = 24,
	}, {
		.fourcc	= V4L2_PIX_FMT_RGB32,
		.bpp    = 32,
	}, {
		.fourcc	= V4L2_PIX_FMT_BGR32,
		.bpp    = 32,
	}, {
		.fourcc	= V4L2_PIX_FMT_XRGB32,
		.bpp    = 32,
	}, {
		.fourcc	= V4L2_PIX_FMT_XBGR32,
		.bpp    = 32,
	}, {
		.fourcc	= V4L2_PIX_FMT_BGRX32,
		.bpp    = 32,
	}, {
		.fourcc	= V4L2_PIX_FMT_RGBX32,
		.bpp    = 32,
	}, {
		.fourcc	= V4L2_PIX_FMT_YUYV,
		.bpp    = 16,
		.uv_width_dec = 2,
		.uv_height_dec = 1,
	}, {
		.fourcc	= V4L2_PIX_FMT_UYVY,
		.bpp    = 16,
		.uv_width_dec = 2,
		.uv_height_dec = 1,
	}, {
		.fourcc	= V4L2_PIX_FMT_YUV420,
		.bpp    = 12,
		.planar = true,
		.uv_width_dec = 2,
		.uv_height_dec = 2,
	}, {
		.fourcc	= V4L2_PIX_FMT_YVU420,
		.bpp    = 12,
		.planar = true,
		.uv_width_dec = 2,
		.uv_height_dec = 2,
		.uv_swapped = true,
	}, {
		.fourcc	= V4L2_PIX_FMT_NV12,
		.bpp    = 12,
		.planar = true,
		.uv_width_dec = 2,
		.uv_height_dec = 2,
		.uv_packed = true,
	}, {
		.fourcc	= V4L2_PIX_FMT_YUV422P,
		.bpp    = 16,
		.planar = true,
		.uv_width_dec = 2,
		.uv_height_dec = 1,
	}, {
		.fourcc	= V4L2_PIX_FMT_NV16,
		.bpp    = 16,
		.planar = true,
		.uv_width_dec = 2,
		.uv_height_dec = 1,
		.uv_packed = true,
	},
};

static const struct ipu_image_pixfmt *get_format(u32 fourcc)
{
	const struct ipu_image_pixfmt *ret = NULL;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(image_convert_formats); i++) {
		if (image_convert_formats[i].fourcc == fourcc) {
			ret = &image_convert_formats[i];
			break;
		}
	}

	return ret;
}

static void dump_format(struct ipu_image_convert_ctx *ctx,
			struct ipu_image_convert_image *ic_image)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;

	dev_dbg(priv->ipu->dev,
		"task %u: ctx %p: %s format: %dx%d (%dx%d tiles), %c%c%c%c\n",
		chan->ic_task, ctx,
		ic_image->type == IMAGE_CONVERT_OUT ? "Output" : "Input",
		ic_image->base.pix.width, ic_image->base.pix.height,
		ic_image->num_cols, ic_image->num_rows,
		ic_image->fmt->fourcc & 0xff,
		(ic_image->fmt->fourcc >> 8) & 0xff,
		(ic_image->fmt->fourcc >> 16) & 0xff,
		(ic_image->fmt->fourcc >> 24) & 0xff);
}

int ipu_image_convert_enum_format(int index, u32 *fourcc)
{
	const struct ipu_image_pixfmt *fmt;

	if (index >= (int)ARRAY_SIZE(image_convert_formats))
		return -EINVAL;

	/* Format found */
	fmt = &image_convert_formats[index];
	*fourcc = fmt->fourcc;
	return 0;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_enum_format);
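
/*
 * Illustrative only (not part of the driver): a caller that wants to list
 * the supported fourccs can walk the table above until -EINVAL is returned:
 *
 *	u32 fourcc;
 *	int i = 0;
 *
 *	while (!ipu_image_convert_enum_format(i++, &fourcc))
 *		pr_info("supported format: %4.4s\n", (char *)&fourcc);
 */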

static void free_dma_buf(struct ipu_image_convert_priv *priv,
			 struct ipu_image_convert_dma_buf *buf)
{
	if (buf->virt)
		dma_free_coherent(priv->ipu->dev,
				  buf->len, buf->virt, buf->phys);
	buf->virt = NULL;
	buf->phys = 0;
}

static int alloc_dma_buf(struct ipu_image_convert_priv *priv,
			 struct ipu_image_convert_dma_buf *buf,
			 int size)
{
	buf->len = PAGE_ALIGN(size);
	buf->virt = dma_alloc_coherent(priv->ipu->dev, buf->len, &buf->phys,
				       GFP_DMA | GFP_KERNEL);
	if (!buf->virt) {
		dev_err(priv->ipu->dev, "failed to alloc dma buffer\n");
		return -ENOMEM;
	}

	return 0;
}

static inline int num_stripes(int dim)
{
	return (dim - 1) / 1024 + 1;
}
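
/*
 * For example: num_stripes(1024) = 1, num_stripes(1025) = 2,
 * num_stripes(2500) = 3 and num_stripes(4096) = 4 (the maximum).
 */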

/*
 * Calculate downsizing coefficients, which are the same for all tiles,
 * and initial bilinear resizing coefficients, which are used to find the
 * best seam positions.
 * Also determine the number of tiles necessary to guarantee that no tile
 * is larger than 1024 pixels in either dimension at the output and between
 * IC downsizing and main processing sections.
 */
static int calc_image_resize_coefficients(struct ipu_image_convert_ctx *ctx,
					  struct ipu_image *in,
					  struct ipu_image *out)
{
	u32 downsized_width = in->rect.width;
	u32 downsized_height = in->rect.height;
	u32 downsize_coeff_v = 0;
	u32 downsize_coeff_h = 0;
	u32 resized_width = out->rect.width;
	u32 resized_height = out->rect.height;
	u32 resize_coeff_h;
	u32 resize_coeff_v;
	u32 cols;
	u32 rows;

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		resized_width = out->rect.height;
		resized_height = out->rect.width;
	}

	/* Do not let invalid input lead to an endless loop below */
	if (WARN_ON(resized_width == 0 || resized_height == 0))
		return -EINVAL;

	while (downsized_width >= resized_width * 2) {
		downsized_width >>= 1;
		downsize_coeff_h++;
	}

	while (downsized_height >= resized_height * 2) {
		downsized_height >>= 1;
		downsize_coeff_v++;
	}

	/*
	 * Calculate the bilinear resizing coefficients that could be used if
	 * we were converting with a single tile. The bottom right output
	 * pixel should sample as close as possible to the bottom right input
	 * pixel out of the decimator, but not overshoot it:
	 */
	resize_coeff_h = 8192 * (downsized_width - 1) / (resized_width - 1);
	resize_coeff_v = 8192 * (downsized_height - 1) / (resized_height - 1);

	/*
	 * Both the output of the IC downsizing section before being passed to
	 * the IC main processing section and the final output of the IC main
	 * processing section must be <= 1024 pixels in both dimensions.
	 */
	cols = num_stripes(max_t(u32, downsized_width, resized_width));
	rows = num_stripes(max_t(u32, downsized_height, resized_height));

	dev_dbg(ctx->chan->priv->ipu->dev,
		"%s: hscale: >>%u, *8192/%u vscale: >>%u, *8192/%u, %ux%u tiles\n",
		__func__, downsize_coeff_h, resize_coeff_h, downsize_coeff_v,
		resize_coeff_v, cols, rows);

	if (downsize_coeff_h > 2 || downsize_coeff_v > 2 ||
	    resize_coeff_h > 0x3fff || resize_coeff_v > 0x3fff)
		return -EINVAL;

	ctx->downsize_coeff_h = downsize_coeff_h;
	ctx->downsize_coeff_v = downsize_coeff_v;
	ctx->image_resize_coeff_h = resize_coeff_h;
	ctx->image_resize_coeff_v = resize_coeff_v;
	ctx->in.num_cols = cols;
	ctx->in.num_rows = rows;

	return 0;
}
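
/*
 * Worked example (illustrative): scaling an 800 pixel wide input to 400
 * pixels, 800 >= 2 * 400, so the downsizer halves the line once
 * (downsize_coeff_h = 1, downsized_width = 400) and the main processing
 * section then runs 1:1 with resize_coeff_h = 8192 * 399 / 399 = 8192.
 */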

#define round_closest(x, y) round_down((x) + (y)/2, (y))

/*
 * Find the best aligned seam position for the given column / row index.
 * Rotation and image offsets are out of scope.
 *
 * @index: column / row index, used to calculate valid interval
 * @in_edge: input right / bottom edge
 * @out_edge: output right / bottom edge
 * @in_align: input alignment, either horizontal 8-byte line start address
 *            alignment, or pixel alignment due to image format
 * @out_align: output alignment, either horizontal 8-byte line start address
 *             alignment, or pixel alignment due to image format or rotator
 *             block size
 * @in_burst: horizontal input burst size in case of horizontal flip
 * @out_burst: horizontal output burst size or rotator block size
 * @downsize_coeff: downsizing section coefficient
 * @resize_coeff: main processing section resizing coefficient
 * @_in_seam: aligned input seam position return value
 * @_out_seam: aligned output seam position return value
 */
static void find_best_seam(struct ipu_image_convert_ctx *ctx,
			   unsigned int index,
			   unsigned int in_edge,
			   unsigned int out_edge,
			   unsigned int in_align,
			   unsigned int out_align,
			   unsigned int in_burst,
			   unsigned int out_burst,
			   unsigned int downsize_coeff,
			   unsigned int resize_coeff,
			   u32 *_in_seam,
			   u32 *_out_seam)
{
	struct device *dev = ctx->chan->priv->ipu->dev;
	unsigned int out_pos;
	/* Input / output seam position candidates */
	unsigned int out_seam = 0;
	unsigned int in_seam = 0;
	unsigned int min_diff = UINT_MAX;
	unsigned int out_start;
	unsigned int out_end;
	unsigned int in_start;
	unsigned int in_end;

	/* Start within 1024 pixels of the right / bottom edge */
	out_start = max_t(int, index * out_align, out_edge - 1024);
	/* End before having to add more columns to the left / rows above */
	out_end = min_t(unsigned int, out_edge, index * 1024 + 1);

	/*
	 * Limit input seam position to make sure that the downsized input
	 * tile to the right or bottom does not exceed 1024 pixels.
	 */
	in_start = max_t(int, index * in_align,
			 in_edge - (1024 << downsize_coeff));
	in_end = min_t(unsigned int, in_edge,
		       index * (1024 << downsize_coeff) + 1);

	/*
	 * Output tiles must start at a multiple of 8 bytes horizontally and
	 * possibly at an even line vertically depending on the pixel format.
	 * Only consider output aligned positions for the seam.
	 */
	out_start = round_up(out_start, out_align);
	for (out_pos = out_start; out_pos < out_end; out_pos += out_align) {
		unsigned int in_pos;
		unsigned int in_pos_aligned;
		unsigned int in_pos_rounded;
		unsigned int abs_diff;

		/*
		 * Tiles in the right column / bottom row may not be allowed
		 * to overshoot horizontally / vertically. out_burst may be
		 * the actual DMA burst size, or the rotator block size.
		 */
		if ((out_burst > 1) && (out_edge - out_pos) % out_burst)
			continue;

		/*
		 * Input sample position, corresponding to out_pos, 19.13
		 * fixed point.
		 */
		in_pos = (out_pos * resize_coeff) << downsize_coeff;
		/*
		 * The closest input sample position that we could actually
		 * start the input tile at, 19.13 fixed point.
		 */
		in_pos_aligned = round_closest(in_pos, 8192U * in_align);
		/* Convert 19.13 fixed point to integer */
		in_pos_rounded = in_pos_aligned / 8192U;

		if (in_pos_rounded < in_start)
			continue;
		if (in_pos_rounded >= in_end)
			break;

		if ((in_burst > 1) &&
		    (in_edge - in_pos_rounded) % in_burst)
			continue;

		if (in_pos < in_pos_aligned)
			abs_diff = in_pos_aligned - in_pos;
		else
			abs_diff = in_pos - in_pos_aligned;

		if (abs_diff < min_diff) {
			in_seam = in_pos_rounded;
			out_seam = out_pos;
			min_diff = abs_diff;
		}
	}

	*_out_seam = out_seam;
	*_in_seam = in_seam;

	dev_dbg(dev, "%s: out_seam %u(%u) in [%u, %u], in_seam %u(%u) in [%u, %u] diff %u.%03u\n",
		__func__, out_seam, out_align, out_start, out_end,
		in_seam, in_align, in_start, in_end, min_diff / 8192,
		DIV_ROUND_CLOSEST(min_diff % 8192 * 1000, 8192));
}
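
/*
 * For illustration: in_pos is in 19.13 fixed point, so 8192 units equal
 * one input pixel. With resize_coeff = 4096 (a 2:1 upscale, no downsizing)
 * an output seam candidate at out_pos = 100 maps to
 * in_pos = 100 * 4096 = 409600, i.e. input pixel 50, which is then rounded
 * to the nearest in_align multiple before the fractional error is compared.
 */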

/*
 * Tile left edges are required to be aligned to multiples of 8 bytes
 * by the IDMAC.
 */
static inline u32 tile_left_align(const struct ipu_image_pixfmt *fmt)
{
	if (fmt->planar)
		return fmt->uv_packed ? 8 : 8 * fmt->uv_width_dec;
	else
		return fmt->bpp == 32 ? 2 : fmt->bpp == 16 ? 4 : 8;
}

/*
 * Tile top edge alignment is only limited by chroma subsampling.
 */
static inline u32 tile_top_align(const struct ipu_image_pixfmt *fmt)
{
	return fmt->uv_height_dec > 1 ? 2 : 1;
}
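
/*
 * Some resulting alignments, for illustration: 32-bit RGB needs left edges
 * at multiples of 2 pixels (8 bytes), RGB565/YUYV at multiples of 4 pixels,
 * NV12 at multiples of 8 and YUV420 at multiples of 16 pixels; top edges
 * need to be even only for 4:2:0 subsampled formats.
 */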

static inline u32 tile_width_align(enum ipu_image_convert_type type,
				   const struct ipu_image_pixfmt *fmt,
				   enum ipu_rotate_mode rot_mode)
{
	if (type == IMAGE_CONVERT_IN) {
		/*
		 * The IC burst reads 8 pixels at a time. Reading beyond the
		 * end of the line is usually acceptable. Those pixels are
		 * ignored, unless the IC has to write the scaled line in
		 * reverse.
		 */
		return (!ipu_rot_mode_is_irt(rot_mode) &&
			(rot_mode & IPU_ROT_BIT_HFLIP)) ? 8 : 2;
	}

	/*
	 * Align to 16x16 pixel blocks for planar 4:2:0 chroma subsampled
	 * formats to guarantee 8-byte aligned line start addresses in the
	 * chroma planes when IRT is used. Align to 8x8 pixel IRT block size
	 * for all other formats.
	 */
	return (ipu_rot_mode_is_irt(rot_mode) &&
		fmt->planar && !fmt->uv_packed) ?
		8 * fmt->uv_width_dec : 8;
}

static inline u32 tile_height_align(enum ipu_image_convert_type type,
				    const struct ipu_image_pixfmt *fmt,
				    enum ipu_rotate_mode rot_mode)
{
	if (type == IMAGE_CONVERT_IN || !ipu_rot_mode_is_irt(rot_mode))
		return 2;

	/*
	 * Align to 16x16 pixel blocks for planar 4:2:0 chroma subsampled
	 * formats to guarantee 8-byte aligned line start addresses in the
	 * chroma planes when IRT is used. Align to 8x8 pixel IRT block size
	 * for all other formats.
	 */
	return (fmt->planar && !fmt->uv_packed) ? 8 * fmt->uv_width_dec : 8;
}

/*
 * Fill in left position and width for all tiles in an input column, and
 * for all corresponding output tiles. If the 90° rotator is used, the
 * output tiles are in a row, and output tile top position and height are
 * set.
 */
static void fill_tile_column(struct ipu_image_convert_ctx *ctx,
			     unsigned int col,
			     struct ipu_image_convert_image *in,
			     unsigned int in_left, unsigned int in_width,
			     struct ipu_image_convert_image *out,
			     unsigned int out_left, unsigned int out_width)
{
	unsigned int row, tile_idx;
	struct ipu_image_tile *in_tile, *out_tile;

	for (row = 0; row < in->num_rows; row++) {
		tile_idx = in->num_cols * row + col;
		in_tile = &in->tile[tile_idx];
		out_tile = &out->tile[ctx->out_tile_map[tile_idx]];

		in_tile->left = in_left;
		in_tile->width = in_width;

		if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
			out_tile->top = out_left;
			out_tile->height = out_width;
		} else {
			out_tile->left = out_left;
			out_tile->width = out_width;
		}
	}
}

/*
 * Fill in top position and height for all tiles in an input row, and
 * for all corresponding output tiles. If the 90° rotator is used, the
 * output tiles are in a column, and output tile left position and width
 * are set.
 */
static void fill_tile_row(struct ipu_image_convert_ctx *ctx, unsigned int row,
			  struct ipu_image_convert_image *in,
			  unsigned int in_top, unsigned int in_height,
			  struct ipu_image_convert_image *out,
			  unsigned int out_top, unsigned int out_height)
{
	unsigned int col, tile_idx;
	struct ipu_image_tile *in_tile, *out_tile;

	for (col = 0; col < in->num_cols; col++) {
		tile_idx = in->num_cols * row + col;
		in_tile = &in->tile[tile_idx];
		out_tile = &out->tile[ctx->out_tile_map[tile_idx]];

		in_tile->top = in_top;
		in_tile->height = in_height;

		if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
			out_tile->left = out_top;
			out_tile->width = out_height;
		} else {
			out_tile->top = out_top;
			out_tile->height = out_height;
		}
	}
}

/*
 * Find the best horizontal and vertical seam positions to split into tiles.
 * Minimize the fractional part of the input sampling position for the
 * top / left pixels of each tile.
 */
static void find_seams(struct ipu_image_convert_ctx *ctx,
		       struct ipu_image_convert_image *in,
		       struct ipu_image_convert_image *out)
{
	struct device *dev = ctx->chan->priv->ipu->dev;
	unsigned int resized_width = out->base.rect.width;
	unsigned int resized_height = out->base.rect.height;
	unsigned int col;
	unsigned int row;
	unsigned int in_left_align = tile_left_align(in->fmt);
	unsigned int in_top_align = tile_top_align(in->fmt);
	unsigned int out_left_align = tile_left_align(out->fmt);
	unsigned int out_top_align = tile_top_align(out->fmt);
	unsigned int out_width_align = tile_width_align(out->type, out->fmt,
							ctx->rot_mode);
	unsigned int out_height_align = tile_height_align(out->type, out->fmt,
							   ctx->rot_mode);
	unsigned int in_right = in->base.rect.width;
	unsigned int in_bottom = in->base.rect.height;
	unsigned int out_right = out->base.rect.width;
	unsigned int out_bottom = out->base.rect.height;
	unsigned int flipped_out_left;
	unsigned int flipped_out_top;

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		/* Switch width/height and align top left to IRT block size */
		resized_width = out->base.rect.height;
		resized_height = out->base.rect.width;
		out_left_align = out_height_align;
		out_top_align = out_width_align;
		out_width_align = out_left_align;
		out_height_align = out_top_align;
		out_right = out->base.rect.height;
		out_bottom = out->base.rect.width;
	}

	for (col = in->num_cols - 1; col > 0; col--) {
		bool allow_in_overshoot = ipu_rot_mode_is_irt(ctx->rot_mode) ||
					  !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
		bool allow_out_overshoot = (col < in->num_cols - 1) &&
					   !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
		unsigned int in_left;
		unsigned int out_left;

		/*
		 * Align input width to burst length if the scaling step flips
		 * horizontally.
		 */

		find_best_seam(ctx, col,
			       in_right, out_right,
			       in_left_align, out_left_align,
			       allow_in_overshoot ? 1 : 8 /* burst length */,
			       allow_out_overshoot ? 1 : out_width_align,
			       ctx->downsize_coeff_h, ctx->image_resize_coeff_h,
			       &in_left, &out_left);

		if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
			flipped_out_left = resized_width - out_right;
		else
			flipped_out_left = out_left;

		fill_tile_column(ctx, col, in, in_left, in_right - in_left,
				 out, flipped_out_left, out_right - out_left);

		dev_dbg(dev, "%s: col %u: %u, %u -> %u, %u\n", __func__, col,
			in_left, in_right - in_left,
			flipped_out_left, out_right - out_left);

		in_right = in_left;
		out_right = out_left;
	}

	flipped_out_left = (ctx->rot_mode & IPU_ROT_BIT_HFLIP) ?
			   resized_width - out_right : 0;

	fill_tile_column(ctx, 0, in, 0, in_right,
			 out, flipped_out_left, out_right);

	dev_dbg(dev, "%s: col 0: 0, %u -> %u, %u\n", __func__,
		in_right, flipped_out_left, out_right);

	for (row = in->num_rows - 1; row > 0; row--) {
		bool allow_overshoot = row < in->num_rows - 1;
		unsigned int in_top;
		unsigned int out_top;

		find_best_seam(ctx, row,
			       in_bottom, out_bottom,
			       in_top_align, out_top_align,
			       1, allow_overshoot ? 1 : out_height_align,
			       ctx->downsize_coeff_v, ctx->image_resize_coeff_v,
			       &in_top, &out_top);

		if ((ctx->rot_mode & IPU_ROT_BIT_VFLIP) ^
		    ipu_rot_mode_is_irt(ctx->rot_mode))
			flipped_out_top = resized_height - out_bottom;
		else
			flipped_out_top = out_top;

		fill_tile_row(ctx, row, in, in_top, in_bottom - in_top,
			      out, flipped_out_top, out_bottom - out_top);

		dev_dbg(dev, "%s: row %u: %u, %u -> %u, %u\n", __func__, row,
			in_top, in_bottom - in_top,
			flipped_out_top, out_bottom - out_top);

		in_bottom = in_top;
		out_bottom = out_top;
	}

	if ((ctx->rot_mode & IPU_ROT_BIT_VFLIP) ^
	    ipu_rot_mode_is_irt(ctx->rot_mode))
		flipped_out_top = resized_height - out_bottom;
	else
		flipped_out_top = 0;

	fill_tile_row(ctx, 0, in, 0, in_bottom,
		      out, flipped_out_top, out_bottom);

	dev_dbg(dev, "%s: row 0: 0, %u -> %u, %u\n", __func__,
		in_bottom, flipped_out_top, out_bottom);
}

static int calc_tile_dimensions(struct ipu_image_convert_ctx *ctx,
				struct ipu_image_convert_image *image)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	unsigned int max_width = 1024;
	unsigned int max_height = 1024;
	unsigned int i;

	if (image->type == IMAGE_CONVERT_IN) {
		/* Up to 4096x4096 input tile size */
		max_width <<= ctx->downsize_coeff_h;
		max_height <<= ctx->downsize_coeff_v;
	}

	for (i = 0; i < ctx->num_tiles; i++) {
		struct ipu_image_tile *tile;
		const unsigned int row = i / image->num_cols;
		const unsigned int col = i % image->num_cols;

		if (image->type == IMAGE_CONVERT_OUT)
			tile = &image->tile[ctx->out_tile_map[i]];
		else
			tile = &image->tile[i];

		tile->size = ((tile->height * image->fmt->bpp) >> 3) *
			tile->width;

		if (image->fmt->planar) {
			tile->stride = tile->width;
			tile->rot_stride = tile->height;
		} else {
			tile->stride =
				(image->fmt->bpp * tile->width) >> 3;
			tile->rot_stride =
				(image->fmt->bpp * tile->height) >> 3;
		}

		dev_dbg(priv->ipu->dev,
			"task %u: ctx %p: %s@[%u,%u]: %ux%u@%u,%u\n",
			chan->ic_task, ctx,
			image->type == IMAGE_CONVERT_IN ? "Input" : "Output",
			row, col,
			tile->width, tile->height, tile->left, tile->top);

		if (!tile->width || tile->width > max_width ||
		    !tile->height || tile->height > max_height) {
			dev_err(priv->ipu->dev, "invalid %s tile size: %ux%u\n",
				image->type == IMAGE_CONVERT_IN ? "input" :
				"output", tile->width, tile->height);
			return -EINVAL;
		}
	}

	return 0;
}
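
/*
 * For example (illustrative): a 960x540 RGB565 tile ends up with
 * size = 540 * 2 * 960 = 1036800 bytes, stride = 1920 bytes and
 * rot_stride = 1080 bytes, while for planar formats the stride equals
 * the tile width since the Y plane uses one byte per pixel.
 */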

/*
 * Use the rotation transformation to find the tile coordinates
 * (row, col) of a tile in the destination frame that corresponds
 * to the given tile coordinates of a source frame. The destination
 * coordinate is then converted to a tile index.
 */
static int transform_tile_index(struct ipu_image_convert_ctx *ctx,
				int src_row, int src_col)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_image *s_image = &ctx->in;
	struct ipu_image_convert_image *d_image = &ctx->out;
	int dst_row, dst_col;

	/* with no rotation it's a 1:1 mapping */
	if (ctx->rot_mode == IPU_ROTATE_NONE)
		return src_row * s_image->num_cols + src_col;

	/*
	 * before doing the transform, first we have to translate
	 * source row,col for an origin in the center of s_image
	 */
	src_row = src_row * 2 - (s_image->num_rows - 1);
	src_col = src_col * 2 - (s_image->num_cols - 1);

	/* do the rotation transform */
	if (ctx->rot_mode & IPU_ROT_BIT_90) {
		dst_col = -src_row;
		dst_row = src_col;
	} else {
		dst_col = src_col;
		dst_row = src_row;
	}

	/* apply flip */
	if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
		dst_col = -dst_col;
	if (ctx->rot_mode & IPU_ROT_BIT_VFLIP)
		dst_row = -dst_row;

	dev_dbg(priv->ipu->dev, "task %u: ctx %p: [%d,%d] --> [%d,%d]\n",
		chan->ic_task, ctx, src_col, src_row, dst_col, dst_row);

	/*
	 * finally translate dest row,col using an origin in upper
	 * left of d_image
	 */
	dst_row += d_image->num_rows - 1;
	dst_col += d_image->num_cols - 1;
	dst_row /= 2;
	dst_col /= 2;

	return dst_row * d_image->num_cols + dst_col;
}

/*
 * Fill the out_tile_map[] with transformed destination tile indices.
 */
static void calc_out_tile_map(struct ipu_image_convert_ctx *ctx)
{
	struct ipu_image_convert_image *s_image = &ctx->in;
	unsigned int row, col, tile = 0;

	for (row = 0; row < s_image->num_rows; row++) {
		for (col = 0; col < s_image->num_cols; col++) {
			ctx->out_tile_map[tile] =
				transform_tile_index(ctx, row, col);
			tile++;
		}
	}
}
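
/*
 * For the clockwise 90° example in the header comment (rot_mode with only
 * IPU_ROT_BIT_90 set) and a 2x2 grid, this yields
 * out_tile_map = { 1, 3, 0, 2 }: input tile A lands in output position 1,
 * B in 3, C in 0 and D in 2.
 */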

static int calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx,
				    struct ipu_image_convert_image *image)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	const struct ipu_image_pixfmt *fmt = image->fmt;
	unsigned int row, col, tile = 0;
	u32 H, top, y_stride, uv_stride;
	u32 uv_row_off, uv_col_off, uv_off, u_off, v_off, tmp;
	u32 y_row_off, y_col_off, y_off;
	u32 y_size, uv_size;

	/* setup some convenience vars */
	H = image->base.pix.height;

	y_stride = image->stride;
	uv_stride = y_stride / fmt->uv_width_dec;
	if (fmt->uv_packed)
		uv_stride *= 2;

	y_size = H * y_stride;
	uv_size = y_size / (fmt->uv_width_dec * fmt->uv_height_dec);

	for (row = 0; row < image->num_rows; row++) {
		top = image->tile[tile].top;
		y_row_off = top * y_stride;
		uv_row_off = (top * uv_stride) / fmt->uv_height_dec;

		for (col = 0; col < image->num_cols; col++) {
			y_col_off = image->tile[tile].left;
			uv_col_off = y_col_off / fmt->uv_width_dec;
			if (fmt->uv_packed)
				uv_col_off *= 2;

			y_off = y_row_off + y_col_off;
			uv_off = uv_row_off + uv_col_off;

			u_off = y_size - y_off + uv_off;
			v_off = (fmt->uv_packed) ? 0 : u_off + uv_size;
			if (fmt->uv_swapped) {
				tmp = u_off;
				u_off = v_off;
				v_off = tmp;
			}

			image->tile[tile].offset = y_off;
			image->tile[tile].u_off = u_off;
			image->tile[tile++].v_off = v_off;

			if ((y_off & 0x7) || (u_off & 0x7) || (v_off & 0x7)) {
				dev_err(priv->ipu->dev,
					"task %u: ctx %p: %s@[%d,%d]: "
					"y_off %08x, u_off %08x, v_off %08x\n",
					chan->ic_task, ctx,
					image->type == IMAGE_CONVERT_IN ?
					"Input" : "Output", row, col,
					y_off, u_off, v_off);
				return -EINVAL;
			}
		}
	}

	return 0;
}

static int calc_tile_offsets_packed(struct ipu_image_convert_ctx *ctx,
				    struct ipu_image_convert_image *image)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	const struct ipu_image_pixfmt *fmt = image->fmt;
	unsigned int row, col, tile = 0;
	u32 bpp, stride, offset;
	u32 row_off, col_off;

	/* setup some convenience vars */
	stride = image->stride;
	bpp = fmt->bpp;

	for (row = 0; row < image->num_rows; row++) {
		row_off = image->tile[tile].top * stride;

		for (col = 0; col < image->num_cols; col++) {
			col_off = (image->tile[tile].left * bpp) >> 3;

			offset = row_off + col_off;

			image->tile[tile].offset = offset;
			image->tile[tile].u_off = 0;
			image->tile[tile++].v_off = 0;

			if (offset & 0x7) {
				dev_err(priv->ipu->dev,
					"task %u: ctx %p: %s@[%d,%d]: "
					"phys %08x\n",
					chan->ic_task, ctx,
					image->type == IMAGE_CONVERT_IN ?
					"Input" : "Output", row, col,
					row_off + col_off);
				return -EINVAL;
			}
		}
	}

	return 0;
}

static int calc_tile_offsets(struct ipu_image_convert_ctx *ctx,
			     struct ipu_image_convert_image *image)
{
	if (image->fmt->planar)
		return calc_tile_offsets_planar(ctx, image);

	return calc_tile_offsets_packed(ctx, image);
}

/*
 * Calculate the resizing ratio for the IC main processing section given input
 * size, fixed downsizing coefficient, and output size.
 * Either round to closest for the next tile's first pixel to minimize seams
 * and distortion (for all but right column / bottom row), or round down to
 * avoid sampling beyond the edges of the input image for this tile's last
 * pixel.
 * Returns the resizing coefficient, resizing ratio is 8192.0 / resize_coeff.
 */
static u32 calc_resize_coeff(u32 input_size, u32 downsize_coeff,
			     u32 output_size, bool allow_overshoot)
{
	u32 downsized = input_size >> downsize_coeff;

	if (allow_overshoot)
		return DIV_ROUND_CLOSEST(8192 * downsized, output_size);
	else
		return 8192 * (downsized - 1) / (output_size - 1);
}
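
/*
 * Worked example (illustrative): for a 900 pixel wide downsized tile
 * resized to 600 pixels, allow_overshoot gives
 * DIV_ROUND_CLOSEST(8192 * 900, 600) = 12288, while the round-down
 * variant gives 8192 * 899 / 599 = 12294, keeping the last output pixel
 * just inside the input tile.
 */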

/*
 * Slightly modify resize coefficients per tile to hide the bilinear
 * interpolator reset at tile borders, shifting the right / bottom edge
 * by up to a half input pixel. This removes noticeable seams between
 * tiles at higher upscaling factors.
 */
static void calc_tile_resize_coefficients(struct ipu_image_convert_ctx *ctx)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_tile *in_tile, *out_tile;
	unsigned int col, row, tile_idx;
	unsigned int last_output;

	for (col = 0; col < ctx->in.num_cols; col++) {
		bool closest = (col < ctx->in.num_cols - 1) &&
			       !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
		u32 resized_width;
		u32 resize_coeff_h;
		u32 in_width;

		tile_idx = col;
		in_tile = &ctx->in.tile[tile_idx];
		out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];

		if (ipu_rot_mode_is_irt(ctx->rot_mode))
			resized_width = out_tile->height;
		else
			resized_width = out_tile->width;

		resize_coeff_h = calc_resize_coeff(in_tile->width,
						   ctx->downsize_coeff_h,
						   resized_width, closest);

		dev_dbg(priv->ipu->dev, "%s: column %u hscale: *8192/%u\n",
			__func__, col, resize_coeff_h);

		/*
		 * With the horizontal scaling factor known, round up resized
		 * width (output width or height) to burst size.
		 */
		resized_width = round_up(resized_width, 8);

		/*
		 * Calculate input width from the last accessed input pixel
		 * given resized width and scaling coefficients. Round up to
		 * burst size.
		 */
		last_output = resized_width - 1;
		if (closest && ((last_output * resize_coeff_h) % 8192))
			last_output++;
		in_width = round_up(
			(DIV_ROUND_UP(last_output * resize_coeff_h, 8192) + 1)
			<< ctx->downsize_coeff_h, 8);

		for (row = 0; row < ctx->in.num_rows; row++) {
			tile_idx = row * ctx->in.num_cols + col;
			in_tile = &ctx->in.tile[tile_idx];
			out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];

			if (ipu_rot_mode_is_irt(ctx->rot_mode))
				out_tile->height = resized_width;
			else
				out_tile->width = resized_width;

			in_tile->width = in_width;
		}

		ctx->resize_coeffs_h[col] = resize_coeff_h;
	}

	for (row = 0; row < ctx->in.num_rows; row++) {
		bool closest = (row < ctx->in.num_rows - 1) &&
			       !(ctx->rot_mode & IPU_ROT_BIT_VFLIP);
		u32 resized_height;
		u32 resize_coeff_v;
		u32 in_height;

		tile_idx = row * ctx->in.num_cols;
		in_tile = &ctx->in.tile[tile_idx];
		out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];

		if (ipu_rot_mode_is_irt(ctx->rot_mode))
			resized_height = out_tile->width;
		else
			resized_height = out_tile->height;

		resize_coeff_v = calc_resize_coeff(in_tile->height,
						   ctx->downsize_coeff_v,
						   resized_height, closest);

		dev_dbg(priv->ipu->dev, "%s: row %u vscale: *8192/%u\n",
			__func__, row, resize_coeff_v);

		/*
		 * With the vertical scaling factor known, round up resized
		 * height (output width or height) to IDMAC limitations.
		 */
		resized_height = round_up(resized_height, 2);

		/*
		 * Calculate input height from the last accessed input pixel
		 * given resized height and scaling coefficients. Align to
		 * IDMAC restrictions.
		 */
		last_output = resized_height - 1;
		if (closest && ((last_output * resize_coeff_v) % 8192))
			last_output++;
		in_height = round_up(
			(DIV_ROUND_UP(last_output * resize_coeff_v, 8192) + 1)
			<< ctx->downsize_coeff_v, 2);

		for (col = 0; col < ctx->in.num_cols; col++) {
			tile_idx = row * ctx->in.num_cols + col;
			in_tile = &ctx->in.tile[tile_idx];
			out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];

			if (ipu_rot_mode_is_irt(ctx->rot_mode))
				out_tile->width = resized_height;
			else
				out_tile->height = resized_height;

			in_tile->height = in_height;
		}

		ctx->resize_coeffs_v[row] = resize_coeff_v;
	}
}

/*
 * return the number of runs in given queue (pending_q or done_q)
 * for this context. hold irqlock when calling.
 */
static int get_run_count(struct ipu_image_convert_ctx *ctx,
			 struct list_head *q)
{
	struct ipu_image_convert_run *run;
	int count = 0;

	lockdep_assert_held(&ctx->chan->irqlock);

	list_for_each_entry(run, q, list) {
		if (run->ctx == ctx)
			count++;
	}

	return count;
}

static void convert_stop(struct ipu_image_convert_run *run)
{
	struct ipu_image_convert_ctx *ctx = run->ctx;
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;

	dev_dbg(priv->ipu->dev, "%s: task %u: stopping ctx %p run %p\n",
		__func__, chan->ic_task, ctx, run);

	/* disable IC tasks and the channels */
	ipu_ic_task_disable(chan->ic);
	ipu_idmac_disable_channel(chan->in_chan);
	ipu_idmac_disable_channel(chan->out_chan);

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		ipu_idmac_disable_channel(chan->rotation_in_chan);
		ipu_idmac_disable_channel(chan->rotation_out_chan);
		ipu_idmac_unlink(chan->out_chan, chan->rotation_in_chan);
	}

	ipu_ic_disable(chan->ic);
}

static void init_idmac_channel(struct ipu_image_convert_ctx *ctx,
			       struct ipuv3_channel *channel,
			       struct ipu_image_convert_image *image,
			       enum ipu_rotate_mode rot_mode,
			       bool rot_swap_width_height,
			       unsigned int tile)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	unsigned int burst_size;
	u32 width, height, stride;
	dma_addr_t addr0, addr1 = 0;
	struct ipu_image tile_image;
	unsigned int tile_idx[2];

	if (image->type == IMAGE_CONVERT_OUT) {
		tile_idx[0] = ctx->out_tile_map[tile];
		tile_idx[1] = ctx->out_tile_map[1];
	} else {
		tile_idx[0] = tile;
		tile_idx[1] = 1;
	}

	if (rot_swap_width_height) {
		width = image->tile[tile_idx[0]].height;
		height = image->tile[tile_idx[0]].width;
		stride = image->tile[tile_idx[0]].rot_stride;
		addr0 = ctx->rot_intermediate[0].phys;
		if (ctx->double_buffering)
			addr1 = ctx->rot_intermediate[1].phys;
	} else {
		width = image->tile[tile_idx[0]].width;
		height = image->tile[tile_idx[0]].height;
		stride = image->stride;
		addr0 = image->base.phys0 +
			image->tile[tile_idx[0]].offset;
		if (ctx->double_buffering)
			addr1 = image->base.phys0 +
				image->tile[tile_idx[1]].offset;
	}

	ipu_cpmem_zero(channel);

	memset(&tile_image, 0, sizeof(tile_image));
	tile_image.pix.width = tile_image.rect.width = width;
	tile_image.pix.height = tile_image.rect.height = height;
	tile_image.pix.bytesperline = stride;
	tile_image.pix.pixelformat = image->fmt->fourcc;
	tile_image.phys0 = addr0;
	tile_image.phys1 = addr1;
	if (image->fmt->planar && !rot_swap_width_height) {
		tile_image.u_offset = image->tile[tile_idx[0]].u_off;
		tile_image.v_offset = image->tile[tile_idx[0]].v_off;
	}

	ipu_cpmem_set_image(channel, &tile_image);

	if (rot_mode)
		ipu_cpmem_set_rotation(channel, rot_mode);

	/*
	 * Skip writing U and V components to odd rows in the output
	 * channels for planar 4:2:0.
	 */
	if ((channel == chan->out_chan ||
	     channel == chan->rotation_out_chan) &&
	    image->fmt->planar && image->fmt->uv_height_dec == 2)
		ipu_cpmem_skip_odd_chroma_rows(channel);

	if (channel == chan->rotation_in_chan ||
	    channel == chan->rotation_out_chan) {
		burst_size = 8;
		ipu_cpmem_set_block_mode(channel);
	} else
		burst_size = (width % 16) ? 8 : 16;

	ipu_cpmem_set_burstsize(channel, burst_size);

	ipu_ic_task_idma_init(chan->ic, channel, width, height,
			      burst_size, rot_mode);

	/*
	 * Setting a non-zero AXI ID collides with the PRG AXI snooping, so
	 * only do this when there is no PRG present.
	 */
	if (!channel->ipu->prg_priv)
		ipu_cpmem_set_axi_id(channel, 1);

	ipu_idmac_set_double_buffer(channel, ctx->double_buffering);
}
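
/*
 * For reference, convert_start() below packs the per-tile scaling factors
 * into the single rsc word as
 * (downsize_v << 30) | (resize_v << 16) | (downsize_h << 14) | resize_h;
 * e.g. halving both dimensions in the downsizer with 1:1 main-section
 * coefficients (8192) gives rsc = 0x60006000 (illustrative values).
 */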

static int convert_start(struct ipu_image_convert_run *run, unsigned int tile)
{
	struct ipu_image_convert_ctx *ctx = run->ctx;
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_image *s_image = &ctx->in;
	struct ipu_image_convert_image *d_image = &ctx->out;
	unsigned int dst_tile = ctx->out_tile_map[tile];
	unsigned int dest_width, dest_height;
	unsigned int col, row;
	u32 rsc;
	int ret;

	dev_dbg(priv->ipu->dev, "%s: task %u: starting ctx %p run %p tile %u -> %u\n",
		__func__, chan->ic_task, ctx, run, tile, dst_tile);

	/* clear EOF irq mask */
	ctx->eof_mask = 0;

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		/* swap width/height for resizer */
		dest_width = d_image->tile[dst_tile].height;
		dest_height = d_image->tile[dst_tile].width;
	} else {
		dest_width = d_image->tile[dst_tile].width;
		dest_height = d_image->tile[dst_tile].height;
	}

	row = tile / s_image->num_cols;
	col = tile % s_image->num_cols;

	rsc = (ctx->downsize_coeff_v << 30) |
	      (ctx->resize_coeffs_v[row] << 16) |
	      (ctx->downsize_coeff_h << 14) |
	      (ctx->resize_coeffs_h[col]);

	dev_dbg(priv->ipu->dev, "%s: %ux%u -> %ux%u (rsc = 0x%x)\n",
		__func__, s_image->tile[tile].width,
		s_image->tile[tile].height, dest_width, dest_height, rsc);

	/* setup the IC resizer and CSC */
	ret = ipu_ic_task_init_rsc(chan->ic, &ctx->csc,
				   s_image->tile[tile].width,
				   s_image->tile[tile].height,
				   dest_width,
				   dest_height,
				   rsc);
	if (ret) {
		dev_err(priv->ipu->dev, "ipu_ic_task_init failed, %d\n", ret);
		return ret;
	}

	/* init the source MEM-->IC PP IDMAC channel */
	init_idmac_channel(ctx, chan->in_chan, s_image,
			   IPU_ROTATE_NONE, false, tile);

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		/* init the IC PP-->MEM IDMAC channel */
		init_idmac_channel(ctx, chan->out_chan, d_image,
				   IPU_ROTATE_NONE, true, tile);

		/* init the MEM-->IC PP ROT IDMAC channel */
		init_idmac_channel(ctx, chan->rotation_in_chan, d_image,
				   ctx->rot_mode, true, tile);

		/* init the destination IC PP ROT-->MEM IDMAC channel */
		init_idmac_channel(ctx, chan->rotation_out_chan, d_image,
				   IPU_ROTATE_NONE, false, tile);

		/* now link IC PP-->MEM to MEM-->IC PP ROT */
		ipu_idmac_link(chan->out_chan, chan->rotation_in_chan);
	} else {
		/* init the destination IC PP-->MEM IDMAC channel */
		init_idmac_channel(ctx, chan->out_chan, d_image,
				   ctx->rot_mode, false, tile);
	}

	/* enable the IC */
	ipu_ic_enable(chan->ic);

	/* set buffers ready */
	ipu_idmac_select_buffer(chan->in_chan, 0);
	ipu_idmac_select_buffer(chan->out_chan, 0);
	if (ipu_rot_mode_is_irt(ctx->rot_mode))
		ipu_idmac_select_buffer(chan->rotation_out_chan, 0);
	if (ctx->double_buffering) {
		ipu_idmac_select_buffer(chan->in_chan, 1);
		ipu_idmac_select_buffer(chan->out_chan, 1);
		if (ipu_rot_mode_is_irt(ctx->rot_mode))
			ipu_idmac_select_buffer(chan->rotation_out_chan, 1);
	}

	/* enable the channels! */
	ipu_idmac_enable_channel(chan->in_chan);
	ipu_idmac_enable_channel(chan->out_chan);
	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		ipu_idmac_enable_channel(chan->rotation_in_chan);
		ipu_idmac_enable_channel(chan->rotation_out_chan);
	}

	ipu_ic_task_enable(chan->ic);

	ipu_cpmem_dump(chan->in_chan);
	ipu_cpmem_dump(chan->out_chan);
	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		ipu_cpmem_dump(chan->rotation_in_chan);
		ipu_cpmem_dump(chan->rotation_out_chan);
	}

	ipu_dump(priv->ipu);

	return 0;
}

/* hold irqlock when calling */
static int do_run(struct ipu_image_convert_run *run)
{
	struct ipu_image_convert_ctx *ctx = run->ctx;
	struct ipu_image_convert_chan *chan = ctx->chan;

	lockdep_assert_held(&chan->irqlock);

	ctx->in.base.phys0 = run->in_phys;
	ctx->out.base.phys0 = run->out_phys;

	ctx->cur_buf_num = 0;
	ctx->next_tile = 1;

	/* remove run from pending_q and set as current */
	list_del(&run->list);
	chan->current_run = run;

	return convert_start(run, 0);
}

/* hold irqlock when calling */
static void run_next(struct ipu_image_convert_chan *chan)
{
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_run *run, *tmp;
	int ret;

	lockdep_assert_held(&chan->irqlock);

	list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
		/* skip contexts that are aborting */
		if (run->ctx->aborting) {
			dev_dbg(priv->ipu->dev,
				"%s: task %u: skipping aborting ctx %p run %p\n",
				__func__, chan->ic_task, run->ctx, run);
			continue;
		}

		ret = do_run(run);
		if (!ret)
			break;

		/*
		 * something went wrong with start, add the run
		 * to done q and continue to the next run in the
		 * pending q.
		 */
		run->status = ret;
		list_add_tail(&run->list, &chan->done_q);
		chan->current_run = NULL;
	}
}

static void empty_done_q(struct ipu_image_convert_chan *chan)
{
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_run *run;
	unsigned long flags;

	spin_lock_irqsave(&chan->irqlock, flags);

	while (!list_empty(&chan->done_q)) {
		run = list_entry(chan->done_q.next,
				 struct ipu_image_convert_run,
				 list);

		list_del(&run->list);

		dev_dbg(priv->ipu->dev,
			"%s: task %u: completing ctx %p run %p with %d\n",
			__func__, chan->ic_task, run->ctx, run, run->status);

		/* call the completion callback and free the run */
		spin_unlock_irqrestore(&chan->irqlock, flags);
		run->ctx->complete(run, run->ctx->complete_context);
		spin_lock_irqsave(&chan->irqlock, flags);
	}

	spin_unlock_irqrestore(&chan->irqlock, flags);
}

/*
 * the bottom half thread clears out the done_q, calling the
 * completion handler for each.
 */
static irqreturn_t do_bh(int irq, void *dev_id)
{
	struct ipu_image_convert_chan *chan = dev_id;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_ctx *ctx;
	unsigned long flags;

	dev_dbg(priv->ipu->dev, "%s: task %u: enter\n", __func__,
		chan->ic_task);

	empty_done_q(chan);

	spin_lock_irqsave(&chan->irqlock, flags);

	/*
	 * the done_q is cleared out, signal any contexts
	 * that are aborting that abort can complete.
	 */
	list_for_each_entry(ctx, &chan->ctx_list, list) {
		if (ctx->aborting) {
			dev_dbg(priv->ipu->dev,
				"%s: task %u: signaling abort for ctx %p\n",
				__func__, chan->ic_task, ctx);
			complete_all(&ctx->aborted);
		}
	}

	spin_unlock_irqrestore(&chan->irqlock, flags);

	dev_dbg(priv->ipu->dev, "%s: task %u: exit\n", __func__,
		chan->ic_task);

	return IRQ_HANDLED;
}

static bool ic_settings_changed(struct ipu_image_convert_ctx *ctx)
{
	unsigned int cur_tile = ctx->next_tile - 1;
	unsigned int next_tile = ctx->next_tile;

	if (ctx->resize_coeffs_h[cur_tile % ctx->in.num_cols] !=
	    ctx->resize_coeffs_h[next_tile % ctx->in.num_cols] ||
	    ctx->resize_coeffs_v[cur_tile / ctx->in.num_cols] !=
	    ctx->resize_coeffs_v[next_tile / ctx->in.num_cols] ||
	    ctx->in.tile[cur_tile].width != ctx->in.tile[next_tile].width ||
	    ctx->in.tile[cur_tile].height != ctx->in.tile[next_tile].height ||
	    ctx->out.tile[cur_tile].width != ctx->out.tile[next_tile].width ||
	    ctx->out.tile[cur_tile].height != ctx->out.tile[next_tile].height)
		return true;

	return false;
}
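
/*
 * This typically returns true at transitions into or out of the rightmost
 * column / bottom row, whose tiles may have a different size and resize
 * coefficient after seam alignment; in that case the IC has to be stopped
 * and reprogrammed before the next tile (see do_tile_complete() below).
 */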

/* hold irqlock when calling */
static irqreturn_t do_tile_complete(struct ipu_image_convert_run *run)
{
	struct ipu_image_convert_ctx *ctx = run->ctx;
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_tile *src_tile, *dst_tile;
	struct ipu_image_convert_image *s_image = &ctx->in;
	struct ipu_image_convert_image *d_image = &ctx->out;
	struct ipuv3_channel *outch;
	unsigned int dst_idx;

	lockdep_assert_held(&chan->irqlock);

	outch = ipu_rot_mode_is_irt(ctx->rot_mode) ?
		chan->rotation_out_chan : chan->out_chan;

	/*
	 * It is difficult to stop the channel DMA before the channels
	 * enter the paused state. Without double-buffering the channels
	 * are always in a paused state when the EOF irq occurs, so it
	 * is safe to stop the channels now. For double-buffering we
	 * just ignore the abort until the operation completes, when it
	 * is safe to shut down.
	 */
	if (ctx->aborting && !ctx->double_buffering) {
		convert_stop(run);
		run->status = -EIO;
		goto done;
	}

	if (ctx->next_tile == ctx->num_tiles) {
		/*
		 * the conversion is complete
		 */
		convert_stop(run);
		run->status = 0;
		goto done;
	}

	/*
	 * not done, place the next tile buffers.
	 */
	if (!ctx->double_buffering) {
		if (ic_settings_changed(ctx)) {
			convert_stop(run);
			convert_start(run, ctx->next_tile);
		} else {
			src_tile = &s_image->tile[ctx->next_tile];
			dst_idx = ctx->out_tile_map[ctx->next_tile];
			dst_tile = &d_image->tile[dst_idx];

			ipu_cpmem_set_buffer(chan->in_chan, 0,
					     s_image->base.phys0 +
					     src_tile->offset);
			ipu_cpmem_set_buffer(outch, 0,
					     d_image->base.phys0 +
					     dst_tile->offset);
			if (s_image->fmt->planar)
				ipu_cpmem_set_uv_offset(chan->in_chan,
							src_tile->u_off,
							src_tile->v_off);
			if (d_image->fmt->planar)
				ipu_cpmem_set_uv_offset(outch,
							dst_tile->u_off,
							dst_tile->v_off);

			ipu_idmac_select_buffer(chan->in_chan, 0);
			ipu_idmac_select_buffer(outch, 0);
		}
	} else if (ctx->next_tile < ctx->num_tiles - 1) {

		src_tile = &s_image->tile[ctx->next_tile + 1];
		dst_idx = ctx->out_tile_map[ctx->next_tile + 1];
		dst_tile = &d_image->tile[dst_idx];

		ipu_cpmem_set_buffer(chan->in_chan, ctx->cur_buf_num,
				     s_image->base.phys0 + src_tile->offset);
		ipu_cpmem_set_buffer(outch, ctx->cur_buf_num,
				     d_image->base.phys0 + dst_tile->offset);

		ipu_idmac_select_buffer(chan->in_chan, ctx->cur_buf_num);
		ipu_idmac_select_buffer(outch, ctx->cur_buf_num);

		ctx->cur_buf_num ^= 1;
	}

	ctx->eof_mask = 0;	/* clear EOF irq mask for next tile */
	ctx->next_tile++;
	return IRQ_HANDLED;
done:
	list_add_tail(&run->list, &chan->done_q);
	chan->current_run = NULL;
	run_next(chan);
	return IRQ_WAKE_THREAD;
}

static irqreturn_t eof_irq(int irq, void *data)
{
	struct ipu_image_convert_chan *chan = data;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_ctx *ctx;
	struct ipu_image_convert_run *run;
	irqreturn_t ret = IRQ_HANDLED;
	bool tile_complete = false;
	unsigned long flags;

	spin_lock_irqsave(&chan->irqlock, flags);

	/* get current run and its context */
	run = chan->current_run;
	if (!run) {
		ret = IRQ_NONE;
		goto out;
	}

	ctx = run->ctx;

	if (irq == chan->in_eof_irq) {
		ctx->eof_mask |= EOF_IRQ_IN;
	} else if (irq == chan->out_eof_irq) {
		ctx->eof_mask |= EOF_IRQ_OUT;
	} else if (irq == chan->rot_in_eof_irq ||
		   irq == chan->rot_out_eof_irq) {
		if (!ipu_rot_mode_is_irt(ctx->rot_mode)) {
			/* this was NOT a rotation op, shouldn't happen */
			dev_err(priv->ipu->dev,
				"Unexpected rotation interrupt\n");
			goto out;
		}
		ctx->eof_mask |= (irq == chan->rot_in_eof_irq) ?
			EOF_IRQ_ROT_IN : EOF_IRQ_ROT_OUT;
	} else {
		dev_err(priv->ipu->dev, "Received unknown irq %d\n", irq);
		ret = IRQ_NONE;
		goto out;
	}

	if (ipu_rot_mode_is_irt(ctx->rot_mode))
		tile_complete = (ctx->eof_mask == EOF_IRQ_ROT_COMPLETE);
	else
		tile_complete = (ctx->eof_mask == EOF_IRQ_COMPLETE);

	if (tile_complete)
		ret = do_tile_complete(run);
out:
	spin_unlock_irqrestore(&chan->irqlock, flags);
	return ret;
}

/*
 * try to force the completion of runs for this ctx. Called when
 * abort wait times out in ipu_image_convert_abort().
 */
static void force_abort(struct ipu_image_convert_ctx *ctx)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_run *run;
	unsigned long flags;

	spin_lock_irqsave(&chan->irqlock, flags);

	run = chan->current_run;
	if (run && run->ctx == ctx) {
		convert_stop(run);
		run->status = -EIO;
		list_add_tail(&run->list, &chan->done_q);
		chan->current_run = NULL;
		run_next(chan);
	}

	spin_unlock_irqrestore(&chan->irqlock, flags);

	empty_done_q(chan);
}

static void release_ipu_resources(struct ipu_image_convert_chan *chan)
{
	if (chan->in_eof_irq >= 0)
		free_irq(chan->in_eof_irq, chan);
	if (chan->rot_in_eof_irq >= 0)
		free_irq(chan->rot_in_eof_irq, chan);
	if (chan->out_eof_irq >= 0)
		free_irq(chan->out_eof_irq, chan);
	if (chan->rot_out_eof_irq >= 0)
		free_irq(chan->rot_out_eof_irq, chan);

	if (!IS_ERR_OR_NULL(chan->in_chan))
		ipu_idmac_put(chan->in_chan);
	if (!IS_ERR_OR_NULL(chan->out_chan))
		ipu_idmac_put(chan->out_chan);
	if (!IS_ERR_OR_NULL(chan->rotation_in_chan))
		ipu_idmac_put(chan->rotation_in_chan);
	if (!IS_ERR_OR_NULL(chan->rotation_out_chan))
		ipu_idmac_put(chan->rotation_out_chan);
	if (!IS_ERR_OR_NULL(chan->ic))
		ipu_ic_put(chan->ic);

	chan->in_chan = chan->out_chan = chan->rotation_in_chan =
		chan->rotation_out_chan = NULL;
	chan->in_eof_irq = -1;
	chan->rot_in_eof_irq = -1;
	chan->out_eof_irq = -1;
	chan->rot_out_eof_irq = -1;
}

static int get_eof_irq(struct ipu_image_convert_chan *chan,
		       struct ipuv3_channel *channel)
{
	struct ipu_image_convert_priv *priv = chan->priv;
	int ret, irq;

	irq = ipu_idmac_channel_irq(priv->ipu, channel, IPU_IRQ_EOF);

	ret = request_threaded_irq(irq, eof_irq, do_bh, 0, "ipu-ic", chan);
	if (ret < 0) {
		dev_err(priv->ipu->dev, "could not acquire irq %d\n", irq);
		return ret;
	}

	return irq;
}

static int get_ipu_resources(struct ipu_image_convert_chan *chan)
{
	const struct ipu_image_convert_dma_chan *dma = chan->dma_ch;
	struct ipu_image_convert_priv *priv = chan->priv;
	int ret;

	/* get IC */
	chan->ic = ipu_ic_get(priv->ipu, chan->ic_task);
	if (IS_ERR(chan->ic)) {
		dev_err(priv->ipu->dev, "could not acquire IC\n");
		ret = PTR_ERR(chan->ic);
		goto err;
	}

	/* get IDMAC channels */
	chan->in_chan = ipu_idmac_get(priv->ipu, dma->in);
	chan->out_chan = ipu_idmac_get(priv->ipu, dma->out);
	if (IS_ERR(chan->in_chan) || IS_ERR(chan->out_chan)) {
		dev_err(priv->ipu->dev, "could not acquire idmac channels\n");
		ret = -EBUSY;
		goto err;
	}

	chan->rotation_in_chan = ipu_idmac_get(priv->ipu, dma->rot_in);
	chan->rotation_out_chan = ipu_idmac_get(priv->ipu, dma->rot_out);
	if (IS_ERR(chan->rotation_in_chan) || IS_ERR(chan->rotation_out_chan)) {
		dev_err(priv->ipu->dev,
			"could not acquire idmac rotation channels\n");
		ret = -EBUSY;
		goto err;
	}

	/* acquire the EOF interrupts */
	ret = get_eof_irq(chan, chan->in_chan);
	if (ret < 0) {
		chan->in_eof_irq = -1;
		goto err;
	}
	chan->in_eof_irq = ret;

	ret = get_eof_irq(chan, chan->rotation_in_chan);
	if (ret < 0) {
		chan->rot_in_eof_irq = -1;
		goto err;
	}
	chan->rot_in_eof_irq = ret;

	ret = get_eof_irq(chan, chan->out_chan);
	if (ret < 0) {
		chan->out_eof_irq = -1;
		goto err;
	}
	chan->out_eof_irq = ret;

	ret = get_eof_irq(chan, chan->rotation_out_chan);
	if (ret < 0) {
		chan->rot_out_eof_irq = -1;
		goto err;
	}
	chan->rot_out_eof_irq = ret;

	return 0;
err:
	release_ipu_resources(chan);
	return ret;
}

static int fill_image(struct ipu_image_convert_ctx *ctx,
		      struct ipu_image_convert_image *ic_image,
		      struct ipu_image *image,
		      enum ipu_image_convert_type type)
{
	struct ipu_image_convert_priv *priv = ctx->chan->priv;

	ic_image->base = *image;
	ic_image->type = type;

	ic_image->fmt = get_format(image->pix.pixelformat);
	if (!ic_image->fmt) {
		dev_err(priv->ipu->dev, "pixelformat not supported for %s\n",
			type == IMAGE_CONVERT_OUT ? "Output" : "Input");
		return -EINVAL;
	}

	if (ic_image->fmt->planar)
		ic_image->stride = ic_image->base.pix.width;
	else
		ic_image->stride = ic_image->base.pix.bytesperline;

	return 0;
}

/* borrowed from drivers/media/v4l2-core/v4l2-common.c */
static unsigned int clamp_align(unsigned int x, unsigned int min,
				unsigned int max, unsigned int align)
{
	/* Bits that must be zero to be aligned */
	unsigned int mask = ~((1 << align) - 1);

	/* Clamp to aligned min and max */
	x = clamp(x, (min + ~mask) & mask, max & mask);

	/* Round to nearest aligned value */
	if (align)
		x = (x + (1 << (align - 1))) & mask;

	return x;
}
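
/*
 * Worked example (illustrative only, not part of the driver): with
 * MIN_W = 16, MAX_W = 4096 and align = 3 (align to a multiple of 8),
 *
 *	clamp_align(1019, MIN_W, MAX_W, 3)
 *	  mask = ~((1 << 3) - 1) = ~7
 *	  clamp(1019, (16 + 7) & ~7, 4096 & ~7) = clamp(1019, 16, 4096) = 1019
 *	  (1019 + (1 << 2)) & ~7 = 1023 & ~7 = 1016
 *
 * i.e. the value is first clamped into the aligned [16, 4096] range and
 * then rounded to the nearest multiple of 8, giving 1016.
 */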

/* Adjusts input/output images to IPU restrictions */
void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out,
			      enum ipu_rotate_mode rot_mode)
{
	const struct ipu_image_pixfmt *infmt, *outfmt;
	u32 w_align_out, h_align_out;
	u32 w_align_in, h_align_in;

	infmt = get_format(in->pix.pixelformat);
	outfmt = get_format(out->pix.pixelformat);

	/* set some default pixel formats if needed */
	if (!infmt) {
		in->pix.pixelformat = V4L2_PIX_FMT_RGB24;
		infmt = get_format(V4L2_PIX_FMT_RGB24);
	}
	if (!outfmt) {
		out->pix.pixelformat = V4L2_PIX_FMT_RGB24;
		outfmt = get_format(V4L2_PIX_FMT_RGB24);
	}

	/* image converter does not handle fields */
	in->pix.field = out->pix.field = V4L2_FIELD_NONE;

	/* resizer cannot downsize more than 4:1 */
	if (ipu_rot_mode_is_irt(rot_mode)) {
		out->pix.height = max_t(__u32, out->pix.height,
					in->pix.width / 4);
		out->pix.width = max_t(__u32, out->pix.width,
				       in->pix.height / 4);
	} else {
		out->pix.width = max_t(__u32, out->pix.width,
				       in->pix.width / 4);
		out->pix.height = max_t(__u32, out->pix.height,
					in->pix.height / 4);
	}

	/* align input width/height */
	w_align_in = ilog2(tile_width_align(IMAGE_CONVERT_IN, infmt,
					    rot_mode));
	h_align_in = ilog2(tile_height_align(IMAGE_CONVERT_IN, infmt,
					     rot_mode));
	in->pix.width = clamp_align(in->pix.width, MIN_W, MAX_W,
				    w_align_in);
	in->pix.height = clamp_align(in->pix.height, MIN_H, MAX_H,
				     h_align_in);

	/* align output width/height */
	w_align_out = ilog2(tile_width_align(IMAGE_CONVERT_OUT, outfmt,
					     rot_mode));
	h_align_out = ilog2(tile_height_align(IMAGE_CONVERT_OUT, outfmt,
					      rot_mode));
	out->pix.width = clamp_align(out->pix.width, MIN_W, MAX_W,
				     w_align_out);
	out->pix.height = clamp_align(out->pix.height, MIN_H, MAX_H,
				      h_align_out);

	/* set input/output strides and image sizes */
	in->pix.bytesperline = infmt->planar ?
		clamp_align(in->pix.width, 2 << w_align_in, MAX_W,
			    w_align_in) :
		clamp_align((in->pix.width * infmt->bpp) >> 3,
			    ((2 << w_align_in) * infmt->bpp) >> 3,
			    (MAX_W * infmt->bpp) >> 3,
			    w_align_in);
	in->pix.sizeimage = infmt->planar ?
		(in->pix.height * in->pix.bytesperline * infmt->bpp) >> 3 :
		in->pix.height * in->pix.bytesperline;
	out->pix.bytesperline = outfmt->planar ? out->pix.width :
		(out->pix.width * outfmt->bpp) >> 3;
	out->pix.sizeimage = outfmt->planar ?
		(out->pix.height * out->pix.bytesperline * outfmt->bpp) >> 3 :
		out->pix.height * out->pix.bytesperline;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_adjust);

/*
 * This is used by ipu_image_convert_prepare() to verify that the requested
 * input and output images are valid before starting the conversion. Clients
 * can also call it themselves before calling ipu_image_convert_prepare().
 */
int ipu_image_convert_verify(struct ipu_image *in, struct ipu_image *out,
			     enum ipu_rotate_mode rot_mode)
{
	struct ipu_image testin, testout;

	testin = *in;
	testout = *out;

	ipu_image_convert_adjust(&testin, &testout, rot_mode);

	if (testin.pix.width != in->pix.width ||
	    testin.pix.height != in->pix.height ||
	    testout.pix.width != out->pix.width ||
	    testout.pix.height != out->pix.height)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_verify);
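
/*
 * A minimal sketch of a client using the two helpers above to negotiate
 * formats before preparing a conversion context. The formats and sizes
 * here are only illustrative:
 *
 *	struct ipu_image in = { }, out = { };
 *
 *	in.pix.pixelformat = V4L2_PIX_FMT_YUV420;
 *	in.pix.width = 1920;
 *	in.pix.height = 1080;
 *	out.pix.pixelformat = V4L2_PIX_FMT_RGB565;
 *	out.pix.width = 800;
 *	out.pix.height = 480;
 *
 *	// round sizes and strides to what the IC can actually do
 *	ipu_image_convert_adjust(&in, &out, IPU_ROTATE_NONE);
 *
 *	// later (e.g. after the formats were modified again), confirm that
 *	// the images still satisfy the converter's restrictions
 *	if (ipu_image_convert_verify(&in, &out, IPU_ROTATE_NONE))
 *		return -EINVAL;
 */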

/*
 * Call ipu_image_convert_prepare() to prepare for the conversion of
 * given images and rotation mode. Returns a new conversion context.
 */
struct ipu_image_convert_ctx *
ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
			  struct ipu_image *in, struct ipu_image *out,
			  enum ipu_rotate_mode rot_mode,
			  ipu_image_convert_cb_t complete,
			  void *complete_context)
{
	struct ipu_image_convert_priv *priv = ipu->image_convert_priv;
	struct ipu_image_convert_image *s_image, *d_image;
	struct ipu_image_convert_chan *chan;
	struct ipu_image_convert_ctx *ctx;
	unsigned long flags;
	unsigned int i;
	bool get_res;
	int ret;

	if (!in || !out || !complete ||
	    (ic_task != IC_TASK_VIEWFINDER &&
	     ic_task != IC_TASK_POST_PROCESSOR))
		return ERR_PTR(-EINVAL);

	/* verify the in/out images before continuing */
	ret = ipu_image_convert_verify(in, out, rot_mode);
	if (ret) {
		dev_err(priv->ipu->dev, "%s: in/out formats invalid\n",
			__func__);
		return ERR_PTR(ret);
	}

	chan = &priv->chan[ic_task];

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p\n", __func__,
		chan->ic_task, ctx);

	ctx->chan = chan;
	init_completion(&ctx->aborted);

	ctx->rot_mode = rot_mode;

	/* Sets ctx->in.num_rows/cols as well */
	ret = calc_image_resize_coefficients(ctx, in, out);
	if (ret)
		goto out_free;

	s_image = &ctx->in;
	d_image = &ctx->out;

	/* set tiling and rotation */
	if (ipu_rot_mode_is_irt(rot_mode)) {
		d_image->num_rows = s_image->num_cols;
		d_image->num_cols = s_image->num_rows;
	} else {
		d_image->num_rows = s_image->num_rows;
		d_image->num_cols = s_image->num_cols;
	}

	ctx->num_tiles = d_image->num_cols * d_image->num_rows;

	ret = fill_image(ctx, s_image, in, IMAGE_CONVERT_IN);
	if (ret)
		goto out_free;
	ret = fill_image(ctx, d_image, out, IMAGE_CONVERT_OUT);
	if (ret)
		goto out_free;

	calc_out_tile_map(ctx);

	find_seams(ctx, s_image, d_image);

	ret = calc_tile_dimensions(ctx, s_image);
	if (ret)
		goto out_free;

	ret = calc_tile_offsets(ctx, s_image);
	if (ret)
		goto out_free;

	calc_tile_dimensions(ctx, d_image);
	ret = calc_tile_offsets(ctx, d_image);
	if (ret)
		goto out_free;

	calc_tile_resize_coefficients(ctx);

	ret = ipu_ic_calc_csc(&ctx->csc,
			      s_image->base.pix.ycbcr_enc,
			      s_image->base.pix.quantization,
			      ipu_pixelformat_to_colorspace(s_image->fmt->fourcc),
			      d_image->base.pix.ycbcr_enc,
			      d_image->base.pix.quantization,
			      ipu_pixelformat_to_colorspace(d_image->fmt->fourcc));
	if (ret)
		goto out_free;

	dump_format(ctx, s_image);
	dump_format(ctx, d_image);

	ctx->complete = complete;
	ctx->complete_context = complete_context;

	/*
	 * Can we use double-buffering for this operation? If there is
	 * only one tile (the whole image can be converted in a single
	 * operation) there's no point in using double-buffering. Also,
	 * the IPU's IDMAC channels allow only a single U and V plane
	 * offset shared between both buffers, but these offsets change
	 * for every tile, and therefore would have to be updated for
	 * each buffer which is not possible. So double-buffering is
	 * impossible when either the source or destination images are
	 * a planar format (YUV420, YUV422P, etc.). Further, differently
	 * sized tiles or different resizing coefficients per tile
	 * prevent double-buffering as well.
	 */
	ctx->double_buffering = (ctx->num_tiles > 1 &&
				 !s_image->fmt->planar &&
				 !d_image->fmt->planar);
	for (i = 1; i < ctx->num_tiles; i++) {
		if (ctx->in.tile[i].width != ctx->in.tile[0].width ||
		    ctx->in.tile[i].height != ctx->in.tile[0].height ||
		    ctx->out.tile[i].width != ctx->out.tile[0].width ||
		    ctx->out.tile[i].height != ctx->out.tile[0].height) {
			ctx->double_buffering = false;
			break;
		}
	}
	for (i = 1; i < ctx->in.num_cols; i++) {
		if (ctx->resize_coeffs_h[i] != ctx->resize_coeffs_h[0]) {
			ctx->double_buffering = false;
			break;
		}
	}
	for (i = 1; i < ctx->in.num_rows; i++) {
		if (ctx->resize_coeffs_v[i] != ctx->resize_coeffs_v[0]) {
			ctx->double_buffering = false;
			break;
		}
	}

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		unsigned long intermediate_size = d_image->tile[0].size;

		for (i = 1; i < ctx->num_tiles; i++) {
			if (d_image->tile[i].size > intermediate_size)
				intermediate_size = d_image->tile[i].size;
		}

		ret = alloc_dma_buf(priv, &ctx->rot_intermediate[0],
				    intermediate_size);
		if (ret)
			goto out_free;
		if (ctx->double_buffering) {
			ret = alloc_dma_buf(priv,
					    &ctx->rot_intermediate[1],
					    intermediate_size);
			if (ret)
				goto out_free_dmabuf0;
		}
	}

	spin_lock_irqsave(&chan->irqlock, flags);

	get_res = list_empty(&chan->ctx_list);

	list_add_tail(&ctx->list, &chan->ctx_list);

	spin_unlock_irqrestore(&chan->irqlock, flags);

	if (get_res) {
		ret = get_ipu_resources(chan);
		if (ret)
			goto out_free_dmabuf1;
	}

	return ctx;

out_free_dmabuf1:
	free_dma_buf(priv, &ctx->rot_intermediate[1]);
	spin_lock_irqsave(&chan->irqlock, flags);
	list_del(&ctx->list);
	spin_unlock_irqrestore(&chan->irqlock, flags);
out_free_dmabuf0:
	free_dma_buf(priv, &ctx->rot_intermediate[0]);
out_free:
	kfree(ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(ipu_image_convert_prepare);

/*
 * Carry out a single image conversion run. Only the physical addresses of
 * the input and output image buffers are needed. The conversion context
 * must have been created previously with ipu_image_convert_prepare().
 */
int ipu_image_convert_queue(struct ipu_image_convert_run *run)
{
	struct ipu_image_convert_chan *chan;
	struct ipu_image_convert_priv *priv;
	struct ipu_image_convert_ctx *ctx;
	unsigned long flags;
	int ret = 0;

	if (!run || !run->ctx || !run->in_phys || !run->out_phys)
		return -EINVAL;

	ctx = run->ctx;
	chan = ctx->chan;
	priv = chan->priv;

	dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p run %p\n", __func__,
		chan->ic_task, ctx, run);

	INIT_LIST_HEAD(&run->list);

	spin_lock_irqsave(&chan->irqlock, flags);

	if (ctx->aborting) {
		ret = -EIO;
		goto unlock;
	}

	list_add_tail(&run->list, &chan->pending_q);

	if (!chan->current_run) {
		ret = do_run(run);
		if (ret)
			chan->current_run = NULL;
	}
unlock:
	spin_unlock_irqrestore(&chan->irqlock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_queue);
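
/*
 * Runs are queued on the channel's pending_q, so a client converting a
 * stream of frames can prepare a context once and then queue one run per
 * frame. A minimal sketch (the buffer addresses and the "ctx" returned by
 * ipu_image_convert_prepare() are assumed to be supplied by the caller):
 *
 *	struct ipu_image_convert_run *run;
 *
 *	run = kzalloc(sizeof(*run), GFP_KERNEL);
 *	if (!run)
 *		return -ENOMEM;
 *
 *	run->ctx = ctx;			// context from ipu_image_convert_prepare()
 *	run->in_phys = src_dma;		// per-frame source buffer
 *	run->out_phys = dst_dma;	// per-frame destination buffer
 *
 *	ret = ipu_image_convert_queue(run);
 *
 * The run is handed back to the client's completion callback once the
 * conversion finishes (or fails), where it can be freed or recycled.
 */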

/* Abort any active or pending conversions for this context */
static void __ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_run *run, *active_run, *tmp;
	unsigned long flags;
	int run_count, ret;

	spin_lock_irqsave(&chan->irqlock, flags);

	/* move all remaining pending runs in this context to done_q */
	list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
		if (run->ctx != ctx)
			continue;
		run->status = -EIO;
		list_move_tail(&run->list, &chan->done_q);
	}

	run_count = get_run_count(ctx, &chan->done_q);
	active_run = (chan->current_run && chan->current_run->ctx == ctx) ?
			chan->current_run : NULL;

	if (active_run)
		reinit_completion(&ctx->aborted);

	ctx->aborting = true;

	spin_unlock_irqrestore(&chan->irqlock, flags);

	if (!run_count && !active_run) {
		dev_dbg(priv->ipu->dev,
			"%s: task %u: no abort needed for ctx %p\n",
			__func__, chan->ic_task, ctx);
		return;
	}

	if (!active_run) {
		empty_done_q(chan);
		return;
	}

	dev_dbg(priv->ipu->dev,
		"%s: task %u: wait for completion: %d runs\n",
		__func__, chan->ic_task, run_count);

	ret = wait_for_completion_timeout(&ctx->aborted,
					  msecs_to_jiffies(10000));
	if (ret == 0) {
		dev_warn(priv->ipu->dev, "%s: timeout\n", __func__);
		force_abort(ctx);
	}
}

void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
{
	__ipu_image_convert_abort(ctx);
	ctx->aborting = false;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_abort);

/* Unprepare image conversion context */
void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	unsigned long flags;
	bool put_res;

	/* make sure no runs are hanging around */
	__ipu_image_convert_abort(ctx);

	dev_dbg(priv->ipu->dev, "%s: task %u: removing ctx %p\n", __func__,
		chan->ic_task, ctx);

	spin_lock_irqsave(&chan->irqlock, flags);

	list_del(&ctx->list);

	put_res = list_empty(&chan->ctx_list);

	spin_unlock_irqrestore(&chan->irqlock, flags);

	if (put_res)
		release_ipu_resources(chan);

	free_dma_buf(priv, &ctx->rot_intermediate[1]);
	free_dma_buf(priv, &ctx->rot_intermediate[0]);

	kfree(ctx);
}
EXPORT_SYMBOL_GPL(ipu_image_convert_unprepare);

/*
 * "Canned" asynchronous single image conversion. Allocates and returns
 * a new conversion run. On successful return the caller must free the
 * run and call ipu_image_convert_unprepare() after conversion completes.
 */
struct ipu_image_convert_run *
ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
		  struct ipu_image *in, struct ipu_image *out,
		  enum ipu_rotate_mode rot_mode,
		  ipu_image_convert_cb_t complete,
		  void *complete_context)
{
	struct ipu_image_convert_ctx *ctx;
	struct ipu_image_convert_run *run;
	int ret;

	ctx = ipu_image_convert_prepare(ipu, ic_task, in, out, rot_mode,
					complete, complete_context);
	if (IS_ERR(ctx))
		return ERR_CAST(ctx);

	run = kzalloc(sizeof(*run), GFP_KERNEL);
	if (!run) {
		ipu_image_convert_unprepare(ctx);
		return ERR_PTR(-ENOMEM);
	}

	run->ctx = ctx;
	run->in_phys = in->phys0;
	run->out_phys = out->phys0;

	ret = ipu_image_convert_queue(run);
	if (ret) {
		ipu_image_convert_unprepare(ctx);
		kfree(run);
		return ERR_PTR(ret);
	}

	return run;
}
EXPORT_SYMBOL_GPL(ipu_image_convert);

/* "Canned" synchronous single image conversion */
static void image_convert_sync_complete(struct ipu_image_convert_run *run,
					void *data)
{
	struct completion *comp = data;

	complete(comp);
}

int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
			   struct ipu_image *in, struct ipu_image *out,
			   enum ipu_rotate_mode rot_mode)
{
	struct ipu_image_convert_run *run;
	struct completion comp;
	int ret;

	init_completion(&comp);

	run = ipu_image_convert(ipu, ic_task, in, out, rot_mode,
				image_convert_sync_complete, &comp);
	if (IS_ERR(run))
		return PTR_ERR(run);

	ret = wait_for_completion_timeout(&comp, msecs_to_jiffies(10000));
	ret = (ret == 0) ? -ETIMEDOUT : 0;

	ipu_image_convert_unprepare(run->ctx);
	kfree(run);

	return ret;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_sync);

int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev)
{
	struct ipu_image_convert_priv *priv;
	int i;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	ipu->image_convert_priv = priv;
	priv->ipu = ipu;

	for (i = 0; i < IC_NUM_TASKS; i++) {
		struct ipu_image_convert_chan *chan = &priv->chan[i];

		chan->ic_task = i;
		chan->priv = priv;
		chan->dma_ch = &image_convert_dma_chan[i];
		chan->in_eof_irq = -1;
		chan->rot_in_eof_irq = -1;
		chan->out_eof_irq = -1;
		chan->rot_out_eof_irq = -1;

		spin_lock_init(&chan->irqlock);
		INIT_LIST_HEAD(&chan->ctx_list);
		INIT_LIST_HEAD(&chan->pending_q);
		INIT_LIST_HEAD(&chan->done_q);
	}

	return 0;
}

void ipu_image_convert_exit(struct ipu_soc *ipu)
{
}
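
/*
 * A minimal sketch of a blocking, one-shot conversion using the "canned"
 * synchronous helper above. The IPU handle and the DMA buffer addresses
 * (src_dma, dst_dma) are assumed to come from the caller, the crop
 * rectangles are simply set to the full frame, and the formats and sizes
 * are only illustrative:
 *
 *	struct ipu_image in = { }, out = { };
 *	int ret;
 *
 *	in.pix.pixelformat = V4L2_PIX_FMT_UYVY;
 *	in.pix.width = 1280;
 *	in.pix.height = 720;
 *	out.pix.pixelformat = V4L2_PIX_FMT_RGB24;
 *	out.pix.width = 1024;
 *	out.pix.height = 768;
 *
 *	ipu_image_convert_adjust(&in, &out, IPU_ROTATE_90_RIGHT);
 *
 *	// full-frame "crop" rectangles after adjustment
 *	in.rect.width = in.pix.width;
 *	in.rect.height = in.pix.height;
 *	out.rect.width = out.pix.width;
 *	out.rect.height = out.pix.height;
 *
 *	// buffers of in.pix.sizeimage / out.pix.sizeimage bytes
 *	in.phys0 = src_dma;
 *	out.phys0 = dst_dma;
 *
 *	ret = ipu_image_convert_sync(ipu, IC_TASK_POST_PROCESSOR,
 *				     &in, &out, IPU_ROTATE_90_RIGHT);
 *
 * ipu_image_convert_sync() blocks until the conversion completes and
 * returns -ETIMEDOUT if it has not finished within 10 seconds.
 */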