// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2012-2016 Mentor Graphics Inc.
 *
 * Queued image conversion support, with tiling and rotation.
 */

#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <video/imx-ipu-image-convert.h>
#include "ipu-prv.h"

/*
 * The IC Resizer has a restriction that the output frame from the
 * resizer must be 1024 or less in both width (pixels) and height
 * (lines).
 *
 * The image converter attempts to split up a conversion when
 * the desired output (converted) frame resolution exceeds the
 * IC resizer limit of 1024 in either dimension.
 *
 * If either dimension of the output frame exceeds the limit, the
 * dimension is split into 1, 2, or 4 equal stripes, for a maximum
 * of 4*4 or 16 tiles. A conversion is then carried out for each
 * tile (but taking care to pass the full frame stride length to
 * the DMA channel's parameter memory!). IDMA double-buffering is used
 * to convert each tile back-to-back when possible (see note below
 * when double_buffering boolean is set).
 *
 * Note that the input frame must be split up into the same number
 * of tiles as the output frame:
 *
 *                       +---------+-----+
 *   +-----+---+         |  A      | B   |
 *   | A   | B |         |         |     |
 *   +-----+---+   -->   +---------+-----+
 *   | C   | D |         |  C      | D   |
 *   +-----+---+         |         |     |
 *                       +---------+-----+
 *
 * Clockwise 90° rotations are handled by first rescaling into a
 * reusable temporary tile buffer and then rotating with the 8x8
 * block rotator, writing to the correct destination:
 *
 *                                         +-----+-----+
 *                                         |     |     |
 *   +-----+---+         +---------+       | C   | A   |
 *   | A   | B |         | A,B, |  |       |     |     |
 *   +-----+---+   -->   | C,D  |  |  -->  |     |     |
 *   | C   | D |         +---------+       +-----+-----+
 *   +-----+---+                           | D   | B   |
 *                                         |     |     |
 *                                         +-----+-----+
 *
 * If the 8x8 block rotator is used, horizontal or vertical flipping
 * is done during the rotation step, otherwise flipping is done
 * during the scaling step.
 * With rotation or flipping, tile order changes between input and
 * output image. Tiles are numbered row major from top left to bottom
 * right for both input and output image.
 */

/* Maximum number of 1024-pixel stripes per dimension (see comment above) */
#define MAX_STRIPES_W    4
#define MAX_STRIPES_H    4
#define MAX_TILES (MAX_STRIPES_W * MAX_STRIPES_H)

/* Overall frame size limits accepted by the converter, in pixels/lines */
#define MIN_W     16
#define MIN_H     8
#define MAX_W     4096
#define MAX_H     4096

enum ipu_image_convert_type {
        IMAGE_CONVERT_IN = 0,
        IMAGE_CONVERT_OUT,
};

/* A coherent DMA buffer (used for rotation intermediates) */
struct ipu_image_convert_dma_buf {
        void          *virt;
        dma_addr_t    phys;
        unsigned long len;
};

/* IDMAC channel numbers used by one IC task */
struct ipu_image_convert_dma_chan {
        int in;
        int out;
        int rot_in;
        int rot_out;
        int vdi_in_p;
        int vdi_in;
        int vdi_in_n;
};

/* dimensions of one tile */
struct ipu_image_tile {
        u32 width;
        u32 height;
        u32 left;
        u32 top;
        /* size and strides are in bytes */
        u32 size;
        u32 stride;
        u32 rot_stride;
        /* start Y or packed offset of this tile */
        u32 offset;
        /* offset from start to tile in U plane, for planar formats */
        u32 u_off;
        /* offset from start to tile in V plane, for planar formats */
        u32 v_off;
};

/* One side (input or output) of a conversion, with its tiling layout */
struct ipu_image_convert_image {
        struct ipu_image base;
        enum ipu_image_convert_type type;

        const struct ipu_image_pixfmt *fmt;
        unsigned int stride;

        /* # of rows (horizontal stripes) if dest height is > 1024 */
        unsigned int num_rows;
        /* # of columns (vertical stripes) if dest width is > 1024 */
        unsigned int num_cols;

        struct ipu_image_tile tile[MAX_TILES];
};

/* Static per-pixel-format properties */
struct ipu_image_pixfmt {
        u32     fourcc;        /* V4L2 fourcc */
        int     bpp;           /* total bpp */
        int     uv_width_dec;  /* decimation in width for U/V planes */
        int     uv_height_dec; /* decimation in height for U/V planes */
        bool    planar;        /* planar format */
        bool    uv_swapped;    /* U and V planes are swapped */
        bool    uv_packed;     /* partial planar (U and V in same plane) */
};

struct ipu_image_convert_ctx;
struct ipu_image_convert_chan;
struct ipu_image_convert_priv;

/* State of one queued conversion context */
struct ipu_image_convert_ctx {
        struct ipu_image_convert_chan *chan;

        ipu_image_convert_cb_t complete;
        void *complete_context;

        /* Source/destination image data and rotation mode */
        struct ipu_image_convert_image in;
        struct ipu_image_convert_image out;
        struct ipu_ic_csc csc;
        enum ipu_rotate_mode rot_mode;
        u32 downsize_coeff_h;
        u32 downsize_coeff_v;
        u32 image_resize_coeff_h;
        u32 image_resize_coeff_v;
        u32 resize_coeffs_h[MAX_STRIPES_W];
        u32 resize_coeffs_v[MAX_STRIPES_H];

        /* intermediate buffer for rotation */
        struct ipu_image_convert_dma_buf rot_intermediate[2];

        /* current buffer number for double buffering */
        int cur_buf_num;

        bool aborting;
        struct completion aborted;

        /* can we use double-buffering for this conversion operation? */
        bool double_buffering;
        /* num_rows * num_cols */
        unsigned int num_tiles;
        /* next tile to process */
        unsigned int next_tile;
        /* where to place converted tile in dest image */
        unsigned int out_tile_map[MAX_TILES];

        struct list_head list;
};

/* Per-IC-task channel state shared by all contexts queued on that task */
struct ipu_image_convert_chan {
        struct ipu_image_convert_priv *priv;

        enum ipu_ic_task ic_task;
        const struct ipu_image_convert_dma_chan *dma_ch;

        struct ipu_ic *ic;
        struct ipuv3_channel *in_chan;
        struct ipuv3_channel *out_chan;
        struct ipuv3_channel *rotation_in_chan;
        struct ipuv3_channel *rotation_out_chan;

        /* the IPU end-of-frame irqs */
        int out_eof_irq;
        int rot_out_eof_irq;

        spinlock_t irqlock;

        /* list of convert contexts */
        struct list_head ctx_list;
        /* queue of conversion runs */
        struct list_head pending_q;
        /* queue of completed runs */
        struct list_head done_q;

        /* the current conversion run */
        struct ipu_image_convert_run *current_run;
};

struct ipu_image_convert_priv {
        struct ipu_image_convert_chan chan[IC_NUM_TASKS];
        struct ipu_soc *ipu;
};

static const struct ipu_image_convert_dma_chan
image_convert_dma_chan[IC_NUM_TASKS] = {
        [IC_TASK_VIEWFINDER] = {
                .in = IPUV3_CHANNEL_MEM_IC_PRP_VF,
                .out = IPUV3_CHANNEL_IC_PRP_VF_MEM,
                .rot_in = IPUV3_CHANNEL_MEM_ROT_VF,
                .rot_out = IPUV3_CHANNEL_ROT_VF_MEM,
                .vdi_in_p = IPUV3_CHANNEL_MEM_VDI_PREV,
                .vdi_in = IPUV3_CHANNEL_MEM_VDI_CUR,
                .vdi_in_n = IPUV3_CHANNEL_MEM_VDI_NEXT,
        },
        [IC_TASK_POST_PROCESSOR] = {
                .in = IPUV3_CHANNEL_MEM_IC_PP,
                .out = IPUV3_CHANNEL_IC_PP_MEM,
                .rot_in = IPUV3_CHANNEL_MEM_ROT_PP,
                .rot_out = IPUV3_CHANNEL_ROT_PP_MEM,
        },
};

/* All pixel formats supported by the image converter */
static const struct ipu_image_pixfmt image_convert_formats[] = {
        {
                .fourcc = V4L2_PIX_FMT_RGB565,
                .bpp    = 16,
        }, {
                .fourcc = V4L2_PIX_FMT_RGB24,
                .bpp    = 24,
        }, {
                .fourcc = V4L2_PIX_FMT_BGR24,
                .bpp    = 24,
        }, {
                .fourcc = V4L2_PIX_FMT_RGB32,
                .bpp    = 32,
        }, {
                .fourcc = V4L2_PIX_FMT_BGR32,
                .bpp    = 32,
        }, {
                .fourcc = V4L2_PIX_FMT_XRGB32,
                .bpp    = 32,
        }, {
                .fourcc = V4L2_PIX_FMT_XBGR32,
                .bpp    = 32,
        }, {
                .fourcc = V4L2_PIX_FMT_BGRX32,
                .bpp    = 32,
        }, {
                .fourcc = V4L2_PIX_FMT_RGBX32,
                .bpp    = 32,
        }, {
                .fourcc = V4L2_PIX_FMT_YUYV,
                .bpp    = 16,
                .uv_width_dec = 2,
                .uv_height_dec = 1,
        }, {
                .fourcc = V4L2_PIX_FMT_UYVY,
                .bpp    = 16,
                .uv_width_dec = 2,
                .uv_height_dec = 1,
        }, {
                .fourcc = V4L2_PIX_FMT_YUV420,
                .bpp    = 12,
                .planar = true,
                .uv_width_dec = 2,
                .uv_height_dec = 2,
        }, {
                .fourcc = V4L2_PIX_FMT_YVU420,
                .bpp    = 12,
                .planar = true,
                .uv_width_dec = 2,
                .uv_height_dec = 2,
                .uv_swapped = true,
        }, {
                .fourcc = V4L2_PIX_FMT_NV12,
                .bpp    = 12,
                .planar = true,
                .uv_width_dec = 2,
                .uv_height_dec = 2,
                .uv_packed = true,
        }, {
                .fourcc = V4L2_PIX_FMT_YUV422P,
                .bpp    = 16,
                .planar = true,
                .uv_width_dec = 2,
                .uv_height_dec = 1,
        }, {
                .fourcc = V4L2_PIX_FMT_NV16,
                .bpp    = 16,
                .planar = true,
                .uv_width_dec = 2,
                .uv_height_dec = 1,
                .uv_packed = true,
        },
};

/* Look up the format descriptor for a V4L2 fourcc, or NULL if unsupported */
static const struct ipu_image_pixfmt *get_format(u32 fourcc)
{
        const struct ipu_image_pixfmt *ret = NULL;
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(image_convert_formats); i++) {
                if (image_convert_formats[i].fourcc == fourcc) {
                        ret = &image_convert_formats[i];
                        break;
                }
        }

        return ret;
}

/* Log one side of a conversion (size, tiling and fourcc) at debug level */
static void dump_format(struct ipu_image_convert_ctx *ctx,
                        struct ipu_image_convert_image *ic_image)
{
        struct ipu_image_convert_chan *chan = ctx->chan;
        struct ipu_image_convert_priv *priv = chan->priv;

        dev_dbg(priv->ipu->dev,
                "task %u: ctx %p: %s format: %dx%d (%dx%d tiles), %c%c%c%c\n",
                chan->ic_task, ctx,
                ic_image->type == IMAGE_CONVERT_OUT ? "Output" : "Input",
                ic_image->base.pix.width, ic_image->base.pix.height,
                ic_image->num_cols, ic_image->num_rows,
                ic_image->fmt->fourcc & 0xff,
                (ic_image->fmt->fourcc >> 8) & 0xff,
                (ic_image->fmt->fourcc >> 16) & 0xff,
                (ic_image->fmt->fourcc >> 24) & 0xff);
}

/* Enumerate the supported fourccs; returns -EINVAL past the end of the list */
int ipu_image_convert_enum_format(int index, u32 *fourcc)
{
        const struct ipu_image_pixfmt *fmt;

        if (index >= (int)ARRAY_SIZE(image_convert_formats))
                return -EINVAL;

        /* Format found */
        fmt = &image_convert_formats[index];
        *fourcc = fmt->fourcc;
        return 0;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_enum_format);

/* Free a coherent DMA buffer; safe to call on an unallocated buf */
static void free_dma_buf(struct ipu_image_convert_priv *priv,
                         struct ipu_image_convert_dma_buf *buf)
{
        if (buf->virt)
                dma_free_coherent(priv->ipu->dev,
                                  buf->len, buf->virt, buf->phys);
        buf->virt = NULL;
        buf->phys = 0;
}

/* Allocate a page-aligned coherent DMA buffer of at least @size bytes */
static int alloc_dma_buf(struct ipu_image_convert_priv *priv,
                         struct ipu_image_convert_dma_buf *buf,
                         int size)
{
        buf->len = PAGE_ALIGN(size);
        buf->virt = dma_alloc_coherent(priv->ipu->dev, buf->len, &buf->phys,
                                       GFP_DMA | GFP_KERNEL);
        if (!buf->virt) {
                dev_err(priv->ipu->dev, "failed to alloc dma buffer\n");
                return -ENOMEM;
        }

        return 0;
}

/* Number of 1024-pixel stripes needed to cover @dim pixels */
static inline int num_stripes(int dim)
{
        return (dim - 1) / 1024 + 1;
}

/*
 * Calculate downsizing coefficients, which are the same for all tiles,
 * and bilinear resizing coefficients, which are used to find the best
 * seam positions.
 */
static int calc_image_resize_coefficients(struct ipu_image_convert_ctx *ctx,
                                          struct ipu_image *in,
                                          struct ipu_image *out)
{
        u32 downsized_width = in->rect.width;
        u32 downsized_height = in->rect.height;
        u32 downsize_coeff_v = 0;
        u32 downsize_coeff_h = 0;
        u32 resized_width = out->rect.width;
        u32 resized_height = out->rect.height;
        u32 resize_coeff_h;
        u32 resize_coeff_v;

        if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
                /* 90°/270° rotation: the IC scales to the swapped size */
                resized_width = out->rect.height;
                resized_height = out->rect.width;
        }

        /* Do not let invalid input lead to an endless loop below */
        if (WARN_ON(resized_width == 0 || resized_height == 0))
                return -EINVAL;

        /*
         * Halve the input per step until it fits the 1024-pixel resizer
         * limit and is less than twice the resized size (the bilinear
         * resizer can only downscale by up to 2:1; the rest must come
         * from the decimator).
         */
        while (downsized_width > 1024 ||
               downsized_width >= resized_width * 2) {
                downsized_width >>= 1;
                downsize_coeff_h++;
        }

        while (downsized_height > 1024 ||
               downsized_height >= resized_height * 2) {
                downsized_height >>= 1;
                downsize_coeff_v++;
        }

        /*
         * Calculate the bilinear resizing coefficients that could be used if
         * we were converting with a single tile. The bottom right output pixel
         * should sample as close as possible to the bottom right input pixel
         * out of the decimator, but not overshoot it:
         *
         * NOTE(review): resized_width/height == 1 would divide by zero here;
         * presumably ruled out by the MIN_W/MIN_H frame limits - confirm at
         * the callers that validate frame sizes.
         */
        resize_coeff_h = 8192 * (downsized_width - 1) / (resized_width - 1);
        resize_coeff_v = 8192 * (downsized_height - 1) / (resized_height - 1);

        dev_dbg(ctx->chan->priv->ipu->dev,
                "%s: hscale: >>%u, *8192/%u vscale: >>%u, *8192/%u, %ux%u tiles\n",
                __func__, downsize_coeff_h, resize_coeff_h, downsize_coeff_v,
                resize_coeff_v, ctx->in.num_cols, ctx->in.num_rows);

        /* decimator supports at most >>2; resize coefficient is 14 bits */
        if (downsize_coeff_h > 2 || downsize_coeff_v > 2 ||
            resize_coeff_h > 0x3fff || resize_coeff_v > 0x3fff)
                return -EINVAL;

        ctx->downsize_coeff_h = downsize_coeff_h;
        ctx->downsize_coeff_v = downsize_coeff_v;
        ctx->image_resize_coeff_h = resize_coeff_h;
        ctx->image_resize_coeff_v = resize_coeff_v;

        return 0;
}

/* Round x to the closest multiple of y (ties round down to the multiple) */
#define round_closest(x, y) round_down((x) + (y)/2, (y))

/*
 * Find the best aligned seam position for the given column / row index.
 * Rotation and image offsets are out of scope.
 *
 * @index: column / row index, used to calculate valid interval
 * @in_edge: input right / bottom edge
 * @out_edge: output right / bottom edge
 * @in_align: input alignment, either horizontal 8-byte line start address
 *            alignment, or pixel alignment due to image format
 * @out_align: output alignment, either horizontal 8-byte line start address
 *             alignment, or pixel alignment due to image format or rotator
 *             block size
 * @in_burst: horizontal input burst size in case of horizontal flip
 * @out_burst: horizontal output burst size or rotator block size
 * @downsize_coeff: downsizing section coefficient
 * @resize_coeff: main processing section resizing coefficient
 * @_in_seam: aligned input seam position return value
 * @_out_seam: aligned output seam position return value
 */
static void find_best_seam(struct ipu_image_convert_ctx *ctx,
                           unsigned int index,
                           unsigned int in_edge,
                           unsigned int out_edge,
                           unsigned int in_align,
                           unsigned int out_align,
                           unsigned int in_burst,
                           unsigned int out_burst,
                           unsigned int downsize_coeff,
                           unsigned int resize_coeff,
                           u32 *_in_seam,
                           u32 *_out_seam)
{
        struct device *dev = ctx->chan->priv->ipu->dev;
        unsigned int out_pos;
        /* Input / output seam position candidates */
        unsigned int out_seam = 0;
        unsigned int in_seam = 0;
        unsigned int min_diff = UINT_MAX;
        unsigned int out_start;
        unsigned int out_end;
        unsigned int in_start;
        unsigned int in_end;

        /* Start within 1024 pixels of the right / bottom edge */
        out_start = max_t(int, index * out_align, out_edge - 1024);
        /* End before having to add more columns to the left / rows above */
        out_end = min_t(unsigned int, out_edge, index * 1024 + 1);

        /*
         * Limit input seam position to make sure that the downsized input tile
         * to the right or bottom does not exceed 1024 pixels.
         */
        in_start = max_t(int, index * in_align,
                         in_edge - (1024 << downsize_coeff));
        in_end = min_t(unsigned int, in_edge,
                       index * (1024 << downsize_coeff) + 1);

        /*
         * Output tiles must start at a multiple of 8 bytes horizontally and
         * possibly at an even line horizontally depending on the pixel format.
         * Only consider output aligned positions for the seam.
         */
        out_start = round_up(out_start, out_align);
        for (out_pos = out_start; out_pos < out_end; out_pos += out_align) {
                unsigned int in_pos;
                unsigned int in_pos_aligned;
                unsigned int in_pos_rounded;
                unsigned int abs_diff;

                /*
                 * Tiles in the right row / bottom column may not be allowed to
                 * overshoot horizontally / vertically. out_burst may be the
                 * actual DMA burst size, or the rotator block size.
                 */
                if ((out_burst > 1) && (out_edge - out_pos) % out_burst)
                        continue;

                /*
                 * Input sample position, corresponding to out_pos, 19.13 fixed
                 * point.
                 */
                in_pos = (out_pos * resize_coeff) << downsize_coeff;
                /*
                 * The closest input sample position that we could actually
                 * start the input tile at, 19.13 fixed point.
                 */
                in_pos_aligned = round_closest(in_pos, 8192U * in_align);
                /* Convert 19.13 fixed point to integer */
                in_pos_rounded = in_pos_aligned / 8192U;

                /* Discard candidates outside the valid input interval */
                if (in_pos_rounded < in_start)
                        continue;
                if (in_pos_rounded >= in_end)
                        break;

                if ((in_burst > 1) &&
                    (in_edge - in_pos_rounded) % in_burst)
                        continue;

                /* Fractional error between ideal and aligned input position */
                if (in_pos < in_pos_aligned)
                        abs_diff = in_pos_aligned - in_pos;
                else
                        abs_diff = in_pos - in_pos_aligned;

                /* Keep the candidate with the smallest sampling error */
                if (abs_diff < min_diff) {
                        in_seam = in_pos_rounded;
                        out_seam = out_pos;
                        min_diff = abs_diff;
                }
        }

        *_out_seam = out_seam;
        *_in_seam = in_seam;

        dev_dbg(dev, "%s: out_seam %u(%u) in [%u, %u], in_seam %u(%u) in [%u, %u] diff %u.%03u\n",
                __func__, out_seam, out_align, out_start, out_end,
                in_seam, in_align, in_start, in_end, min_diff / 8192,
                DIV_ROUND_CLOSEST(min_diff % 8192 * 1000, 8192));
}

/*
 * Tile left edges are required to be aligned to multiples of 8 bytes
 * by the IDMAC.
 */
static inline u32 tile_left_align(const struct ipu_image_pixfmt *fmt)
{
        if (fmt->planar)
                return fmt->uv_packed ? 8 : 8 * fmt->uv_width_dec;
        else
                /* packed: alignment in pixels equivalent to 8 bytes */
                return fmt->bpp == 32 ? 2 : fmt->bpp == 16 ? 4 : 8;
}

/*
 * Tile top edge alignment is only limited by chroma subsampling.
 */
static inline u32 tile_top_align(const struct ipu_image_pixfmt *fmt)
{
        return fmt->uv_height_dec > 1 ? 2 : 1;
}

static inline u32 tile_width_align(enum ipu_image_convert_type type,
                                   const struct ipu_image_pixfmt *fmt,
                                   enum ipu_rotate_mode rot_mode)
{
        if (type == IMAGE_CONVERT_IN) {
                /*
                 * The IC burst reads 8 pixels at a time. Reading beyond the
                 * end of the line is usually acceptable. Those pixels are
                 * ignored, unless the IC has to write the scaled line in
                 * reverse.
                 */
                return (!ipu_rot_mode_is_irt(rot_mode) &&
                        (rot_mode & IPU_ROT_BIT_HFLIP)) ? 8 : 2;
        }

        /*
         * Align to 16x16 pixel blocks for planar 4:2:0 chroma subsampled
         * formats to guarantee 8-byte aligned line start addresses in the
         * chroma planes when IRT is used. Align to 8x8 pixel IRT block size
         * for all other formats.
         */
        return (ipu_rot_mode_is_irt(rot_mode) &&
                fmt->planar && !fmt->uv_packed) ?
                8 * fmt->uv_width_dec : 8;
}

static inline u32 tile_height_align(enum ipu_image_convert_type type,
                                    const struct ipu_image_pixfmt *fmt,
                                    enum ipu_rotate_mode rot_mode)
{
        if (type == IMAGE_CONVERT_IN || !ipu_rot_mode_is_irt(rot_mode))
                return 2;

        /*
         * Align to 16x16 pixel blocks for planar 4:2:0 chroma subsampled
         * formats to guarantee 8-byte aligned line start addresses in the
         * chroma planes when IRT is used. Align to 8x8 pixel IRT block size
         * for all other formats.
         */
        return (fmt->planar && !fmt->uv_packed) ? 8 * fmt->uv_width_dec : 8;
}

/*
 * Fill in left position and width and for all tiles in an input column, and
 * for all corresponding output tiles. If the 90° rotator is used, the output
 * tiles are in a row, and output tile top position and height are set.
636 */ 637 static void fill_tile_column(struct ipu_image_convert_ctx *ctx, 638 unsigned int col, 639 struct ipu_image_convert_image *in, 640 unsigned int in_left, unsigned int in_width, 641 struct ipu_image_convert_image *out, 642 unsigned int out_left, unsigned int out_width) 643 { 644 unsigned int row, tile_idx; 645 struct ipu_image_tile *in_tile, *out_tile; 646 647 for (row = 0; row < in->num_rows; row++) { 648 tile_idx = in->num_cols * row + col; 649 in_tile = &in->tile[tile_idx]; 650 out_tile = &out->tile[ctx->out_tile_map[tile_idx]]; 651 652 in_tile->left = in_left; 653 in_tile->width = in_width; 654 655 if (ipu_rot_mode_is_irt(ctx->rot_mode)) { 656 out_tile->top = out_left; 657 out_tile->height = out_width; 658 } else { 659 out_tile->left = out_left; 660 out_tile->width = out_width; 661 } 662 } 663 } 664 665 /* 666 * Fill in top position and height and for all tiles in an input row, and 667 * for all corresponding output tiles. If the 90° rotator is used, the output 668 * tiles are in a column, and output tile left position and width are set. 669 */ 670 static void fill_tile_row(struct ipu_image_convert_ctx *ctx, unsigned int row, 671 struct ipu_image_convert_image *in, 672 unsigned int in_top, unsigned int in_height, 673 struct ipu_image_convert_image *out, 674 unsigned int out_top, unsigned int out_height) 675 { 676 unsigned int col, tile_idx; 677 struct ipu_image_tile *in_tile, *out_tile; 678 679 for (col = 0; col < in->num_cols; col++) { 680 tile_idx = in->num_cols * row + col; 681 in_tile = &in->tile[tile_idx]; 682 out_tile = &out->tile[ctx->out_tile_map[tile_idx]]; 683 684 in_tile->top = in_top; 685 in_tile->height = in_height; 686 687 if (ipu_rot_mode_is_irt(ctx->rot_mode)) { 688 out_tile->left = out_top; 689 out_tile->width = out_height; 690 } else { 691 out_tile->top = out_top; 692 out_tile->height = out_height; 693 } 694 } 695 } 696 697 /* 698 * Find the best horizontal and vertical seam positions to split into tiles. 
 * Minimize the fractional part of the input sampling position for the
 * top / left pixels of each tile.
 */
static void find_seams(struct ipu_image_convert_ctx *ctx,
                       struct ipu_image_convert_image *in,
                       struct ipu_image_convert_image *out)
{
        struct device *dev = ctx->chan->priv->ipu->dev;
        unsigned int resized_width = out->base.rect.width;
        unsigned int resized_height = out->base.rect.height;
        unsigned int col;
        unsigned int row;
        unsigned int in_left_align = tile_left_align(in->fmt);
        unsigned int in_top_align = tile_top_align(in->fmt);
        unsigned int out_left_align = tile_left_align(out->fmt);
        unsigned int out_top_align = tile_top_align(out->fmt);
        unsigned int out_width_align = tile_width_align(out->type, out->fmt,
                                                        ctx->rot_mode);
        unsigned int out_height_align = tile_height_align(out->type, out->fmt,
                                                          ctx->rot_mode);
        unsigned int in_right = in->base.rect.width;
        unsigned int in_bottom = in->base.rect.height;
        unsigned int out_right = out->base.rect.width;
        unsigned int out_bottom = out->base.rect.height;
        unsigned int flipped_out_left;
        unsigned int flipped_out_top;

        if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
                /* Switch width/height and align top left to IRT block size */
                resized_width = out->base.rect.height;
                resized_height = out->base.rect.width;
                out_left_align = out_height_align;
                out_top_align = out_width_align;
                out_width_align = out_left_align;
                out_height_align = out_top_align;
                out_right = out->base.rect.height;
                out_bottom = out->base.rect.width;
        }

        /* Walk column seams from the right edge towards the left */
        for (col = in->num_cols - 1; col > 0; col--) {
                bool allow_in_overshoot = ipu_rot_mode_is_irt(ctx->rot_mode) ||
                                          !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
                bool allow_out_overshoot = (col < in->num_cols - 1) &&
                                           !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
                unsigned int in_left;
                unsigned int out_left;

                /*
                 * Align input width to burst length if the scaling step flips
                 * horizontally.
                 */

                find_best_seam(ctx, col,
                               in_right, out_right,
                               in_left_align, out_left_align,
                               allow_in_overshoot ? 1 : 8 /* burst length */,
                               allow_out_overshoot ? 1 : out_width_align,
                               ctx->downsize_coeff_h, ctx->image_resize_coeff_h,
                               &in_left, &out_left);

                if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
                        flipped_out_left = resized_width - out_right;
                else
                        flipped_out_left = out_left;

                fill_tile_column(ctx, col, in, in_left, in_right - in_left,
                                 out, flipped_out_left, out_right - out_left);

                dev_dbg(dev, "%s: col %u: %u, %u -> %u, %u\n", __func__, col,
                        in_left, in_right - in_left,
                        flipped_out_left, out_right - out_left);

                /* this seam becomes the right edge of the next column */
                in_right = in_left;
                out_right = out_left;
        }

        /* Leftmost column always starts at position 0 */
        flipped_out_left = (ctx->rot_mode & IPU_ROT_BIT_HFLIP) ?
                           resized_width - out_right : 0;

        fill_tile_column(ctx, 0, in, 0, in_right,
                         out, flipped_out_left, out_right);

        dev_dbg(dev, "%s: col 0: 0, %u -> %u, %u\n", __func__,
                in_right, flipped_out_left, out_right);

        /* Walk row seams from the bottom edge towards the top */
        for (row = in->num_rows - 1; row > 0; row--) {
                bool allow_overshoot = row < in->num_rows - 1;
                unsigned int in_top;
                unsigned int out_top;

                find_best_seam(ctx, row,
                               in_bottom, out_bottom,
                               in_top_align, out_top_align,
                               1, allow_overshoot ? 1 : out_height_align,
                               ctx->downsize_coeff_v, ctx->image_resize_coeff_v,
                               &in_top, &out_top);

                /*
                 * NOTE(review): the XOR means a vertical flip of the output
                 * row positions is applied either by VFLIP or by the IRT
                 * rotation, but not when both are in effect - confirm against
                 * the rotator semantics.
                 */
                if ((ctx->rot_mode & IPU_ROT_BIT_VFLIP) ^
                    ipu_rot_mode_is_irt(ctx->rot_mode))
                        flipped_out_top = resized_height - out_bottom;
                else
                        flipped_out_top = out_top;

                fill_tile_row(ctx, row, in, in_top, in_bottom - in_top,
                              out, flipped_out_top, out_bottom - out_top);

                dev_dbg(dev, "%s: row %u: %u, %u -> %u, %u\n", __func__, row,
                        in_top, in_bottom - in_top,
                        flipped_out_top, out_bottom - out_top);

                in_bottom = in_top;
                out_bottom = out_top;
        }

        if ((ctx->rot_mode & IPU_ROT_BIT_VFLIP) ^
            ipu_rot_mode_is_irt(ctx->rot_mode))
                flipped_out_top = resized_height - out_bottom;
        else
                flipped_out_top = 0;

        fill_tile_row(ctx, 0, in, 0, in_bottom,
                      out, flipped_out_top, out_bottom);

        dev_dbg(dev, "%s: row 0: 0, %u -> %u, %u\n", __func__,
                in_bottom, flipped_out_top, out_bottom);
}

/* Compute byte size and strides of every tile from its pixel dimensions */
static void calc_tile_dimensions(struct ipu_image_convert_ctx *ctx,
                                 struct ipu_image_convert_image *image)
{
        struct ipu_image_convert_chan *chan = ctx->chan;
        struct ipu_image_convert_priv *priv = chan->priv;
        unsigned int i;

        for (i = 0; i < ctx->num_tiles; i++) {
                struct ipu_image_tile *tile;
                const unsigned int row = i / image->num_cols;
                const unsigned int col = i % image->num_cols;

                /* output tiles are indexed through the rotation tile map */
                if (image->type == IMAGE_CONVERT_OUT)
                        tile = &image->tile[ctx->out_tile_map[i]];
                else
                        tile = &image->tile[i];

                tile->size = ((tile->height * image->fmt->bpp) >> 3) *
                        tile->width;

                if (image->fmt->planar) {
                        /* planar strides are in luma pixels (1 byte each) */
                        tile->stride = tile->width;
                        tile->rot_stride = tile->height;
                } else {
                        tile->stride =
                                (image->fmt->bpp * tile->width) >> 3;
                        tile->rot_stride =
                                (image->fmt->bpp * tile->height) >> 3;
                }

                dev_dbg(priv->ipu->dev,
                        "task %u: ctx %p: %s@[%u,%u]: %ux%u@%u,%u\n",
                        chan->ic_task, ctx,
                        image->type == IMAGE_CONVERT_IN ? "Input" : "Output",
                        row, col,
                        tile->width, tile->height, tile->left, tile->top);
        }
}

/*
 * Use the rotation transformation to find the tile coordinates
 * (row, col) of a tile in the destination frame that corresponds
 * to the given tile coordinates of a source frame. The destination
 * coordinate is then converted to a tile index.
 */
static int transform_tile_index(struct ipu_image_convert_ctx *ctx,
                                int src_row, int src_col)
{
        struct ipu_image_convert_chan *chan = ctx->chan;
        struct ipu_image_convert_priv *priv = chan->priv;
        struct ipu_image_convert_image *s_image = &ctx->in;
        struct ipu_image_convert_image *d_image = &ctx->out;
        int dst_row, dst_col;

        /* with no rotation it's a 1:1 mapping */
        if (ctx->rot_mode == IPU_ROTATE_NONE)
                return src_row * s_image->num_cols + src_col;

        /*
         * before doing the transform, first we have to translate
         * source row,col for an origin in the center of s_image
         * (doubled coordinates keep the math in integers)
         */
        src_row = src_row * 2 - (s_image->num_rows - 1);
        src_col = src_col * 2 - (s_image->num_cols - 1);

        /* do the rotation transform */
        if (ctx->rot_mode & IPU_ROT_BIT_90) {
                dst_col = -src_row;
                dst_row = src_col;
        } else {
                dst_col = src_col;
                dst_row = src_row;
        }

        /* apply flip */
        if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
                dst_col = -dst_col;
        if (ctx->rot_mode & IPU_ROT_BIT_VFLIP)
                dst_row = -dst_row;

        dev_dbg(priv->ipu->dev, "task %u: ctx %p: [%d,%d] --> [%d,%d]\n",
                chan->ic_task, ctx, src_col, src_row, dst_col, dst_row);

        /*
         * finally translate dest row,col using an origin in upper
         * left of d_image
         */
        dst_row += d_image->num_rows - 1;
        dst_col += d_image->num_cols - 1;
        dst_row /= 2;
        dst_col /= 2;

        return dst_row * d_image->num_cols + dst_col;
}

/*
 * Fill the out_tile_map[] with transformed destination tile indices.
 */
static void calc_out_tile_map(struct ipu_image_convert_ctx *ctx)
{
        struct ipu_image_convert_image *s_image = &ctx->in;
        unsigned int row, col, tile = 0;

        for (row = 0; row < s_image->num_rows; row++) {
                for (col = 0; col < s_image->num_cols; col++) {
                        ctx->out_tile_map[tile] =
                                transform_tile_index(ctx, row, col);
                        tile++;
                }
        }
}

/*
 * Compute per-tile luma/chroma start offsets for planar formats.
 * Returns -EINVAL if any offset violates the 8-byte IDMAC alignment.
 */
static int calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx,
                                    struct ipu_image_convert_image *image)
{
        struct ipu_image_convert_chan *chan = ctx->chan;
        struct ipu_image_convert_priv *priv = chan->priv;
        const struct ipu_image_pixfmt *fmt = image->fmt;
        unsigned int row, col, tile = 0;
        u32 H, top, y_stride, uv_stride;
        u32 uv_row_off, uv_col_off, uv_off, u_off, v_off, tmp;
        u32 y_row_off, y_col_off, y_off;
        u32 y_size, uv_size;

        /* setup some convenience vars */
        H = image->base.pix.height;

        y_stride = image->stride;
        uv_stride = y_stride / fmt->uv_width_dec;
        if (fmt->uv_packed)
                uv_stride *= 2;

        y_size = H * y_stride;
        uv_size = y_size / (fmt->uv_width_dec * fmt->uv_height_dec);

        for (row = 0; row < image->num_rows; row++) {
                top = image->tile[tile].top;
                y_row_off = top * y_stride;
                uv_row_off = (top * uv_stride) / fmt->uv_height_dec;

                for (col = 0; col < image->num_cols; col++) {
                        y_col_off = image->tile[tile].left;
                        uv_col_off = y_col_off / fmt->uv_width_dec;
                        if (fmt->uv_packed)
                                uv_col_off *= 2;

                        y_off = y_row_off + y_col_off;
                        uv_off = uv_row_off + uv_col_off;

                        /*
                         * NOTE(review): u_off/v_off appear to be expressed
                         * relative to the tile's luma offset (hence the
                         * "y_size - y_off" term) - verify against the CPMEM
                         * U/V offset programming.
                         */
                        u_off = y_size - y_off + uv_off;
                        v_off = (fmt->uv_packed) ? 0 : u_off + uv_size;
                        if (fmt->uv_swapped) {
                                tmp = u_off;
                                u_off = v_off;
                                v_off = tmp;
                        }

                        image->tile[tile].offset = y_off;
                        image->tile[tile].u_off = u_off;
                        image->tile[tile++].v_off = v_off;

                        /* all three offsets must be 8-byte aligned */
                        if ((y_off & 0x7) || (u_off & 0x7) || (v_off & 0x7)) {
                                dev_err(priv->ipu->dev,
                                        "task %u: ctx %p: %s@[%d,%d]: "
                                        "y_off %08x, u_off %08x, v_off %08x\n",
                                        chan->ic_task, ctx,
                                        image->type == IMAGE_CONVERT_IN ?
                                        "Input" : "Output", row, col,
                                        y_off, u_off, v_off);
                                return -EINVAL;
                        }
                }
        }

        return 0;
}

/*
 * Compute per-tile start offsets for packed formats.
 * Returns -EINVAL if any offset violates the 8-byte IDMAC alignment.
 */
static int calc_tile_offsets_packed(struct ipu_image_convert_ctx *ctx,
                                    struct ipu_image_convert_image *image)
{
        struct ipu_image_convert_chan *chan = ctx->chan;
        struct ipu_image_convert_priv *priv = chan->priv;
        const struct ipu_image_pixfmt *fmt = image->fmt;
        unsigned int row, col, tile = 0;
        u32 bpp, stride, offset;
        u32 row_off, col_off;

        /* setup some convenience vars */
        stride = image->stride;
        bpp = fmt->bpp;

        for (row = 0; row < image->num_rows; row++) {
                row_off = image->tile[tile].top * stride;

                for (col = 0; col < image->num_cols; col++) {
                        col_off = (image->tile[tile].left * bpp) >> 3;

                        offset = row_off + col_off;

                        image->tile[tile].offset = offset;
                        image->tile[tile].u_off = 0;
                        image->tile[tile++].v_off = 0;

                        /* tile start must be 8-byte aligned for the IDMAC */
                        if (offset & 0x7) {
                                dev_err(priv->ipu->dev,
                                        "task %u: ctx %p: %s@[%d,%d]: "
                                        "phys %08x\n",
                                        chan->ic_task, ctx,
                                        image->type == IMAGE_CONVERT_IN ?
                                        "Input" : "Output", row, col,
                                        row_off + col_off);
                                return -EINVAL;
                        }
                }
        }

        return 0;
}

/* Dispatch to the planar or packed offset calculation for this format */
static int calc_tile_offsets(struct ipu_image_convert_ctx *ctx,
                             struct ipu_image_convert_image *image)
{
        if (image->fmt->planar)
                return calc_tile_offsets_planar(ctx, image);

        return calc_tile_offsets_packed(ctx, image);
}

/*
 * Calculate the resizing ratio for the IC main processing section given input
 * size, fixed downsizing coefficient, and output size.
 * Either round to closest for the next tile's first pixel to minimize seams
 * and distortion (for all but right column / bottom row), or round down to
 * avoid sampling beyond the edges of the input image for this tile's last
 * pixel.
 * Returns the resizing coefficient, resizing ratio is 8192.0 / resize_coeff.
 */
static u32 calc_resize_coeff(u32 input_size, u32 downsize_coeff,
                             u32 output_size, bool allow_overshoot)
{
        u32 downsized = input_size >> downsize_coeff;

        if (allow_overshoot)
                return DIV_ROUND_CLOSEST(8192 * downsized, output_size);
        else
                /* NOTE(review): assumes output_size > 1 - confirm at callers */
                return 8192 * (downsized - 1) / (output_size - 1);
}

/*
 * Slightly modify resize coefficients per tile to hide the bilinear
 * interpolator reset at tile borders, shifting the right / bottom edge
 * by up to a half input pixel. This removes noticeable seams between
 * tiles at higher upscaling factors.
1079 */ 1080 static void calc_tile_resize_coefficients(struct ipu_image_convert_ctx *ctx) 1081 { 1082 struct ipu_image_convert_chan *chan = ctx->chan; 1083 struct ipu_image_convert_priv *priv = chan->priv; 1084 struct ipu_image_tile *in_tile, *out_tile; 1085 unsigned int col, row, tile_idx; 1086 unsigned int last_output; 1087 1088 for (col = 0; col < ctx->in.num_cols; col++) { 1089 bool closest = (col < ctx->in.num_cols - 1) && 1090 !(ctx->rot_mode & IPU_ROT_BIT_HFLIP); 1091 u32 resized_width; 1092 u32 resize_coeff_h; 1093 1094 tile_idx = col; 1095 in_tile = &ctx->in.tile[tile_idx]; 1096 out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]]; 1097 1098 if (ipu_rot_mode_is_irt(ctx->rot_mode)) 1099 resized_width = out_tile->height; 1100 else 1101 resized_width = out_tile->width; 1102 1103 resize_coeff_h = calc_resize_coeff(in_tile->width, 1104 ctx->downsize_coeff_h, 1105 resized_width, closest); 1106 1107 dev_dbg(priv->ipu->dev, "%s: column %u hscale: *8192/%u\n", 1108 __func__, col, resize_coeff_h); 1109 1110 1111 for (row = 0; row < ctx->in.num_rows; row++) { 1112 tile_idx = row * ctx->in.num_cols + col; 1113 in_tile = &ctx->in.tile[tile_idx]; 1114 out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]]; 1115 1116 /* 1117 * With the horizontal scaling factor known, round up 1118 * resized width (output width or height) to burst size. 1119 */ 1120 if (ipu_rot_mode_is_irt(ctx->rot_mode)) 1121 out_tile->height = round_up(resized_width, 8); 1122 else 1123 out_tile->width = round_up(resized_width, 8); 1124 1125 /* 1126 * Calculate input width from the last accessed input 1127 * pixel given resized width and scaling coefficients. 1128 * Round up to burst size. 
1129 */ 1130 last_output = round_up(resized_width, 8) - 1; 1131 if (closest) 1132 last_output++; 1133 in_tile->width = round_up( 1134 (DIV_ROUND_UP(last_output * resize_coeff_h, 1135 8192) + 1) 1136 << ctx->downsize_coeff_h, 8); 1137 } 1138 1139 ctx->resize_coeffs_h[col] = resize_coeff_h; 1140 } 1141 1142 for (row = 0; row < ctx->in.num_rows; row++) { 1143 bool closest = (row < ctx->in.num_rows - 1) && 1144 !(ctx->rot_mode & IPU_ROT_BIT_VFLIP); 1145 u32 resized_height; 1146 u32 resize_coeff_v; 1147 1148 tile_idx = row * ctx->in.num_cols; 1149 in_tile = &ctx->in.tile[tile_idx]; 1150 out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]]; 1151 1152 if (ipu_rot_mode_is_irt(ctx->rot_mode)) 1153 resized_height = out_tile->width; 1154 else 1155 resized_height = out_tile->height; 1156 1157 resize_coeff_v = calc_resize_coeff(in_tile->height, 1158 ctx->downsize_coeff_v, 1159 resized_height, closest); 1160 1161 dev_dbg(priv->ipu->dev, "%s: row %u vscale: *8192/%u\n", 1162 __func__, row, resize_coeff_v); 1163 1164 for (col = 0; col < ctx->in.num_cols; col++) { 1165 tile_idx = row * ctx->in.num_cols + col; 1166 in_tile = &ctx->in.tile[tile_idx]; 1167 out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]]; 1168 1169 /* 1170 * With the vertical scaling factor known, round up 1171 * resized height (output width or height) to IDMAC 1172 * limitations. 1173 */ 1174 if (ipu_rot_mode_is_irt(ctx->rot_mode)) 1175 out_tile->width = round_up(resized_height, 2); 1176 else 1177 out_tile->height = round_up(resized_height, 2); 1178 1179 /* 1180 * Calculate input width from the last accessed input 1181 * pixel given resized height and scaling coefficients. 1182 * Align to IDMAC restrictions. 
1183 */ 1184 last_output = round_up(resized_height, 2) - 1; 1185 if (closest) 1186 last_output++; 1187 in_tile->height = round_up( 1188 (DIV_ROUND_UP(last_output * resize_coeff_v, 1189 8192) + 1) 1190 << ctx->downsize_coeff_v, 2); 1191 } 1192 1193 ctx->resize_coeffs_v[row] = resize_coeff_v; 1194 } 1195 } 1196 1197 /* 1198 * return the number of runs in given queue (pending_q or done_q) 1199 * for this context. hold irqlock when calling. 1200 */ 1201 static int get_run_count(struct ipu_image_convert_ctx *ctx, 1202 struct list_head *q) 1203 { 1204 struct ipu_image_convert_run *run; 1205 int count = 0; 1206 1207 lockdep_assert_held(&ctx->chan->irqlock); 1208 1209 list_for_each_entry(run, q, list) { 1210 if (run->ctx == ctx) 1211 count++; 1212 } 1213 1214 return count; 1215 } 1216 1217 static void convert_stop(struct ipu_image_convert_run *run) 1218 { 1219 struct ipu_image_convert_ctx *ctx = run->ctx; 1220 struct ipu_image_convert_chan *chan = ctx->chan; 1221 struct ipu_image_convert_priv *priv = chan->priv; 1222 1223 dev_dbg(priv->ipu->dev, "%s: task %u: stopping ctx %p run %p\n", 1224 __func__, chan->ic_task, ctx, run); 1225 1226 /* disable IC tasks and the channels */ 1227 ipu_ic_task_disable(chan->ic); 1228 ipu_idmac_disable_channel(chan->in_chan); 1229 ipu_idmac_disable_channel(chan->out_chan); 1230 1231 if (ipu_rot_mode_is_irt(ctx->rot_mode)) { 1232 ipu_idmac_disable_channel(chan->rotation_in_chan); 1233 ipu_idmac_disable_channel(chan->rotation_out_chan); 1234 ipu_idmac_unlink(chan->out_chan, chan->rotation_in_chan); 1235 } 1236 1237 ipu_ic_disable(chan->ic); 1238 } 1239 1240 static void init_idmac_channel(struct ipu_image_convert_ctx *ctx, 1241 struct ipuv3_channel *channel, 1242 struct ipu_image_convert_image *image, 1243 enum ipu_rotate_mode rot_mode, 1244 bool rot_swap_width_height, 1245 unsigned int tile) 1246 { 1247 struct ipu_image_convert_chan *chan = ctx->chan; 1248 unsigned int burst_size; 1249 u32 width, height, stride; 1250 dma_addr_t addr0, addr1 = 
0; 1251 struct ipu_image tile_image; 1252 unsigned int tile_idx[2]; 1253 1254 if (image->type == IMAGE_CONVERT_OUT) { 1255 tile_idx[0] = ctx->out_tile_map[tile]; 1256 tile_idx[1] = ctx->out_tile_map[1]; 1257 } else { 1258 tile_idx[0] = tile; 1259 tile_idx[1] = 1; 1260 } 1261 1262 if (rot_swap_width_height) { 1263 width = image->tile[tile_idx[0]].height; 1264 height = image->tile[tile_idx[0]].width; 1265 stride = image->tile[tile_idx[0]].rot_stride; 1266 addr0 = ctx->rot_intermediate[0].phys; 1267 if (ctx->double_buffering) 1268 addr1 = ctx->rot_intermediate[1].phys; 1269 } else { 1270 width = image->tile[tile_idx[0]].width; 1271 height = image->tile[tile_idx[0]].height; 1272 stride = image->stride; 1273 addr0 = image->base.phys0 + 1274 image->tile[tile_idx[0]].offset; 1275 if (ctx->double_buffering) 1276 addr1 = image->base.phys0 + 1277 image->tile[tile_idx[1]].offset; 1278 } 1279 1280 ipu_cpmem_zero(channel); 1281 1282 memset(&tile_image, 0, sizeof(tile_image)); 1283 tile_image.pix.width = tile_image.rect.width = width; 1284 tile_image.pix.height = tile_image.rect.height = height; 1285 tile_image.pix.bytesperline = stride; 1286 tile_image.pix.pixelformat = image->fmt->fourcc; 1287 tile_image.phys0 = addr0; 1288 tile_image.phys1 = addr1; 1289 if (image->fmt->planar && !rot_swap_width_height) { 1290 tile_image.u_offset = image->tile[tile_idx[0]].u_off; 1291 tile_image.v_offset = image->tile[tile_idx[0]].v_off; 1292 } 1293 1294 ipu_cpmem_set_image(channel, &tile_image); 1295 1296 if (rot_mode) 1297 ipu_cpmem_set_rotation(channel, rot_mode); 1298 1299 /* 1300 * Skip writing U and V components to odd rows in the output 1301 * channels for planar 4:2:0. 
1302 */ 1303 if ((channel == chan->out_chan || 1304 channel == chan->rotation_out_chan) && 1305 image->fmt->planar && image->fmt->uv_height_dec == 2) 1306 ipu_cpmem_skip_odd_chroma_rows(channel); 1307 1308 if (channel == chan->rotation_in_chan || 1309 channel == chan->rotation_out_chan) { 1310 burst_size = 8; 1311 ipu_cpmem_set_block_mode(channel); 1312 } else 1313 burst_size = (width % 16) ? 8 : 16; 1314 1315 ipu_cpmem_set_burstsize(channel, burst_size); 1316 1317 ipu_ic_task_idma_init(chan->ic, channel, width, height, 1318 burst_size, rot_mode); 1319 1320 /* 1321 * Setting a non-zero AXI ID collides with the PRG AXI snooping, so 1322 * only do this when there is no PRG present. 1323 */ 1324 if (!channel->ipu->prg_priv) 1325 ipu_cpmem_set_axi_id(channel, 1); 1326 1327 ipu_idmac_set_double_buffer(channel, ctx->double_buffering); 1328 } 1329 1330 static int convert_start(struct ipu_image_convert_run *run, unsigned int tile) 1331 { 1332 struct ipu_image_convert_ctx *ctx = run->ctx; 1333 struct ipu_image_convert_chan *chan = ctx->chan; 1334 struct ipu_image_convert_priv *priv = chan->priv; 1335 struct ipu_image_convert_image *s_image = &ctx->in; 1336 struct ipu_image_convert_image *d_image = &ctx->out; 1337 unsigned int dst_tile = ctx->out_tile_map[tile]; 1338 unsigned int dest_width, dest_height; 1339 unsigned int col, row; 1340 u32 rsc; 1341 int ret; 1342 1343 dev_dbg(priv->ipu->dev, "%s: task %u: starting ctx %p run %p tile %u -> %u\n", 1344 __func__, chan->ic_task, ctx, run, tile, dst_tile); 1345 1346 if (ipu_rot_mode_is_irt(ctx->rot_mode)) { 1347 /* swap width/height for resizer */ 1348 dest_width = d_image->tile[dst_tile].height; 1349 dest_height = d_image->tile[dst_tile].width; 1350 } else { 1351 dest_width = d_image->tile[dst_tile].width; 1352 dest_height = d_image->tile[dst_tile].height; 1353 } 1354 1355 row = tile / s_image->num_cols; 1356 col = tile % s_image->num_cols; 1357 1358 rsc = (ctx->downsize_coeff_v << 30) | 1359 (ctx->resize_coeffs_v[row] << 16) | 
1360 (ctx->downsize_coeff_h << 14) | 1361 (ctx->resize_coeffs_h[col]); 1362 1363 dev_dbg(priv->ipu->dev, "%s: %ux%u -> %ux%u (rsc = 0x%x)\n", 1364 __func__, s_image->tile[tile].width, 1365 s_image->tile[tile].height, dest_width, dest_height, rsc); 1366 1367 /* setup the IC resizer and CSC */ 1368 ret = ipu_ic_task_init_rsc(chan->ic, &ctx->csc, 1369 s_image->tile[tile].width, 1370 s_image->tile[tile].height, 1371 dest_width, 1372 dest_height, 1373 rsc); 1374 if (ret) { 1375 dev_err(priv->ipu->dev, "ipu_ic_task_init failed, %d\n", ret); 1376 return ret; 1377 } 1378 1379 /* init the source MEM-->IC PP IDMAC channel */ 1380 init_idmac_channel(ctx, chan->in_chan, s_image, 1381 IPU_ROTATE_NONE, false, tile); 1382 1383 if (ipu_rot_mode_is_irt(ctx->rot_mode)) { 1384 /* init the IC PP-->MEM IDMAC channel */ 1385 init_idmac_channel(ctx, chan->out_chan, d_image, 1386 IPU_ROTATE_NONE, true, tile); 1387 1388 /* init the MEM-->IC PP ROT IDMAC channel */ 1389 init_idmac_channel(ctx, chan->rotation_in_chan, d_image, 1390 ctx->rot_mode, true, tile); 1391 1392 /* init the destination IC PP ROT-->MEM IDMAC channel */ 1393 init_idmac_channel(ctx, chan->rotation_out_chan, d_image, 1394 IPU_ROTATE_NONE, false, tile); 1395 1396 /* now link IC PP-->MEM to MEM-->IC PP ROT */ 1397 ipu_idmac_link(chan->out_chan, chan->rotation_in_chan); 1398 } else { 1399 /* init the destination IC PP-->MEM IDMAC channel */ 1400 init_idmac_channel(ctx, chan->out_chan, d_image, 1401 ctx->rot_mode, false, tile); 1402 } 1403 1404 /* enable the IC */ 1405 ipu_ic_enable(chan->ic); 1406 1407 /* set buffers ready */ 1408 ipu_idmac_select_buffer(chan->in_chan, 0); 1409 ipu_idmac_select_buffer(chan->out_chan, 0); 1410 if (ipu_rot_mode_is_irt(ctx->rot_mode)) 1411 ipu_idmac_select_buffer(chan->rotation_out_chan, 0); 1412 if (ctx->double_buffering) { 1413 ipu_idmac_select_buffer(chan->in_chan, 1); 1414 ipu_idmac_select_buffer(chan->out_chan, 1); 1415 if (ipu_rot_mode_is_irt(ctx->rot_mode)) 1416 
ipu_idmac_select_buffer(chan->rotation_out_chan, 1); 1417 } 1418 1419 /* enable the channels! */ 1420 ipu_idmac_enable_channel(chan->in_chan); 1421 ipu_idmac_enable_channel(chan->out_chan); 1422 if (ipu_rot_mode_is_irt(ctx->rot_mode)) { 1423 ipu_idmac_enable_channel(chan->rotation_in_chan); 1424 ipu_idmac_enable_channel(chan->rotation_out_chan); 1425 } 1426 1427 ipu_ic_task_enable(chan->ic); 1428 1429 ipu_cpmem_dump(chan->in_chan); 1430 ipu_cpmem_dump(chan->out_chan); 1431 if (ipu_rot_mode_is_irt(ctx->rot_mode)) { 1432 ipu_cpmem_dump(chan->rotation_in_chan); 1433 ipu_cpmem_dump(chan->rotation_out_chan); 1434 } 1435 1436 ipu_dump(priv->ipu); 1437 1438 return 0; 1439 } 1440 1441 /* hold irqlock when calling */ 1442 static int do_run(struct ipu_image_convert_run *run) 1443 { 1444 struct ipu_image_convert_ctx *ctx = run->ctx; 1445 struct ipu_image_convert_chan *chan = ctx->chan; 1446 1447 lockdep_assert_held(&chan->irqlock); 1448 1449 ctx->in.base.phys0 = run->in_phys; 1450 ctx->out.base.phys0 = run->out_phys; 1451 1452 ctx->cur_buf_num = 0; 1453 ctx->next_tile = 1; 1454 1455 /* remove run from pending_q and set as current */ 1456 list_del(&run->list); 1457 chan->current_run = run; 1458 1459 return convert_start(run, 0); 1460 } 1461 1462 /* hold irqlock when calling */ 1463 static void run_next(struct ipu_image_convert_chan *chan) 1464 { 1465 struct ipu_image_convert_priv *priv = chan->priv; 1466 struct ipu_image_convert_run *run, *tmp; 1467 int ret; 1468 1469 lockdep_assert_held(&chan->irqlock); 1470 1471 list_for_each_entry_safe(run, tmp, &chan->pending_q, list) { 1472 /* skip contexts that are aborting */ 1473 if (run->ctx->aborting) { 1474 dev_dbg(priv->ipu->dev, 1475 "%s: task %u: skipping aborting ctx %p run %p\n", 1476 __func__, chan->ic_task, run->ctx, run); 1477 continue; 1478 } 1479 1480 ret = do_run(run); 1481 if (!ret) 1482 break; 1483 1484 /* 1485 * something went wrong with start, add the run 1486 * to done q and continue to the next run in the 1487 * 
pending q. 1488 */ 1489 run->status = ret; 1490 list_add_tail(&run->list, &chan->done_q); 1491 chan->current_run = NULL; 1492 } 1493 } 1494 1495 static void empty_done_q(struct ipu_image_convert_chan *chan) 1496 { 1497 struct ipu_image_convert_priv *priv = chan->priv; 1498 struct ipu_image_convert_run *run; 1499 unsigned long flags; 1500 1501 spin_lock_irqsave(&chan->irqlock, flags); 1502 1503 while (!list_empty(&chan->done_q)) { 1504 run = list_entry(chan->done_q.next, 1505 struct ipu_image_convert_run, 1506 list); 1507 1508 list_del(&run->list); 1509 1510 dev_dbg(priv->ipu->dev, 1511 "%s: task %u: completing ctx %p run %p with %d\n", 1512 __func__, chan->ic_task, run->ctx, run, run->status); 1513 1514 /* call the completion callback and free the run */ 1515 spin_unlock_irqrestore(&chan->irqlock, flags); 1516 run->ctx->complete(run, run->ctx->complete_context); 1517 spin_lock_irqsave(&chan->irqlock, flags); 1518 } 1519 1520 spin_unlock_irqrestore(&chan->irqlock, flags); 1521 } 1522 1523 /* 1524 * the bottom half thread clears out the done_q, calling the 1525 * completion handler for each. 1526 */ 1527 static irqreturn_t do_bh(int irq, void *dev_id) 1528 { 1529 struct ipu_image_convert_chan *chan = dev_id; 1530 struct ipu_image_convert_priv *priv = chan->priv; 1531 struct ipu_image_convert_ctx *ctx; 1532 unsigned long flags; 1533 1534 dev_dbg(priv->ipu->dev, "%s: task %u: enter\n", __func__, 1535 chan->ic_task); 1536 1537 empty_done_q(chan); 1538 1539 spin_lock_irqsave(&chan->irqlock, flags); 1540 1541 /* 1542 * the done_q is cleared out, signal any contexts 1543 * that are aborting that abort can complete. 
1544 */ 1545 list_for_each_entry(ctx, &chan->ctx_list, list) { 1546 if (ctx->aborting) { 1547 dev_dbg(priv->ipu->dev, 1548 "%s: task %u: signaling abort for ctx %p\n", 1549 __func__, chan->ic_task, ctx); 1550 complete_all(&ctx->aborted); 1551 } 1552 } 1553 1554 spin_unlock_irqrestore(&chan->irqlock, flags); 1555 1556 dev_dbg(priv->ipu->dev, "%s: task %u: exit\n", __func__, 1557 chan->ic_task); 1558 1559 return IRQ_HANDLED; 1560 } 1561 1562 static bool ic_settings_changed(struct ipu_image_convert_ctx *ctx) 1563 { 1564 unsigned int cur_tile = ctx->next_tile - 1; 1565 unsigned int next_tile = ctx->next_tile; 1566 1567 if (ctx->resize_coeffs_h[cur_tile % ctx->in.num_cols] != 1568 ctx->resize_coeffs_h[next_tile % ctx->in.num_cols] || 1569 ctx->resize_coeffs_v[cur_tile / ctx->in.num_cols] != 1570 ctx->resize_coeffs_v[next_tile / ctx->in.num_cols] || 1571 ctx->in.tile[cur_tile].width != ctx->in.tile[next_tile].width || 1572 ctx->in.tile[cur_tile].height != ctx->in.tile[next_tile].height || 1573 ctx->out.tile[cur_tile].width != ctx->out.tile[next_tile].width || 1574 ctx->out.tile[cur_tile].height != ctx->out.tile[next_tile].height) 1575 return true; 1576 1577 return false; 1578 } 1579 1580 /* hold irqlock when calling */ 1581 static irqreturn_t do_irq(struct ipu_image_convert_run *run) 1582 { 1583 struct ipu_image_convert_ctx *ctx = run->ctx; 1584 struct ipu_image_convert_chan *chan = ctx->chan; 1585 struct ipu_image_tile *src_tile, *dst_tile; 1586 struct ipu_image_convert_image *s_image = &ctx->in; 1587 struct ipu_image_convert_image *d_image = &ctx->out; 1588 struct ipuv3_channel *outch; 1589 unsigned int dst_idx; 1590 1591 lockdep_assert_held(&chan->irqlock); 1592 1593 outch = ipu_rot_mode_is_irt(ctx->rot_mode) ? 1594 chan->rotation_out_chan : chan->out_chan; 1595 1596 /* 1597 * It is difficult to stop the channel DMA before the channels 1598 * enter the paused state. 
Without double-buffering the channels 1599 * are always in a paused state when the EOF irq occurs, so it 1600 * is safe to stop the channels now. For double-buffering we 1601 * just ignore the abort until the operation completes, when it 1602 * is safe to shut down. 1603 */ 1604 if (ctx->aborting && !ctx->double_buffering) { 1605 convert_stop(run); 1606 run->status = -EIO; 1607 goto done; 1608 } 1609 1610 if (ctx->next_tile == ctx->num_tiles) { 1611 /* 1612 * the conversion is complete 1613 */ 1614 convert_stop(run); 1615 run->status = 0; 1616 goto done; 1617 } 1618 1619 /* 1620 * not done, place the next tile buffers. 1621 */ 1622 if (!ctx->double_buffering) { 1623 if (ic_settings_changed(ctx)) { 1624 convert_stop(run); 1625 convert_start(run, ctx->next_tile); 1626 } else { 1627 src_tile = &s_image->tile[ctx->next_tile]; 1628 dst_idx = ctx->out_tile_map[ctx->next_tile]; 1629 dst_tile = &d_image->tile[dst_idx]; 1630 1631 ipu_cpmem_set_buffer(chan->in_chan, 0, 1632 s_image->base.phys0 + 1633 src_tile->offset); 1634 ipu_cpmem_set_buffer(outch, 0, 1635 d_image->base.phys0 + 1636 dst_tile->offset); 1637 if (s_image->fmt->planar) 1638 ipu_cpmem_set_uv_offset(chan->in_chan, 1639 src_tile->u_off, 1640 src_tile->v_off); 1641 if (d_image->fmt->planar) 1642 ipu_cpmem_set_uv_offset(outch, 1643 dst_tile->u_off, 1644 dst_tile->v_off); 1645 1646 ipu_idmac_select_buffer(chan->in_chan, 0); 1647 ipu_idmac_select_buffer(outch, 0); 1648 } 1649 } else if (ctx->next_tile < ctx->num_tiles - 1) { 1650 1651 src_tile = &s_image->tile[ctx->next_tile + 1]; 1652 dst_idx = ctx->out_tile_map[ctx->next_tile + 1]; 1653 dst_tile = &d_image->tile[dst_idx]; 1654 1655 ipu_cpmem_set_buffer(chan->in_chan, ctx->cur_buf_num, 1656 s_image->base.phys0 + src_tile->offset); 1657 ipu_cpmem_set_buffer(outch, ctx->cur_buf_num, 1658 d_image->base.phys0 + dst_tile->offset); 1659 1660 ipu_idmac_select_buffer(chan->in_chan, ctx->cur_buf_num); 1661 ipu_idmac_select_buffer(outch, ctx->cur_buf_num); 1662 1663 
ctx->cur_buf_num ^= 1; 1664 } 1665 1666 ctx->next_tile++; 1667 return IRQ_HANDLED; 1668 done: 1669 list_add_tail(&run->list, &chan->done_q); 1670 chan->current_run = NULL; 1671 run_next(chan); 1672 return IRQ_WAKE_THREAD; 1673 } 1674 1675 static irqreturn_t norotate_irq(int irq, void *data) 1676 { 1677 struct ipu_image_convert_chan *chan = data; 1678 struct ipu_image_convert_ctx *ctx; 1679 struct ipu_image_convert_run *run; 1680 unsigned long flags; 1681 irqreturn_t ret; 1682 1683 spin_lock_irqsave(&chan->irqlock, flags); 1684 1685 /* get current run and its context */ 1686 run = chan->current_run; 1687 if (!run) { 1688 ret = IRQ_NONE; 1689 goto out; 1690 } 1691 1692 ctx = run->ctx; 1693 1694 if (ipu_rot_mode_is_irt(ctx->rot_mode)) { 1695 /* this is a rotation operation, just ignore */ 1696 spin_unlock_irqrestore(&chan->irqlock, flags); 1697 return IRQ_HANDLED; 1698 } 1699 1700 ret = do_irq(run); 1701 out: 1702 spin_unlock_irqrestore(&chan->irqlock, flags); 1703 return ret; 1704 } 1705 1706 static irqreturn_t rotate_irq(int irq, void *data) 1707 { 1708 struct ipu_image_convert_chan *chan = data; 1709 struct ipu_image_convert_priv *priv = chan->priv; 1710 struct ipu_image_convert_ctx *ctx; 1711 struct ipu_image_convert_run *run; 1712 unsigned long flags; 1713 irqreturn_t ret; 1714 1715 spin_lock_irqsave(&chan->irqlock, flags); 1716 1717 /* get current run and its context */ 1718 run = chan->current_run; 1719 if (!run) { 1720 ret = IRQ_NONE; 1721 goto out; 1722 } 1723 1724 ctx = run->ctx; 1725 1726 if (!ipu_rot_mode_is_irt(ctx->rot_mode)) { 1727 /* this was NOT a rotation operation, shouldn't happen */ 1728 dev_err(priv->ipu->dev, "Unexpected rotation interrupt\n"); 1729 spin_unlock_irqrestore(&chan->irqlock, flags); 1730 return IRQ_HANDLED; 1731 } 1732 1733 ret = do_irq(run); 1734 out: 1735 spin_unlock_irqrestore(&chan->irqlock, flags); 1736 return ret; 1737 } 1738 1739 /* 1740 * try to force the completion of runs for this ctx. 
 * Called when
 * abort wait times out in ipu_image_convert_abort().
 */
static void force_abort(struct ipu_image_convert_ctx *ctx)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_run *run;
	unsigned long flags;

	spin_lock_irqsave(&chan->irqlock, flags);

	run = chan->current_run;
	/* only touch the hardware if the active run belongs to this ctx */
	if (run && run->ctx == ctx) {
		convert_stop(run);
		run->status = -EIO;
		list_add_tail(&run->list, &chan->done_q);
		chan->current_run = NULL;
		run_next(chan);
	}

	spin_unlock_irqrestore(&chan->irqlock, flags);

	empty_done_q(chan);
}

/* Release irqs, IDMAC channels and the IC; safe on partial acquisition */
static void release_ipu_resources(struct ipu_image_convert_chan *chan)
{
	if (chan->out_eof_irq >= 0)
		free_irq(chan->out_eof_irq, chan);
	if (chan->rot_out_eof_irq >= 0)
		free_irq(chan->rot_out_eof_irq, chan);

	if (!IS_ERR_OR_NULL(chan->in_chan))
		ipu_idmac_put(chan->in_chan);
	if (!IS_ERR_OR_NULL(chan->out_chan))
		ipu_idmac_put(chan->out_chan);
	if (!IS_ERR_OR_NULL(chan->rotation_in_chan))
		ipu_idmac_put(chan->rotation_in_chan);
	if (!IS_ERR_OR_NULL(chan->rotation_out_chan))
		ipu_idmac_put(chan->rotation_out_chan);
	if (!IS_ERR_OR_NULL(chan->ic))
		ipu_ic_put(chan->ic);

	chan->in_chan = chan->out_chan = chan->rotation_in_chan =
		chan->rotation_out_chan = NULL;
	chan->out_eof_irq = chan->rot_out_eof_irq = -1;
}

/*
 * Acquire the IC, the four IDMAC channels and the two EOF irqs for
 * this conversion channel. On any failure everything acquired so far
 * is released again.
 */
static int get_ipu_resources(struct ipu_image_convert_chan *chan)
{
	const struct ipu_image_convert_dma_chan *dma = chan->dma_ch;
	struct ipu_image_convert_priv *priv = chan->priv;
	int ret;

	/* get IC */
	chan->ic = ipu_ic_get(priv->ipu, chan->ic_task);
	if (IS_ERR(chan->ic)) {
		dev_err(priv->ipu->dev, "could not acquire IC\n");
		ret = PTR_ERR(chan->ic);
		goto err;
	}

	/* get IDMAC channels */
	chan->in_chan = ipu_idmac_get(priv->ipu, dma->in);
	chan->out_chan = ipu_idmac_get(priv->ipu, dma->out);
	if (IS_ERR(chan->in_chan) || IS_ERR(chan->out_chan)) {
		dev_err(priv->ipu->dev, "could not acquire idmac channels\n");
		ret = -EBUSY;
		goto err;
	}

	chan->rotation_in_chan = ipu_idmac_get(priv->ipu, dma->rot_in);
	chan->rotation_out_chan = ipu_idmac_get(priv->ipu, dma->rot_out);
	if (IS_ERR(chan->rotation_in_chan) || IS_ERR(chan->rotation_out_chan)) {
		dev_err(priv->ipu->dev,
			"could not acquire idmac rotation channels\n");
		ret = -EBUSY;
		goto err;
	}

	/* acquire the EOF interrupts */
	chan->out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
						  chan->out_chan,
						  IPU_IRQ_EOF);

	/* hard irq places buffers, threaded handler drains done_q */
	ret = request_threaded_irq(chan->out_eof_irq, norotate_irq, do_bh,
				   0, "ipu-ic", chan);
	if (ret < 0) {
		dev_err(priv->ipu->dev, "could not acquire irq %d\n",
			chan->out_eof_irq);
		chan->out_eof_irq = -1;
		goto err;
	}

	chan->rot_out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
						      chan->rotation_out_chan,
						      IPU_IRQ_EOF);

	ret = request_threaded_irq(chan->rot_out_eof_irq, rotate_irq, do_bh,
				   0, "ipu-ic", chan);
	if (ret < 0) {
		dev_err(priv->ipu->dev, "could not acquire irq %d\n",
			chan->rot_out_eof_irq);
		chan->rot_out_eof_irq = -1;
		goto err;
	}

	return 0;
err:
	release_ipu_resources(chan);
	return ret;
}

/*
 * Copy the user-supplied ipu_image into the internal image descriptor
 * and resolve its pixel format. For planar formats the stride is kept
 * in pixels (the luma plane width), for packed formats in bytes.
 */
static int fill_image(struct ipu_image_convert_ctx *ctx,
		      struct ipu_image_convert_image *ic_image,
		      struct ipu_image *image,
		      enum ipu_image_convert_type type)
{
	struct ipu_image_convert_priv *priv = ctx->chan->priv;

	ic_image->base = *image;
	ic_image->type = type;

	ic_image->fmt = get_format(image->pix.pixelformat);
	if (!ic_image->fmt) {
		dev_err(priv->ipu->dev, "pixelformat not supported for %s\n",
			type == IMAGE_CONVERT_OUT ? "Output" : "Input");
		return -EINVAL;
	}

	if (ic_image->fmt->planar)
		ic_image->stride = ic_image->base.pix.width;
	else
		ic_image->stride  = ic_image->base.pix.bytesperline;

	return 0;
}

/* borrowed from drivers/media/v4l2-core/v4l2-common.c */
static unsigned int clamp_align(unsigned int x, unsigned int min,
				unsigned int max, unsigned int align)
{
	/* Bits that must be zero to be aligned */
	unsigned int mask = ~((1 << align) - 1);

	/* Clamp to aligned min and max */
	x = clamp(x, (min + ~mask) & mask, max & mask);

	/* Round to nearest aligned value */
	if (align)
		x = (x + (1 << (align - 1))) & mask;

	return x;
}

/* Adjusts input/output images to IPU restrictions */
void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out,
			      enum ipu_rotate_mode rot_mode)
{
	const struct ipu_image_pixfmt *infmt, *outfmt;
	u32 w_align_out, h_align_out;
	u32 w_align_in, h_align_in;

	infmt = get_format(in->pix.pixelformat);
	outfmt = get_format(out->pix.pixelformat);

	/* set some default pixel formats if needed */
	if (!infmt) {
		in->pix.pixelformat = V4L2_PIX_FMT_RGB24;
		infmt = get_format(V4L2_PIX_FMT_RGB24);
	}
	if (!outfmt) {
		out->pix.pixelformat = V4L2_PIX_FMT_RGB24;
		outfmt = get_format(V4L2_PIX_FMT_RGB24);
	}

	/* image converter does not handle fields */
	in->pix.field = out->pix.field = V4L2_FIELD_NONE;

	/* resizer cannot downsize more than 4:1 */
	if (ipu_rot_mode_is_irt(rot_mode)) {
		/* output is transposed relative to the input here */
		out->pix.height = max_t(__u32, out->pix.height,
					in->pix.width / 4);
		out->pix.width = max_t(__u32, out->pix.width,
				       in->pix.height / 4);
	} else {
		out->pix.width = max_t(__u32, out->pix.width,
				       in->pix.width / 4);
		out->pix.height = max_t(__u32, out->pix.height,
					in->pix.height / 4);
	}

	/* align input width/height */
	w_align_in = ilog2(tile_width_align(IMAGE_CONVERT_IN, infmt,
					    rot_mode));
	h_align_in = ilog2(tile_height_align(IMAGE_CONVERT_IN, infmt,
					     rot_mode));
	in->pix.width = clamp_align(in->pix.width, MIN_W, MAX_W,
				    w_align_in);
	in->pix.height = clamp_align(in->pix.height, MIN_H, MAX_H,
				     h_align_in);

	/* align output width/height */
	w_align_out = ilog2(tile_width_align(IMAGE_CONVERT_OUT, outfmt,
					     rot_mode));
	h_align_out = ilog2(tile_height_align(IMAGE_CONVERT_OUT, outfmt,
					      rot_mode));
	out->pix.width = clamp_align(out->pix.width, MIN_W, MAX_W,
				     w_align_out);
	out->pix.height = clamp_align(out->pix.height, MIN_H, MAX_H,
				      h_align_out);

	/* set input/output strides and image sizes */
	in->pix.bytesperline = infmt->planar ?
		clamp_align(in->pix.width, 2 << w_align_in, MAX_W,
			    w_align_in) :
		clamp_align((in->pix.width * infmt->bpp) >> 3,
			    ((2 << w_align_in) * infmt->bpp) >> 3,
			    (MAX_W * infmt->bpp) >> 3,
			    w_align_in);
	in->pix.sizeimage = infmt->planar ?
		(in->pix.height * in->pix.bytesperline * infmt->bpp) >> 3 :
		in->pix.height * in->pix.bytesperline;
	out->pix.bytesperline = outfmt->planar ? out->pix.width :
		(out->pix.width * outfmt->bpp) >> 3;
	out->pix.sizeimage = outfmt->planar ?
		(out->pix.height * out->pix.bytesperline * outfmt->bpp) >> 3 :
		out->pix.height * out->pix.bytesperline;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_adjust);

/*
 * this is used by ipu_image_convert_prepare() to verify set input and
 * output images are valid before starting the conversion. Clients can
 * also call it before calling ipu_image_convert_prepare().
1975 */ 1976 int ipu_image_convert_verify(struct ipu_image *in, struct ipu_image *out, 1977 enum ipu_rotate_mode rot_mode) 1978 { 1979 struct ipu_image testin, testout; 1980 1981 testin = *in; 1982 testout = *out; 1983 1984 ipu_image_convert_adjust(&testin, &testout, rot_mode); 1985 1986 if (testin.pix.width != in->pix.width || 1987 testin.pix.height != in->pix.height || 1988 testout.pix.width != out->pix.width || 1989 testout.pix.height != out->pix.height) 1990 return -EINVAL; 1991 1992 return 0; 1993 } 1994 EXPORT_SYMBOL_GPL(ipu_image_convert_verify); 1995 1996 /* 1997 * Call ipu_image_convert_prepare() to prepare for the conversion of 1998 * given images and rotation mode. Returns a new conversion context. 1999 */ 2000 struct ipu_image_convert_ctx * 2001 ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task, 2002 struct ipu_image *in, struct ipu_image *out, 2003 enum ipu_rotate_mode rot_mode, 2004 ipu_image_convert_cb_t complete, 2005 void *complete_context) 2006 { 2007 struct ipu_image_convert_priv *priv = ipu->image_convert_priv; 2008 struct ipu_image_convert_image *s_image, *d_image; 2009 struct ipu_image_convert_chan *chan; 2010 struct ipu_image_convert_ctx *ctx; 2011 unsigned long flags; 2012 unsigned int i; 2013 bool get_res; 2014 int ret; 2015 2016 if (!in || !out || !complete || 2017 (ic_task != IC_TASK_VIEWFINDER && 2018 ic_task != IC_TASK_POST_PROCESSOR)) 2019 return ERR_PTR(-EINVAL); 2020 2021 /* verify the in/out images before continuing */ 2022 ret = ipu_image_convert_verify(in, out, rot_mode); 2023 if (ret) { 2024 dev_err(priv->ipu->dev, "%s: in/out formats invalid\n", 2025 __func__); 2026 return ERR_PTR(ret); 2027 } 2028 2029 chan = &priv->chan[ic_task]; 2030 2031 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 2032 if (!ctx) 2033 return ERR_PTR(-ENOMEM); 2034 2035 dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p\n", __func__, 2036 chan->ic_task, ctx); 2037 2038 ctx->chan = chan; 2039 init_completion(&ctx->aborted); 2040 2041 s_image = 
&ctx->in; 2042 d_image = &ctx->out; 2043 2044 /* set tiling and rotation */ 2045 d_image->num_rows = num_stripes(out->pix.height); 2046 d_image->num_cols = num_stripes(out->pix.width); 2047 if (ipu_rot_mode_is_irt(rot_mode)) { 2048 s_image->num_rows = d_image->num_cols; 2049 s_image->num_cols = d_image->num_rows; 2050 } else { 2051 s_image->num_rows = d_image->num_rows; 2052 s_image->num_cols = d_image->num_cols; 2053 } 2054 2055 ctx->num_tiles = d_image->num_cols * d_image->num_rows; 2056 ctx->rot_mode = rot_mode; 2057 2058 ret = fill_image(ctx, s_image, in, IMAGE_CONVERT_IN); 2059 if (ret) 2060 goto out_free; 2061 ret = fill_image(ctx, d_image, out, IMAGE_CONVERT_OUT); 2062 if (ret) 2063 goto out_free; 2064 2065 ret = calc_image_resize_coefficients(ctx, in, out); 2066 if (ret) 2067 goto out_free; 2068 2069 calc_out_tile_map(ctx); 2070 2071 find_seams(ctx, s_image, d_image); 2072 2073 calc_tile_dimensions(ctx, s_image); 2074 ret = calc_tile_offsets(ctx, s_image); 2075 if (ret) 2076 goto out_free; 2077 2078 calc_tile_dimensions(ctx, d_image); 2079 ret = calc_tile_offsets(ctx, d_image); 2080 if (ret) 2081 goto out_free; 2082 2083 calc_tile_resize_coefficients(ctx); 2084 2085 ret = ipu_ic_calc_csc(&ctx->csc, 2086 s_image->base.pix.ycbcr_enc, 2087 s_image->base.pix.quantization, 2088 ipu_pixelformat_to_colorspace(s_image->fmt->fourcc), 2089 d_image->base.pix.ycbcr_enc, 2090 d_image->base.pix.quantization, 2091 ipu_pixelformat_to_colorspace(d_image->fmt->fourcc)); 2092 if (ret) 2093 goto out_free; 2094 2095 dump_format(ctx, s_image); 2096 dump_format(ctx, d_image); 2097 2098 ctx->complete = complete; 2099 ctx->complete_context = complete_context; 2100 2101 /* 2102 * Can we use double-buffering for this operation? If there is 2103 * only one tile (the whole image can be converted in a single 2104 * operation) there's no point in using double-buffering. 
Also, 2105 * the IPU's IDMAC channels allow only a single U and V plane 2106 * offset shared between both buffers, but these offsets change 2107 * for every tile, and therefore would have to be updated for 2108 * each buffer which is not possible. So double-buffering is 2109 * impossible when either the source or destination images are 2110 * a planar format (YUV420, YUV422P, etc.). Further, differently 2111 * sized tiles or different resizing coefficients per tile 2112 * prevent double-buffering as well. 2113 */ 2114 ctx->double_buffering = (ctx->num_tiles > 1 && 2115 !s_image->fmt->planar && 2116 !d_image->fmt->planar); 2117 for (i = 1; i < ctx->num_tiles; i++) { 2118 if (ctx->in.tile[i].width != ctx->in.tile[0].width || 2119 ctx->in.tile[i].height != ctx->in.tile[0].height || 2120 ctx->out.tile[i].width != ctx->out.tile[0].width || 2121 ctx->out.tile[i].height != ctx->out.tile[0].height) { 2122 ctx->double_buffering = false; 2123 break; 2124 } 2125 } 2126 for (i = 1; i < ctx->in.num_cols; i++) { 2127 if (ctx->resize_coeffs_h[i] != ctx->resize_coeffs_h[0]) { 2128 ctx->double_buffering = false; 2129 break; 2130 } 2131 } 2132 for (i = 1; i < ctx->in.num_rows; i++) { 2133 if (ctx->resize_coeffs_v[i] != ctx->resize_coeffs_v[0]) { 2134 ctx->double_buffering = false; 2135 break; 2136 } 2137 } 2138 2139 if (ipu_rot_mode_is_irt(ctx->rot_mode)) { 2140 unsigned long intermediate_size = d_image->tile[0].size; 2141 2142 for (i = 1; i < ctx->num_tiles; i++) { 2143 if (d_image->tile[i].size > intermediate_size) 2144 intermediate_size = d_image->tile[i].size; 2145 } 2146 2147 ret = alloc_dma_buf(priv, &ctx->rot_intermediate[0], 2148 intermediate_size); 2149 if (ret) 2150 goto out_free; 2151 if (ctx->double_buffering) { 2152 ret = alloc_dma_buf(priv, 2153 &ctx->rot_intermediate[1], 2154 intermediate_size); 2155 if (ret) 2156 goto out_free_dmabuf0; 2157 } 2158 } 2159 2160 spin_lock_irqsave(&chan->irqlock, flags); 2161 2162 get_res = list_empty(&chan->ctx_list); 2163 2164 
list_add_tail(&ctx->list, &chan->ctx_list); 2165 2166 spin_unlock_irqrestore(&chan->irqlock, flags); 2167 2168 if (get_res) { 2169 ret = get_ipu_resources(chan); 2170 if (ret) 2171 goto out_free_dmabuf1; 2172 } 2173 2174 return ctx; 2175 2176 out_free_dmabuf1: 2177 free_dma_buf(priv, &ctx->rot_intermediate[1]); 2178 spin_lock_irqsave(&chan->irqlock, flags); 2179 list_del(&ctx->list); 2180 spin_unlock_irqrestore(&chan->irqlock, flags); 2181 out_free_dmabuf0: 2182 free_dma_buf(priv, &ctx->rot_intermediate[0]); 2183 out_free: 2184 kfree(ctx); 2185 return ERR_PTR(ret); 2186 } 2187 EXPORT_SYMBOL_GPL(ipu_image_convert_prepare); 2188 2189 /* 2190 * Carry out a single image conversion run. Only the physaddr's of the input 2191 * and output image buffers are needed. The conversion context must have 2192 * been created previously with ipu_image_convert_prepare(). 2193 */ 2194 int ipu_image_convert_queue(struct ipu_image_convert_run *run) 2195 { 2196 struct ipu_image_convert_chan *chan; 2197 struct ipu_image_convert_priv *priv; 2198 struct ipu_image_convert_ctx *ctx; 2199 unsigned long flags; 2200 int ret = 0; 2201 2202 if (!run || !run->ctx || !run->in_phys || !run->out_phys) 2203 return -EINVAL; 2204 2205 ctx = run->ctx; 2206 chan = ctx->chan; 2207 priv = chan->priv; 2208 2209 dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p run %p\n", __func__, 2210 chan->ic_task, ctx, run); 2211 2212 INIT_LIST_HEAD(&run->list); 2213 2214 spin_lock_irqsave(&chan->irqlock, flags); 2215 2216 if (ctx->aborting) { 2217 ret = -EIO; 2218 goto unlock; 2219 } 2220 2221 list_add_tail(&run->list, &chan->pending_q); 2222 2223 if (!chan->current_run) { 2224 ret = do_run(run); 2225 if (ret) 2226 chan->current_run = NULL; 2227 } 2228 unlock: 2229 spin_unlock_irqrestore(&chan->irqlock, flags); 2230 return ret; 2231 } 2232 EXPORT_SYMBOL_GPL(ipu_image_convert_queue); 2233 2234 /* Abort any active or pending conversions for this context */ 2235 static void __ipu_image_convert_abort(struct ipu_image_convert_ctx 
*ctx) 2236 { 2237 struct ipu_image_convert_chan *chan = ctx->chan; 2238 struct ipu_image_convert_priv *priv = chan->priv; 2239 struct ipu_image_convert_run *run, *active_run, *tmp; 2240 unsigned long flags; 2241 int run_count, ret; 2242 2243 spin_lock_irqsave(&chan->irqlock, flags); 2244 2245 /* move all remaining pending runs in this context to done_q */ 2246 list_for_each_entry_safe(run, tmp, &chan->pending_q, list) { 2247 if (run->ctx != ctx) 2248 continue; 2249 run->status = -EIO; 2250 list_move_tail(&run->list, &chan->done_q); 2251 } 2252 2253 run_count = get_run_count(ctx, &chan->done_q); 2254 active_run = (chan->current_run && chan->current_run->ctx == ctx) ? 2255 chan->current_run : NULL; 2256 2257 if (active_run) 2258 reinit_completion(&ctx->aborted); 2259 2260 ctx->aborting = true; 2261 2262 spin_unlock_irqrestore(&chan->irqlock, flags); 2263 2264 if (!run_count && !active_run) { 2265 dev_dbg(priv->ipu->dev, 2266 "%s: task %u: no abort needed for ctx %p\n", 2267 __func__, chan->ic_task, ctx); 2268 return; 2269 } 2270 2271 if (!active_run) { 2272 empty_done_q(chan); 2273 return; 2274 } 2275 2276 dev_dbg(priv->ipu->dev, 2277 "%s: task %u: wait for completion: %d runs\n", 2278 __func__, chan->ic_task, run_count); 2279 2280 ret = wait_for_completion_timeout(&ctx->aborted, 2281 msecs_to_jiffies(10000)); 2282 if (ret == 0) { 2283 dev_warn(priv->ipu->dev, "%s: timeout\n", __func__); 2284 force_abort(ctx); 2285 } 2286 } 2287 2288 void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx) 2289 { 2290 __ipu_image_convert_abort(ctx); 2291 ctx->aborting = false; 2292 } 2293 EXPORT_SYMBOL_GPL(ipu_image_convert_abort); 2294 2295 /* Unprepare image conversion context */ 2296 void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx) 2297 { 2298 struct ipu_image_convert_chan *chan = ctx->chan; 2299 struct ipu_image_convert_priv *priv = chan->priv; 2300 unsigned long flags; 2301 bool put_res; 2302 2303 /* make sure no runs are hanging around */ 2304 
__ipu_image_convert_abort(ctx); 2305 2306 dev_dbg(priv->ipu->dev, "%s: task %u: removing ctx %p\n", __func__, 2307 chan->ic_task, ctx); 2308 2309 spin_lock_irqsave(&chan->irqlock, flags); 2310 2311 list_del(&ctx->list); 2312 2313 put_res = list_empty(&chan->ctx_list); 2314 2315 spin_unlock_irqrestore(&chan->irqlock, flags); 2316 2317 if (put_res) 2318 release_ipu_resources(chan); 2319 2320 free_dma_buf(priv, &ctx->rot_intermediate[1]); 2321 free_dma_buf(priv, &ctx->rot_intermediate[0]); 2322 2323 kfree(ctx); 2324 } 2325 EXPORT_SYMBOL_GPL(ipu_image_convert_unprepare); 2326 2327 /* 2328 * "Canned" asynchronous single image conversion. Allocates and returns 2329 * a new conversion run. On successful return the caller must free the 2330 * run and call ipu_image_convert_unprepare() after conversion completes. 2331 */ 2332 struct ipu_image_convert_run * 2333 ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task, 2334 struct ipu_image *in, struct ipu_image *out, 2335 enum ipu_rotate_mode rot_mode, 2336 ipu_image_convert_cb_t complete, 2337 void *complete_context) 2338 { 2339 struct ipu_image_convert_ctx *ctx; 2340 struct ipu_image_convert_run *run; 2341 int ret; 2342 2343 ctx = ipu_image_convert_prepare(ipu, ic_task, in, out, rot_mode, 2344 complete, complete_context); 2345 if (IS_ERR(ctx)) 2346 return ERR_CAST(ctx); 2347 2348 run = kzalloc(sizeof(*run), GFP_KERNEL); 2349 if (!run) { 2350 ipu_image_convert_unprepare(ctx); 2351 return ERR_PTR(-ENOMEM); 2352 } 2353 2354 run->ctx = ctx; 2355 run->in_phys = in->phys0; 2356 run->out_phys = out->phys0; 2357 2358 ret = ipu_image_convert_queue(run); 2359 if (ret) { 2360 ipu_image_convert_unprepare(ctx); 2361 kfree(run); 2362 return ERR_PTR(ret); 2363 } 2364 2365 return run; 2366 } 2367 EXPORT_SYMBOL_GPL(ipu_image_convert); 2368 2369 /* "Canned" synchronous single image conversion */ 2370 static void image_convert_sync_complete(struct ipu_image_convert_run *run, 2371 void *data) 2372 { 2373 struct completion *comp = 
data; 2374 2375 complete(comp); 2376 } 2377 2378 int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task, 2379 struct ipu_image *in, struct ipu_image *out, 2380 enum ipu_rotate_mode rot_mode) 2381 { 2382 struct ipu_image_convert_run *run; 2383 struct completion comp; 2384 int ret; 2385 2386 init_completion(&comp); 2387 2388 run = ipu_image_convert(ipu, ic_task, in, out, rot_mode, 2389 image_convert_sync_complete, &comp); 2390 if (IS_ERR(run)) 2391 return PTR_ERR(run); 2392 2393 ret = wait_for_completion_timeout(&comp, msecs_to_jiffies(10000)); 2394 ret = (ret == 0) ? -ETIMEDOUT : 0; 2395 2396 ipu_image_convert_unprepare(run->ctx); 2397 kfree(run); 2398 2399 return ret; 2400 } 2401 EXPORT_SYMBOL_GPL(ipu_image_convert_sync); 2402 2403 int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev) 2404 { 2405 struct ipu_image_convert_priv *priv; 2406 int i; 2407 2408 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); 2409 if (!priv) 2410 return -ENOMEM; 2411 2412 ipu->image_convert_priv = priv; 2413 priv->ipu = ipu; 2414 2415 for (i = 0; i < IC_NUM_TASKS; i++) { 2416 struct ipu_image_convert_chan *chan = &priv->chan[i]; 2417 2418 chan->ic_task = i; 2419 chan->priv = priv; 2420 chan->dma_ch = &image_convert_dma_chan[i]; 2421 chan->out_eof_irq = -1; 2422 chan->rot_out_eof_irq = -1; 2423 2424 spin_lock_init(&chan->irqlock); 2425 INIT_LIST_HEAD(&chan->ctx_list); 2426 INIT_LIST_HEAD(&chan->pending_q); 2427 INIT_LIST_HEAD(&chan->done_q); 2428 } 2429 2430 return 0; 2431 } 2432 2433 void ipu_image_convert_exit(struct ipu_soc *ipu) 2434 { 2435 } 2436