1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Copyright (C) 2012-2016 Mentor Graphics Inc. 4 * 5 * Queued image conversion support, with tiling and rotation. 6 */ 7 8 #include <linux/interrupt.h> 9 #include <linux/dma-mapping.h> 10 #include <video/imx-ipu-image-convert.h> 11 #include "ipu-prv.h" 12 13 /* 14 * The IC Resizer has a restriction that the output frame from the 15 * resizer must be 1024 or less in both width (pixels) and height 16 * (lines). 17 * 18 * The image converter attempts to split up a conversion when 19 * the desired output (converted) frame resolution exceeds the 20 * IC resizer limit of 1024 in either dimension. 21 * 22 * If either dimension of the output frame exceeds the limit, the 23 * dimension is split into 1, 2, or 4 equal stripes, for a maximum 24 * of 4*4 or 16 tiles. A conversion is then carried out for each 25 * tile (but taking care to pass the full frame stride length to 26 * the DMA channel's parameter memory!). IDMA double-buffering is used 27 * to convert each tile back-to-back when possible (see note below 28 * when double_buffering boolean is set). 29 * 30 * Note that the input frame must be split up into the same number 31 * of tiles as the output frame: 32 * 33 * +---------+-----+ 34 * +-----+---+ | A | B | 35 * | A | B | | | | 36 * +-----+---+ --> +---------+-----+ 37 * | C | D | | C | D | 38 * +-----+---+ | | | 39 * +---------+-----+ 40 * 41 * Clockwise 90° rotations are handled by first rescaling into a 42 * reusable temporary tile buffer and then rotating with the 8x8 43 * block rotator, writing to the correct destination: 44 * 45 * +-----+-----+ 46 * | | | 47 * +-----+---+ +---------+ | C | A | 48 * | A | B | | A,B, | | | | | 49 * +-----+---+ --> | C,D | | --> | | | 50 * | C | D | +---------+ +-----+-----+ 51 * +-----+---+ | D | B | 52 * | | | 53 * +-----+-----+ 54 * 55 * If the 8x8 block rotator is used, horizontal or vertical flipping 56 * is done during the rotation step, otherwise flipping is done 57 * during the scaling step. 58 * With rotation or flipping, tile order changes between input and 59 * output image. Tiles are numbered row major from top left to bottom 60 * right for both input and output image. 61 */ 62 63 #define MAX_STRIPES_W 4 64 #define MAX_STRIPES_H 4 65 #define MAX_TILES (MAX_STRIPES_W * MAX_STRIPES_H) 66 67 #define MIN_W 16 68 #define MIN_H 8 69 #define MAX_W 4096 70 #define MAX_H 4096 71 72 enum ipu_image_convert_type { 73 IMAGE_CONVERT_IN = 0, 74 IMAGE_CONVERT_OUT, 75 }; 76 77 struct ipu_image_convert_dma_buf { 78 void *virt; 79 dma_addr_t phys; 80 unsigned long len; 81 }; 82 83 struct ipu_image_convert_dma_chan { 84 int in; 85 int out; 86 int rot_in; 87 int rot_out; 88 int vdi_in_p; 89 int vdi_in; 90 int vdi_in_n; 91 }; 92 93 /* dimensions of one tile */ 94 struct ipu_image_tile { 95 u32 width; 96 u32 height; 97 u32 left; 98 u32 top; 99 /* size and strides are in bytes */ 100 u32 size; 101 u32 stride; 102 u32 rot_stride; 103 /* start Y or packed offset of this tile */ 104 u32 offset; 105 /* offset from start to tile in U plane, for planar formats */ 106 u32 u_off; 107 /* offset from start to tile in V plane, for planar formats */ 108 u32 v_off; 109 }; 110 111 struct ipu_image_convert_image { 112 struct ipu_image base; 113 enum ipu_image_convert_type type; 114 115 const struct ipu_image_pixfmt *fmt; 116 unsigned int stride; 117 118 /* # of rows (horizontal stripes) if dest height is > 1024 */ 119 unsigned int num_rows; 120 /* # of columns (vertical stripes) if dest width is > 1024 */ 121 unsigned int num_cols; 122 123 struct ipu_image_tile tile[MAX_TILES]; 124 }; 125 126 struct ipu_image_pixfmt { 127 u32 fourcc; /* V4L2 fourcc */ 128 int bpp; /* total bpp */ 129 int uv_width_dec; /* decimation in width for U/V planes */ 130 int uv_height_dec; /* decimation in height for U/V planes */ 131 bool planar; /* planar format */ 132 bool uv_swapped; /* U and V planes are swapped */ 133 bool uv_packed; /* partial planar (U and V in same plane) */ 134 }; 135 136 struct ipu_image_convert_ctx; 137 struct ipu_image_convert_chan; 138 struct ipu_image_convert_priv; 139 140 struct ipu_image_convert_ctx { 141 struct ipu_image_convert_chan *chan; 142 143 ipu_image_convert_cb_t complete; 144 void *complete_context; 145 146 /* Source/destination image data and rotation mode */ 147 struct ipu_image_convert_image in; 148 struct ipu_image_convert_image out; 149 struct ipu_ic_csc csc; 150 enum ipu_rotate_mode rot_mode; 151 u32 downsize_coeff_h; 152 u32 downsize_coeff_v; 153 u32 image_resize_coeff_h; 154 u32 image_resize_coeff_v; 155 u32 resize_coeffs_h[MAX_STRIPES_W]; 156 u32 resize_coeffs_v[MAX_STRIPES_H]; 157 158 /* intermediate buffer for rotation */ 159 struct ipu_image_convert_dma_buf rot_intermediate[2]; 160 161 /* current buffer number for double buffering */ 162 int cur_buf_num; 163 164 bool aborting; 165 struct completion aborted; 166 167 /* can we use double-buffering for this conversion operation? */ 168 bool double_buffering; 169 /* num_rows * num_cols */ 170 unsigned int num_tiles; 171 /* next tile to process */ 172 unsigned int next_tile; 173 /* where to place converted tile in dest image */ 174 unsigned int out_tile_map[MAX_TILES]; 175 176 struct list_head list; 177 }; 178 179 struct ipu_image_convert_chan { 180 struct ipu_image_convert_priv *priv; 181 182 enum ipu_ic_task ic_task; 183 const struct ipu_image_convert_dma_chan *dma_ch; 184 185 struct ipu_ic *ic; 186 struct ipuv3_channel *in_chan; 187 struct ipuv3_channel *out_chan; 188 struct ipuv3_channel *rotation_in_chan; 189 struct ipuv3_channel *rotation_out_chan; 190 191 /* the IPU end-of-frame irqs */ 192 int out_eof_irq; 193 int rot_out_eof_irq; 194 195 spinlock_t irqlock; 196 197 /* list of convert contexts */ 198 struct list_head ctx_list; 199 /* queue of conversion runs */ 200 struct list_head pending_q; 201 /* queue of completed runs */ 202 struct list_head done_q; 203 204 /* the current conversion run */ 205 struct ipu_image_convert_run *current_run; 206 }; 207 208 struct ipu_image_convert_priv { 209 struct ipu_image_convert_chan chan[IC_NUM_TASKS]; 210 struct ipu_soc *ipu; 211 }; 212 213 static const struct ipu_image_convert_dma_chan 214 image_convert_dma_chan[IC_NUM_TASKS] = { 215 [IC_TASK_VIEWFINDER] = { 216 .in = IPUV3_CHANNEL_MEM_IC_PRP_VF, 217 .out = IPUV3_CHANNEL_IC_PRP_VF_MEM, 218 .rot_in = IPUV3_CHANNEL_MEM_ROT_VF, 219 .rot_out = IPUV3_CHANNEL_ROT_VF_MEM, 220 .vdi_in_p = IPUV3_CHANNEL_MEM_VDI_PREV, 221 .vdi_in = IPUV3_CHANNEL_MEM_VDI_CUR, 222 .vdi_in_n = IPUV3_CHANNEL_MEM_VDI_NEXT, 223 }, 224 [IC_TASK_POST_PROCESSOR] = { 225 .in = IPUV3_CHANNEL_MEM_IC_PP, 226 .out = IPUV3_CHANNEL_IC_PP_MEM, 227 .rot_in = IPUV3_CHANNEL_MEM_ROT_PP, 228 .rot_out = IPUV3_CHANNEL_ROT_PP_MEM, 229 }, 230 }; 231 232 static const struct ipu_image_pixfmt image_convert_formats[] = { 233 { 234 .fourcc = V4L2_PIX_FMT_RGB565, 235 .bpp = 16, 236 }, { 237 .fourcc = V4L2_PIX_FMT_RGB24, 238 .bpp = 24, 239 }, { 240 .fourcc = V4L2_PIX_FMT_BGR24, 241 .bpp = 24, 242 }, { 243 .fourcc = V4L2_PIX_FMT_RGB32, 244 .bpp = 32, 245 }, { 246 .fourcc = V4L2_PIX_FMT_BGR32, 247 .bpp = 32, 248 }, { 249 .fourcc = V4L2_PIX_FMT_XRGB32, 250 .bpp = 32, 251 }, { 252 .fourcc = V4L2_PIX_FMT_XBGR32, 253 .bpp = 32, 254 }, { 255 .fourcc = V4L2_PIX_FMT_YUYV, 256 .bpp = 16, 257 .uv_width_dec = 2, 258 .uv_height_dec = 1, 259 }, { 260 .fourcc = V4L2_PIX_FMT_UYVY, 261 .bpp = 16, 262 .uv_width_dec = 2, 263 .uv_height_dec = 1, 264 }, { 265 .fourcc = V4L2_PIX_FMT_YUV420, 266 .bpp = 12, 267 .planar = true, 268 .uv_width_dec = 2, 269 .uv_height_dec = 2, 270 }, { 271 .fourcc = V4L2_PIX_FMT_YVU420, 272 .bpp = 12, 273 .planar = true, 274 .uv_width_dec = 2, 275 .uv_height_dec = 2, 276 .uv_swapped = true, 277 }, { 278 .fourcc = V4L2_PIX_FMT_NV12, 279 .bpp = 12, 280 .planar = true, 281 .uv_width_dec = 2, 282 .uv_height_dec = 2, 283 .uv_packed = true, 284 }, { 285 .fourcc = V4L2_PIX_FMT_YUV422P, 286 .bpp = 16, 287 .planar = true, 288 .uv_width_dec = 2, 289 .uv_height_dec = 1, 290 }, { 291 .fourcc = V4L2_PIX_FMT_NV16, 292 .bpp = 16, 293 .planar = true, 294 .uv_width_dec = 2, 295 .uv_height_dec = 1, 296 .uv_packed = true, 297 }, 298 }; 299 300 static const struct ipu_image_pixfmt *get_format(u32 fourcc) 301 { 302 const struct ipu_image_pixfmt *ret = NULL; 303 unsigned int i; 304 305 for (i = 0; i < ARRAY_SIZE(image_convert_formats); i++) { 306 if (image_convert_formats[i].fourcc == fourcc) { 307 ret = &image_convert_formats[i]; 308 break; 309 } 310 } 311 312 return ret; 313 } 314 315 static void dump_format(struct ipu_image_convert_ctx *ctx, 316 struct ipu_image_convert_image *ic_image) 317 { 318 struct ipu_image_convert_chan *chan = ctx->chan; 319 struct ipu_image_convert_priv *priv = chan->priv; 320 321 dev_dbg(priv->ipu->dev, 322 "task %u: ctx %p: %s format: %dx%d (%dx%d tiles), %c%c%c%c\n", 323 chan->ic_task, ctx, 324 ic_image->type == IMAGE_CONVERT_OUT ? "Output" : "Input", 325 ic_image->base.pix.width, ic_image->base.pix.height, 326 ic_image->num_cols, ic_image->num_rows, 327 ic_image->fmt->fourcc & 0xff, 328 (ic_image->fmt->fourcc >> 8) & 0xff, 329 (ic_image->fmt->fourcc >> 16) & 0xff, 330 (ic_image->fmt->fourcc >> 24) & 0xff); 331 } 332 333 int ipu_image_convert_enum_format(int index, u32 *fourcc) 334 { 335 const struct ipu_image_pixfmt *fmt; 336 337 if (index >= (int)ARRAY_SIZE(image_convert_formats)) 338 return -EINVAL; 339 340 /* Format found */ 341 fmt = &image_convert_formats[index]; 342 *fourcc = fmt->fourcc; 343 return 0; 344 } 345 EXPORT_SYMBOL_GPL(ipu_image_convert_enum_format); 346 347 static void free_dma_buf(struct ipu_image_convert_priv *priv, 348 struct ipu_image_convert_dma_buf *buf) 349 { 350 if (buf->virt) 351 dma_free_coherent(priv->ipu->dev, 352 buf->len, buf->virt, buf->phys); 353 buf->virt = NULL; 354 buf->phys = 0; 355 } 356 357 static int alloc_dma_buf(struct ipu_image_convert_priv *priv, 358 struct ipu_image_convert_dma_buf *buf, 359 int size) 360 { 361 buf->len = PAGE_ALIGN(size); 362 buf->virt = dma_alloc_coherent(priv->ipu->dev, buf->len, &buf->phys, 363 GFP_DMA | GFP_KERNEL); 364 if (!buf->virt) { 365 dev_err(priv->ipu->dev, "failed to alloc dma buffer\n"); 366 return -ENOMEM; 367 } 368 369 return 0; 370 } 371 372 static inline int num_stripes(int dim) 373 { 374 return (dim - 1) / 1024 + 1; 375 } 376 377 /* 378 * Calculate downsizing coefficients, which are the same for all tiles, 379 * and bilinear resizing coefficients, which are used to find the best 380 * seam positions. 381 */ 382 static int calc_image_resize_coefficients(struct ipu_image_convert_ctx *ctx, 383 struct ipu_image *in, 384 struct ipu_image *out) 385 { 386 u32 downsized_width = in->rect.width; 387 u32 downsized_height = in->rect.height; 388 u32 downsize_coeff_v = 0; 389 u32 downsize_coeff_h = 0; 390 u32 resized_width = out->rect.width; 391 u32 resized_height = out->rect.height; 392 u32 resize_coeff_h; 393 u32 resize_coeff_v; 394 395 if (ipu_rot_mode_is_irt(ctx->rot_mode)) { 396 resized_width = out->rect.height; 397 resized_height = out->rect.width; 398 } 399 400 /* Do not let invalid input lead to an endless loop below */ 401 if (WARN_ON(resized_width == 0 || resized_height == 0)) 402 return -EINVAL; 403 404 while (downsized_width > 1024 || 405 downsized_width >= resized_width * 2) { 406 downsized_width >>= 1; 407 downsize_coeff_h++; 408 } 409 410 while (downsized_height > 1024 || 411 downsized_height >= resized_height * 2) { 412 downsized_height >>= 1; 413 downsize_coeff_v++; 414 } 415 416 /* 417 * Calculate the bilinear resizing coefficients that could be used if 418 * we were converting with a single tile. The bottom right output pixel 419 * should sample as close as possible to the bottom right input pixel 420 * out of the decimator, but not overshoot it: 421 */ 422 resize_coeff_h = 8192 * (downsized_width - 1) / (resized_width - 1); 423 resize_coeff_v = 8192 * (downsized_height - 1) / (resized_height - 1); 424 425 dev_dbg(ctx->chan->priv->ipu->dev, 426 "%s: hscale: >>%u, *8192/%u vscale: >>%u, *8192/%u, %ux%u tiles\n", 427 __func__, downsize_coeff_h, resize_coeff_h, downsize_coeff_v, 428 resize_coeff_v, ctx->in.num_cols, ctx->in.num_rows); 429 430 if (downsize_coeff_h > 2 || downsize_coeff_v > 2 || 431 resize_coeff_h > 0x3fff || resize_coeff_v > 0x3fff) 432 return -EINVAL; 433 434 ctx->downsize_coeff_h = downsize_coeff_h; 435 ctx->downsize_coeff_v = downsize_coeff_v; 436 ctx->image_resize_coeff_h = resize_coeff_h; 437 ctx->image_resize_coeff_v = resize_coeff_v; 438 439 return 0; 440 } 441 442 #define round_closest(x, y) round_down((x) + (y)/2, (y)) 443 444 /* 445 * Find the best aligned seam position in the inverval [out_start, out_end]. 446 * Rotation and image offsets are out of scope. 447 * 448 * @out_start: start of inverval, must be within 1024 pixels / lines 449 * of out_end 450 * @out_end: end of interval, smaller than or equal to out_edge 451 * @in_edge: input right / bottom edge 452 * @out_edge: output right / bottom edge 453 * @in_align: input alignment, either horizontal 8-byte line start address 454 * alignment, or pixel alignment due to image format 455 * @out_align: output alignment, either horizontal 8-byte line start address 456 * alignment, or pixel alignment due to image format or rotator 457 * block size 458 * @in_burst: horizontal input burst size in case of horizontal flip 459 * @out_burst: horizontal output burst size or rotator block size 460 * @downsize_coeff: downsizing section coefficient 461 * @resize_coeff: main processing section resizing coefficient 462 * @_in_seam: aligned input seam position return value 463 * @_out_seam: aligned output seam position return value 464 */ 465 static void find_best_seam(struct ipu_image_convert_ctx *ctx, 466 unsigned int out_start, 467 unsigned int out_end, 468 unsigned int in_edge, 469 unsigned int out_edge, 470 unsigned int in_align, 471 unsigned int out_align, 472 unsigned int in_burst, 473 unsigned int out_burst, 474 unsigned int downsize_coeff, 475 unsigned int resize_coeff, 476 u32 *_in_seam, 477 u32 *_out_seam) 478 { 479 struct device *dev = ctx->chan->priv->ipu->dev; 480 unsigned int out_pos; 481 /* Input / output seam position candidates */ 482 unsigned int out_seam = 0; 483 unsigned int in_seam = 0; 484 unsigned int min_diff = UINT_MAX; 485 486 /* 487 * Output tiles must start at a multiple of 8 bytes horizontally and 488 * possibly at an even line horizontally depending on the pixel format. 489 * Only consider output aligned positions for the seam. 490 */ 491 out_start = round_up(out_start, out_align); 492 for (out_pos = out_start; out_pos < out_end; out_pos += out_align) { 493 unsigned int in_pos; 494 unsigned int in_pos_aligned; 495 unsigned int abs_diff; 496 497 /* 498 * Tiles in the right row / bottom column may not be allowed to 499 * overshoot horizontally / vertically. out_burst may be the 500 * actual DMA burst size, or the rotator block size. 501 */ 502 if ((out_burst > 1) && (out_edge - out_pos) % out_burst) 503 continue; 504 505 /* 506 * Input sample position, corresponding to out_pos, 19.13 fixed 507 * point. 508 */ 509 in_pos = (out_pos * resize_coeff) << downsize_coeff; 510 /* 511 * The closest input sample position that we could actually 512 * start the input tile at, 19.13 fixed point. 513 */ 514 in_pos_aligned = round_closest(in_pos, 8192U * in_align); 515 516 if ((in_burst > 1) && 517 (in_edge - in_pos_aligned / 8192U) % in_burst) 518 continue; 519 520 if (in_pos < in_pos_aligned) 521 abs_diff = in_pos_aligned - in_pos; 522 else 523 abs_diff = in_pos - in_pos_aligned; 524 525 if (abs_diff < min_diff) { 526 in_seam = in_pos_aligned; 527 out_seam = out_pos; 528 min_diff = abs_diff; 529 } 530 } 531 532 *_out_seam = out_seam; 533 /* Convert 19.13 fixed point to integer seam position */ 534 *_in_seam = DIV_ROUND_CLOSEST(in_seam, 8192U); 535 536 dev_dbg(dev, "%s: out_seam %u(%u) in [%u, %u], in_seam %u(%u) diff %u.%03u\n", 537 __func__, out_seam, out_align, out_start, out_end, 538 *_in_seam, in_align, min_diff / 8192, 539 DIV_ROUND_CLOSEST(min_diff % 8192 * 1000, 8192)); 540 } 541 542 /* 543 * Tile left edges are required to be aligned to multiples of 8 bytes 544 * by the IDMAC. 545 */ 546 static inline u32 tile_left_align(const struct ipu_image_pixfmt *fmt) 547 { 548 if (fmt->planar) 549 return fmt->uv_packed ? 8 : 8 * fmt->uv_width_dec; 550 else 551 return fmt->bpp == 32 ? 2 : fmt->bpp == 16 ? 4 : 8; 552 } 553 554 /* 555 * Tile top edge alignment is only limited by chroma subsampling. 556 */ 557 static inline u32 tile_top_align(const struct ipu_image_pixfmt *fmt) 558 { 559 return fmt->uv_height_dec > 1 ? 2 : 1; 560 } 561 562 static inline u32 tile_width_align(enum ipu_image_convert_type type, 563 const struct ipu_image_pixfmt *fmt, 564 enum ipu_rotate_mode rot_mode) 565 { 566 if (type == IMAGE_CONVERT_IN) { 567 /* 568 * The IC burst reads 8 pixels at a time. Reading beyond the 569 * end of the line is usually acceptable. Those pixels are 570 * ignored, unless the IC has to write the scaled line in 571 * reverse. 572 */ 573 return (!ipu_rot_mode_is_irt(rot_mode) && 574 (rot_mode & IPU_ROT_BIT_HFLIP)) ? 8 : 2; 575 } 576 577 /* 578 * Align to 16x16 pixel blocks for planar 4:2:0 chroma subsampled 579 * formats to guarantee 8-byte aligned line start addresses in the 580 * chroma planes when IRT is used. Align to 8x8 pixel IRT block size 581 * for all other formats. 582 */ 583 return (ipu_rot_mode_is_irt(rot_mode) && 584 fmt->planar && !fmt->uv_packed) ? 585 8 * fmt->uv_width_dec : 8; 586 } 587 588 static inline u32 tile_height_align(enum ipu_image_convert_type type, 589 const struct ipu_image_pixfmt *fmt, 590 enum ipu_rotate_mode rot_mode) 591 { 592 if (type == IMAGE_CONVERT_IN || !ipu_rot_mode_is_irt(rot_mode)) 593 return 2; 594 595 /* 596 * Align to 16x16 pixel blocks for planar 4:2:0 chroma subsampled 597 * formats to guarantee 8-byte aligned line start addresses in the 598 * chroma planes when IRT is used. Align to 8x8 pixel IRT block size 599 * for all other formats. 600 */ 601 return (fmt->planar && !fmt->uv_packed) ? 8 * fmt->uv_width_dec : 8; 602 } 603 604 /* 605 * Fill in left position and width and for all tiles in an input column, and 606 * for all corresponding output tiles. If the 90° rotator is used, the output 607 * tiles are in a row, and output tile top position and height are set. 608 */ 609 static void fill_tile_column(struct ipu_image_convert_ctx *ctx, 610 unsigned int col, 611 struct ipu_image_convert_image *in, 612 unsigned int in_left, unsigned int in_width, 613 struct ipu_image_convert_image *out, 614 unsigned int out_left, unsigned int out_width) 615 { 616 unsigned int row, tile_idx; 617 struct ipu_image_tile *in_tile, *out_tile; 618 619 for (row = 0; row < in->num_rows; row++) { 620 tile_idx = in->num_cols * row + col; 621 in_tile = &in->tile[tile_idx]; 622 out_tile = &out->tile[ctx->out_tile_map[tile_idx]]; 623 624 in_tile->left = in_left; 625 in_tile->width = in_width; 626 627 if (ipu_rot_mode_is_irt(ctx->rot_mode)) { 628 out_tile->top = out_left; 629 out_tile->height = out_width; 630 } else { 631 out_tile->left = out_left; 632 out_tile->width = out_width; 633 } 634 } 635 } 636 637 /* 638 * Fill in top position and height and for all tiles in an input row, and 639 * for all corresponding output tiles. If the 90° rotator is used, the output 640 * tiles are in a column, and output tile left position and width are set. 641 */ 642 static void fill_tile_row(struct ipu_image_convert_ctx *ctx, unsigned int row, 643 struct ipu_image_convert_image *in, 644 unsigned int in_top, unsigned int in_height, 645 struct ipu_image_convert_image *out, 646 unsigned int out_top, unsigned int out_height) 647 { 648 unsigned int col, tile_idx; 649 struct ipu_image_tile *in_tile, *out_tile; 650 651 for (col = 0; col < in->num_cols; col++) { 652 tile_idx = in->num_cols * row + col; 653 in_tile = &in->tile[tile_idx]; 654 out_tile = &out->tile[ctx->out_tile_map[tile_idx]]; 655 656 in_tile->top = in_top; 657 in_tile->height = in_height; 658 659 if (ipu_rot_mode_is_irt(ctx->rot_mode)) { 660 out_tile->left = out_top; 661 out_tile->width = out_height; 662 } else { 663 out_tile->top = out_top; 664 out_tile->height = out_height; 665 } 666 } 667 } 668 669 /* 670 * Find the best horizontal and vertical seam positions to split into tiles. 671 * Minimize the fractional part of the input sampling position for the 672 * top / left pixels of each tile. 673 */ 674 static void find_seams(struct ipu_image_convert_ctx *ctx, 675 struct ipu_image_convert_image *in, 676 struct ipu_image_convert_image *out) 677 { 678 struct device *dev = ctx->chan->priv->ipu->dev; 679 unsigned int resized_width = out->base.rect.width; 680 unsigned int resized_height = out->base.rect.height; 681 unsigned int col; 682 unsigned int row; 683 unsigned int in_left_align = tile_left_align(in->fmt); 684 unsigned int in_top_align = tile_top_align(in->fmt); 685 unsigned int out_left_align = tile_left_align(out->fmt); 686 unsigned int out_top_align = tile_top_align(out->fmt); 687 unsigned int out_width_align = tile_width_align(out->type, out->fmt, 688 ctx->rot_mode); 689 unsigned int out_height_align = tile_height_align(out->type, out->fmt, 690 ctx->rot_mode); 691 unsigned int in_right = in->base.rect.width; 692 unsigned int in_bottom = in->base.rect.height; 693 unsigned int out_right = out->base.rect.width; 694 unsigned int out_bottom = out->base.rect.height; 695 unsigned int flipped_out_left; 696 unsigned int flipped_out_top; 697 698 if (ipu_rot_mode_is_irt(ctx->rot_mode)) { 699 /* Switch width/height and align top left to IRT block size */ 700 resized_width = out->base.rect.height; 701 resized_height = out->base.rect.width; 702 out_left_align = out_height_align; 703 out_top_align = out_width_align; 704 out_width_align = out_left_align; 705 out_height_align = out_top_align; 706 out_right = out->base.rect.height; 707 out_bottom = out->base.rect.width; 708 } 709 710 for (col = in->num_cols - 1; col > 0; col--) { 711 bool allow_in_overshoot = ipu_rot_mode_is_irt(ctx->rot_mode) || 712 !(ctx->rot_mode & IPU_ROT_BIT_HFLIP); 713 bool allow_out_overshoot = (col < in->num_cols - 1) && 714 !(ctx->rot_mode & IPU_ROT_BIT_HFLIP); 715 unsigned int out_start; 716 unsigned int out_end; 717 unsigned int in_left; 718 unsigned int out_left; 719 720 /* 721 * Align input width to burst length if the scaling step flips 722 * horizontally. 723 */ 724 725 /* Start within 1024 pixels of the right edge */ 726 out_start = max_t(int, 0, out_right - 1024); 727 /* End before having to add more columns to the left */ 728 out_end = min_t(unsigned int, out_right, col * 1024); 729 730 find_best_seam(ctx, out_start, out_end, 731 in_right, out_right, 732 in_left_align, out_left_align, 733 allow_in_overshoot ? 1 : 8 /* burst length */, 734 allow_out_overshoot ? 1 : out_width_align, 735 ctx->downsize_coeff_h, ctx->image_resize_coeff_h, 736 &in_left, &out_left); 737 738 if (ctx->rot_mode & IPU_ROT_BIT_HFLIP) 739 flipped_out_left = resized_width - out_right; 740 else 741 flipped_out_left = out_left; 742 743 fill_tile_column(ctx, col, in, in_left, in_right - in_left, 744 out, flipped_out_left, out_right - out_left); 745 746 dev_dbg(dev, "%s: col %u: %u, %u -> %u, %u\n", __func__, col, 747 in_left, in_right - in_left, 748 flipped_out_left, out_right - out_left); 749 750 in_right = in_left; 751 out_right = out_left; 752 } 753 754 flipped_out_left = (ctx->rot_mode & IPU_ROT_BIT_HFLIP) ? 755 resized_width - out_right : 0; 756 757 fill_tile_column(ctx, 0, in, 0, in_right, 758 out, flipped_out_left, out_right); 759 760 dev_dbg(dev, "%s: col 0: 0, %u -> %u, %u\n", __func__, 761 in_right, flipped_out_left, out_right); 762 763 for (row = in->num_rows - 1; row > 0; row--) { 764 bool allow_overshoot = row < in->num_rows - 1; 765 unsigned int out_start; 766 unsigned int out_end; 767 unsigned int in_top; 768 unsigned int out_top; 769 770 /* Start within 1024 lines of the bottom edge */ 771 out_start = max_t(int, 0, out_bottom - 1024); 772 /* End before having to add more rows above */ 773 out_end = min_t(unsigned int, out_bottom, row * 1024); 774 775 find_best_seam(ctx, out_start, out_end, 776 in_bottom, out_bottom, 777 in_top_align, out_top_align, 778 1, allow_overshoot ? 1 : out_height_align, 779 ctx->downsize_coeff_v, ctx->image_resize_coeff_v, 780 &in_top, &out_top); 781 782 if ((ctx->rot_mode & IPU_ROT_BIT_VFLIP) ^ 783 ipu_rot_mode_is_irt(ctx->rot_mode)) 784 flipped_out_top = resized_height - out_bottom; 785 else 786 flipped_out_top = out_top; 787 788 fill_tile_row(ctx, row, in, in_top, in_bottom - in_top, 789 out, flipped_out_top, out_bottom - out_top); 790 791 dev_dbg(dev, "%s: row %u: %u, %u -> %u, %u\n", __func__, row, 792 in_top, in_bottom - in_top, 793 flipped_out_top, out_bottom - out_top); 794 795 in_bottom = in_top; 796 out_bottom = out_top; 797 } 798 799 if ((ctx->rot_mode & IPU_ROT_BIT_VFLIP) ^ 800 ipu_rot_mode_is_irt(ctx->rot_mode)) 801 flipped_out_top = resized_height - out_bottom; 802 else 803 flipped_out_top = 0; 804 805 fill_tile_row(ctx, 0, in, 0, in_bottom, 806 out, flipped_out_top, out_bottom); 807 808 dev_dbg(dev, "%s: row 0: 0, %u -> %u, %u\n", __func__, 809 in_bottom, flipped_out_top, out_bottom); 810 } 811 812 static void calc_tile_dimensions(struct ipu_image_convert_ctx *ctx, 813 struct ipu_image_convert_image *image) 814 { 815 struct ipu_image_convert_chan *chan = ctx->chan; 816 struct ipu_image_convert_priv *priv = chan->priv; 817 unsigned int i; 818 819 for (i = 0; i < ctx->num_tiles; i++) { 820 struct ipu_image_tile *tile; 821 const unsigned int row = i / image->num_cols; 822 const unsigned int col = i % image->num_cols; 823 824 if (image->type == IMAGE_CONVERT_OUT) 825 tile = &image->tile[ctx->out_tile_map[i]]; 826 else 827 tile = &image->tile[i]; 828 829 tile->size = ((tile->height * image->fmt->bpp) >> 3) * 830 tile->width; 831 832 if (image->fmt->planar) { 833 tile->stride = tile->width; 834 tile->rot_stride = tile->height; 835 } else { 836 tile->stride = 837 (image->fmt->bpp * tile->width) >> 3; 838 tile->rot_stride = 839 (image->fmt->bpp * tile->height) >> 3; 840 } 841 842 dev_dbg(priv->ipu->dev, 843 "task %u: ctx %p: %s@[%u,%u]: %ux%u@%u,%u\n", 844 chan->ic_task, ctx, 845 image->type == IMAGE_CONVERT_IN ? "Input" : "Output", 846 row, col, 847 tile->width, tile->height, tile->left, tile->top); 848 } 849 } 850 851 /* 852 * Use the rotation transformation to find the tile coordinates 853 * (row, col) of a tile in the destination frame that corresponds 854 * to the given tile coordinates of a source frame. The destination 855 * coordinate is then converted to a tile index. 856 */ 857 static int transform_tile_index(struct ipu_image_convert_ctx *ctx, 858 int src_row, int src_col) 859 { 860 struct ipu_image_convert_chan *chan = ctx->chan; 861 struct ipu_image_convert_priv *priv = chan->priv; 862 struct ipu_image_convert_image *s_image = &ctx->in; 863 struct ipu_image_convert_image *d_image = &ctx->out; 864 int dst_row, dst_col; 865 866 /* with no rotation it's a 1:1 mapping */ 867 if (ctx->rot_mode == IPU_ROTATE_NONE) 868 return src_row * s_image->num_cols + src_col; 869 870 /* 871 * before doing the transform, first we have to translate 872 * source row,col for an origin in the center of s_image 873 */ 874 src_row = src_row * 2 - (s_image->num_rows - 1); 875 src_col = src_col * 2 - (s_image->num_cols - 1); 876 877 /* do the rotation transform */ 878 if (ctx->rot_mode & IPU_ROT_BIT_90) { 879 dst_col = -src_row; 880 dst_row = src_col; 881 } else { 882 dst_col = src_col; 883 dst_row = src_row; 884 } 885 886 /* apply flip */ 887 if (ctx->rot_mode & IPU_ROT_BIT_HFLIP) 888 dst_col = -dst_col; 889 if (ctx->rot_mode & IPU_ROT_BIT_VFLIP) 890 dst_row = -dst_row; 891 892 dev_dbg(priv->ipu->dev, "task %u: ctx %p: [%d,%d] --> [%d,%d]\n", 893 chan->ic_task, ctx, src_col, src_row, dst_col, dst_row); 894 895 /* 896 * finally translate dest row,col using an origin in upper 897 * left of d_image 898 */ 899 dst_row += d_image->num_rows - 1; 900 dst_col += d_image->num_cols - 1; 901 dst_row /= 2; 902 dst_col /= 2; 903 904 return dst_row * d_image->num_cols + dst_col; 905 } 906 907 /* 908 * Fill the out_tile_map[] with transformed destination tile indeces. 909 */ 910 static void calc_out_tile_map(struct ipu_image_convert_ctx *ctx) 911 { 912 struct ipu_image_convert_image *s_image = &ctx->in; 913 unsigned int row, col, tile = 0; 914 915 for (row = 0; row < s_image->num_rows; row++) { 916 for (col = 0; col < s_image->num_cols; col++) { 917 ctx->out_tile_map[tile] = 918 transform_tile_index(ctx, row, col); 919 tile++; 920 } 921 } 922 } 923 924 static int calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx, 925 struct ipu_image_convert_image *image) 926 { 927 struct ipu_image_convert_chan *chan = ctx->chan; 928 struct ipu_image_convert_priv *priv = chan->priv; 929 const struct ipu_image_pixfmt *fmt = image->fmt; 930 unsigned int row, col, tile = 0; 931 u32 H, top, y_stride, uv_stride; 932 u32 uv_row_off, uv_col_off, uv_off, u_off, v_off, tmp; 933 u32 y_row_off, y_col_off, y_off; 934 u32 y_size, uv_size; 935 936 /* setup some convenience vars */ 937 H = image->base.pix.height; 938 939 y_stride = image->stride; 940 uv_stride = y_stride / fmt->uv_width_dec; 941 if (fmt->uv_packed) 942 uv_stride *= 2; 943 944 y_size = H * y_stride; 945 uv_size = y_size / (fmt->uv_width_dec * fmt->uv_height_dec); 946 947 for (row = 0; row < image->num_rows; row++) { 948 top = image->tile[tile].top; 949 y_row_off = top * y_stride; 950 uv_row_off = (top * uv_stride) / fmt->uv_height_dec; 951 952 for (col = 0; col < image->num_cols; col++) { 953 y_col_off = image->tile[tile].left; 954 uv_col_off = y_col_off / fmt->uv_width_dec; 955 if (fmt->uv_packed) 956 uv_col_off *= 2; 957 958 y_off = y_row_off + y_col_off; 959 uv_off = uv_row_off + uv_col_off; 960 961 u_off = y_size - y_off + uv_off; 962 v_off = (fmt->uv_packed) ? 0 : u_off + uv_size; 963 if (fmt->uv_swapped) { 964 tmp = u_off; 965 u_off = v_off; 966 v_off = tmp; 967 } 968 969 image->tile[tile].offset = y_off; 970 image->tile[tile].u_off = u_off; 971 image->tile[tile++].v_off = v_off; 972 973 if ((y_off & 0x7) || (u_off & 0x7) || (v_off & 0x7)) { 974 dev_err(priv->ipu->dev, 975 "task %u: ctx %p: %s@[%d,%d]: " 976 "y_off %08x, u_off %08x, v_off %08x\n", 977 chan->ic_task, ctx, 978 image->type == IMAGE_CONVERT_IN ? 979 "Input" : "Output", row, col, 980 y_off, u_off, v_off); 981 return -EINVAL; 982 } 983 } 984 } 985 986 return 0; 987 } 988 989 static int calc_tile_offsets_packed(struct ipu_image_convert_ctx *ctx, 990 struct ipu_image_convert_image *image) 991 { 992 struct ipu_image_convert_chan *chan = ctx->chan; 993 struct ipu_image_convert_priv *priv = chan->priv; 994 const struct ipu_image_pixfmt *fmt = image->fmt; 995 unsigned int row, col, tile = 0; 996 u32 bpp, stride, offset; 997 u32 row_off, col_off; 998 999 /* setup some convenience vars */ 1000 stride = image->stride; 1001 bpp = fmt->bpp; 1002 1003 for (row = 0; row < image->num_rows; row++) { 1004 row_off = image->tile[tile].top * stride; 1005 1006 for (col = 0; col < image->num_cols; col++) { 1007 col_off = (image->tile[tile].left * bpp) >> 3; 1008 1009 offset = row_off + col_off; 1010 1011 image->tile[tile].offset = offset; 1012 image->tile[tile].u_off = 0; 1013 image->tile[tile++].v_off = 0; 1014 1015 if (offset & 0x7) { 1016 dev_err(priv->ipu->dev, 1017 "task %u: ctx %p: %s@[%d,%d]: " 1018 "phys %08x\n", 1019 chan->ic_task, ctx, 1020 image->type == IMAGE_CONVERT_IN ? 1021 "Input" : "Output", row, col, 1022 row_off + col_off); 1023 return -EINVAL; 1024 } 1025 } 1026 } 1027 1028 return 0; 1029 } 1030 1031 static int calc_tile_offsets(struct ipu_image_convert_ctx *ctx, 1032 struct ipu_image_convert_image *image) 1033 { 1034 if (image->fmt->planar) 1035 return calc_tile_offsets_planar(ctx, image); 1036 1037 return calc_tile_offsets_packed(ctx, image); 1038 } 1039 1040 /* 1041 * Calculate the resizing ratio for the IC main processing section given input 1042 * size, fixed downsizing coefficient, and output size. 1043 * Either round to closest for the next tile's first pixel to minimize seams 1044 * and distortion (for all but right column / bottom row), or round down to 1045 * avoid sampling beyond the edges of the input image for this tile's last 1046 * pixel. 1047 * Returns the resizing coefficient, resizing ratio is 8192.0 / resize_coeff. 1048 */ 1049 static u32 calc_resize_coeff(u32 input_size, u32 downsize_coeff, 1050 u32 output_size, bool allow_overshoot) 1051 { 1052 u32 downsized = input_size >> downsize_coeff; 1053 1054 if (allow_overshoot) 1055 return DIV_ROUND_CLOSEST(8192 * downsized, output_size); 1056 else 1057 return 8192 * (downsized - 1) / (output_size - 1); 1058 } 1059 1060 /* 1061 * Slightly modify resize coefficients per tile to hide the bilinear 1062 * interpolator reset at tile borders, shifting the right / bottom edge 1063 * by up to a half input pixel. This removes noticeable seams between 1064 * tiles at higher upscaling factors. 1065 */ 1066 static void calc_tile_resize_coefficients(struct ipu_image_convert_ctx *ctx) 1067 { 1068 struct ipu_image_convert_chan *chan = ctx->chan; 1069 struct ipu_image_convert_priv *priv = chan->priv; 1070 struct ipu_image_tile *in_tile, *out_tile; 1071 unsigned int col, row, tile_idx; 1072 unsigned int last_output; 1073 1074 for (col = 0; col < ctx->in.num_cols; col++) { 1075 bool closest = (col < ctx->in.num_cols - 1) && 1076 !(ctx->rot_mode & IPU_ROT_BIT_HFLIP); 1077 u32 resized_width; 1078 u32 resize_coeff_h; 1079 1080 tile_idx = col; 1081 in_tile = &ctx->in.tile[tile_idx]; 1082 out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]]; 1083 1084 if (ipu_rot_mode_is_irt(ctx->rot_mode)) 1085 resized_width = out_tile->height; 1086 else 1087 resized_width = out_tile->width; 1088 1089 resize_coeff_h = calc_resize_coeff(in_tile->width, 1090 ctx->downsize_coeff_h, 1091 resized_width, closest); 1092 1093 dev_dbg(priv->ipu->dev, "%s: column %u hscale: *8192/%u\n", 1094 __func__, col, resize_coeff_h); 1095 1096 1097 for (row = 0; row < ctx->in.num_rows; row++) { 1098 tile_idx = row * ctx->in.num_cols + col; 1099 in_tile = &ctx->in.tile[tile_idx]; 1100 out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]]; 1101 1102 /* 1103 * With the horizontal scaling factor known, round up 1104 * resized width (output width or height) to burst size. 1105 */ 1106 if (ipu_rot_mode_is_irt(ctx->rot_mode)) 1107 out_tile->height = round_up(resized_width, 8); 1108 else 1109 out_tile->width = round_up(resized_width, 8); 1110 1111 /* 1112 * Calculate input width from the last accessed input 1113 * pixel given resized width and scaling coefficients. 1114 * Round up to burst size. 1115 */ 1116 last_output = round_up(resized_width, 8) - 1; 1117 if (closest) 1118 last_output++; 1119 in_tile->width = round_up( 1120 (DIV_ROUND_UP(last_output * resize_coeff_h, 1121 8192) + 1) 1122 << ctx->downsize_coeff_h, 8); 1123 } 1124 1125 ctx->resize_coeffs_h[col] = resize_coeff_h; 1126 } 1127 1128 for (row = 0; row < ctx->in.num_rows; row++) { 1129 bool closest = (row < ctx->in.num_rows - 1) && 1130 !(ctx->rot_mode & IPU_ROT_BIT_VFLIP); 1131 u32 resized_height; 1132 u32 resize_coeff_v; 1133 1134 tile_idx = row * ctx->in.num_cols; 1135 in_tile = &ctx->in.tile[tile_idx]; 1136 out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]]; 1137 1138 if (ipu_rot_mode_is_irt(ctx->rot_mode)) 1139 resized_height = out_tile->width; 1140 else 1141 resized_height = out_tile->height; 1142 1143 resize_coeff_v = calc_resize_coeff(in_tile->height, 1144 ctx->downsize_coeff_v, 1145 resized_height, closest); 1146 1147 dev_dbg(priv->ipu->dev, "%s: row %u vscale: *8192/%u\n", 1148 __func__, row, resize_coeff_v); 1149 1150 for (col = 0; col < ctx->in.num_cols; col++) { 1151 tile_idx = row * ctx->in.num_cols + col; 1152 in_tile = &ctx->in.tile[tile_idx]; 1153 out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]]; 1154 1155 /* 1156 * With the vertical scaling factor known, round up 1157 * resized height (output width or height) to IDMAC 1158 * limitations. 1159 */ 1160 if (ipu_rot_mode_is_irt(ctx->rot_mode)) 1161 out_tile->width = round_up(resized_height, 2); 1162 else 1163 out_tile->height = round_up(resized_height, 2); 1164 1165 /* 1166 * Calculate input width from the last accessed input 1167 * pixel given resized height and scaling coefficients. 1168 * Align to IDMAC restrictions. 1169 */ 1170 last_output = round_up(resized_height, 2) - 1; 1171 if (closest) 1172 last_output++; 1173 in_tile->height = round_up( 1174 (DIV_ROUND_UP(last_output * resize_coeff_v, 1175 8192) + 1) 1176 << ctx->downsize_coeff_v, 2); 1177 } 1178 1179 ctx->resize_coeffs_v[row] = resize_coeff_v; 1180 } 1181 } 1182 1183 /* 1184 * return the number of runs in given queue (pending_q or done_q) 1185 * for this context. hold irqlock when calling. 1186 */ 1187 static int get_run_count(struct ipu_image_convert_ctx *ctx, 1188 struct list_head *q) 1189 { 1190 struct ipu_image_convert_run *run; 1191 int count = 0; 1192 1193 lockdep_assert_held(&ctx->chan->irqlock); 1194 1195 list_for_each_entry(run, q, list) { 1196 if (run->ctx == ctx) 1197 count++; 1198 } 1199 1200 return count; 1201 } 1202 1203 static void convert_stop(struct ipu_image_convert_run *run) 1204 { 1205 struct ipu_image_convert_ctx *ctx = run->ctx; 1206 struct ipu_image_convert_chan *chan = ctx->chan; 1207 struct ipu_image_convert_priv *priv = chan->priv; 1208 1209 dev_dbg(priv->ipu->dev, "%s: task %u: stopping ctx %p run %p\n", 1210 __func__, chan->ic_task, ctx, run); 1211 1212 /* disable IC tasks and the channels */ 1213 ipu_ic_task_disable(chan->ic); 1214 ipu_idmac_disable_channel(chan->in_chan); 1215 ipu_idmac_disable_channel(chan->out_chan); 1216 1217 if (ipu_rot_mode_is_irt(ctx->rot_mode)) { 1218 ipu_idmac_disable_channel(chan->rotation_in_chan); 1219 ipu_idmac_disable_channel(chan->rotation_out_chan); 1220 ipu_idmac_unlink(chan->out_chan, chan->rotation_in_chan); 1221 } 1222 1223 ipu_ic_disable(chan->ic); 1224 } 1225 1226 static void init_idmac_channel(struct ipu_image_convert_ctx *ctx, 1227 struct ipuv3_channel *channel, 1228 struct ipu_image_convert_image *image, 1229 enum ipu_rotate_mode rot_mode, 1230 bool rot_swap_width_height, 1231 unsigned int tile) 1232 { 1233 struct ipu_image_convert_chan *chan = ctx->chan; 1234 unsigned int burst_size; 1235 u32 width, height, stride; 1236 dma_addr_t addr0, addr1 = 0; 1237 struct ipu_image tile_image; 1238 unsigned int tile_idx[2]; 1239 1240 if (image->type == IMAGE_CONVERT_OUT) { 1241 tile_idx[0] = ctx->out_tile_map[tile]; 1242 tile_idx[1] = ctx->out_tile_map[1]; 1243 } else { 1244 tile_idx[0] = tile; 1245 tile_idx[1] = 1; 1246 } 1247 1248 if (rot_swap_width_height) { 1249 width = image->tile[tile_idx[0]].height; 1250 height = image->tile[tile_idx[0]].width; 1251 stride = image->tile[tile_idx[0]].rot_stride; 1252 addr0 = ctx->rot_intermediate[0].phys; 1253 if (ctx->double_buffering) 1254 addr1 = ctx->rot_intermediate[1].phys; 1255 } else { 1256 width = image->tile[tile_idx[0]].width; 1257 height = image->tile[tile_idx[0]].height; 1258 stride = image->stride; 1259 addr0 = image->base.phys0 + 1260 image->tile[tile_idx[0]].offset; 1261 if (ctx->double_buffering) 1262 addr1 = image->base.phys0 + 1263 image->tile[tile_idx[1]].offset; 1264 } 1265 1266 ipu_cpmem_zero(channel); 1267 1268 memset(&tile_image, 0, sizeof(tile_image)); 1269 tile_image.pix.width = tile_image.rect.width = width; 1270 tile_image.pix.height = tile_image.rect.height = height; 1271 tile_image.pix.bytesperline = stride; 1272 tile_image.pix.pixelformat = image->fmt->fourcc; 1273 tile_image.phys0 = addr0; 1274 tile_image.phys1 = addr1; 1275 if (image->fmt->planar && !rot_swap_width_height) { 1276 tile_image.u_offset = image->tile[tile_idx[0]].u_off; 1277 tile_image.v_offset = image->tile[tile_idx[0]].v_off; 1278 } 1279 1280 ipu_cpmem_set_image(channel, &tile_image); 1281 1282 if (rot_mode) 1283 ipu_cpmem_set_rotation(channel, rot_mode); 1284 1285 /* 1286 * Skip writing U and V components to odd rows in the output 1287 * channels for planar 4:2:0. 1288 */ 1289 if ((channel == chan->out_chan || 1290 channel == chan->rotation_out_chan) && 1291 image->fmt->planar && image->fmt->uv_height_dec == 2) 1292 ipu_cpmem_skip_odd_chroma_rows(channel); 1293 1294 if (channel == chan->rotation_in_chan || 1295 channel == chan->rotation_out_chan) { 1296 burst_size = 8; 1297 ipu_cpmem_set_block_mode(channel); 1298 } else 1299 burst_size = (width % 16) ? 8 : 16; 1300 1301 ipu_cpmem_set_burstsize(channel, burst_size); 1302 1303 ipu_ic_task_idma_init(chan->ic, channel, width, height, 1304 burst_size, rot_mode); 1305 1306 /* 1307 * Setting a non-zero AXI ID collides with the PRG AXI snooping, so 1308 * only do this when there is no PRG present. 1309 */ 1310 if (!channel->ipu->prg_priv) 1311 ipu_cpmem_set_axi_id(channel, 1); 1312 1313 ipu_idmac_set_double_buffer(channel, ctx->double_buffering); 1314 } 1315 1316 static int convert_start(struct ipu_image_convert_run *run, unsigned int tile) 1317 { 1318 struct ipu_image_convert_ctx *ctx = run->ctx; 1319 struct ipu_image_convert_chan *chan = ctx->chan; 1320 struct ipu_image_convert_priv *priv = chan->priv; 1321 struct ipu_image_convert_image *s_image = &ctx->in; 1322 struct ipu_image_convert_image *d_image = &ctx->out; 1323 unsigned int dst_tile = ctx->out_tile_map[tile]; 1324 unsigned int dest_width, dest_height; 1325 unsigned int col, row; 1326 u32 rsc; 1327 int ret; 1328 1329 dev_dbg(priv->ipu->dev, "%s: task %u: starting ctx %p run %p tile %u -> %u\n", 1330 __func__, chan->ic_task, ctx, run, tile, dst_tile); 1331 1332 if (ipu_rot_mode_is_irt(ctx->rot_mode)) { 1333 /* swap width/height for resizer */ 1334 dest_width = d_image->tile[dst_tile].height; 1335 dest_height = d_image->tile[dst_tile].width; 1336 } else { 1337 dest_width = d_image->tile[dst_tile].width; 1338 dest_height = d_image->tile[dst_tile].height; 1339 } 1340 1341 row = tile / s_image->num_cols; 1342 col = tile % s_image->num_cols; 1343 1344 rsc = (ctx->downsize_coeff_v << 30) | 1345 (ctx->resize_coeffs_v[row] << 16) | 1346 (ctx->downsize_coeff_h << 14) | 1347 (ctx->resize_coeffs_h[col]); 1348 1349 dev_dbg(priv->ipu->dev, "%s: %ux%u -> %ux%u (rsc = 0x%x)\n", 1350 __func__, s_image->tile[tile].width, 1351 s_image->tile[tile].height, dest_width, dest_height, rsc); 1352 1353 /* setup the IC resizer and CSC */ 1354 ret = ipu_ic_task_init_rsc(chan->ic, &ctx->csc, 1355 s_image->tile[tile].width, 1356 s_image->tile[tile].height, 1357 dest_width, 1358 dest_height, 1359 rsc); 1360 if (ret) { 1361 dev_err(priv->ipu->dev, "ipu_ic_task_init failed, %d\n", ret); 1362 return ret; 1363 } 1364 1365 /* init the source MEM-->IC PP IDMAC channel */ 1366 init_idmac_channel(ctx, chan->in_chan, s_image, 1367 IPU_ROTATE_NONE, false, tile); 1368 1369 if (ipu_rot_mode_is_irt(ctx->rot_mode)) { 1370 /* init the IC PP-->MEM IDMAC channel */ 1371 init_idmac_channel(ctx, chan->out_chan, d_image, 1372 IPU_ROTATE_NONE, true, tile); 1373 1374 /* init the MEM-->IC PP ROT IDMAC channel */ 1375 init_idmac_channel(ctx, chan->rotation_in_chan, d_image, 1376 ctx->rot_mode, true, tile); 1377 1378 /* init the destination IC PP ROT-->MEM IDMAC channel */ 1379 init_idmac_channel(ctx, chan->rotation_out_chan, d_image, 1380 IPU_ROTATE_NONE, false, tile); 1381 1382 /* now link IC PP-->MEM to MEM-->IC PP ROT */ 1383 ipu_idmac_link(chan->out_chan, chan->rotation_in_chan); 1384 } else { 1385 /* init the destination IC PP-->MEM IDMAC channel */ 1386 init_idmac_channel(ctx, chan->out_chan, d_image, 1387 ctx->rot_mode, false, tile); 1388 } 1389 1390 /* enable the IC */ 1391 ipu_ic_enable(chan->ic); 1392 1393 /* set buffers ready */ 1394 ipu_idmac_select_buffer(chan->in_chan, 0); 1395 ipu_idmac_select_buffer(chan->out_chan, 0); 1396 if (ipu_rot_mode_is_irt(ctx->rot_mode)) 1397 ipu_idmac_select_buffer(chan->rotation_out_chan, 0); 1398 if (ctx->double_buffering) { 1399 ipu_idmac_select_buffer(chan->in_chan, 1); 1400 ipu_idmac_select_buffer(chan->out_chan, 1); 1401 if (ipu_rot_mode_is_irt(ctx->rot_mode)) 1402 ipu_idmac_select_buffer(chan->rotation_out_chan, 1); 1403 } 1404 1405 /* enable the channels! */ 1406 ipu_idmac_enable_channel(chan->in_chan); 1407 ipu_idmac_enable_channel(chan->out_chan); 1408 if (ipu_rot_mode_is_irt(ctx->rot_mode)) { 1409 ipu_idmac_enable_channel(chan->rotation_in_chan); 1410 ipu_idmac_enable_channel(chan->rotation_out_chan); 1411 } 1412 1413 ipu_ic_task_enable(chan->ic); 1414 1415 ipu_cpmem_dump(chan->in_chan); 1416 ipu_cpmem_dump(chan->out_chan); 1417 if (ipu_rot_mode_is_irt(ctx->rot_mode)) { 1418 ipu_cpmem_dump(chan->rotation_in_chan); 1419 ipu_cpmem_dump(chan->rotation_out_chan); 1420 } 1421 1422 ipu_dump(priv->ipu); 1423 1424 return 0; 1425 } 1426 1427 /* hold irqlock when calling */ 1428 static int do_run(struct ipu_image_convert_run *run) 1429 { 1430 struct ipu_image_convert_ctx *ctx = run->ctx; 1431 struct ipu_image_convert_chan *chan = ctx->chan; 1432 1433 lockdep_assert_held(&chan->irqlock); 1434 1435 ctx->in.base.phys0 = run->in_phys; 1436 ctx->out.base.phys0 = run->out_phys; 1437 1438 ctx->cur_buf_num = 0; 1439 ctx->next_tile = 1; 1440 1441 /* remove run from pending_q and set as current */ 1442 list_del(&run->list); 1443 chan->current_run = run; 1444 1445 return convert_start(run, 0); 1446 } 1447 1448 /* hold irqlock when calling */ 1449 static void run_next(struct ipu_image_convert_chan *chan) 1450 { 1451 struct ipu_image_convert_priv *priv = chan->priv; 1452 struct ipu_image_convert_run *run, *tmp; 1453 int ret; 1454 1455 lockdep_assert_held(&chan->irqlock); 1456 1457 list_for_each_entry_safe(run, tmp, &chan->pending_q, list) { 1458 /* skip contexts that are aborting */ 1459 if (run->ctx->aborting) { 1460 dev_dbg(priv->ipu->dev, 1461 "%s: task %u: skipping aborting ctx %p run %p\n", 1462 __func__, chan->ic_task, run->ctx, run); 1463 continue; 1464 } 1465 1466 ret = do_run(run); 1467 if (!ret) 1468 break; 1469 1470 /* 1471 * something went wrong with start, add the run 1472 * to done q and continue to the next run in the 1473 * pending q. 1474 */ 1475 run->status = ret; 1476 list_add_tail(&run->list, &chan->done_q); 1477 chan->current_run = NULL; 1478 } 1479 } 1480 1481 static void empty_done_q(struct ipu_image_convert_chan *chan) 1482 { 1483 struct ipu_image_convert_priv *priv = chan->priv; 1484 struct ipu_image_convert_run *run; 1485 unsigned long flags; 1486 1487 spin_lock_irqsave(&chan->irqlock, flags); 1488 1489 while (!list_empty(&chan->done_q)) { 1490 run = list_entry(chan->done_q.next, 1491 struct ipu_image_convert_run, 1492 list); 1493 1494 list_del(&run->list); 1495 1496 dev_dbg(priv->ipu->dev, 1497 "%s: task %u: completing ctx %p run %p with %d\n", 1498 __func__, chan->ic_task, run->ctx, run, run->status); 1499 1500 /* call the completion callback and free the run */ 1501 spin_unlock_irqrestore(&chan->irqlock, flags); 1502 run->ctx->complete(run, run->ctx->complete_context); 1503 spin_lock_irqsave(&chan->irqlock, flags); 1504 } 1505 1506 spin_unlock_irqrestore(&chan->irqlock, flags); 1507 } 1508 1509 /* 1510 * the bottom half thread clears out the done_q, calling the 1511 * completion handler for each. 1512 */ 1513 static irqreturn_t do_bh(int irq, void *dev_id) 1514 { 1515 struct ipu_image_convert_chan *chan = dev_id; 1516 struct ipu_image_convert_priv *priv = chan->priv; 1517 struct ipu_image_convert_ctx *ctx; 1518 unsigned long flags; 1519 1520 dev_dbg(priv->ipu->dev, "%s: task %u: enter\n", __func__, 1521 chan->ic_task); 1522 1523 empty_done_q(chan); 1524 1525 spin_lock_irqsave(&chan->irqlock, flags); 1526 1527 /* 1528 * the done_q is cleared out, signal any contexts 1529 * that are aborting that abort can complete. 1530 */ 1531 list_for_each_entry(ctx, &chan->ctx_list, list) { 1532 if (ctx->aborting) { 1533 dev_dbg(priv->ipu->dev, 1534 "%s: task %u: signaling abort for ctx %p\n", 1535 __func__, chan->ic_task, ctx); 1536 complete_all(&ctx->aborted); 1537 } 1538 } 1539 1540 spin_unlock_irqrestore(&chan->irqlock, flags); 1541 1542 dev_dbg(priv->ipu->dev, "%s: task %u: exit\n", __func__, 1543 chan->ic_task); 1544 1545 return IRQ_HANDLED; 1546 } 1547 1548 static bool ic_settings_changed(struct ipu_image_convert_ctx *ctx) 1549 { 1550 unsigned int cur_tile = ctx->next_tile - 1; 1551 unsigned int next_tile = ctx->next_tile; 1552 1553 if (ctx->resize_coeffs_h[cur_tile % ctx->in.num_cols] != 1554 ctx->resize_coeffs_h[next_tile % ctx->in.num_cols] || 1555 ctx->resize_coeffs_v[cur_tile / ctx->in.num_cols] != 1556 ctx->resize_coeffs_v[next_tile / ctx->in.num_cols] || 1557 ctx->in.tile[cur_tile].width != ctx->in.tile[next_tile].width || 1558 ctx->in.tile[cur_tile].height != ctx->in.tile[next_tile].height || 1559 ctx->out.tile[cur_tile].width != ctx->out.tile[next_tile].width || 1560 ctx->out.tile[cur_tile].height != ctx->out.tile[next_tile].height) 1561 return true; 1562 1563 return false; 1564 } 1565 1566 /* hold irqlock when calling */ 1567 static irqreturn_t do_irq(struct ipu_image_convert_run *run) 1568 { 1569 struct ipu_image_convert_ctx *ctx = run->ctx; 1570 struct ipu_image_convert_chan *chan = ctx->chan; 1571 struct ipu_image_tile *src_tile, *dst_tile; 1572 struct ipu_image_convert_image *s_image = &ctx->in; 1573 struct ipu_image_convert_image *d_image = &ctx->out; 1574 struct ipuv3_channel *outch; 1575 unsigned int dst_idx; 1576 1577 lockdep_assert_held(&chan->irqlock); 1578 1579 outch = ipu_rot_mode_is_irt(ctx->rot_mode) ? 1580 chan->rotation_out_chan : chan->out_chan; 1581 1582 /* 1583 * It is difficult to stop the channel DMA before the channels 1584 * enter the paused state. Without double-buffering the channels 1585 * are always in a paused state when the EOF irq occurs, so it 1586 * is safe to stop the channels now. For double-buffering we 1587 * just ignore the abort until the operation completes, when it 1588 * is safe to shut down. 1589 */ 1590 if (ctx->aborting && !ctx->double_buffering) { 1591 convert_stop(run); 1592 run->status = -EIO; 1593 goto done; 1594 } 1595 1596 if (ctx->next_tile == ctx->num_tiles) { 1597 /* 1598 * the conversion is complete 1599 */ 1600 convert_stop(run); 1601 run->status = 0; 1602 goto done; 1603 } 1604 1605 /* 1606 * not done, place the next tile buffers. 1607 */ 1608 if (!ctx->double_buffering) { 1609 if (ic_settings_changed(ctx)) { 1610 convert_stop(run); 1611 convert_start(run, ctx->next_tile); 1612 } else { 1613 src_tile = &s_image->tile[ctx->next_tile]; 1614 dst_idx = ctx->out_tile_map[ctx->next_tile]; 1615 dst_tile = &d_image->tile[dst_idx]; 1616 1617 ipu_cpmem_set_buffer(chan->in_chan, 0, 1618 s_image->base.phys0 + 1619 src_tile->offset); 1620 ipu_cpmem_set_buffer(outch, 0, 1621 d_image->base.phys0 + 1622 dst_tile->offset); 1623 if (s_image->fmt->planar) 1624 ipu_cpmem_set_uv_offset(chan->in_chan, 1625 src_tile->u_off, 1626 src_tile->v_off); 1627 if (d_image->fmt->planar) 1628 ipu_cpmem_set_uv_offset(outch, 1629 dst_tile->u_off, 1630 dst_tile->v_off); 1631 1632 ipu_idmac_select_buffer(chan->in_chan, 0); 1633 ipu_idmac_select_buffer(outch, 0); 1634 } 1635 } else if (ctx->next_tile < ctx->num_tiles - 1) { 1636 1637 src_tile = &s_image->tile[ctx->next_tile + 1]; 1638 dst_idx = ctx->out_tile_map[ctx->next_tile + 1]; 1639 dst_tile = &d_image->tile[dst_idx]; 1640 1641 ipu_cpmem_set_buffer(chan->in_chan, ctx->cur_buf_num, 1642 s_image->base.phys0 + src_tile->offset); 1643 ipu_cpmem_set_buffer(outch, ctx->cur_buf_num, 1644 d_image->base.phys0 + dst_tile->offset); 1645 1646 ipu_idmac_select_buffer(chan->in_chan, ctx->cur_buf_num); 1647 ipu_idmac_select_buffer(outch, ctx->cur_buf_num); 1648 1649 ctx->cur_buf_num ^= 1; 1650 } 1651 1652 ctx->next_tile++; 1653 return IRQ_HANDLED; 1654 done: 1655 list_add_tail(&run->list, &chan->done_q); 1656 chan->current_run = NULL; 1657 run_next(chan); 1658 return IRQ_WAKE_THREAD; 1659 } 1660 1661 static irqreturn_t norotate_irq(int irq, void *data) 1662 { 1663 struct ipu_image_convert_chan *chan = data; 1664 struct ipu_image_convert_ctx *ctx; 1665 struct ipu_image_convert_run *run; 1666 unsigned long flags; 1667 irqreturn_t ret; 1668 1669 spin_lock_irqsave(&chan->irqlock, flags); 1670 1671 /* get current run and its context */ 1672 run = chan->current_run; 1673 if (!run) { 1674 ret = IRQ_NONE; 1675 goto out; 1676 } 1677 1678 ctx = run->ctx; 1679 1680 if (ipu_rot_mode_is_irt(ctx->rot_mode)) { 1681 /* this is a rotation operation, just ignore */ 1682 spin_unlock_irqrestore(&chan->irqlock, flags); 1683 return IRQ_HANDLED; 1684 } 1685 1686 ret = do_irq(run); 1687 out: 1688 spin_unlock_irqrestore(&chan->irqlock, flags); 1689 return ret; 1690 } 1691 1692 static irqreturn_t rotate_irq(int irq, void *data) 1693 { 1694 struct ipu_image_convert_chan *chan = data; 1695 struct ipu_image_convert_priv *priv = chan->priv; 1696 struct ipu_image_convert_ctx *ctx; 1697 struct ipu_image_convert_run *run; 1698 unsigned long flags; 1699 irqreturn_t ret; 1700 1701 spin_lock_irqsave(&chan->irqlock, flags); 1702 1703 /* get current run and its context */ 1704 run = chan->current_run; 1705 if (!run) { 1706 ret = IRQ_NONE; 1707 goto out; 1708 } 1709 1710 ctx = run->ctx; 1711 1712 if (!ipu_rot_mode_is_irt(ctx->rot_mode)) { 1713 /* this was NOT a rotation operation, shouldn't happen */ 1714 dev_err(priv->ipu->dev, "Unexpected rotation interrupt\n"); 1715 spin_unlock_irqrestore(&chan->irqlock, flags); 1716 return IRQ_HANDLED; 1717 } 1718 1719 ret = do_irq(run); 1720 out: 1721 spin_unlock_irqrestore(&chan->irqlock, flags); 1722 return ret; 1723 } 1724 1725 /* 1726 * try to force the completion of runs for this ctx. Called when 1727 * abort wait times out in ipu_image_convert_abort(). 1728 */ 1729 static void force_abort(struct ipu_image_convert_ctx *ctx) 1730 { 1731 struct ipu_image_convert_chan *chan = ctx->chan; 1732 struct ipu_image_convert_run *run; 1733 unsigned long flags; 1734 1735 spin_lock_irqsave(&chan->irqlock, flags); 1736 1737 run = chan->current_run; 1738 if (run && run->ctx == ctx) { 1739 convert_stop(run); 1740 run->status = -EIO; 1741 list_add_tail(&run->list, &chan->done_q); 1742 chan->current_run = NULL; 1743 run_next(chan); 1744 } 1745 1746 spin_unlock_irqrestore(&chan->irqlock, flags); 1747 1748 empty_done_q(chan); 1749 } 1750 1751 static void release_ipu_resources(struct ipu_image_convert_chan *chan) 1752 { 1753 if (chan->out_eof_irq >= 0) 1754 free_irq(chan->out_eof_irq, chan); 1755 if (chan->rot_out_eof_irq >= 0) 1756 free_irq(chan->rot_out_eof_irq, chan); 1757 1758 if (!IS_ERR_OR_NULL(chan->in_chan)) 1759 ipu_idmac_put(chan->in_chan); 1760 if (!IS_ERR_OR_NULL(chan->out_chan)) 1761 ipu_idmac_put(chan->out_chan); 1762 if (!IS_ERR_OR_NULL(chan->rotation_in_chan)) 1763 ipu_idmac_put(chan->rotation_in_chan); 1764 if (!IS_ERR_OR_NULL(chan->rotation_out_chan)) 1765 ipu_idmac_put(chan->rotation_out_chan); 1766 if (!IS_ERR_OR_NULL(chan->ic)) 1767 ipu_ic_put(chan->ic); 1768 1769 chan->in_chan = chan->out_chan = chan->rotation_in_chan = 1770 chan->rotation_out_chan = NULL; 1771 chan->out_eof_irq = chan->rot_out_eof_irq = -1; 1772 } 1773 1774 static int get_ipu_resources(struct ipu_image_convert_chan *chan) 1775 { 1776 const struct ipu_image_convert_dma_chan *dma = chan->dma_ch; 1777 struct ipu_image_convert_priv *priv = chan->priv; 1778 int ret; 1779 1780 /* get IC */ 1781 chan->ic = ipu_ic_get(priv->ipu, chan->ic_task); 1782 if (IS_ERR(chan->ic)) { 1783 dev_err(priv->ipu->dev, "could not acquire IC\n"); 1784 ret = PTR_ERR(chan->ic); 1785 goto err; 1786 } 1787 1788 /* get IDMAC channels */ 1789 chan->in_chan = ipu_idmac_get(priv->ipu, dma->in); 1790 chan->out_chan = ipu_idmac_get(priv->ipu, dma->out); 1791 if (IS_ERR(chan->in_chan) || IS_ERR(chan->out_chan)) { 1792 dev_err(priv->ipu->dev, "could not acquire idmac channels\n"); 1793 ret = -EBUSY; 1794 goto err; 1795 } 1796 1797 chan->rotation_in_chan = ipu_idmac_get(priv->ipu, dma->rot_in); 1798 chan->rotation_out_chan = ipu_idmac_get(priv->ipu, dma->rot_out); 1799 if (IS_ERR(chan->rotation_in_chan) || IS_ERR(chan->rotation_out_chan)) { 1800 dev_err(priv->ipu->dev, 1801 "could not acquire idmac rotation channels\n"); 1802 ret = -EBUSY; 1803 goto err; 1804 } 1805 1806 /* acquire the EOF interrupts */ 1807 chan->out_eof_irq = ipu_idmac_channel_irq(priv->ipu, 1808 chan->out_chan, 1809 IPU_IRQ_EOF); 1810 1811 ret = request_threaded_irq(chan->out_eof_irq, norotate_irq, do_bh, 1812 0, "ipu-ic", chan); 1813 if (ret < 0) { 1814 dev_err(priv->ipu->dev, "could not acquire irq %d\n", 1815 chan->out_eof_irq); 1816 chan->out_eof_irq = -1; 1817 goto err; 1818 } 1819 1820 chan->rot_out_eof_irq = ipu_idmac_channel_irq(priv->ipu, 1821 chan->rotation_out_chan, 1822 IPU_IRQ_EOF); 1823 1824 ret = request_threaded_irq(chan->rot_out_eof_irq, rotate_irq, do_bh, 1825 0, "ipu-ic", chan); 1826 if (ret < 0) { 1827 dev_err(priv->ipu->dev, "could not acquire irq %d\n", 1828 chan->rot_out_eof_irq); 1829 chan->rot_out_eof_irq = -1; 1830 goto err; 1831 } 1832 1833 return 0; 1834 err: 1835 release_ipu_resources(chan); 1836 return ret; 1837 } 1838 1839 static int fill_image(struct ipu_image_convert_ctx *ctx, 1840 struct ipu_image_convert_image *ic_image, 1841 struct ipu_image *image, 1842 enum ipu_image_convert_type type) 1843 { 1844 struct ipu_image_convert_priv *priv = ctx->chan->priv; 1845 1846 ic_image->base = *image; 1847 ic_image->type = type; 1848 1849 ic_image->fmt = get_format(image->pix.pixelformat); 1850 if (!ic_image->fmt) { 1851 dev_err(priv->ipu->dev, "pixelformat not supported for %s\n", 1852 type == IMAGE_CONVERT_OUT ? "Output" : "Input"); 1853 return -EINVAL; 1854 } 1855 1856 if (ic_image->fmt->planar) 1857 ic_image->stride = ic_image->base.pix.width; 1858 else 1859 ic_image->stride = ic_image->base.pix.bytesperline; 1860 1861 return 0; 1862 } 1863 1864 /* borrowed from drivers/media/v4l2-core/v4l2-common.c */ 1865 static unsigned int clamp_align(unsigned int x, unsigned int min, 1866 unsigned int max, unsigned int align) 1867 { 1868 /* Bits that must be zero to be aligned */ 1869 unsigned int mask = ~((1 << align) - 1); 1870 1871 /* Clamp to aligned min and max */ 1872 x = clamp(x, (min + ~mask) & mask, max & mask); 1873 1874 /* Round to nearest aligned value */ 1875 if (align) 1876 x = (x + (1 << (align - 1))) & mask; 1877 1878 return x; 1879 } 1880 1881 /* Adjusts input/output images to IPU restrictions */ 1882 void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out, 1883 enum ipu_rotate_mode rot_mode) 1884 { 1885 const struct ipu_image_pixfmt *infmt, *outfmt; 1886 u32 w_align_out, h_align_out; 1887 u32 w_align_in, h_align_in; 1888 1889 infmt = get_format(in->pix.pixelformat); 1890 outfmt = get_format(out->pix.pixelformat); 1891 1892 /* set some default pixel formats if needed */ 1893 if (!infmt) { 1894 in->pix.pixelformat = V4L2_PIX_FMT_RGB24; 1895 infmt = get_format(V4L2_PIX_FMT_RGB24); 1896 } 1897 if (!outfmt) { 1898 out->pix.pixelformat = V4L2_PIX_FMT_RGB24; 1899 outfmt = get_format(V4L2_PIX_FMT_RGB24); 1900 } 1901 1902 /* image converter does not handle fields */ 1903 in->pix.field = out->pix.field = V4L2_FIELD_NONE; 1904 1905 /* resizer cannot downsize more than 4:1 */ 1906 if (ipu_rot_mode_is_irt(rot_mode)) { 1907 out->pix.height = max_t(__u32, out->pix.height, 1908 in->pix.width / 4); 1909 out->pix.width = max_t(__u32, out->pix.width, 1910 in->pix.height / 4); 1911 } else { 1912 out->pix.width = max_t(__u32, out->pix.width, 1913 in->pix.width / 4); 1914 out->pix.height = max_t(__u32, out->pix.height, 1915 in->pix.height / 4); 1916 } 1917 1918 /* align input width/height */ 1919 w_align_in = ilog2(tile_width_align(IMAGE_CONVERT_IN, infmt, 1920 rot_mode)); 1921 h_align_in = ilog2(tile_height_align(IMAGE_CONVERT_IN, infmt, 1922 rot_mode)); 1923 in->pix.width = clamp_align(in->pix.width, MIN_W, MAX_W, 1924 w_align_in); 1925 in->pix.height = clamp_align(in->pix.height, MIN_H, MAX_H, 1926 h_align_in); 1927 1928 /* align output width/height */ 1929 w_align_out = ilog2(tile_width_align(IMAGE_CONVERT_OUT, outfmt, 1930 rot_mode)); 1931 h_align_out = ilog2(tile_height_align(IMAGE_CONVERT_OUT, outfmt, 1932 rot_mode)); 1933 out->pix.width = clamp_align(out->pix.width, MIN_W, MAX_W, 1934 w_align_out); 1935 out->pix.height = clamp_align(out->pix.height, MIN_H, MAX_H, 1936 h_align_out); 1937 1938 /* set input/output strides and image sizes */ 1939 in->pix.bytesperline = infmt->planar ? 1940 clamp_align(in->pix.width, 2 << w_align_in, MAX_W, 1941 w_align_in) : 1942 clamp_align((in->pix.width * infmt->bpp) >> 3, 1943 ((2 << w_align_in) * infmt->bpp) >> 3, 1944 (MAX_W * infmt->bpp) >> 3, 1945 w_align_in); 1946 in->pix.sizeimage = infmt->planar ? 1947 (in->pix.height * in->pix.bytesperline * infmt->bpp) >> 3 : 1948 in->pix.height * in->pix.bytesperline; 1949 out->pix.bytesperline = outfmt->planar ? out->pix.width : 1950 (out->pix.width * outfmt->bpp) >> 3; 1951 out->pix.sizeimage = outfmt->planar ? 1952 (out->pix.height * out->pix.bytesperline * outfmt->bpp) >> 3 : 1953 out->pix.height * out->pix.bytesperline; 1954 } 1955 EXPORT_SYMBOL_GPL(ipu_image_convert_adjust); 1956 1957 /* 1958 * this is used by ipu_image_convert_prepare() to verify set input and 1959 * output images are valid before starting the conversion. Clients can 1960 * also call it before calling ipu_image_convert_prepare(). 1961 */ 1962 int ipu_image_convert_verify(struct ipu_image *in, struct ipu_image *out, 1963 enum ipu_rotate_mode rot_mode) 1964 { 1965 struct ipu_image testin, testout; 1966 1967 testin = *in; 1968 testout = *out; 1969 1970 ipu_image_convert_adjust(&testin, &testout, rot_mode); 1971 1972 if (testin.pix.width != in->pix.width || 1973 testin.pix.height != in->pix.height || 1974 testout.pix.width != out->pix.width || 1975 testout.pix.height != out->pix.height) 1976 return -EINVAL; 1977 1978 return 0; 1979 } 1980 EXPORT_SYMBOL_GPL(ipu_image_convert_verify); 1981 1982 /* 1983 * Call ipu_image_convert_prepare() to prepare for the conversion of 1984 * given images and rotation mode. Returns a new conversion context. 1985 */ 1986 struct ipu_image_convert_ctx * 1987 ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task, 1988 struct ipu_image *in, struct ipu_image *out, 1989 enum ipu_rotate_mode rot_mode, 1990 ipu_image_convert_cb_t complete, 1991 void *complete_context) 1992 { 1993 struct ipu_image_convert_priv *priv = ipu->image_convert_priv; 1994 struct ipu_image_convert_image *s_image, *d_image; 1995 struct ipu_image_convert_chan *chan; 1996 struct ipu_image_convert_ctx *ctx; 1997 unsigned long flags; 1998 unsigned int i; 1999 bool get_res; 2000 int ret; 2001 2002 if (!in || !out || !complete || 2003 (ic_task != IC_TASK_VIEWFINDER && 2004 ic_task != IC_TASK_POST_PROCESSOR)) 2005 return ERR_PTR(-EINVAL); 2006 2007 /* verify the in/out images before continuing */ 2008 ret = ipu_image_convert_verify(in, out, rot_mode); 2009 if (ret) { 2010 dev_err(priv->ipu->dev, "%s: in/out formats invalid\n", 2011 __func__); 2012 return ERR_PTR(ret); 2013 } 2014 2015 chan = &priv->chan[ic_task]; 2016 2017 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 2018 if (!ctx) 2019 return ERR_PTR(-ENOMEM); 2020 2021 dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p\n", __func__, 2022 chan->ic_task, ctx); 2023 2024 ctx->chan = chan; 2025 init_completion(&ctx->aborted); 2026 2027 s_image = &ctx->in; 2028 d_image = &ctx->out; 2029 2030 /* set tiling and rotation */ 2031 d_image->num_rows = num_stripes(out->pix.height); 2032 d_image->num_cols = num_stripes(out->pix.width); 2033 if (ipu_rot_mode_is_irt(rot_mode)) { 2034 s_image->num_rows = d_image->num_cols; 2035 s_image->num_cols = d_image->num_rows; 2036 } else { 2037 s_image->num_rows = d_image->num_rows; 2038 s_image->num_cols = d_image->num_cols; 2039 } 2040 2041 ctx->num_tiles = d_image->num_cols * d_image->num_rows; 2042 ctx->rot_mode = rot_mode; 2043 2044 ret = fill_image(ctx, s_image, in, IMAGE_CONVERT_IN); 2045 if (ret) 2046 goto out_free; 2047 ret = fill_image(ctx, d_image, out, IMAGE_CONVERT_OUT); 2048 if (ret) 2049 goto out_free; 2050 2051 ret = calc_image_resize_coefficients(ctx, in, out); 2052 if (ret) 2053 goto out_free; 2054 2055 calc_out_tile_map(ctx); 2056 2057 find_seams(ctx, s_image, d_image); 2058 2059 calc_tile_dimensions(ctx, s_image); 2060 ret = calc_tile_offsets(ctx, s_image); 2061 if (ret) 2062 goto out_free; 2063 2064 calc_tile_dimensions(ctx, d_image); 2065 ret = calc_tile_offsets(ctx, d_image); 2066 if (ret) 2067 goto out_free; 2068 2069 calc_tile_resize_coefficients(ctx); 2070 2071 ret = ipu_ic_calc_csc(&ctx->csc, 2072 s_image->base.pix.ycbcr_enc, 2073 s_image->base.pix.quantization, 2074 ipu_pixelformat_to_colorspace(s_image->fmt->fourcc), 2075 d_image->base.pix.ycbcr_enc, 2076 d_image->base.pix.quantization, 2077 ipu_pixelformat_to_colorspace(d_image->fmt->fourcc)); 2078 if (ret) 2079 goto out_free; 2080 2081 dump_format(ctx, s_image); 2082 dump_format(ctx, d_image); 2083 2084 ctx->complete = complete; 2085 ctx->complete_context = complete_context; 2086 2087 /* 2088 * Can we use double-buffering for this operation? If there is 2089 * only one tile (the whole image can be converted in a single 2090 * operation) there's no point in using double-buffering. Also, 2091 * the IPU's IDMAC channels allow only a single U and V plane 2092 * offset shared between both buffers, but these offsets change 2093 * for every tile, and therefore would have to be updated for 2094 * each buffer which is not possible. So double-buffering is 2095 * impossible when either the source or destination images are 2096 * a planar format (YUV420, YUV422P, etc.). Further, differently 2097 * sized tiles or different resizing coefficients per tile 2098 * prevent double-buffering as well. 2099 */ 2100 ctx->double_buffering = (ctx->num_tiles > 1 && 2101 !s_image->fmt->planar && 2102 !d_image->fmt->planar); 2103 for (i = 1; i < ctx->num_tiles; i++) { 2104 if (ctx->in.tile[i].width != ctx->in.tile[0].width || 2105 ctx->in.tile[i].height != ctx->in.tile[0].height || 2106 ctx->out.tile[i].width != ctx->out.tile[0].width || 2107 ctx->out.tile[i].height != ctx->out.tile[0].height) { 2108 ctx->double_buffering = false; 2109 break; 2110 } 2111 } 2112 for (i = 1; i < ctx->in.num_cols; i++) { 2113 if (ctx->resize_coeffs_h[i] != ctx->resize_coeffs_h[0]) { 2114 ctx->double_buffering = false; 2115 break; 2116 } 2117 } 2118 for (i = 1; i < ctx->in.num_rows; i++) { 2119 if (ctx->resize_coeffs_v[i] != ctx->resize_coeffs_v[0]) { 2120 ctx->double_buffering = false; 2121 break; 2122 } 2123 } 2124 2125 if (ipu_rot_mode_is_irt(ctx->rot_mode)) { 2126 unsigned long intermediate_size = d_image->tile[0].size; 2127 2128 for (i = 1; i < ctx->num_tiles; i++) { 2129 if (d_image->tile[i].size > intermediate_size) 2130 intermediate_size = d_image->tile[i].size; 2131 } 2132 2133 ret = alloc_dma_buf(priv, &ctx->rot_intermediate[0], 2134 intermediate_size); 2135 if (ret) 2136 goto out_free; 2137 if (ctx->double_buffering) { 2138 ret = alloc_dma_buf(priv, 2139 &ctx->rot_intermediate[1], 2140 intermediate_size); 2141 if (ret) 2142 goto out_free_dmabuf0; 2143 } 2144 } 2145 2146 spin_lock_irqsave(&chan->irqlock, flags); 2147 2148 get_res = list_empty(&chan->ctx_list); 2149 2150 list_add_tail(&ctx->list, &chan->ctx_list); 2151 2152 spin_unlock_irqrestore(&chan->irqlock, flags); 2153 2154 if (get_res) { 2155 ret = get_ipu_resources(chan); 2156 if (ret) 2157 goto out_free_dmabuf1; 2158 } 2159 2160 return ctx; 2161 2162 out_free_dmabuf1: 2163 free_dma_buf(priv, &ctx->rot_intermediate[1]); 2164 spin_lock_irqsave(&chan->irqlock, flags); 2165 list_del(&ctx->list); 2166 spin_unlock_irqrestore(&chan->irqlock, flags); 2167 out_free_dmabuf0: 2168 free_dma_buf(priv, &ctx->rot_intermediate[0]); 2169 out_free: 2170 kfree(ctx); 2171 return ERR_PTR(ret); 2172 } 2173 EXPORT_SYMBOL_GPL(ipu_image_convert_prepare); 2174 2175 /* 2176 * Carry out a single image conversion run. Only the physaddr's of the input 2177 * and output image buffers are needed. The conversion context must have 2178 * been created previously with ipu_image_convert_prepare(). 2179 */ 2180 int ipu_image_convert_queue(struct ipu_image_convert_run *run) 2181 { 2182 struct ipu_image_convert_chan *chan; 2183 struct ipu_image_convert_priv *priv; 2184 struct ipu_image_convert_ctx *ctx; 2185 unsigned long flags; 2186 int ret = 0; 2187 2188 if (!run || !run->ctx || !run->in_phys || !run->out_phys) 2189 return -EINVAL; 2190 2191 ctx = run->ctx; 2192 chan = ctx->chan; 2193 priv = chan->priv; 2194 2195 dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p run %p\n", __func__, 2196 chan->ic_task, ctx, run); 2197 2198 INIT_LIST_HEAD(&run->list); 2199 2200 spin_lock_irqsave(&chan->irqlock, flags); 2201 2202 if (ctx->aborting) { 2203 ret = -EIO; 2204 goto unlock; 2205 } 2206 2207 list_add_tail(&run->list, &chan->pending_q); 2208 2209 if (!chan->current_run) { 2210 ret = do_run(run); 2211 if (ret) 2212 chan->current_run = NULL; 2213 } 2214 unlock: 2215 spin_unlock_irqrestore(&chan->irqlock, flags); 2216 return ret; 2217 } 2218 EXPORT_SYMBOL_GPL(ipu_image_convert_queue); 2219 2220 /* Abort any active or pending conversions for this context */ 2221 static void __ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx) 2222 { 2223 struct ipu_image_convert_chan *chan = ctx->chan; 2224 struct ipu_image_convert_priv *priv = chan->priv; 2225 struct ipu_image_convert_run *run, *active_run, *tmp; 2226 unsigned long flags; 2227 int run_count, ret; 2228 2229 spin_lock_irqsave(&chan->irqlock, flags); 2230 2231 /* move all remaining pending runs in this context to done_q */ 2232 list_for_each_entry_safe(run, tmp, &chan->pending_q, list) { 2233 if (run->ctx != ctx) 2234 continue; 2235 run->status = -EIO; 2236 list_move_tail(&run->list, &chan->done_q); 2237 } 2238 2239 run_count = get_run_count(ctx, &chan->done_q); 2240 active_run = (chan->current_run && chan->current_run->ctx == ctx) ? 2241 chan->current_run : NULL; 2242 2243 if (active_run) 2244 reinit_completion(&ctx->aborted); 2245 2246 ctx->aborting = true; 2247 2248 spin_unlock_irqrestore(&chan->irqlock, flags); 2249 2250 if (!run_count && !active_run) { 2251 dev_dbg(priv->ipu->dev, 2252 "%s: task %u: no abort needed for ctx %p\n", 2253 __func__, chan->ic_task, ctx); 2254 return; 2255 } 2256 2257 if (!active_run) { 2258 empty_done_q(chan); 2259 return; 2260 } 2261 2262 dev_dbg(priv->ipu->dev, 2263 "%s: task %u: wait for completion: %d runs\n", 2264 __func__, chan->ic_task, run_count); 2265 2266 ret = wait_for_completion_timeout(&ctx->aborted, 2267 msecs_to_jiffies(10000)); 2268 if (ret == 0) { 2269 dev_warn(priv->ipu->dev, "%s: timeout\n", __func__); 2270 force_abort(ctx); 2271 } 2272 } 2273 2274 void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx) 2275 { 2276 __ipu_image_convert_abort(ctx); 2277 ctx->aborting = false; 2278 } 2279 EXPORT_SYMBOL_GPL(ipu_image_convert_abort); 2280 2281 /* Unprepare image conversion context */ 2282 void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx) 2283 { 2284 struct ipu_image_convert_chan *chan = ctx->chan; 2285 struct ipu_image_convert_priv *priv = chan->priv; 2286 unsigned long flags; 2287 bool put_res; 2288 2289 /* make sure no runs are hanging around */ 2290 __ipu_image_convert_abort(ctx); 2291 2292 dev_dbg(priv->ipu->dev, "%s: task %u: removing ctx %p\n", __func__, 2293 chan->ic_task, ctx); 2294 2295 spin_lock_irqsave(&chan->irqlock, flags); 2296 2297 list_del(&ctx->list); 2298 2299 put_res = list_empty(&chan->ctx_list); 2300 2301 spin_unlock_irqrestore(&chan->irqlock, flags); 2302 2303 if (put_res) 2304 release_ipu_resources(chan); 2305 2306 free_dma_buf(priv, &ctx->rot_intermediate[1]); 2307 free_dma_buf(priv, &ctx->rot_intermediate[0]); 2308 2309 kfree(ctx); 2310 } 2311 EXPORT_SYMBOL_GPL(ipu_image_convert_unprepare); 2312 2313 /* 2314 * "Canned" asynchronous single image conversion. Allocates and returns 2315 * a new conversion run. On successful return the caller must free the 2316 * run and call ipu_image_convert_unprepare() after conversion completes. 2317 */ 2318 struct ipu_image_convert_run * 2319 ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task, 2320 struct ipu_image *in, struct ipu_image *out, 2321 enum ipu_rotate_mode rot_mode, 2322 ipu_image_convert_cb_t complete, 2323 void *complete_context) 2324 { 2325 struct ipu_image_convert_ctx *ctx; 2326 struct ipu_image_convert_run *run; 2327 int ret; 2328 2329 ctx = ipu_image_convert_prepare(ipu, ic_task, in, out, rot_mode, 2330 complete, complete_context); 2331 if (IS_ERR(ctx)) 2332 return ERR_CAST(ctx); 2333 2334 run = kzalloc(sizeof(*run), GFP_KERNEL); 2335 if (!run) { 2336 ipu_image_convert_unprepare(ctx); 2337 return ERR_PTR(-ENOMEM); 2338 } 2339 2340 run->ctx = ctx; 2341 run->in_phys = in->phys0; 2342 run->out_phys = out->phys0; 2343 2344 ret = ipu_image_convert_queue(run); 2345 if (ret) { 2346 ipu_image_convert_unprepare(ctx); 2347 kfree(run); 2348 return ERR_PTR(ret); 2349 } 2350 2351 return run; 2352 } 2353 EXPORT_SYMBOL_GPL(ipu_image_convert); 2354 2355 /* "Canned" synchronous single image conversion */ 2356 static void image_convert_sync_complete(struct ipu_image_convert_run *run, 2357 void *data) 2358 { 2359 struct completion *comp = data; 2360 2361 complete(comp); 2362 } 2363 2364 int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task, 2365 struct ipu_image *in, struct ipu_image *out, 2366 enum ipu_rotate_mode rot_mode) 2367 { 2368 struct ipu_image_convert_run *run; 2369 struct completion comp; 2370 int ret; 2371 2372 init_completion(&comp); 2373 2374 run = ipu_image_convert(ipu, ic_task, in, out, rot_mode, 2375 image_convert_sync_complete, &comp); 2376 if (IS_ERR(run)) 2377 return PTR_ERR(run); 2378 2379 ret = wait_for_completion_timeout(&comp, msecs_to_jiffies(10000)); 2380 ret = (ret == 0) ? -ETIMEDOUT : 0; 2381 2382 ipu_image_convert_unprepare(run->ctx); 2383 kfree(run); 2384 2385 return ret; 2386 } 2387 EXPORT_SYMBOL_GPL(ipu_image_convert_sync); 2388 2389 int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev) 2390 { 2391 struct ipu_image_convert_priv *priv; 2392 int i; 2393 2394 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); 2395 if (!priv) 2396 return -ENOMEM; 2397 2398 ipu->image_convert_priv = priv; 2399 priv->ipu = ipu; 2400 2401 for (i = 0; i < IC_NUM_TASKS; i++) { 2402 struct ipu_image_convert_chan *chan = &priv->chan[i]; 2403 2404 chan->ic_task = i; 2405 chan->priv = priv; 2406 chan->dma_ch = &image_convert_dma_chan[i]; 2407 chan->out_eof_irq = -1; 2408 chan->rot_out_eof_irq = -1; 2409 2410 spin_lock_init(&chan->irqlock); 2411 INIT_LIST_HEAD(&chan->ctx_list); 2412 INIT_LIST_HEAD(&chan->pending_q); 2413 INIT_LIST_HEAD(&chan->done_q); 2414 } 2415 2416 return 0; 2417 } 2418 2419 void ipu_image_convert_exit(struct ipu_soc *ipu) 2420 { 2421 } 2422