1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Copyright (C) 2012-2016 Mentor Graphics Inc. 4 * 5 * Queued image conversion support, with tiling and rotation. 6 */ 7 8 #include <linux/interrupt.h> 9 #include <linux/dma-mapping.h> 10 #include <video/imx-ipu-image-convert.h> 11 #include "ipu-prv.h" 12 13 /* 14 * The IC Resizer has a restriction that the output frame from the 15 * resizer must be 1024 or less in both width (pixels) and height 16 * (lines). 17 * 18 * The image converter attempts to split up a conversion when 19 * the desired output (converted) frame resolution exceeds the 20 * IC resizer limit of 1024 in either dimension. 21 * 22 * If either dimension of the output frame exceeds the limit, the 23 * dimension is split into 1, 2, or 4 equal stripes, for a maximum 24 * of 4*4 or 16 tiles. A conversion is then carried out for each 25 * tile (but taking care to pass the full frame stride length to 26 * the DMA channel's parameter memory!). IDMA double-buffering is used 27 * to convert each tile back-to-back when possible (see note below 28 * when double_buffering boolean is set). 
29 * 30 * Note that the input frame must be split up into the same number 31 * of tiles as the output frame: 32 * 33 * +---------+-----+ 34 * +-----+---+ | A | B | 35 * | A | B | | | | 36 * +-----+---+ --> +---------+-----+ 37 * | C | D | | C | D | 38 * +-----+---+ | | | 39 * +---------+-----+ 40 * 41 * Clockwise 90° rotations are handled by first rescaling into a 42 * reusable temporary tile buffer and then rotating with the 8x8 43 * block rotator, writing to the correct destination: 44 * 45 * +-----+-----+ 46 * | | | 47 * +-----+---+ +---------+ | C | A | 48 * | A | B | | A,B, | | | | | 49 * +-----+---+ --> | C,D | | --> | | | 50 * | C | D | +---------+ +-----+-----+ 51 * +-----+---+ | D | B | 52 * | | | 53 * +-----+-----+ 54 * 55 * If the 8x8 block rotator is used, horizontal or vertical flipping 56 * is done during the rotation step, otherwise flipping is done 57 * during the scaling step. 58 * With rotation or flipping, tile order changes between input and 59 * output image. Tiles are numbered row major from top left to bottom 60 * right for both input and output image. 
 */

#define MAX_STRIPES_W    4
#define MAX_STRIPES_H    4
#define MAX_TILES (MAX_STRIPES_W * MAX_STRIPES_H)

#define MIN_W     16
#define MIN_H     8
#define MAX_W     4096
#define MAX_H     4096

/* Which side of a conversion an image describes */
enum ipu_image_convert_type {
	IMAGE_CONVERT_IN = 0,
	IMAGE_CONVERT_OUT,
};

/* A DMA-coherent scratch buffer (used for the rotation intermediates) */
struct ipu_image_convert_dma_buf {
	void          *virt;
	dma_addr_t    phys;
	unsigned long len;
};

/* The IDMAC channel numbers used by one IC task */
struct ipu_image_convert_dma_chan {
	int in;
	int out;
	int rot_in;
	int rot_out;
	int vdi_in_p;
	int vdi_in;
	int vdi_in_n;
};

/* dimensions of one tile */
struct ipu_image_tile {
	u32 width;
	u32 height;
	u32 left;
	u32 top;
	/* size and strides are in bytes */
	u32 size;
	u32 stride;
	u32 rot_stride;
	/* start Y or packed offset of this tile */
	u32 offset;
	/* offset from start to tile in U plane, for planar formats */
	u32 u_off;
	/* offset from start to tile in V plane, for planar formats */
	u32 v_off;
};

/* One side (input or output) of a conversion, with its tiling layout */
struct ipu_image_convert_image {
	struct ipu_image base;
	enum ipu_image_convert_type type;

	const struct ipu_image_pixfmt *fmt;
	unsigned int stride;

	/* # of rows (horizontal stripes) if dest height is > 1024 */
	unsigned int num_rows;
	/* # of columns (vertical stripes) if dest width is > 1024 */
	unsigned int num_cols;

	struct ipu_image_tile tile[MAX_TILES];
};

/* Pixel format properties the converter needs to know about */
struct ipu_image_pixfmt {
	u32	fourcc;        /* V4L2 fourcc */
	int     bpp;           /* total bpp */
	int     uv_width_dec;  /* decimation in width for U/V planes */
	int     uv_height_dec; /* decimation in height for U/V planes */
	bool    planar;        /* planar format */
	bool    uv_swapped;    /* U and V planes are swapped */
	bool    uv_packed;     /* partial planar (U and V in same plane) */
};

struct ipu_image_convert_ctx;
struct ipu_image_convert_chan;
struct ipu_image_convert_priv;

/* State of one queued conversion context */
struct ipu_image_convert_ctx {
	struct ipu_image_convert_chan *chan;

	ipu_image_convert_cb_t complete;
	void *complete_context;

	/* Source/destination image data and rotation mode */
	struct ipu_image_convert_image in;
	struct ipu_image_convert_image out;
	struct ipu_ic_csc csc;
	enum ipu_rotate_mode rot_mode;
	/* Downsizing and resizing coefficients shared by all tiles */
	u32 downsize_coeff_h;
	u32 downsize_coeff_v;
	u32 image_resize_coeff_h;
	u32 image_resize_coeff_v;
	/* Per-column / per-row resizing coefficients */
	u32 resize_coeffs_h[MAX_STRIPES_W];
	u32 resize_coeffs_v[MAX_STRIPES_H];

	/* intermediate buffer for rotation */
	struct ipu_image_convert_dma_buf rot_intermediate[2];

	/* current buffer number for double buffering */
	int cur_buf_num;

	bool aborting;
	struct completion aborted;

	/* can we use double-buffering for this conversion operation? */
	bool double_buffering;
	/* num_rows * num_cols */
	unsigned int num_tiles;
	/* next tile to process */
	unsigned int next_tile;
	/* where to place converted tile in dest image */
	unsigned int out_tile_map[MAX_TILES];

	struct list_head list;
};

/* Per-IC-task conversion channel state */
struct ipu_image_convert_chan {
	struct ipu_image_convert_priv *priv;

	enum ipu_ic_task ic_task;
	const struct ipu_image_convert_dma_chan *dma_ch;

	struct ipu_ic *ic;
	struct ipuv3_channel *in_chan;
	struct ipuv3_channel *out_chan;
	struct ipuv3_channel *rotation_in_chan;
	struct ipuv3_channel *rotation_out_chan;

	/* the IPU end-of-frame irqs */
	int out_eof_irq;
	int rot_out_eof_irq;

	spinlock_t irqlock;

	/* list of convert contexts */
	struct list_head ctx_list;
	/* queue of conversion runs */
	struct list_head pending_q;
	/* queue of completed runs */
	struct list_head done_q;

	/* the current conversion run */
	struct ipu_image_convert_run *current_run;
};

/* Driver-wide state: one conversion channel per IC task */
struct ipu_image_convert_priv {
	struct ipu_image_convert_chan chan[IC_NUM_TASKS];
	struct ipu_soc *ipu;
};

/*
 * IDMAC channels used by each IC task. Only the viewfinder task has the
 * VDI (deinterlacer) input channels.
 */
static const struct ipu_image_convert_dma_chan
image_convert_dma_chan[IC_NUM_TASKS] = {
	[IC_TASK_VIEWFINDER] = {
		.in = IPUV3_CHANNEL_MEM_IC_PRP_VF,
		.out = IPUV3_CHANNEL_IC_PRP_VF_MEM,
		.rot_in = IPUV3_CHANNEL_MEM_ROT_VF,
		.rot_out = IPUV3_CHANNEL_ROT_VF_MEM,
		.vdi_in_p = IPUV3_CHANNEL_MEM_VDI_PREV,
		.vdi_in = IPUV3_CHANNEL_MEM_VDI_CUR,
		.vdi_in_n = IPUV3_CHANNEL_MEM_VDI_NEXT,
	},
	[IC_TASK_POST_PROCESSOR] = {
		.in = IPUV3_CHANNEL_MEM_IC_PP,
		.out = IPUV3_CHANNEL_IC_PP_MEM,
		.rot_in = IPUV3_CHANNEL_MEM_ROT_PP,
		.rot_out = IPUV3_CHANNEL_ROT_PP_MEM,
	},
};

/* Pixel formats supported by the image converter */
static const struct ipu_image_pixfmt image_convert_formats[] = {
	{
		.fourcc	= V4L2_PIX_FMT_RGB565,
		.bpp    = 16,
	}, {
		.fourcc	= V4L2_PIX_FMT_RGB24,
		.bpp    = 24,
	}, {
		.fourcc	= V4L2_PIX_FMT_BGR24,
		.bpp    = 24,
	}, {
		.fourcc	= V4L2_PIX_FMT_RGB32,
		.bpp    = 32,
	}, {
		.fourcc	= V4L2_PIX_FMT_BGR32,
		.bpp    = 32,
	}, {
		.fourcc	= V4L2_PIX_FMT_XRGB32,
		.bpp    = 32,
	}, {
		.fourcc	= V4L2_PIX_FMT_XBGR32,
		.bpp    = 32,
	}, {
		.fourcc	= V4L2_PIX_FMT_YUYV,
		.bpp    = 16,
		.uv_width_dec = 2,
		.uv_height_dec = 1,
	}, {
		.fourcc	= V4L2_PIX_FMT_UYVY,
		.bpp    = 16,
		.uv_width_dec = 2,
		.uv_height_dec = 1,
	}, {
		.fourcc	= V4L2_PIX_FMT_YUV420,
		.bpp    = 12,
		.planar = true,
		.uv_width_dec = 2,
		.uv_height_dec = 2,
	}, {
		.fourcc	= V4L2_PIX_FMT_YVU420,
		.bpp    = 12,
		.planar = true,
		.uv_width_dec = 2,
		.uv_height_dec = 2,
		.uv_swapped = true,
	}, {
		.fourcc	= V4L2_PIX_FMT_NV12,
		.bpp    = 12,
		.planar = true,
		.uv_width_dec = 2,
		.uv_height_dec = 2,
		.uv_packed = true,
	}, {
		.fourcc	= V4L2_PIX_FMT_YUV422P,
		.bpp    = 16,
		.planar = true,
		.uv_width_dec = 2,
		.uv_height_dec = 1,
	}, {
		.fourcc	= V4L2_PIX_FMT_NV16,
		.bpp    = 16,
		.planar = true,
		.uv_width_dec = 2,
		.uv_height_dec = 1,
		.uv_packed = true,
	},
};

static const struct
ipu_image_pixfmt *get_format(u32 fourcc)
{
	/* Return the format descriptor matching fourcc, or NULL */
	const struct ipu_image_pixfmt *ret = NULL;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(image_convert_formats); i++) {
		if (image_convert_formats[i].fourcc == fourcc) {
			ret = &image_convert_formats[i];
			break;
		}
	}

	return ret;
}

/* Log an image's size, tiling layout and fourcc at debug level */
static void dump_format(struct ipu_image_convert_ctx *ctx,
			struct ipu_image_convert_image *ic_image)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;

	dev_dbg(priv->ipu->dev,
		"task %u: ctx %p: %s format: %dx%d (%dx%d tiles), %c%c%c%c\n",
		chan->ic_task, ctx,
		ic_image->type == IMAGE_CONVERT_OUT ? "Output" : "Input",
		ic_image->base.pix.width, ic_image->base.pix.height,
		ic_image->num_cols, ic_image->num_rows,
		ic_image->fmt->fourcc & 0xff,
		(ic_image->fmt->fourcc >> 8) & 0xff,
		(ic_image->fmt->fourcc >> 16) & 0xff,
		(ic_image->fmt->fourcc >> 24) & 0xff);
}

/*
 * Return in *fourcc the pixel format at position index in the table of
 * supported conversion formats, or -EINVAL past the end of the table.
 */
int ipu_image_convert_enum_format(int index, u32 *fourcc)
{
	const struct ipu_image_pixfmt *fmt;

	if (index >= (int)ARRAY_SIZE(image_convert_formats))
		return -EINVAL;

	/* Format found */
	fmt = &image_convert_formats[index];
	*fourcc = fmt->fourcc;
	return 0;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_enum_format);

/*
 * Free a DMA-coherent buffer and reset its fields. Safe to call on a
 * buffer that was never allocated (virt == NULL).
 */
static void free_dma_buf(struct ipu_image_convert_priv *priv,
			 struct ipu_image_convert_dma_buf *buf)
{
	if (buf->virt)
		dma_free_coherent(priv->ipu->dev,
				  buf->len, buf->virt, buf->phys);
	buf->virt = NULL;
	buf->phys = 0;
}

/*
 * Allocate a page-aligned DMA-coherent buffer of at least size bytes.
 * buf->len records the page-aligned length actually allocated.
 * Returns 0 on success or -ENOMEM.
 */
static int alloc_dma_buf(struct ipu_image_convert_priv *priv,
			 struct ipu_image_convert_dma_buf *buf,
			 int size)
{
	buf->len = PAGE_ALIGN(size);
	buf->virt = dma_alloc_coherent(priv->ipu->dev, buf->len, &buf->phys,
				       GFP_DMA | GFP_KERNEL);
	if (!buf->virt) {
		dev_err(priv->ipu->dev, "failed to alloc dma buffer\n");
		return -ENOMEM;
	}

	return 0;
}

/* Number of 1024-pixel/line stripes needed to cover dim: ceil(dim / 1024) */
static inline int num_stripes(int dim)
{
	return (dim - 1) / 1024 + 1;
}

/*
 * Calculate downsizing coefficients, which are the same for all tiles,
 * and bilinear resizing coefficients, which are used to find the best
 * seam positions.
 */
static int calc_image_resize_coefficients(struct ipu_image_convert_ctx *ctx,
					  struct ipu_image *in,
					  struct ipu_image *out)
{
	u32 downsized_width = in->rect.width;
	u32 downsized_height = in->rect.height;
	u32 downsize_coeff_v = 0;
	u32 downsize_coeff_h = 0;
	u32 resized_width = out->rect.width;
	u32 resized_height = out->rect.height;
	u32 resize_coeff_h;
	u32 resize_coeff_v;

	/* With 90°/270° rotation the IC resizes into the rotated frame */
	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		resized_width = out->rect.height;
		resized_height = out->rect.width;
	}

	/* Do not let invalid input lead to an endless loop below */
	if (WARN_ON(resized_width == 0 || resized_height == 0))
		return -EINVAL;

	/* Halve the input until it is less than twice the resized size */
	while (downsized_width >= resized_width * 2) {
		downsized_width >>= 1;
		downsize_coeff_h++;
	}

	while (downsized_height >= resized_height * 2) {
		downsized_height >>= 1;
		downsize_coeff_v++;
	}

	/*
	 * Calculate the bilinear resizing coefficients that could be used if
	 * we were converting with a single tile. The bottom right output pixel
	 * should sample as close as possible to the bottom right input pixel
	 * out of the decimator, but not overshoot it:
	 *
	 * NOTE(review): a resized_width/height of exactly 1 would divide by
	 * zero here; only 0 is rejected above. Callers appear to enforce
	 * minimum image sizes (MIN_W/MIN_H) — confirm.
	 */
	resize_coeff_h = 8192 * (downsized_width - 1) / (resized_width - 1);
	resize_coeff_v = 8192 * (downsized_height - 1) / (resized_height - 1);

	dev_dbg(ctx->chan->priv->ipu->dev,
		"%s: hscale: >>%u, *8192/%u vscale: >>%u, *8192/%u, %ux%u tiles\n",
		__func__, downsize_coeff_h, resize_coeff_h, downsize_coeff_v,
		resize_coeff_v, ctx->in.num_cols, ctx->in.num_rows);

	/* Reject scaling factors beyond the IC's downsize/resize limits */
	if (downsize_coeff_h > 2 || downsize_coeff_v > 2 ||
	    resize_coeff_h > 0x3fff || resize_coeff_v > 0x3fff)
		return -EINVAL;

	ctx->downsize_coeff_h = downsize_coeff_h;
	ctx->downsize_coeff_v = downsize_coeff_v;
	ctx->image_resize_coeff_h = resize_coeff_h;
	ctx->image_resize_coeff_v = resize_coeff_v;

	return 0;
}

/* Round x to the nearest multiple of y (ties round down toward -inf) */
#define round_closest(x, y) round_down((x) + (y)/2, (y))

/*
 * Find the best aligned seam position in the interval [out_start, out_end].
 * Rotation and image offsets are out of scope.
 *
 * @out_start: start of interval, must be within 1024 pixels / lines
 *             of out_end
 * @out_end: end of interval, smaller than or equal to out_edge
 * @in_edge: input right / bottom edge
 * @out_edge: output right / bottom edge
 * @in_align: input alignment, either horizontal 8-byte line start address
 *            alignment, or pixel alignment due to image format
 * @out_align: output alignment, either horizontal 8-byte line start address
 *             alignment, or pixel alignment due to image format or rotator
 *             block size
 * @in_burst: horizontal input burst size in case of horizontal flip
 * @out_burst: horizontal output burst size or rotator block size
 * @downsize_coeff: downsizing section coefficient
 * @resize_coeff: main processing section resizing coefficient
 * @_in_seam: aligned input seam position return value
 * @_out_seam: aligned output seam position return value
 */
static void find_best_seam(struct ipu_image_convert_ctx *ctx,
			   unsigned int out_start,
			   unsigned int out_end,
			   unsigned int in_edge,
			   unsigned int out_edge,
			   unsigned int in_align,
			   unsigned int out_align,
			   unsigned int in_burst,
			   unsigned int out_burst,
			   unsigned int downsize_coeff,
			   unsigned int resize_coeff,
			   u32 *_in_seam,
			   u32 *_out_seam)
{
	struct device *dev = ctx->chan->priv->ipu->dev;
	unsigned int out_pos;
	/* Input / output seam position candidates */
	unsigned int out_seam = 0;
	unsigned int in_seam = 0;
	unsigned int min_diff = UINT_MAX;

	/*
	 * Output tiles must start at a multiple of 8 bytes horizontally and
	 * possibly at an even line horizontally depending on the pixel format.
	 * Only consider output aligned positions for the seam.
	 */
	out_start = round_up(out_start, out_align);
	for (out_pos = out_start; out_pos < out_end; out_pos += out_align) {
		unsigned int in_pos;
		unsigned int in_pos_aligned;
		unsigned int abs_diff;

		/*
		 * Tiles in the right row / bottom column may not be allowed to
		 * overshoot horizontally / vertically. out_burst may be the
		 * actual DMA burst size, or the rotator block size.
		 */
		if ((out_burst > 1) && (out_edge - out_pos) % out_burst)
			continue;

		/*
		 * Input sample position, corresponding to out_pos, 19.13 fixed
		 * point.
		 */
		in_pos = (out_pos * resize_coeff) << downsize_coeff;
		/*
		 * The closest input sample position that we could actually
		 * start the input tile at, 19.13 fixed point.
		 */
		in_pos_aligned = round_closest(in_pos, 8192U * in_align);

		/* Reject positions that would overshoot the input burst */
		if ((in_burst > 1) &&
		    (in_edge - in_pos_aligned / 8192U) % in_burst)
			continue;

		if (in_pos < in_pos_aligned)
			abs_diff = in_pos_aligned - in_pos;
		else
			abs_diff = in_pos - in_pos_aligned;

		/* Keep the candidate with the smallest sampling error */
		if (abs_diff < min_diff) {
			in_seam = in_pos_aligned;
			out_seam = out_pos;
			min_diff = abs_diff;
		}
	}

	*_out_seam = out_seam;
	/* Convert 19.13 fixed point to integer seam position */
	*_in_seam = DIV_ROUND_CLOSEST(in_seam, 8192U);

	dev_dbg(dev, "%s: out_seam %u(%u) in [%u, %u], in_seam %u(%u) diff %u.%03u\n",
		__func__, out_seam, out_align, out_start, out_end,
		*_in_seam, in_align, min_diff / 8192,
		DIV_ROUND_CLOSEST(min_diff % 8192 * 1000, 8192));
}

/*
 * Tile left edges are required to be aligned to multiples of 8 bytes
 * by the IDMAC.
 */
static inline u32 tile_left_align(const struct ipu_image_pixfmt *fmt)
{
	if (fmt->planar)
		return fmt->uv_packed ? 8 : 8 * fmt->uv_width_dec;
	else
		return fmt->bpp == 32 ? 2 : fmt->bpp == 16 ? 4 : 8;
}

/*
 * Tile top edge alignment is only limited by chroma subsampling.
 */
static inline u32 tile_top_align(const struct ipu_image_pixfmt *fmt)
{
	return fmt->uv_height_dec > 1 ? 2 : 1;
}

/* Required tile width alignment, depending on side, format and rotation */
static inline u32 tile_width_align(enum ipu_image_convert_type type,
				   const struct ipu_image_pixfmt *fmt,
				   enum ipu_rotate_mode rot_mode)
{
	if (type == IMAGE_CONVERT_IN) {
		/*
		 * The IC burst reads 8 pixels at a time. Reading beyond the
		 * end of the line is usually acceptable. Those pixels are
		 * ignored, unless the IC has to write the scaled line in
		 * reverse.
		 */
		return (!ipu_rot_mode_is_irt(rot_mode) &&
			(rot_mode & IPU_ROT_BIT_HFLIP)) ? 8 : 2;
	}

	/*
	 * Align to 16x16 pixel blocks for planar 4:2:0 chroma subsampled
	 * formats to guarantee 8-byte aligned line start addresses in the
	 * chroma planes when IRT is used. Align to 8x8 pixel IRT block size
	 * for all other formats.
	 */
	return (ipu_rot_mode_is_irt(rot_mode) &&
		fmt->planar && !fmt->uv_packed) ?
		8 * fmt->uv_width_dec : 8;
}

/* Required tile height alignment, depending on side, format and rotation */
static inline u32 tile_height_align(enum ipu_image_convert_type type,
				    const struct ipu_image_pixfmt *fmt,
				    enum ipu_rotate_mode rot_mode)
{
	if (type == IMAGE_CONVERT_IN || !ipu_rot_mode_is_irt(rot_mode))
		return 2;

	/*
	 * Align to 16x16 pixel blocks for planar 4:2:0 chroma subsampled
	 * formats to guarantee 8-byte aligned line start addresses in the
	 * chroma planes when IRT is used. Align to 8x8 pixel IRT block size
	 * for all other formats.
	 */
	return (fmt->planar && !fmt->uv_packed) ? 8 * fmt->uv_width_dec : 8;
}

/*
 * Fill in left position and width and for all tiles in an input column, and
 * for all corresponding output tiles. If the 90° rotator is used, the output
 * tiles are in a row, and output tile top position and height are set.
 */
static void fill_tile_column(struct ipu_image_convert_ctx *ctx,
			     unsigned int col,
			     struct ipu_image_convert_image *in,
			     unsigned int in_left, unsigned int in_width,
			     struct ipu_image_convert_image *out,
			     unsigned int out_left, unsigned int out_width)
{
	unsigned int row, tile_idx;
	struct ipu_image_tile *in_tile, *out_tile;

	for (row = 0; row < in->num_rows; row++) {
		tile_idx = in->num_cols * row + col;
		in_tile = &in->tile[tile_idx];
		/* out_tile_map accounts for rotation / flipping reordering */
		out_tile = &out->tile[ctx->out_tile_map[tile_idx]];

		in_tile->left = in_left;
		in_tile->width = in_width;

		if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
			out_tile->top = out_left;
			out_tile->height = out_width;
		} else {
			out_tile->left = out_left;
			out_tile->width = out_width;
		}
	}
}

/*
 * Fill in top position and height and for all tiles in an input row, and
 * for all corresponding output tiles. If the 90° rotator is used, the output
 * tiles are in a column, and output tile left position and width are set.
 */
static void fill_tile_row(struct ipu_image_convert_ctx *ctx, unsigned int row,
			  struct ipu_image_convert_image *in,
			  unsigned int in_top, unsigned int in_height,
			  struct ipu_image_convert_image *out,
			  unsigned int out_top, unsigned int out_height)
{
	unsigned int col, tile_idx;
	struct ipu_image_tile *in_tile, *out_tile;

	for (col = 0; col < in->num_cols; col++) {
		tile_idx = in->num_cols * row + col;
		in_tile = &in->tile[tile_idx];
		/* out_tile_map accounts for rotation / flipping reordering */
		out_tile = &out->tile[ctx->out_tile_map[tile_idx]];

		in_tile->top = in_top;
		in_tile->height = in_height;

		if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
			out_tile->left = out_top;
			out_tile->width = out_height;
		} else {
			out_tile->top = out_top;
			out_tile->height = out_height;
		}
	}
}

/*
 * Find the best horizontal and vertical seam positions to split into tiles.
 * Minimize the fractional part of the input sampling position for the
 * top / left pixels of each tile.
 */
static void find_seams(struct ipu_image_convert_ctx *ctx,
		       struct ipu_image_convert_image *in,
		       struct ipu_image_convert_image *out)
{
	struct device *dev = ctx->chan->priv->ipu->dev;
	unsigned int resized_width = out->base.rect.width;
	unsigned int resized_height = out->base.rect.height;
	unsigned int col;
	unsigned int row;
	unsigned int in_left_align = tile_left_align(in->fmt);
	unsigned int in_top_align = tile_top_align(in->fmt);
	unsigned int out_left_align = tile_left_align(out->fmt);
	unsigned int out_top_align = tile_top_align(out->fmt);
	unsigned int out_width_align = tile_width_align(out->type, out->fmt,
							ctx->rot_mode);
	unsigned int out_height_align = tile_height_align(out->type, out->fmt,
							  ctx->rot_mode);
	unsigned int in_right = in->base.rect.width;
	unsigned int in_bottom = in->base.rect.height;
	unsigned int out_right = out->base.rect.width;
	unsigned int out_bottom = out->base.rect.height;
	unsigned int flipped_out_left;
	unsigned int flipped_out_top;

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		/*
		 * Switch width/height and align top left to IRT block size.
		 * NOTE(review): out_width_align / out_height_align are
		 * assigned from the already-updated out_left_align /
		 * out_top_align, so all four end up swapped pairwise —
		 * confirm this is the intended cross-assignment.
		 */
		resized_width = out->base.rect.height;
		resized_height = out->base.rect.width;
		out_left_align = out_height_align;
		out_top_align = out_width_align;
		out_width_align = out_left_align;
		out_height_align = out_top_align;
		out_right = out->base.rect.height;
		out_bottom = out->base.rect.width;
	}

	/* Place vertical seams from the rightmost column leftwards */
	for (col = in->num_cols - 1; col > 0; col--) {
		bool allow_in_overshoot = ipu_rot_mode_is_irt(ctx->rot_mode) ||
					  !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
		bool allow_out_overshoot = (col < in->num_cols - 1) &&
					   !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
		unsigned int out_start;
		unsigned int out_end;
		unsigned int in_left;
		unsigned int out_left;

		/*
		 * Align input width to burst length if the scaling step flips
		 * horizontally.
		 */

		/* Start within 1024 pixels of the right edge */
		out_start = max_t(int, 0, out_right - 1024);
		/* End before having to add more columns to the left */
		out_end = min_t(unsigned int, out_right, col * 1024);

		find_best_seam(ctx, out_start, out_end,
			       in_right, out_right,
			       in_left_align, out_left_align,
			       allow_in_overshoot ? 1 : 8 /* burst length */,
			       allow_out_overshoot ? 1 : out_width_align,
			       ctx->downsize_coeff_h, ctx->image_resize_coeff_h,
			       &in_left, &out_left);

		/* With horizontal flip, mirror the output tile position */
		if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
			flipped_out_left = resized_width - out_right;
		else
			flipped_out_left = out_left;

		fill_tile_column(ctx, col, in, in_left, in_right - in_left,
				 out, flipped_out_left, out_right - out_left);

		dev_dbg(dev, "%s: col %u: %u, %u -> %u, %u\n", __func__, col,
			in_left, in_right - in_left,
			flipped_out_left, out_right - out_left);

		/* This seam becomes the right edge of the next column */
		in_right = in_left;
		out_right = out_left;
	}

	flipped_out_left = (ctx->rot_mode & IPU_ROT_BIT_HFLIP) ?
			   resized_width - out_right : 0;

	fill_tile_column(ctx, 0, in, 0, in_right,
			 out, flipped_out_left, out_right);

	dev_dbg(dev, "%s: col 0: 0, %u -> %u, %u\n", __func__,
		in_right, flipped_out_left, out_right);

	/* Place horizontal seams from the bottom row upwards */
	for (row = in->num_rows - 1; row > 0; row--) {
		bool allow_overshoot = row < in->num_rows - 1;
		unsigned int out_start;
		unsigned int out_end;
		unsigned int in_top;
		unsigned int out_top;

		/* Start within 1024 lines of the bottom edge */
		out_start = max_t(int, 0, out_bottom - 1024);
		/* End before having to add more rows above */
		out_end = min_t(unsigned int, out_bottom, row * 1024);

		find_best_seam(ctx, out_start, out_end,
			       in_bottom, out_bottom,
			       in_top_align, out_top_align,
			       1, allow_overshoot ? 1 : out_height_align,
			       ctx->downsize_coeff_v, ctx->image_resize_coeff_v,
			       &in_top, &out_top);

		/* IRT rotation itself flips vertically, hence the XOR */
		if ((ctx->rot_mode & IPU_ROT_BIT_VFLIP) ^
		    ipu_rot_mode_is_irt(ctx->rot_mode))
			flipped_out_top = resized_height - out_bottom;
		else
			flipped_out_top = out_top;

		fill_tile_row(ctx, row, in, in_top, in_bottom - in_top,
			      out, flipped_out_top, out_bottom - out_top);

		dev_dbg(dev, "%s: row %u: %u, %u -> %u, %u\n", __func__, row,
			in_top, in_bottom - in_top,
			flipped_out_top, out_bottom - out_top);

		/* This seam becomes the bottom edge of the next row */
		in_bottom = in_top;
		out_bottom = out_top;
	}

	if ((ctx->rot_mode & IPU_ROT_BIT_VFLIP) ^
	    ipu_rot_mode_is_irt(ctx->rot_mode))
		flipped_out_top = resized_height - out_bottom;
	else
		flipped_out_top = 0;

	fill_tile_row(ctx, 0, in, 0, in_bottom,
		      out, flipped_out_top, out_bottom);

	dev_dbg(dev, "%s: row 0: 0, %u -> %u, %u\n", __func__,
		in_bottom, flipped_out_top, out_bottom);
}

/*
 * Compute byte size, line stride and rotator stride of every tile from the
 * tile width/height set up by find_seams() and the image pixel format.
 */
static void calc_tile_dimensions(struct ipu_image_convert_ctx *ctx,
				 struct ipu_image_convert_image *image)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	unsigned int i;

	for (i = 0; i < ctx->num_tiles; i++) {
		struct ipu_image_tile *tile;
		const unsigned int row = i / image->num_cols;
		const unsigned int col = i % image->num_cols;

		/* Output tiles are addressed through the rotation tile map */
		if (image->type == IMAGE_CONVERT_OUT)
			tile = &image->tile[ctx->out_tile_map[i]];
		else
			tile = &image->tile[i];

		tile->size = ((tile->height * image->fmt->bpp) >> 3) *
			tile->width;

		if (image->fmt->planar) {
			/* Planar strides are in pixels of the Y plane */
			tile->stride = tile->width;
			tile->rot_stride = tile->height;
		} else {
			tile->stride =
				(image->fmt->bpp * tile->width) >> 3;
			tile->rot_stride =
				(image->fmt->bpp * tile->height) >> 3;
		}

		dev_dbg(priv->ipu->dev,
			"task %u: ctx %p: %s@[%u,%u]: %ux%u@%u,%u\n",
			chan->ic_task, ctx,
			image->type == IMAGE_CONVERT_IN ? "Input" : "Output",
			row, col,
			tile->width, tile->height, tile->left, tile->top);
	}
}

/*
 * Use the rotation transformation to find the tile coordinates
 * (row, col) of a tile in the destination frame that corresponds
 * to the given tile coordinates of a source frame. The destination
 * coordinate is then converted to a tile index.
 */
static int transform_tile_index(struct ipu_image_convert_ctx *ctx,
				int src_row, int src_col)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_image *s_image = &ctx->in;
	struct ipu_image_convert_image *d_image = &ctx->out;
	int dst_row, dst_col;

	/* with no rotation it's a 1:1 mapping */
	if (ctx->rot_mode == IPU_ROTATE_NONE)
		return src_row * s_image->num_cols + src_col;

	/*
	 * before doing the transform, first we have to translate
	 * source row,col for an origin in the center of s_image
	 * (doubled coordinates keep the math in integers)
	 */
	src_row = src_row * 2 - (s_image->num_rows - 1);
	src_col = src_col * 2 - (s_image->num_cols - 1);

	/* do the rotation transform */
	if (ctx->rot_mode & IPU_ROT_BIT_90) {
		dst_col = -src_row;
		dst_row = src_col;
	} else {
		dst_col = src_col;
		dst_row = src_row;
	}

	/* apply flip */
	if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
		dst_col = -dst_col;
	if (ctx->rot_mode & IPU_ROT_BIT_VFLIP)
		dst_row = -dst_row;

	dev_dbg(priv->ipu->dev, "task %u: ctx %p: [%d,%d] --> [%d,%d]\n",
		chan->ic_task, ctx, src_col, src_row, dst_col, dst_row);

	/*
	 * finally translate dest row,col using an origin in upper
	 * left of d_image
	 */
	dst_row += d_image->num_rows - 1;
	dst_col += d_image->num_cols - 1;
	dst_row /= 2;
	dst_col /= 2;

	return dst_row * d_image->num_cols + dst_col;
}

/*
 * Fill the out_tile_map[] with transformed destination tile indices.
 */
static void calc_out_tile_map(struct ipu_image_convert_ctx *ctx)
{
	struct ipu_image_convert_image *s_image = &ctx->in;
	unsigned int row, col, tile = 0;

	for (row = 0; row < s_image->num_rows; row++) {
		for (col = 0; col < s_image->num_cols; col++) {
			ctx->out_tile_map[tile] =
				transform_tile_index(ctx, row, col);
			tile++;
		}
	}
}

/*
 * Compute the per-tile Y/U/V buffer offsets for a planar format image.
 * Offsets must be 8-byte aligned for the IDMAC; returns -EINVAL otherwise.
 */
static int calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx,
				    struct ipu_image_convert_image *image)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	const struct ipu_image_pixfmt *fmt = image->fmt;
	unsigned int row, col, tile = 0;
	u32 H, top, y_stride, uv_stride;
	u32 uv_row_off, uv_col_off, uv_off, u_off, v_off, tmp;
	u32 y_row_off, y_col_off, y_off;
	u32 y_size, uv_size;

	/* setup some convenience vars */
	H = image->base.pix.height;

	y_stride = image->stride;
	uv_stride = y_stride / fmt->uv_width_dec;
	if (fmt->uv_packed)
		uv_stride *= 2;

	y_size = H * y_stride;
	uv_size = y_size / (fmt->uv_width_dec * fmt->uv_height_dec);

	for (row = 0; row < image->num_rows; row++) {
		top = image->tile[tile].top;
		y_row_off = top * y_stride;
		uv_row_off = (top * uv_stride) / fmt->uv_height_dec;

		for (col = 0; col < image->num_cols; col++) {
			y_col_off = image->tile[tile].left;
			uv_col_off = y_col_off / fmt->uv_width_dec;
			if (fmt->uv_packed)
				uv_col_off *= 2;

			y_off = y_row_off + y_col_off;
			uv_off = uv_row_off + uv_col_off;

			/*
			 * u_off/v_off are distances from this tile's start
			 * (y_off) to its chroma data, hence the y_size - y_off
			 * term to reach the start of the U plane.
			 */
			u_off = y_size - y_off + uv_off;
			v_off = (fmt->uv_packed) ? 0 : u_off + uv_size;
			if (fmt->uv_swapped) {
				tmp = u_off;
				u_off = v_off;
				v_off = tmp;
			}

			image->tile[tile].offset = y_off;
			image->tile[tile].u_off = u_off;
			image->tile[tile++].v_off = v_off;

			/* IDMAC requires 8-byte aligned plane offsets */
			if ((y_off & 0x7) || (u_off & 0x7) || (v_off & 0x7)) {
				dev_err(priv->ipu->dev,
					"task %u: ctx %p: %s@[%d,%d]: "
					"y_off %08x, u_off %08x, v_off %08x\n",
					chan->ic_task, ctx,
					image->type == IMAGE_CONVERT_IN ?
					"Input" : "Output", row, col,
					y_off, u_off, v_off);
				return -EINVAL;
			}
		}
	}

	return 0;
}

/*
 * Compute the per-tile buffer offsets for a packed format image.
 * Offsets must be 8-byte aligned for the IDMAC; returns -EINVAL otherwise.
 */
static int calc_tile_offsets_packed(struct ipu_image_convert_ctx *ctx,
				    struct ipu_image_convert_image *image)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	const struct ipu_image_pixfmt *fmt = image->fmt;
	unsigned int row, col, tile = 0;
	u32 bpp, stride, offset;
	u32 row_off, col_off;

	/* setup some convenience vars */
	stride = image->stride;
	bpp = fmt->bpp;

	for (row = 0; row < image->num_rows; row++) {
		row_off = image->tile[tile].top * stride;

		for (col = 0; col < image->num_cols; col++) {
			col_off = (image->tile[tile].left * bpp) >> 3;

			offset = row_off + col_off;

			image->tile[tile].offset = offset;
			image->tile[tile].u_off = 0;
			image->tile[tile++].v_off = 0;

			/* IDMAC requires 8-byte aligned tile offsets */
			if (offset & 0x7) {
				dev_err(priv->ipu->dev,
					"task %u: ctx %p: %s@[%d,%d]: "
					"phys %08x\n",
					chan->ic_task, ctx,
					image->type == IMAGE_CONVERT_IN ?
					"Input" : "Output", row, col,
					row_off + col_off);
				return -EINVAL;
			}
		}
	}

	return 0;
}

/* Dispatch to the planar or packed tile offset calculation */
static int calc_tile_offsets(struct ipu_image_convert_ctx *ctx,
			     struct ipu_image_convert_image *image)
{
	if (image->fmt->planar)
		return calc_tile_offsets_planar(ctx, image);

	return calc_tile_offsets_packed(ctx, image);
}

/*
 * Calculate the resizing ratio for the IC main processing section given input
 * size, fixed downsizing coefficient, and output size.
 * Either round to closest for the next tile's first pixel to minimize seams
 * and distortion (for all but right column / bottom row), or round down to
 * avoid sampling beyond the edges of the input image for this tile's last
 * pixel.
 * Returns the resizing coefficient, resizing ratio is 8192.0 / resize_coeff.
 */
static u32 calc_resize_coeff(u32 input_size, u32 downsize_coeff,
			     u32 output_size, bool allow_overshoot)
{
	u32 downsized = input_size >> downsize_coeff;

	if (allow_overshoot)
		return DIV_ROUND_CLOSEST(8192 * downsized, output_size);
	else
		return 8192 * (downsized - 1) / (output_size - 1);
}

/*
 * Slightly modify resize coefficients per tile to hide the bilinear
 * interpolator reset at tile borders, shifting the right / bottom edge
 * by up to a half input pixel. This removes noticeable seams between
 * tiles at higher upscaling factors.
 */
static void calc_tile_resize_coefficients(struct ipu_image_convert_ctx *ctx)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_tile *in_tile, *out_tile;
	unsigned int col, row, tile_idx;
	unsigned int last_output;

	/*
	 * First pass: one horizontal coefficient per input column,
	 * derived from the top tile of that column, then applied to
	 * every tile in the column.
	 */
	for (col = 0; col < ctx->in.num_cols; col++) {
		/* don't overshoot on the last column or when hflipping */
		bool closest = (col < ctx->in.num_cols - 1) &&
			       !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
		u32 resized_width;
		u32 resize_coeff_h;

		tile_idx = col;
		in_tile = &ctx->in.tile[tile_idx];
		out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];

		/* with 90°/270° rotation the output tile is transposed */
		if (ipu_rot_mode_is_irt(ctx->rot_mode))
			resized_width = out_tile->height;
		else
			resized_width = out_tile->width;

		resize_coeff_h = calc_resize_coeff(in_tile->width,
						   ctx->downsize_coeff_h,
						   resized_width, closest);

		dev_dbg(priv->ipu->dev, "%s: column %u hscale: *8192/%u\n",
			__func__, col, resize_coeff_h);


		for (row = 0; row < ctx->in.num_rows; row++) {
			tile_idx = row * ctx->in.num_cols + col;
			in_tile = &ctx->in.tile[tile_idx];
			out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];

			/*
			 * With the horizontal scaling factor known, round up
			 * resized width (output width or height) to burst size.
			 */
			if (ipu_rot_mode_is_irt(ctx->rot_mode))
				out_tile->height = round_up(resized_width, 8);
			else
				out_tile->width = round_up(resized_width, 8);

			/*
			 * Calculate input width from the last accessed input
			 * pixel given resized width and scaling coefficients.
			 * Round up to burst size.
			 */
			last_output = round_up(resized_width, 8) - 1;
			if (closest)
				last_output++;
			in_tile->width = round_up(
				(DIV_ROUND_UP(last_output * resize_coeff_h,
					      8192) + 1)
				<< ctx->downsize_coeff_h, 8);
		}

		ctx->resize_coeffs_h[col] = resize_coeff_h;
	}

	/*
	 * Second pass: one vertical coefficient per input row,
	 * derived from the leftmost tile of that row.
	 */
	for (row = 0; row < ctx->in.num_rows; row++) {
		/* don't overshoot on the last row or when vflipping */
		bool closest = (row < ctx->in.num_rows - 1) &&
			       !(ctx->rot_mode & IPU_ROT_BIT_VFLIP);
		u32 resized_height;
		u32 resize_coeff_v;

		tile_idx = row * ctx->in.num_cols;
		in_tile = &ctx->in.tile[tile_idx];
		out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];

		/* with 90°/270° rotation the output tile is transposed */
		if (ipu_rot_mode_is_irt(ctx->rot_mode))
			resized_height = out_tile->width;
		else
			resized_height = out_tile->height;

		resize_coeff_v = calc_resize_coeff(in_tile->height,
						   ctx->downsize_coeff_v,
						   resized_height, closest);

		dev_dbg(priv->ipu->dev, "%s: row %u vscale: *8192/%u\n",
			__func__, row, resize_coeff_v);

		for (col = 0; col < ctx->in.num_cols; col++) {
			tile_idx = row * ctx->in.num_cols + col;
			in_tile = &ctx->in.tile[tile_idx];
			out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];

			/*
			 * With the vertical scaling factor known, round up
			 * resized height (output width or height) to IDMAC
			 * limitations.
			 */
			if (ipu_rot_mode_is_irt(ctx->rot_mode))
				out_tile->width = round_up(resized_height, 2);
			else
				out_tile->height = round_up(resized_height, 2);

			/*
			 * Calculate input width from the last accessed input
			 * pixel given resized height and scaling coefficients.
			 * Align to IDMAC restrictions.
			 */
			last_output = round_up(resized_height, 2) - 1;
			if (closest)
				last_output++;
			in_tile->height = round_up(
				(DIV_ROUND_UP(last_output * resize_coeff_v,
					      8192) + 1)
				<< ctx->downsize_coeff_v, 2);
		}

		ctx->resize_coeffs_v[row] = resize_coeff_v;
	}
}

/*
 * return the number of runs in given queue (pending_q or done_q)
 * for this context. hold irqlock when calling.
 */
static int get_run_count(struct ipu_image_convert_ctx *ctx,
			 struct list_head *q)
{
	struct ipu_image_convert_run *run;
	int count = 0;

	lockdep_assert_held(&ctx->chan->irqlock);

	list_for_each_entry(run, q, list) {
		if (run->ctx == ctx)
			count++;
	}

	return count;
}

/*
 * Stop the conversion in progress: disable the IC task and every
 * IDMAC channel involved, and break the out->rotation-in channel
 * link if the 8x8 block rotator was in use.
 */
static void convert_stop(struct ipu_image_convert_run *run)
{
	struct ipu_image_convert_ctx *ctx = run->ctx;
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;

	dev_dbg(priv->ipu->dev, "%s: task %u: stopping ctx %p run %p\n",
		__func__, chan->ic_task, ctx, run);

	/* disable IC tasks and the channels */
	ipu_ic_task_disable(chan->ic);
	ipu_idmac_disable_channel(chan->in_chan);
	ipu_idmac_disable_channel(chan->out_chan);

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		ipu_idmac_disable_channel(chan->rotation_in_chan);
		ipu_idmac_disable_channel(chan->rotation_out_chan);
		ipu_idmac_unlink(chan->out_chan, chan->rotation_in_chan);
	}

	ipu_ic_disable(chan->ic);
}

/*
 * Program one IDMAC channel's parameter memory (CPMEM) for the given
 * tile: geometry, stride, buffer addresses, rotation, block mode and
 * burst size. With rot_swap_width_height the channel transfers the
 * intermediate rotation buffer with width/height exchanged.
 */
static void init_idmac_channel(struct ipu_image_convert_ctx *ctx,
			       struct ipuv3_channel *channel,
			       struct ipu_image_convert_image *image,
			       enum ipu_rotate_mode rot_mode,
			       bool rot_swap_width_height,
			       unsigned int tile)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	unsigned int burst_size;
	u32 width, height, stride;
	dma_addr_t addr0, addr1 =
		0;
	struct ipu_image tile_image;
	unsigned int tile_idx[2];

	/*
	 * tile_idx[0] selects the tile for buffer 0, tile_idx[1] the tile
	 * for buffer 1. NOTE(review): buffer 1 uses tile index 1; addr1 is
	 * only programmed below when double-buffering is enabled — confirm
	 * all tiles are equally sized in that case.
	 */
	if (image->type == IMAGE_CONVERT_OUT) {
		tile_idx[0] = ctx->out_tile_map[tile];
		tile_idx[1] = ctx->out_tile_map[1];
	} else {
		tile_idx[0] = tile;
		tile_idx[1] = 1;
	}

	if (rot_swap_width_height) {
		/* transfer the intermediate rotation buffer, transposed */
		width = image->tile[tile_idx[0]].height;
		height = image->tile[tile_idx[0]].width;
		stride = image->tile[tile_idx[0]].rot_stride;
		addr0 = ctx->rot_intermediate[0].phys;
		if (ctx->double_buffering)
			addr1 = ctx->rot_intermediate[1].phys;
	} else {
		/* transfer the tile in place within the full frame */
		width = image->tile[tile_idx[0]].width;
		height = image->tile[tile_idx[0]].height;
		stride = image->stride;
		addr0 = image->base.phys0 +
			image->tile[tile_idx[0]].offset;
		if (ctx->double_buffering)
			addr1 = image->base.phys0 +
				image->tile[tile_idx[1]].offset;
	}

	ipu_cpmem_zero(channel);

	memset(&tile_image, 0, sizeof(tile_image));
	tile_image.pix.width = tile_image.rect.width = width;
	tile_image.pix.height = tile_image.rect.height = height;
	tile_image.pix.bytesperline = stride;
	tile_image.pix.pixelformat = image->fmt->fourcc;
	tile_image.phys0 = addr0;
	tile_image.phys1 = addr1;
	/* chroma plane offsets only apply to planar frame buffers */
	if (image->fmt->planar && !rot_swap_width_height) {
		tile_image.u_offset = image->tile[tile_idx[0]].u_off;
		tile_image.v_offset = image->tile[tile_idx[0]].v_off;
	}

	ipu_cpmem_set_image(channel, &tile_image);

	if (rot_mode)
		ipu_cpmem_set_rotation(channel, rot_mode);

	/* the rotator channels use 8x8 block mode with burst size 8 */
	if (channel == chan->rotation_in_chan ||
	    channel == chan->rotation_out_chan) {
		burst_size = 8;
		ipu_cpmem_set_block_mode(channel);
	} else
		burst_size = (width % 16) ?
			     8 : 16;

	ipu_cpmem_set_burstsize(channel, burst_size);

	ipu_ic_task_idma_init(chan->ic, channel, width, height,
			      burst_size, rot_mode);

	/*
	 * Setting a non-zero AXI ID collides with the PRG AXI snooping, so
	 * only do this when there is no PRG present.
	 */
	if (!channel->ipu->prg_priv)
		ipu_cpmem_set_axi_id(channel, 1);

	ipu_idmac_set_double_buffer(channel, ctx->double_buffering);
}

/*
 * Program the IC resizer/CSC and all involved IDMAC channels for the
 * given tile, then enable everything and kick off the transfer.
 * Returns 0 on success or a negative error code from the IC setup.
 */
static int convert_start(struct ipu_image_convert_run *run, unsigned int tile)
{
	struct ipu_image_convert_ctx *ctx = run->ctx;
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_image *s_image = &ctx->in;
	struct ipu_image_convert_image *d_image = &ctx->out;
	unsigned int dst_tile = ctx->out_tile_map[tile];
	unsigned int dest_width, dest_height;
	unsigned int col, row;
	u32 rsc;
	int ret;

	dev_dbg(priv->ipu->dev, "%s: task %u: starting ctx %p run %p tile %u -> %u\n",
		__func__, chan->ic_task, ctx, run, tile, dst_tile);

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		/* swap width/height for resizer */
		dest_width = d_image->tile[dst_tile].height;
		dest_height = d_image->tile[dst_tile].width;
	} else {
		dest_width = d_image->tile[dst_tile].width;
		dest_height = d_image->tile[dst_tile].height;
	}

	row = tile / s_image->num_cols;
	col = tile % s_image->num_cols;

	/* pack downsize and resize coefficients into the IC RSC register */
	rsc = (ctx->downsize_coeff_v << 30) |
	      (ctx->resize_coeffs_v[row] << 16) |
	      (ctx->downsize_coeff_h << 14) |
	      (ctx->resize_coeffs_h[col]);

	dev_dbg(priv->ipu->dev, "%s: %ux%u -> %ux%u (rsc = 0x%x)\n",
		__func__, s_image->tile[tile].width,
		s_image->tile[tile].height, dest_width, dest_height, rsc);

	/* setup the IC resizer and CSC */
	ret = ipu_ic_task_init_rsc(chan->ic, &ctx->csc,
				   s_image->tile[tile].width,
				   s_image->tile[tile].height,
				   dest_width,
				   dest_height,
				   rsc);
	if (ret) {
		dev_err(priv->ipu->dev, "ipu_ic_task_init failed, %d\n", ret);
		return ret;
	}

	/* init the source MEM-->IC PP IDMAC channel */
	init_idmac_channel(ctx, chan->in_chan, s_image,
			   IPU_ROTATE_NONE, false, tile);

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		/* init the IC PP-->MEM IDMAC channel */
		init_idmac_channel(ctx, chan->out_chan, d_image,
				   IPU_ROTATE_NONE, true, tile);

		/* init the MEM-->IC PP ROT IDMAC channel */
		init_idmac_channel(ctx, chan->rotation_in_chan, d_image,
				   ctx->rot_mode, true, tile);

		/* init the destination IC PP ROT-->MEM IDMAC channel */
		init_idmac_channel(ctx, chan->rotation_out_chan, d_image,
				   IPU_ROTATE_NONE, false, tile);

		/* now link IC PP-->MEM to MEM-->IC PP ROT */
		ipu_idmac_link(chan->out_chan, chan->rotation_in_chan);
	} else {
		/* init the destination IC PP-->MEM IDMAC channel */
		init_idmac_channel(ctx, chan->out_chan, d_image,
				   ctx->rot_mode, false, tile);
	}

	/* enable the IC */
	ipu_ic_enable(chan->ic);

	/* set buffers ready */
	ipu_idmac_select_buffer(chan->in_chan, 0);
	ipu_idmac_select_buffer(chan->out_chan, 0);
	if (ipu_rot_mode_is_irt(ctx->rot_mode))
		ipu_idmac_select_buffer(chan->rotation_out_chan, 0);
	if (ctx->double_buffering) {
		ipu_idmac_select_buffer(chan->in_chan, 1);
		ipu_idmac_select_buffer(chan->out_chan, 1);
		if (ipu_rot_mode_is_irt(ctx->rot_mode))
			ipu_idmac_select_buffer(chan->rotation_out_chan, 1);
	}

	/* enable the channels! */
	ipu_idmac_enable_channel(chan->in_chan);
	ipu_idmac_enable_channel(chan->out_chan);
	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		ipu_idmac_enable_channel(chan->rotation_in_chan);
		ipu_idmac_enable_channel(chan->rotation_out_chan);
	}

	ipu_ic_task_enable(chan->ic);

	ipu_cpmem_dump(chan->in_chan);
	ipu_cpmem_dump(chan->out_chan);
	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		ipu_cpmem_dump(chan->rotation_in_chan);
		ipu_cpmem_dump(chan->rotation_out_chan);
	}

	ipu_dump(priv->ipu);

	return 0;
}

/* hold irqlock when calling */
static int do_run(struct ipu_image_convert_run *run)
{
	struct ipu_image_convert_ctx *ctx = run->ctx;
	struct ipu_image_convert_chan *chan = ctx->chan;

	lockdep_assert_held(&chan->irqlock);

	/* latch the frame buffer addresses for this run */
	ctx->in.base.phys0 = run->in_phys;
	ctx->out.base.phys0 = run->out_phys;

	ctx->cur_buf_num = 0;
	ctx->next_tile = 1;

	/* remove run from pending_q and set as current */
	list_del(&run->list);
	chan->current_run = run;

	/* start the conversion with the first tile */
	return convert_start(run, 0);
}

/* hold irqlock when calling */
static void run_next(struct ipu_image_convert_chan *chan)
{
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_run *run, *tmp;
	int ret;

	lockdep_assert_held(&chan->irqlock);

	list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
		/* skip contexts that are aborting */
		if (run->ctx->aborting) {
			dev_dbg(priv->ipu->dev,
				"%s: task %u: skipping aborting ctx %p run %p\n",
				__func__, chan->ic_task, run->ctx, run);
			continue;
		}

		ret = do_run(run);
		if (!ret)
			break;

		/*
		 * something went wrong with start, add the run
		 * to done q and continue to the next run in the
		 * pending q.
		 */
		run->status = ret;
		list_add_tail(&run->list, &chan->done_q);
		chan->current_run = NULL;
	}
}

/*
 * Move every run off the done_q and invoke its completion callback.
 * The irqlock is dropped around the callback so that the callback may
 * queue new runs; the list head is re-read each iteration.
 */
static void empty_done_q(struct ipu_image_convert_chan *chan)
{
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_run *run;
	unsigned long flags;

	spin_lock_irqsave(&chan->irqlock, flags);

	while (!list_empty(&chan->done_q)) {
		run = list_entry(chan->done_q.next,
				 struct ipu_image_convert_run,
				 list);

		list_del(&run->list);

		dev_dbg(priv->ipu->dev,
			"%s: task %u: completing ctx %p run %p with %d\n",
			__func__, chan->ic_task, run->ctx, run, run->status);

		/* call the completion callback and free the run */
		spin_unlock_irqrestore(&chan->irqlock, flags);
		run->ctx->complete(run, run->ctx->complete_context);
		spin_lock_irqsave(&chan->irqlock, flags);
	}

	spin_unlock_irqrestore(&chan->irqlock, flags);
}

/*
 * the bottom half thread clears out the done_q, calling the
 * completion handler for each.
 */
static irqreturn_t do_bh(int irq, void *dev_id)
{
	struct ipu_image_convert_chan *chan = dev_id;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_ctx *ctx;
	unsigned long flags;

	dev_dbg(priv->ipu->dev, "%s: task %u: enter\n", __func__,
		chan->ic_task);

	empty_done_q(chan);

	spin_lock_irqsave(&chan->irqlock, flags);

	/*
	 * the done_q is cleared out, signal any contexts
	 * that are aborting that abort can complete.
	 */
	list_for_each_entry(ctx, &chan->ctx_list, list) {
		if (ctx->aborting) {
			dev_dbg(priv->ipu->dev,
				"%s: task %u: signaling abort for ctx %p\n",
				__func__, chan->ic_task, ctx);
			complete_all(&ctx->aborted);
		}
	}

	spin_unlock_irqrestore(&chan->irqlock, flags);

	dev_dbg(priv->ipu->dev, "%s: task %u: exit\n", __func__,
		chan->ic_task);

	return IRQ_HANDLED;
}

/*
 * True if the next tile needs different IC settings (resize
 * coefficients or tile geometry) than the current one, in which case
 * the IC has to be stopped and reprogrammed between tiles.
 */
static bool ic_settings_changed(struct ipu_image_convert_ctx *ctx)
{
	unsigned int cur_tile = ctx->next_tile - 1;
	unsigned int next_tile = ctx->next_tile;

	if (ctx->resize_coeffs_h[cur_tile % ctx->in.num_cols] !=
	    ctx->resize_coeffs_h[next_tile % ctx->in.num_cols] ||
	    ctx->resize_coeffs_v[cur_tile / ctx->in.num_cols] !=
	    ctx->resize_coeffs_v[next_tile / ctx->in.num_cols] ||
	    ctx->in.tile[cur_tile].width != ctx->in.tile[next_tile].width ||
	    ctx->in.tile[cur_tile].height != ctx->in.tile[next_tile].height ||
	    ctx->out.tile[cur_tile].width != ctx->out.tile[next_tile].width ||
	    ctx->out.tile[cur_tile].height != ctx->out.tile[next_tile].height)
		return true;

	return false;
}

/* hold irqlock when calling */
static irqreturn_t do_irq(struct ipu_image_convert_run *run)
{
	struct ipu_image_convert_ctx *ctx = run->ctx;
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_tile *src_tile, *dst_tile;
	struct ipu_image_convert_image *s_image = &ctx->in;
	struct ipu_image_convert_image *d_image = &ctx->out;
	struct ipuv3_channel *outch;
	unsigned int dst_idx;

	lockdep_assert_held(&chan->irqlock);

	/* the last channel in the pipeline differs with rotation */
	outch = ipu_rot_mode_is_irt(ctx->rot_mode) ?
		chan->rotation_out_chan : chan->out_chan;

	/*
	 * It is difficult to stop the channel DMA before the channels
	 * enter the paused state. Without double-buffering the channels
	 * are always in a paused state when the EOF irq occurs, so it
	 * is safe to stop the channels now. For double-buffering we
	 * just ignore the abort until the operation completes, when it
	 * is safe to shut down.
	 */
	if (ctx->aborting && !ctx->double_buffering) {
		convert_stop(run);
		run->status = -EIO;
		goto done;
	}

	if (ctx->next_tile == ctx->num_tiles) {
		/*
		 * the conversion is complete
		 */
		convert_stop(run);
		run->status = 0;
		goto done;
	}

	/*
	 * not done, place the next tile buffers.
	 */
	if (!ctx->double_buffering) {
		if (ic_settings_changed(ctx)) {
			/* reprogram the IC for the next tile's geometry */
			convert_stop(run);
			convert_start(run, ctx->next_tile);
		} else {
			src_tile = &s_image->tile[ctx->next_tile];
			dst_idx = ctx->out_tile_map[ctx->next_tile];
			dst_tile = &d_image->tile[dst_idx];

			ipu_cpmem_set_buffer(chan->in_chan, 0,
					     s_image->base.phys0 +
					     src_tile->offset);
			ipu_cpmem_set_buffer(outch, 0,
					     d_image->base.phys0 +
					     dst_tile->offset);
			if (s_image->fmt->planar)
				ipu_cpmem_set_uv_offset(chan->in_chan,
							src_tile->u_off,
							src_tile->v_off);
			if (d_image->fmt->planar)
				ipu_cpmem_set_uv_offset(outch,
							dst_tile->u_off,
							dst_tile->v_off);

			ipu_idmac_select_buffer(chan->in_chan, 0);
			ipu_idmac_select_buffer(outch, 0);
		}
	} else if (ctx->next_tile < ctx->num_tiles - 1) {

		/* double-buffered: queue the tile after next */
		src_tile = &s_image->tile[ctx->next_tile + 1];
		dst_idx = ctx->out_tile_map[ctx->next_tile + 1];
		dst_tile = &d_image->tile[dst_idx];

		ipu_cpmem_set_buffer(chan->in_chan, ctx->cur_buf_num,
				     s_image->base.phys0 + src_tile->offset);
		ipu_cpmem_set_buffer(outch, ctx->cur_buf_num,
				     d_image->base.phys0 + dst_tile->offset);

		ipu_idmac_select_buffer(chan->in_chan, ctx->cur_buf_num);
		ipu_idmac_select_buffer(outch, ctx->cur_buf_num);

		ctx->cur_buf_num ^= 1;
	}

	ctx->next_tile++;
	return IRQ_HANDLED;
done:
	list_add_tail(&run->list, &chan->done_q);
	chan->current_run = NULL;
	run_next(chan);
	/* wake do_bh() to run the completion callbacks */
	return IRQ_WAKE_THREAD;
}

/* EOF irq for the non-rotating output channel */
static irqreturn_t norotate_irq(int irq, void *data)
{
	struct ipu_image_convert_chan *chan = data;
	struct ipu_image_convert_ctx *ctx;
	struct ipu_image_convert_run *run;
	unsigned long flags;
	irqreturn_t ret;

	spin_lock_irqsave(&chan->irqlock, flags);

	/* get current run and its context */
	run = chan->current_run;
	if (!run) {
		ret = IRQ_NONE;
		goto out;
	}

	ctx = run->ctx;

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		/* this is a rotation operation, just ignore */
		spin_unlock_irqrestore(&chan->irqlock, flags);
		return IRQ_HANDLED;
	}

	ret = do_irq(run);
out:
	spin_unlock_irqrestore(&chan->irqlock, flags);
	return ret;
}

/* EOF irq for the rotator output channel */
static irqreturn_t rotate_irq(int irq, void *data)
{
	struct ipu_image_convert_chan *chan = data;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_ctx *ctx;
	struct ipu_image_convert_run *run;
	unsigned long flags;
	irqreturn_t ret;

	spin_lock_irqsave(&chan->irqlock, flags);

	/* get current run and its context */
	run = chan->current_run;
	if (!run) {
		ret = IRQ_NONE;
		goto out;
	}

	ctx = run->ctx;

	if (!ipu_rot_mode_is_irt(ctx->rot_mode)) {
		/* this was NOT a rotation operation, shouldn't happen */
		dev_err(priv->ipu->dev, "Unexpected rotation interrupt\n");
		spin_unlock_irqrestore(&chan->irqlock, flags);
		return IRQ_HANDLED;
	}

	ret = do_irq(run);
out:
	spin_unlock_irqrestore(&chan->irqlock, flags);
	return ret;
}

/*
 * try to force the completion of runs for this ctx.
 * Called when
 * abort wait times out in ipu_image_convert_abort().
 */
static void force_abort(struct ipu_image_convert_ctx *ctx)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_run *run;
	unsigned long flags;

	spin_lock_irqsave(&chan->irqlock, flags);

	run = chan->current_run;
	if (run && run->ctx == ctx) {
		/* stop the hardware and fail the run with -EIO */
		convert_stop(run);
		run->status = -EIO;
		list_add_tail(&run->list, &chan->done_q);
		chan->current_run = NULL;
		run_next(chan);
	}

	spin_unlock_irqrestore(&chan->irqlock, flags);

	empty_done_q(chan);
}

/*
 * Release everything acquired by get_ipu_resources(). Safe to call on
 * a partially acquired channel: irqs are only freed if valid (>= 0)
 * and channel/IC handles only if not an ERR_PTR or NULL.
 */
static void release_ipu_resources(struct ipu_image_convert_chan *chan)
{
	if (chan->out_eof_irq >= 0)
		free_irq(chan->out_eof_irq, chan);
	if (chan->rot_out_eof_irq >= 0)
		free_irq(chan->rot_out_eof_irq, chan);

	if (!IS_ERR_OR_NULL(chan->in_chan))
		ipu_idmac_put(chan->in_chan);
	if (!IS_ERR_OR_NULL(chan->out_chan))
		ipu_idmac_put(chan->out_chan);
	if (!IS_ERR_OR_NULL(chan->rotation_in_chan))
		ipu_idmac_put(chan->rotation_in_chan);
	if (!IS_ERR_OR_NULL(chan->rotation_out_chan))
		ipu_idmac_put(chan->rotation_out_chan);
	if (!IS_ERR_OR_NULL(chan->ic))
		ipu_ic_put(chan->ic);

	/* reset handles so a later release is a no-op */
	chan->in_chan = chan->out_chan = chan->rotation_in_chan =
		chan->rotation_out_chan = NULL;
	chan->out_eof_irq = chan->rot_out_eof_irq = -1;
}

/*
 * Acquire the IC task, the four IDMAC channels and the two EOF
 * interrupts used by this conversion channel. On any failure all
 * partially acquired resources are released before returning the
 * error.
 */
static int get_ipu_resources(struct ipu_image_convert_chan *chan)
{
	const struct ipu_image_convert_dma_chan *dma = chan->dma_ch;
	struct ipu_image_convert_priv *priv = chan->priv;
	int ret;

	/* get IC */
	chan->ic = ipu_ic_get(priv->ipu, chan->ic_task);
	if (IS_ERR(chan->ic)) {
		dev_err(priv->ipu->dev, "could not acquire IC\n");
		ret = PTR_ERR(chan->ic);
		goto err;
	}

	/* get IDMAC channels */
	chan->in_chan = ipu_idmac_get(priv->ipu, dma->in);
	chan->out_chan = ipu_idmac_get(priv->ipu, dma->out);
	if (IS_ERR(chan->in_chan) || IS_ERR(chan->out_chan)) {
		dev_err(priv->ipu->dev, "could not acquire idmac channels\n");
		ret = -EBUSY;
		goto err;
	}

	chan->rotation_in_chan = ipu_idmac_get(priv->ipu, dma->rot_in);
	chan->rotation_out_chan = ipu_idmac_get(priv->ipu, dma->rot_out);
	if (IS_ERR(chan->rotation_in_chan) || IS_ERR(chan->rotation_out_chan)) {
		dev_err(priv->ipu->dev,
			"could not acquire idmac rotation channels\n");
		ret = -EBUSY;
		goto err;
	}

	/* acquire the EOF interrupts */
	chan->out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
						  chan->out_chan,
						  IPU_IRQ_EOF);

	/* hard irq handles tiles, threaded handler runs completions */
	ret = request_threaded_irq(chan->out_eof_irq, norotate_irq, do_bh,
				   0, "ipu-ic", chan);
	if (ret < 0) {
		dev_err(priv->ipu->dev, "could not acquire irq %d\n",
			chan->out_eof_irq);
		chan->out_eof_irq = -1;
		goto err;
	}

	chan->rot_out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
						      chan->rotation_out_chan,
						      IPU_IRQ_EOF);

	ret = request_threaded_irq(chan->rot_out_eof_irq, rotate_irq, do_bh,
				   0, "ipu-ic", chan);
	if (ret < 0) {
		dev_err(priv->ipu->dev, "could not acquire irq %d\n",
			chan->rot_out_eof_irq);
		chan->rot_out_eof_irq = -1;
		goto err;
	}

	return 0;
err:
	release_ipu_resources(chan);
	return ret;
}

/*
 * Copy the client-supplied image description into the context and
 * resolve its pixel format. Returns -EINVAL for unsupported formats.
 */
static int fill_image(struct ipu_image_convert_ctx *ctx,
		      struct ipu_image_convert_image *ic_image,
		      struct ipu_image *image,
		      enum ipu_image_convert_type type)
{
	struct ipu_image_convert_priv *priv = ctx->chan->priv;

	ic_image->base = *image;
	ic_image->type = type;

	ic_image->fmt = get_format(image->pix.pixelformat);
	if (!ic_image->fmt) {
		dev_err(priv->ipu->dev, "pixelformat not supported for %s\n",
			type == IMAGE_CONVERT_OUT ?
"Output" : "Input"); 1842 return -EINVAL; 1843 } 1844 1845 if (ic_image->fmt->planar) 1846 ic_image->stride = ic_image->base.pix.width; 1847 else 1848 ic_image->stride = ic_image->base.pix.bytesperline; 1849 1850 return 0; 1851 } 1852 1853 /* borrowed from drivers/media/v4l2-core/v4l2-common.c */ 1854 static unsigned int clamp_align(unsigned int x, unsigned int min, 1855 unsigned int max, unsigned int align) 1856 { 1857 /* Bits that must be zero to be aligned */ 1858 unsigned int mask = ~((1 << align) - 1); 1859 1860 /* Clamp to aligned min and max */ 1861 x = clamp(x, (min + ~mask) & mask, max & mask); 1862 1863 /* Round to nearest aligned value */ 1864 if (align) 1865 x = (x + (1 << (align - 1))) & mask; 1866 1867 return x; 1868 } 1869 1870 /* Adjusts input/output images to IPU restrictions */ 1871 void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out, 1872 enum ipu_rotate_mode rot_mode) 1873 { 1874 const struct ipu_image_pixfmt *infmt, *outfmt; 1875 u32 w_align, h_align; 1876 1877 infmt = get_format(in->pix.pixelformat); 1878 outfmt = get_format(out->pix.pixelformat); 1879 1880 /* set some default pixel formats if needed */ 1881 if (!infmt) { 1882 in->pix.pixelformat = V4L2_PIX_FMT_RGB24; 1883 infmt = get_format(V4L2_PIX_FMT_RGB24); 1884 } 1885 if (!outfmt) { 1886 out->pix.pixelformat = V4L2_PIX_FMT_RGB24; 1887 outfmt = get_format(V4L2_PIX_FMT_RGB24); 1888 } 1889 1890 /* image converter does not handle fields */ 1891 in->pix.field = out->pix.field = V4L2_FIELD_NONE; 1892 1893 /* resizer cannot downsize more than 4:1 */ 1894 if (ipu_rot_mode_is_irt(rot_mode)) { 1895 out->pix.height = max_t(__u32, out->pix.height, 1896 in->pix.width / 4); 1897 out->pix.width = max_t(__u32, out->pix.width, 1898 in->pix.height / 4); 1899 } else { 1900 out->pix.width = max_t(__u32, out->pix.width, 1901 in->pix.width / 4); 1902 out->pix.height = max_t(__u32, out->pix.height, 1903 in->pix.height / 4); 1904 } 1905 1906 /* align input width/height */ 1907 w_align = 
ilog2(tile_width_align(IMAGE_CONVERT_IN, infmt, rot_mode)); 1908 h_align = ilog2(tile_height_align(IMAGE_CONVERT_IN, infmt, rot_mode)); 1909 in->pix.width = clamp_align(in->pix.width, MIN_W, MAX_W, w_align); 1910 in->pix.height = clamp_align(in->pix.height, MIN_H, MAX_H, h_align); 1911 1912 /* align output width/height */ 1913 w_align = ilog2(tile_width_align(IMAGE_CONVERT_OUT, outfmt, rot_mode)); 1914 h_align = ilog2(tile_height_align(IMAGE_CONVERT_OUT, outfmt, rot_mode)); 1915 out->pix.width = clamp_align(out->pix.width, MIN_W, MAX_W, w_align); 1916 out->pix.height = clamp_align(out->pix.height, MIN_H, MAX_H, h_align); 1917 1918 /* set input/output strides and image sizes */ 1919 in->pix.bytesperline = infmt->planar ? 1920 clamp_align(in->pix.width, 2 << w_align, MAX_W, w_align) : 1921 clamp_align((in->pix.width * infmt->bpp) >> 3, 1922 2 << w_align, MAX_W, w_align); 1923 in->pix.sizeimage = infmt->planar ? 1924 (in->pix.height * in->pix.bytesperline * infmt->bpp) >> 3 : 1925 in->pix.height * in->pix.bytesperline; 1926 out->pix.bytesperline = outfmt->planar ? out->pix.width : 1927 (out->pix.width * outfmt->bpp) >> 3; 1928 out->pix.sizeimage = outfmt->planar ? 1929 (out->pix.height * out->pix.bytesperline * outfmt->bpp) >> 3 : 1930 out->pix.height * out->pix.bytesperline; 1931 } 1932 EXPORT_SYMBOL_GPL(ipu_image_convert_adjust); 1933 1934 /* 1935 * this is used by ipu_image_convert_prepare() to verify set input and 1936 * output images are valid before starting the conversion. Clients can 1937 * also call it before calling ipu_image_convert_prepare(). 
 */
int ipu_image_convert_verify(struct ipu_image *in, struct ipu_image *out,
			     enum ipu_rotate_mode rot_mode)
{
	struct ipu_image testin, testout;

	/* run the adjustment on copies and compare against the originals */
	testin = *in;
	testout = *out;

	ipu_image_convert_adjust(&testin, &testout, rot_mode);

	if (testin.pix.width != in->pix.width ||
	    testin.pix.height != in->pix.height ||
	    testout.pix.width != out->pix.width ||
	    testout.pix.height != out->pix.height)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_verify);

/*
 * Call ipu_image_convert_prepare() to prepare for the conversion of
 * given images and rotation mode. Returns a new conversion context.
 */
struct ipu_image_convert_ctx *
ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
			  struct ipu_image *in, struct ipu_image *out,
			  enum ipu_rotate_mode rot_mode,
			  ipu_image_convert_cb_t complete,
			  void *complete_context)
{
	struct ipu_image_convert_priv *priv = ipu->image_convert_priv;
	struct ipu_image_convert_image *s_image, *d_image;
	struct ipu_image_convert_chan *chan;
	struct ipu_image_convert_ctx *ctx;
	unsigned long flags;
	unsigned int i;
	bool get_res;
	int ret;

	if (!in || !out || !complete ||
	    (ic_task != IC_TASK_VIEWFINDER &&
	     ic_task != IC_TASK_POST_PROCESSOR))
		return ERR_PTR(-EINVAL);

	/* verify the in/out images before continuing */
	ret = ipu_image_convert_verify(in, out, rot_mode);
	if (ret) {
		dev_err(priv->ipu->dev, "%s: in/out formats invalid\n",
			__func__);
		return ERR_PTR(ret);
	}

	chan = &priv->chan[ic_task];

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p\n", __func__,
		chan->ic_task, ctx);

	ctx->chan = chan;
	init_completion(&ctx->aborted);

	s_image = &ctx->in;
	d_image = &ctx->out;

	/* set tiling and rotation */
	d_image->num_rows = num_stripes(out->pix.height);
	d_image->num_cols = num_stripes(out->pix.width);
	if (ipu_rot_mode_is_irt(rot_mode)) {
		/* rotation transposes the tile grid between in and out */
		s_image->num_rows = d_image->num_cols;
		s_image->num_cols = d_image->num_rows;
	} else {
		s_image->num_rows = d_image->num_rows;
		s_image->num_cols = d_image->num_cols;
	}

	ctx->num_tiles = d_image->num_cols * d_image->num_rows;
	ctx->rot_mode = rot_mode;

	ret = fill_image(ctx, s_image, in, IMAGE_CONVERT_IN);
	if (ret)
		goto out_free;
	ret = fill_image(ctx, d_image, out, IMAGE_CONVERT_OUT);
	if (ret)
		goto out_free;

	ret = calc_image_resize_coefficients(ctx, in, out);
	if (ret)
		goto out_free;

	calc_out_tile_map(ctx);

	find_seams(ctx, s_image, d_image);

	calc_tile_dimensions(ctx, s_image);
	ret = calc_tile_offsets(ctx, s_image);
	if (ret)
		goto out_free;

	calc_tile_dimensions(ctx, d_image);
	ret = calc_tile_offsets(ctx, d_image);
	if (ret)
		goto out_free;

	calc_tile_resize_coefficients(ctx);

	ret = ipu_ic_calc_csc(&ctx->csc,
			s_image->base.pix.ycbcr_enc,
			s_image->base.pix.quantization,
			ipu_pixelformat_to_colorspace(s_image->fmt->fourcc),
			d_image->base.pix.ycbcr_enc,
			d_image->base.pix.quantization,
			ipu_pixelformat_to_colorspace(d_image->fmt->fourcc));
	if (ret)
		goto out_free;

	dump_format(ctx, s_image);
	dump_format(ctx, d_image);

	ctx->complete = complete;
	ctx->complete_context = complete_context;

	/*
	 * Can we use double-buffering for this operation? If there is
	 * only one tile (the whole image can be converted in a single
	 * operation) there's no point in using double-buffering. Also,
	 * the IPU's IDMAC channels allow only a single U and V plane
	 * offset shared between both buffers, but these offsets change
	 * for every tile, and therefore would have to be updated for
	 * each buffer which is not possible. So double-buffering is
	 * impossible when either the source or destination images are
	 * a planar format (YUV420, YUV422P, etc.). Further, differently
	 * sized tiles or different resizing coefficients per tile
	 * prevent double-buffering as well.
	 */
	ctx->double_buffering = (ctx->num_tiles > 1 &&
				 !s_image->fmt->planar &&
				 !d_image->fmt->planar);
	for (i = 1; i < ctx->num_tiles; i++) {
		if (ctx->in.tile[i].width != ctx->in.tile[0].width ||
		    ctx->in.tile[i].height != ctx->in.tile[0].height ||
		    ctx->out.tile[i].width != ctx->out.tile[0].width ||
		    ctx->out.tile[i].height != ctx->out.tile[0].height) {
			ctx->double_buffering = false;
			break;
		}
	}
	for (i = 1; i < ctx->in.num_cols; i++) {
		if (ctx->resize_coeffs_h[i] != ctx->resize_coeffs_h[0]) {
			ctx->double_buffering = false;
			break;
		}
	}
	for (i = 1; i < ctx->in.num_rows; i++) {
		if (ctx->resize_coeffs_v[i] != ctx->resize_coeffs_v[0]) {
			ctx->double_buffering = false;
			break;
		}
	}

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		/* size the shared rotation buffer for the largest tile */
		unsigned long intermediate_size = d_image->tile[0].size;

		for (i = 1; i < ctx->num_tiles; i++) {
			if (d_image->tile[i].size > intermediate_size)
				intermediate_size = d_image->tile[i].size;
		}

		ret = alloc_dma_buf(priv, &ctx->rot_intermediate[0],
				    intermediate_size);
		if (ret)
			goto out_free;
		if (ctx->double_buffering) {
			ret = alloc_dma_buf(priv,
					    &ctx->rot_intermediate[1],
					    intermediate_size);
			if (ret)
				goto out_free_dmabuf0;
		}
	}

	spin_lock_irqsave(&chan->irqlock, flags);

	/* first context on this channel acquires the hardware */
	get_res = list_empty(&chan->ctx_list);

	list_add_tail(&ctx->list, &chan->ctx_list);

	spin_unlock_irqrestore(&chan->irqlock, flags);

	if (get_res) {
		ret = get_ipu_resources(chan);
		if (ret)
			goto out_free_dmabuf1;
	}

	return ctx;

out_free_dmabuf1:
	free_dma_buf(priv, &ctx->rot_intermediate[1]);
	spin_lock_irqsave(&chan->irqlock, flags);
	list_del(&ctx->list);
	spin_unlock_irqrestore(&chan->irqlock, flags);
out_free_dmabuf0:
	free_dma_buf(priv, &ctx->rot_intermediate[0]);
out_free:
	kfree(ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(ipu_image_convert_prepare);

/*
 * Carry out a single image conversion run. Only the physaddr's of the input
 * and output image buffers are needed. The conversion context must have
 * been created previously with ipu_image_convert_prepare().
 */
int ipu_image_convert_queue(struct ipu_image_convert_run *run)
{
	struct ipu_image_convert_chan *chan;
	struct ipu_image_convert_priv *priv;
	struct ipu_image_convert_ctx *ctx;
	unsigned long flags;
	int ret = 0;

	if (!run || !run->ctx || !run->in_phys || !run->out_phys)
		return -EINVAL;

	ctx = run->ctx;
	chan = ctx->chan;
	priv = chan->priv;

	dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p run %p\n", __func__,
		chan->ic_task, ctx, run);

	INIT_LIST_HEAD(&run->list);

	spin_lock_irqsave(&chan->irqlock, flags);

	/* don't accept new runs for a context that is being aborted */
	if (ctx->aborting) {
		ret = -EIO;
		goto unlock;
	}

	list_add_tail(&run->list, &chan->pending_q);

	/* if the channel is idle, start this run immediately */
	if (!chan->current_run) {
		ret = do_run(run);
		if (ret)
			chan->current_run = NULL;
	}
unlock:
	spin_unlock_irqrestore(&chan->irqlock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_queue);

/* Abort any active or pending conversions for this context */
static void __ipu_image_convert_abort(struct ipu_image_convert_ctx
*ctx) 2199 { 2200 struct ipu_image_convert_chan *chan = ctx->chan; 2201 struct ipu_image_convert_priv *priv = chan->priv; 2202 struct ipu_image_convert_run *run, *active_run, *tmp; 2203 unsigned long flags; 2204 int run_count, ret; 2205 2206 spin_lock_irqsave(&chan->irqlock, flags); 2207 2208 /* move all remaining pending runs in this context to done_q */ 2209 list_for_each_entry_safe(run, tmp, &chan->pending_q, list) { 2210 if (run->ctx != ctx) 2211 continue; 2212 run->status = -EIO; 2213 list_move_tail(&run->list, &chan->done_q); 2214 } 2215 2216 run_count = get_run_count(ctx, &chan->done_q); 2217 active_run = (chan->current_run && chan->current_run->ctx == ctx) ? 2218 chan->current_run : NULL; 2219 2220 if (active_run) 2221 reinit_completion(&ctx->aborted); 2222 2223 ctx->aborting = true; 2224 2225 spin_unlock_irqrestore(&chan->irqlock, flags); 2226 2227 if (!run_count && !active_run) { 2228 dev_dbg(priv->ipu->dev, 2229 "%s: task %u: no abort needed for ctx %p\n", 2230 __func__, chan->ic_task, ctx); 2231 return; 2232 } 2233 2234 if (!active_run) { 2235 empty_done_q(chan); 2236 return; 2237 } 2238 2239 dev_dbg(priv->ipu->dev, 2240 "%s: task %u: wait for completion: %d runs\n", 2241 __func__, chan->ic_task, run_count); 2242 2243 ret = wait_for_completion_timeout(&ctx->aborted, 2244 msecs_to_jiffies(10000)); 2245 if (ret == 0) { 2246 dev_warn(priv->ipu->dev, "%s: timeout\n", __func__); 2247 force_abort(ctx); 2248 } 2249 } 2250 2251 void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx) 2252 { 2253 __ipu_image_convert_abort(ctx); 2254 ctx->aborting = false; 2255 } 2256 EXPORT_SYMBOL_GPL(ipu_image_convert_abort); 2257 2258 /* Unprepare image conversion context */ 2259 void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx) 2260 { 2261 struct ipu_image_convert_chan *chan = ctx->chan; 2262 struct ipu_image_convert_priv *priv = chan->priv; 2263 unsigned long flags; 2264 bool put_res; 2265 2266 /* make sure no runs are hanging around */ 2267 
__ipu_image_convert_abort(ctx); 2268 2269 dev_dbg(priv->ipu->dev, "%s: task %u: removing ctx %p\n", __func__, 2270 chan->ic_task, ctx); 2271 2272 spin_lock_irqsave(&chan->irqlock, flags); 2273 2274 list_del(&ctx->list); 2275 2276 put_res = list_empty(&chan->ctx_list); 2277 2278 spin_unlock_irqrestore(&chan->irqlock, flags); 2279 2280 if (put_res) 2281 release_ipu_resources(chan); 2282 2283 free_dma_buf(priv, &ctx->rot_intermediate[1]); 2284 free_dma_buf(priv, &ctx->rot_intermediate[0]); 2285 2286 kfree(ctx); 2287 } 2288 EXPORT_SYMBOL_GPL(ipu_image_convert_unprepare); 2289 2290 /* 2291 * "Canned" asynchronous single image conversion. Allocates and returns 2292 * a new conversion run. On successful return the caller must free the 2293 * run and call ipu_image_convert_unprepare() after conversion completes. 2294 */ 2295 struct ipu_image_convert_run * 2296 ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task, 2297 struct ipu_image *in, struct ipu_image *out, 2298 enum ipu_rotate_mode rot_mode, 2299 ipu_image_convert_cb_t complete, 2300 void *complete_context) 2301 { 2302 struct ipu_image_convert_ctx *ctx; 2303 struct ipu_image_convert_run *run; 2304 int ret; 2305 2306 ctx = ipu_image_convert_prepare(ipu, ic_task, in, out, rot_mode, 2307 complete, complete_context); 2308 if (IS_ERR(ctx)) 2309 return ERR_CAST(ctx); 2310 2311 run = kzalloc(sizeof(*run), GFP_KERNEL); 2312 if (!run) { 2313 ipu_image_convert_unprepare(ctx); 2314 return ERR_PTR(-ENOMEM); 2315 } 2316 2317 run->ctx = ctx; 2318 run->in_phys = in->phys0; 2319 run->out_phys = out->phys0; 2320 2321 ret = ipu_image_convert_queue(run); 2322 if (ret) { 2323 ipu_image_convert_unprepare(ctx); 2324 kfree(run); 2325 return ERR_PTR(ret); 2326 } 2327 2328 return run; 2329 } 2330 EXPORT_SYMBOL_GPL(ipu_image_convert); 2331 2332 /* "Canned" synchronous single image conversion */ 2333 static void image_convert_sync_complete(struct ipu_image_convert_run *run, 2334 void *data) 2335 { 2336 struct completion *comp = 
data; 2337 2338 complete(comp); 2339 } 2340 2341 int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task, 2342 struct ipu_image *in, struct ipu_image *out, 2343 enum ipu_rotate_mode rot_mode) 2344 { 2345 struct ipu_image_convert_run *run; 2346 struct completion comp; 2347 int ret; 2348 2349 init_completion(&comp); 2350 2351 run = ipu_image_convert(ipu, ic_task, in, out, rot_mode, 2352 image_convert_sync_complete, &comp); 2353 if (IS_ERR(run)) 2354 return PTR_ERR(run); 2355 2356 ret = wait_for_completion_timeout(&comp, msecs_to_jiffies(10000)); 2357 ret = (ret == 0) ? -ETIMEDOUT : 0; 2358 2359 ipu_image_convert_unprepare(run->ctx); 2360 kfree(run); 2361 2362 return ret; 2363 } 2364 EXPORT_SYMBOL_GPL(ipu_image_convert_sync); 2365 2366 int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev) 2367 { 2368 struct ipu_image_convert_priv *priv; 2369 int i; 2370 2371 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); 2372 if (!priv) 2373 return -ENOMEM; 2374 2375 ipu->image_convert_priv = priv; 2376 priv->ipu = ipu; 2377 2378 for (i = 0; i < IC_NUM_TASKS; i++) { 2379 struct ipu_image_convert_chan *chan = &priv->chan[i]; 2380 2381 chan->ic_task = i; 2382 chan->priv = priv; 2383 chan->dma_ch = &image_convert_dma_chan[i]; 2384 chan->out_eof_irq = -1; 2385 chan->rot_out_eof_irq = -1; 2386 2387 spin_lock_init(&chan->irqlock); 2388 INIT_LIST_HEAD(&chan->ctx_list); 2389 INIT_LIST_HEAD(&chan->pending_q); 2390 INIT_LIST_HEAD(&chan->done_q); 2391 } 2392 2393 return 0; 2394 } 2395 2396 void ipu_image_convert_exit(struct ipu_soc *ipu) 2397 { 2398 } 2399