1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * camss-vfe.c 4 * 5 * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module 6 * 7 * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. 8 * Copyright (C) 2015-2018 Linaro Ltd. 9 */ 10 #include <linux/clk.h> 11 #include <linux/completion.h> 12 #include <linux/interrupt.h> 13 #include <linux/iommu.h> 14 #include <linux/iopoll.h> 15 #include <linux/mutex.h> 16 #include <linux/of.h> 17 #include <linux/platform_device.h> 18 #include <linux/spinlock_types.h> 19 #include <linux/spinlock.h> 20 #include <media/media-entity.h> 21 #include <media/v4l2-device.h> 22 #include <media/v4l2-subdev.h> 23 24 #include "camss-vfe.h" 25 #include "camss.h" 26 27 #define MSM_VFE_NAME "msm_vfe" 28 29 #define vfe_line_array(ptr_line) \ 30 ((const struct vfe_line (*)[]) &(ptr_line[-(ptr_line->id)])) 31 32 #define to_vfe(ptr_line) \ 33 container_of(vfe_line_array(ptr_line), struct vfe_device, ptr_line) 34 35 #define VFE_0_HW_VERSION 0x000 36 37 #define VFE_0_GLOBAL_RESET_CMD 0x00c 38 #define VFE_0_GLOBAL_RESET_CMD_CORE (1 << 0) 39 #define VFE_0_GLOBAL_RESET_CMD_CAMIF (1 << 1) 40 #define VFE_0_GLOBAL_RESET_CMD_BUS (1 << 2) 41 #define VFE_0_GLOBAL_RESET_CMD_BUS_BDG (1 << 3) 42 #define VFE_0_GLOBAL_RESET_CMD_REGISTER (1 << 4) 43 #define VFE_0_GLOBAL_RESET_CMD_TIMER (1 << 5) 44 #define VFE_0_GLOBAL_RESET_CMD_PM (1 << 6) 45 #define VFE_0_GLOBAL_RESET_CMD_BUS_MISR (1 << 7) 46 #define VFE_0_GLOBAL_RESET_CMD_TESTGEN (1 << 8) 47 48 #define VFE_0_MODULE_CFG 0x018 49 #define VFE_0_MODULE_CFG_DEMUX (1 << 2) 50 #define VFE_0_MODULE_CFG_CHROMA_UPSAMPLE (1 << 3) 51 #define VFE_0_MODULE_CFG_SCALE_ENC (1 << 23) 52 #define VFE_0_MODULE_CFG_CROP_ENC (1 << 27) 53 54 #define VFE_0_CORE_CFG 0x01c 55 #define VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR 0x4 56 #define VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB 0x5 57 #define VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY 0x6 58 #define VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY 0x7 59 60 #define VFE_0_IRQ_CMD 0x024 61 #define 
VFE_0_IRQ_CMD_GLOBAL_CLEAR (1 << 0) 62 63 #define VFE_0_IRQ_MASK_0 0x028 64 #define VFE_0_IRQ_MASK_0_CAMIF_SOF (1 << 0) 65 #define VFE_0_IRQ_MASK_0_CAMIF_EOF (1 << 1) 66 #define VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n) (1 << ((n) + 5)) 67 #define VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(n) \ 68 ((n) == VFE_LINE_PIX ? (1 << 4) : VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n)) 69 #define VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(n) (1 << ((n) + 8)) 70 #define VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(n) (1 << ((n) + 25)) 71 #define VFE_0_IRQ_MASK_0_RESET_ACK (1 << 31) 72 #define VFE_0_IRQ_MASK_1 0x02c 73 #define VFE_0_IRQ_MASK_1_CAMIF_ERROR (1 << 0) 74 #define VFE_0_IRQ_MASK_1_VIOLATION (1 << 7) 75 #define VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK (1 << 8) 76 #define VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(n) (1 << ((n) + 9)) 77 #define VFE_0_IRQ_MASK_1_RDIn_SOF(n) (1 << ((n) + 29)) 78 79 #define VFE_0_IRQ_CLEAR_0 0x030 80 #define VFE_0_IRQ_CLEAR_1 0x034 81 82 #define VFE_0_IRQ_STATUS_0 0x038 83 #define VFE_0_IRQ_STATUS_0_CAMIF_SOF (1 << 0) 84 #define VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n) (1 << ((n) + 5)) 85 #define VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(n) \ 86 ((n) == VFE_LINE_PIX ? 
(1 << 4) : VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n)) 87 #define VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(n) (1 << ((n) + 8)) 88 #define VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(n) (1 << ((n) + 25)) 89 #define VFE_0_IRQ_STATUS_0_RESET_ACK (1 << 31) 90 #define VFE_0_IRQ_STATUS_1 0x03c 91 #define VFE_0_IRQ_STATUS_1_VIOLATION (1 << 7) 92 #define VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK (1 << 8) 93 #define VFE_0_IRQ_STATUS_1_RDIn_SOF(n) (1 << ((n) + 29)) 94 95 #define VFE_0_IRQ_COMPOSITE_MASK_0 0x40 96 #define VFE_0_VIOLATION_STATUS 0x48 97 98 #define VFE_0_BUS_CMD 0x4c 99 #define VFE_0_BUS_CMD_Mx_RLD_CMD(x) (1 << (x)) 100 101 #define VFE_0_BUS_CFG 0x050 102 103 #define VFE_0_BUS_XBAR_CFG_x(x) (0x58 + 0x4 * ((x) / 2)) 104 #define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN (1 << 1) 105 #define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA (0x3 << 4) 106 #define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT 8 107 #define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA 0 108 #define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 5 109 #define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 6 110 #define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 7 111 112 #define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(n) (0x06c + 0x24 * (n)) 113 #define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT 0 114 #define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT 1 115 #define VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(n) (0x070 + 0x24 * (n)) 116 #define VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(n) (0x074 + 0x24 * (n)) 117 #define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(n) (0x078 + 0x24 * (n)) 118 #define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT 2 119 #define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK (0x1F << 2) 120 121 #define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(n) (0x07c + 0x24 * (n)) 122 #define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT 16 123 #define VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(n) (0x080 + 0x24 * (n)) 124 #define VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(n) 
(0x084 + 0x24 * (n)) 125 #define VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(n) \ 126 (0x088 + 0x24 * (n)) 127 #define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(n) \ 128 (0x08c + 0x24 * (n)) 129 #define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF 0xffffffff 130 131 #define VFE_0_BUS_PING_PONG_STATUS 0x268 132 133 #define VFE_0_BUS_BDG_CMD 0x2c0 134 #define VFE_0_BUS_BDG_CMD_HALT_REQ 1 135 136 #define VFE_0_BUS_BDG_QOS_CFG_0 0x2c4 137 #define VFE_0_BUS_BDG_QOS_CFG_0_CFG 0xaaa5aaa5 138 #define VFE_0_BUS_BDG_QOS_CFG_1 0x2c8 139 #define VFE_0_BUS_BDG_QOS_CFG_2 0x2cc 140 #define VFE_0_BUS_BDG_QOS_CFG_3 0x2d0 141 #define VFE_0_BUS_BDG_QOS_CFG_4 0x2d4 142 #define VFE_0_BUS_BDG_QOS_CFG_5 0x2d8 143 #define VFE_0_BUS_BDG_QOS_CFG_6 0x2dc 144 #define VFE_0_BUS_BDG_QOS_CFG_7 0x2e0 145 #define VFE_0_BUS_BDG_QOS_CFG_7_CFG 0x0001aaa5 146 147 #define VFE_0_RDI_CFG_x(x) (0x2e8 + (0x4 * (x))) 148 #define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT 28 149 #define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK (0xf << 28) 150 #define VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT 4 151 #define VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK (0xf << 4) 152 #define VFE_0_RDI_CFG_x_RDI_EN_BIT (1 << 2) 153 #define VFE_0_RDI_CFG_x_MIPI_EN_BITS 0x3 154 #define VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(r) (1 << (16 + (r))) 155 156 #define VFE_0_CAMIF_CMD 0x2f4 157 #define VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY 0 158 #define VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY 1 159 #define VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS (1 << 2) 160 #define VFE_0_CAMIF_CFG 0x2f8 161 #define VFE_0_CAMIF_CFG_VFE_OUTPUT_EN (1 << 6) 162 #define VFE_0_CAMIF_FRAME_CFG 0x300 163 #define VFE_0_CAMIF_WINDOW_WIDTH_CFG 0x304 164 #define VFE_0_CAMIF_WINDOW_HEIGHT_CFG 0x308 165 #define VFE_0_CAMIF_SUBSAMPLE_CFG_0 0x30c 166 #define VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN 0x314 167 #define VFE_0_CAMIF_STATUS 0x31c 168 #define VFE_0_CAMIF_STATUS_HALT (1 << 31) 169 170 #define VFE_0_REG_UPDATE 0x378 171 #define VFE_0_REG_UPDATE_RDIn(n) (1 << (1 + (n))) 172 #define 
VFE_0_REG_UPDATE_line_n(n) \ 173 ((n) == VFE_LINE_PIX ? 1 : VFE_0_REG_UPDATE_RDIn(n)) 174 175 #define VFE_0_DEMUX_CFG 0x424 176 #define VFE_0_DEMUX_CFG_PERIOD 0x3 177 #define VFE_0_DEMUX_GAIN_0 0x428 178 #define VFE_0_DEMUX_GAIN_0_CH0_EVEN (0x80 << 0) 179 #define VFE_0_DEMUX_GAIN_0_CH0_ODD (0x80 << 16) 180 #define VFE_0_DEMUX_GAIN_1 0x42c 181 #define VFE_0_DEMUX_GAIN_1_CH1 (0x80 << 0) 182 #define VFE_0_DEMUX_GAIN_1_CH2 (0x80 << 16) 183 #define VFE_0_DEMUX_EVEN_CFG 0x438 184 #define VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV 0x9cac 185 #define VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU 0xac9c 186 #define VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY 0xc9ca 187 #define VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY 0xcac9 188 #define VFE_0_DEMUX_ODD_CFG 0x43c 189 #define VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV 0x9cac 190 #define VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU 0xac9c 191 #define VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY 0xc9ca 192 #define VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY 0xcac9 193 194 #define VFE_0_SCALE_ENC_Y_CFG 0x75c 195 #define VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE 0x760 196 #define VFE_0_SCALE_ENC_Y_H_PHASE 0x764 197 #define VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE 0x76c 198 #define VFE_0_SCALE_ENC_Y_V_PHASE 0x770 199 #define VFE_0_SCALE_ENC_CBCR_CFG 0x778 200 #define VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE 0x77c 201 #define VFE_0_SCALE_ENC_CBCR_H_PHASE 0x780 202 #define VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE 0x790 203 #define VFE_0_SCALE_ENC_CBCR_V_PHASE 0x794 204 205 #define VFE_0_CROP_ENC_Y_WIDTH 0x854 206 #define VFE_0_CROP_ENC_Y_HEIGHT 0x858 207 #define VFE_0_CROP_ENC_CBCR_WIDTH 0x85c 208 #define VFE_0_CROP_ENC_CBCR_HEIGHT 0x860 209 210 #define VFE_0_CLAMP_ENC_MAX_CFG 0x874 211 #define VFE_0_CLAMP_ENC_MAX_CFG_CH0 (0xff << 0) 212 #define VFE_0_CLAMP_ENC_MAX_CFG_CH1 (0xff << 8) 213 #define VFE_0_CLAMP_ENC_MAX_CFG_CH2 (0xff << 16) 214 #define VFE_0_CLAMP_ENC_MIN_CFG 0x878 215 #define VFE_0_CLAMP_ENC_MIN_CFG_CH0 (0x0 << 0) 216 #define VFE_0_CLAMP_ENC_MIN_CFG_CH1 (0x0 << 8) 217 #define VFE_0_CLAMP_ENC_MIN_CFG_CH2 (0x0 << 16) 218 219 #define 
VFE_0_CGC_OVERRIDE_1 0x974 220 #define VFE_0_CGC_OVERRIDE_1_IMAGE_Mx_CGC_OVERRIDE(x) (1 << (x)) 221 222 /* VFE reset timeout */ 223 #define VFE_RESET_TIMEOUT_MS 50 224 /* VFE halt timeout */ 225 #define VFE_HALT_TIMEOUT_MS 100 226 /* Max number of frame drop updates per frame */ 227 #define VFE_FRAME_DROP_UPDATES 5 228 /* Frame drop value. NOTE: VAL + UPDATES should not exceed 31 */ 229 #define VFE_FRAME_DROP_VAL 20 230 231 #define VFE_NEXT_SOF_MS 500 232 233 #define CAMIF_TIMEOUT_SLEEP_US 1000 234 #define CAMIF_TIMEOUT_ALL_US 1000000 235 236 #define SCALER_RATIO_MAX 16 237 238 static const struct { 239 u32 code; 240 u8 bpp; 241 } vfe_formats[] = { 242 { 243 MEDIA_BUS_FMT_UYVY8_2X8, 244 8, 245 }, 246 { 247 MEDIA_BUS_FMT_VYUY8_2X8, 248 8, 249 }, 250 { 251 MEDIA_BUS_FMT_YUYV8_2X8, 252 8, 253 }, 254 { 255 MEDIA_BUS_FMT_YVYU8_2X8, 256 8, 257 }, 258 { 259 MEDIA_BUS_FMT_SBGGR8_1X8, 260 8, 261 }, 262 { 263 MEDIA_BUS_FMT_SGBRG8_1X8, 264 8, 265 }, 266 { 267 MEDIA_BUS_FMT_SGRBG8_1X8, 268 8, 269 }, 270 { 271 MEDIA_BUS_FMT_SRGGB8_1X8, 272 8, 273 }, 274 { 275 MEDIA_BUS_FMT_SBGGR10_1X10, 276 10, 277 }, 278 { 279 MEDIA_BUS_FMT_SGBRG10_1X10, 280 10, 281 }, 282 { 283 MEDIA_BUS_FMT_SGRBG10_1X10, 284 10, 285 }, 286 { 287 MEDIA_BUS_FMT_SRGGB10_1X10, 288 10, 289 }, 290 { 291 MEDIA_BUS_FMT_SBGGR12_1X12, 292 12, 293 }, 294 { 295 MEDIA_BUS_FMT_SGBRG12_1X12, 296 12, 297 }, 298 { 299 MEDIA_BUS_FMT_SGRBG12_1X12, 300 12, 301 }, 302 { 303 MEDIA_BUS_FMT_SRGGB12_1X12, 304 12, 305 } 306 }; 307 308 /* 309 * vfe_get_bpp - map media bus format to bits per pixel 310 * @code: media bus format code 311 * 312 * Return number of bits per pixel 313 */ 314 static u8 vfe_get_bpp(u32 code) 315 { 316 unsigned int i; 317 318 for (i = 0; i < ARRAY_SIZE(vfe_formats); i++) 319 if (code == vfe_formats[i].code) 320 return vfe_formats[i].bpp; 321 322 WARN(1, "Unknown format\n"); 323 324 return vfe_formats[0].bpp; 325 } 326 327 static inline void vfe_reg_clr(struct vfe_device *vfe, u32 reg, u32 clr_bits) 328 { 329 
u32 bits = readl_relaxed(vfe->base + reg); 330 331 writel_relaxed(bits & ~clr_bits, vfe->base + reg); 332 } 333 334 static inline void vfe_reg_set(struct vfe_device *vfe, u32 reg, u32 set_bits) 335 { 336 u32 bits = readl_relaxed(vfe->base + reg); 337 338 writel_relaxed(bits | set_bits, vfe->base + reg); 339 } 340 341 static void vfe_global_reset(struct vfe_device *vfe) 342 { 343 u32 reset_bits = VFE_0_GLOBAL_RESET_CMD_TESTGEN | 344 VFE_0_GLOBAL_RESET_CMD_BUS_MISR | 345 VFE_0_GLOBAL_RESET_CMD_PM | 346 VFE_0_GLOBAL_RESET_CMD_TIMER | 347 VFE_0_GLOBAL_RESET_CMD_REGISTER | 348 VFE_0_GLOBAL_RESET_CMD_BUS_BDG | 349 VFE_0_GLOBAL_RESET_CMD_BUS | 350 VFE_0_GLOBAL_RESET_CMD_CAMIF | 351 VFE_0_GLOBAL_RESET_CMD_CORE; 352 353 writel_relaxed(reset_bits, vfe->base + VFE_0_GLOBAL_RESET_CMD); 354 } 355 356 static void vfe_wm_enable(struct vfe_device *vfe, u8 wm, u8 enable) 357 { 358 if (enable) 359 vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm), 360 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT); 361 else 362 vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm), 363 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT); 364 } 365 366 static void vfe_wm_frame_based(struct vfe_device *vfe, u8 wm, u8 enable) 367 { 368 if (enable) 369 vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm), 370 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT); 371 else 372 vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm), 373 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT); 374 } 375 376 #define CALC_WORD(width, M, N) (((width) * (M) + (N) - 1) / (N)) 377 378 static int vfe_word_per_line(uint32_t format, uint32_t pixel_per_line) 379 { 380 int val = 0; 381 382 switch (format) { 383 case V4L2_PIX_FMT_NV12: 384 case V4L2_PIX_FMT_NV21: 385 case V4L2_PIX_FMT_NV16: 386 case V4L2_PIX_FMT_NV61: 387 val = CALC_WORD(pixel_per_line, 1, 8); 388 break; 389 case V4L2_PIX_FMT_YUYV: 390 case V4L2_PIX_FMT_YVYU: 391 case V4L2_PIX_FMT_UYVY: 392 case V4L2_PIX_FMT_VYUY: 393 val = 
CALC_WORD(pixel_per_line, 2, 8); 394 break; 395 } 396 397 return val; 398 } 399 400 static void vfe_get_wm_sizes(struct v4l2_pix_format_mplane *pix, u8 plane, 401 u16 *width, u16 *height, u16 *bytesperline) 402 { 403 switch (pix->pixelformat) { 404 case V4L2_PIX_FMT_NV12: 405 case V4L2_PIX_FMT_NV21: 406 *width = pix->width; 407 *height = pix->height; 408 *bytesperline = pix->plane_fmt[0].bytesperline; 409 if (plane == 1) 410 *height /= 2; 411 break; 412 case V4L2_PIX_FMT_NV16: 413 case V4L2_PIX_FMT_NV61: 414 *width = pix->width; 415 *height = pix->height; 416 *bytesperline = pix->plane_fmt[0].bytesperline; 417 break; 418 } 419 } 420 421 static void vfe_wm_line_based(struct vfe_device *vfe, u32 wm, 422 struct v4l2_pix_format_mplane *pix, 423 u8 plane, u32 enable) 424 { 425 u32 reg; 426 427 if (enable) { 428 u16 width = 0, height = 0, bytesperline = 0, wpl; 429 430 vfe_get_wm_sizes(pix, plane, &width, &height, &bytesperline); 431 432 wpl = vfe_word_per_line(pix->pixelformat, width); 433 434 reg = height - 1; 435 reg |= ((wpl + 1) / 2 - 1) << 16; 436 437 writel_relaxed(reg, vfe->base + 438 VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm)); 439 440 wpl = vfe_word_per_line(pix->pixelformat, bytesperline); 441 442 reg = 0x3; 443 reg |= (height - 1) << 4; 444 reg |= wpl << 16; 445 446 writel_relaxed(reg, vfe->base + 447 VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm)); 448 } else { 449 writel_relaxed(0, vfe->base + 450 VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm)); 451 writel_relaxed(0, vfe->base + 452 VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm)); 453 } 454 } 455 456 static void vfe_wm_set_framedrop_period(struct vfe_device *vfe, u8 wm, u8 per) 457 { 458 u32 reg; 459 460 reg = readl_relaxed(vfe->base + 461 VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm)); 462 463 reg &= ~(VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK); 464 465 reg |= (per << VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT) 466 & VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK; 467 468 
writel_relaxed(reg, 469 vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm)); 470 } 471 472 static void vfe_wm_set_framedrop_pattern(struct vfe_device *vfe, u8 wm, 473 u32 pattern) 474 { 475 writel_relaxed(pattern, 476 vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(wm)); 477 } 478 479 static void vfe_wm_set_ub_cfg(struct vfe_device *vfe, u8 wm, u16 offset, 480 u16 depth) 481 { 482 u32 reg; 483 484 reg = (offset << VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT) | 485 depth; 486 writel_relaxed(reg, vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(wm)); 487 } 488 489 static void vfe_bus_reload_wm(struct vfe_device *vfe, u8 wm) 490 { 491 wmb(); 492 writel_relaxed(VFE_0_BUS_CMD_Mx_RLD_CMD(wm), vfe->base + VFE_0_BUS_CMD); 493 wmb(); 494 } 495 496 static void vfe_wm_set_ping_addr(struct vfe_device *vfe, u8 wm, u32 addr) 497 { 498 writel_relaxed(addr, 499 vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(wm)); 500 } 501 502 static void vfe_wm_set_pong_addr(struct vfe_device *vfe, u8 wm, u32 addr) 503 { 504 writel_relaxed(addr, 505 vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(wm)); 506 } 507 508 static int vfe_wm_get_ping_pong_status(struct vfe_device *vfe, u8 wm) 509 { 510 u32 reg; 511 512 reg = readl_relaxed(vfe->base + VFE_0_BUS_PING_PONG_STATUS); 513 514 return (reg >> wm) & 0x1; 515 } 516 517 static void vfe_bus_enable_wr_if(struct vfe_device *vfe, u8 enable) 518 { 519 if (enable) 520 writel_relaxed(0x10000009, vfe->base + VFE_0_BUS_CFG); 521 else 522 writel_relaxed(0, vfe->base + VFE_0_BUS_CFG); 523 } 524 525 static void vfe_bus_connect_wm_to_rdi(struct vfe_device *vfe, u8 wm, 526 enum vfe_line_id id) 527 { 528 u32 reg; 529 530 reg = VFE_0_RDI_CFG_x_MIPI_EN_BITS; 531 reg |= VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(id); 532 vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), reg); 533 534 reg = VFE_0_RDI_CFG_x_RDI_EN_BIT; 535 reg |= ((3 * id) << VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT) & 536 VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK; 537 vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id), 
reg); 538 539 switch (id) { 540 case VFE_LINE_RDI0: 541 default: 542 reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 << 543 VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; 544 break; 545 case VFE_LINE_RDI1: 546 reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 << 547 VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; 548 break; 549 case VFE_LINE_RDI2: 550 reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 << 551 VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; 552 break; 553 } 554 555 if (wm % 2 == 1) 556 reg <<= 16; 557 558 vfe_reg_set(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg); 559 } 560 561 static void vfe_wm_set_subsample(struct vfe_device *vfe, u8 wm) 562 { 563 writel_relaxed(VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF, 564 vfe->base + 565 VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(wm)); 566 } 567 568 static void vfe_bus_disconnect_wm_from_rdi(struct vfe_device *vfe, u8 wm, 569 enum vfe_line_id id) 570 { 571 u32 reg; 572 573 reg = VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(id); 574 vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(0), reg); 575 576 reg = VFE_0_RDI_CFG_x_RDI_EN_BIT; 577 vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id), reg); 578 579 switch (id) { 580 case VFE_LINE_RDI0: 581 default: 582 reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 << 583 VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; 584 break; 585 case VFE_LINE_RDI1: 586 reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 << 587 VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; 588 break; 589 case VFE_LINE_RDI2: 590 reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 << 591 VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; 592 break; 593 } 594 595 if (wm % 2 == 1) 596 reg <<= 16; 597 598 vfe_reg_clr(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg); 599 } 600 601 static void vfe_set_xbar_cfg(struct vfe_device *vfe, struct vfe_output *output, 602 u8 enable) 603 { 604 struct vfe_line *line = container_of(output, struct vfe_line, output); 605 u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat; 
606 u32 reg; 607 unsigned int i; 608 609 for (i = 0; i < output->wm_num; i++) { 610 if (i == 0) { 611 reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA << 612 VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; 613 } else if (i == 1) { 614 reg = VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN; 615 if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV16) 616 reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA; 617 } else { 618 /* On current devices output->wm_num is always <= 2 */ 619 break; 620 } 621 622 if (output->wm_idx[i] % 2 == 1) 623 reg <<= 16; 624 625 if (enable) 626 vfe_reg_set(vfe, 627 VFE_0_BUS_XBAR_CFG_x(output->wm_idx[i]), 628 reg); 629 else 630 vfe_reg_clr(vfe, 631 VFE_0_BUS_XBAR_CFG_x(output->wm_idx[i]), 632 reg); 633 } 634 } 635 636 static void vfe_set_rdi_cid(struct vfe_device *vfe, enum vfe_line_id id, u8 cid) 637 { 638 vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id), 639 VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK); 640 641 vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id), 642 cid << VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT); 643 } 644 645 static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id) 646 { 647 vfe->reg_update |= VFE_0_REG_UPDATE_line_n(line_id); 648 wmb(); 649 writel_relaxed(vfe->reg_update, vfe->base + VFE_0_REG_UPDATE); 650 wmb(); 651 } 652 653 static void vfe_enable_irq_wm_line(struct vfe_device *vfe, u8 wm, 654 enum vfe_line_id line_id, u8 enable) 655 { 656 u32 irq_en0 = VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(wm) | 657 VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id); 658 u32 irq_en1 = VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(wm) | 659 VFE_0_IRQ_MASK_1_RDIn_SOF(line_id); 660 661 if (enable) { 662 vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0); 663 vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1); 664 } else { 665 vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0); 666 vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1); 667 } 668 } 669 670 static void vfe_enable_irq_pix_line(struct vfe_device *vfe, u8 comp, 671 enum vfe_line_id line_id, u8 enable) 672 { 673 struct vfe_output 
*output = &vfe->line[line_id].output; 674 unsigned int i; 675 u32 irq_en0; 676 u32 irq_en1; 677 u32 comp_mask = 0; 678 679 irq_en0 = VFE_0_IRQ_MASK_0_CAMIF_SOF; 680 irq_en0 |= VFE_0_IRQ_MASK_0_CAMIF_EOF; 681 irq_en0 |= VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(comp); 682 irq_en0 |= VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id); 683 irq_en1 = VFE_0_IRQ_MASK_1_CAMIF_ERROR; 684 for (i = 0; i < output->wm_num; i++) { 685 irq_en1 |= VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW( 686 output->wm_idx[i]); 687 comp_mask |= (1 << output->wm_idx[i]) << comp * 8; 688 } 689 690 if (enable) { 691 vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0); 692 vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1); 693 vfe_reg_set(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask); 694 } else { 695 vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0); 696 vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1); 697 vfe_reg_clr(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask); 698 } 699 } 700 701 static void vfe_enable_irq_common(struct vfe_device *vfe) 702 { 703 u32 irq_en0 = VFE_0_IRQ_MASK_0_RESET_ACK; 704 u32 irq_en1 = VFE_0_IRQ_MASK_1_VIOLATION | 705 VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK; 706 707 vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0); 708 vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1); 709 } 710 711 static void vfe_set_demux_cfg(struct vfe_device *vfe, struct vfe_line *line) 712 { 713 u32 val, even_cfg, odd_cfg; 714 715 writel_relaxed(VFE_0_DEMUX_CFG_PERIOD, vfe->base + VFE_0_DEMUX_CFG); 716 717 val = VFE_0_DEMUX_GAIN_0_CH0_EVEN | VFE_0_DEMUX_GAIN_0_CH0_ODD; 718 writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_0); 719 720 val = VFE_0_DEMUX_GAIN_1_CH1 | VFE_0_DEMUX_GAIN_1_CH2; 721 writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_1); 722 723 switch (line->fmt[MSM_VFE_PAD_SINK].code) { 724 case MEDIA_BUS_FMT_YUYV8_2X8: 725 even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV; 726 odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV; 727 break; 728 case MEDIA_BUS_FMT_YVYU8_2X8: 729 even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU; 730 odd_cfg = 
VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU; 731 break; 732 case MEDIA_BUS_FMT_UYVY8_2X8: 733 default: 734 even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY; 735 odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY; 736 break; 737 case MEDIA_BUS_FMT_VYUY8_2X8: 738 even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY; 739 odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY; 740 break; 741 } 742 743 writel_relaxed(even_cfg, vfe->base + VFE_0_DEMUX_EVEN_CFG); 744 writel_relaxed(odd_cfg, vfe->base + VFE_0_DEMUX_ODD_CFG); 745 } 746 747 static inline u8 vfe_calc_interp_reso(u16 input, u16 output) 748 { 749 if (input / output >= 16) 750 return 0; 751 752 if (input / output >= 8) 753 return 1; 754 755 if (input / output >= 4) 756 return 2; 757 758 return 3; 759 } 760 761 static void vfe_set_scale_cfg(struct vfe_device *vfe, struct vfe_line *line) 762 { 763 u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat; 764 u32 reg; 765 u16 input, output; 766 u8 interp_reso; 767 u32 phase_mult; 768 769 writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_Y_CFG); 770 771 input = line->fmt[MSM_VFE_PAD_SINK].width; 772 output = line->compose.width; 773 reg = (output << 16) | input; 774 writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE); 775 776 interp_reso = vfe_calc_interp_reso(input, output); 777 phase_mult = input * (1 << (13 + interp_reso)) / output; 778 reg = (interp_reso << 20) | phase_mult; 779 writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_PHASE); 780 781 input = line->fmt[MSM_VFE_PAD_SINK].height; 782 output = line->compose.height; 783 reg = (output << 16) | input; 784 writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE); 785 786 interp_reso = vfe_calc_interp_reso(input, output); 787 phase_mult = input * (1 << (13 + interp_reso)) / output; 788 reg = (interp_reso << 20) | phase_mult; 789 writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_PHASE); 790 791 writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_CBCR_CFG); 792 793 input = line->fmt[MSM_VFE_PAD_SINK].width; 794 output = 
line->compose.width / 2; 795 reg = (output << 16) | input; 796 writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE); 797 798 interp_reso = vfe_calc_interp_reso(input, output); 799 phase_mult = input * (1 << (13 + interp_reso)) / output; 800 reg = (interp_reso << 20) | phase_mult; 801 writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_PHASE); 802 803 input = line->fmt[MSM_VFE_PAD_SINK].height; 804 output = line->compose.height; 805 if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21) 806 output = line->compose.height / 2; 807 reg = (output << 16) | input; 808 writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE); 809 810 interp_reso = vfe_calc_interp_reso(input, output); 811 phase_mult = input * (1 << (13 + interp_reso)) / output; 812 reg = (interp_reso << 20) | phase_mult; 813 writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_PHASE); 814 } 815 816 static void vfe_set_crop_cfg(struct vfe_device *vfe, struct vfe_line *line) 817 { 818 u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat; 819 u32 reg; 820 u16 first, last; 821 822 first = line->crop.left; 823 last = line->crop.left + line->crop.width - 1; 824 reg = (first << 16) | last; 825 writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_WIDTH); 826 827 first = line->crop.top; 828 last = line->crop.top + line->crop.height - 1; 829 reg = (first << 16) | last; 830 writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_HEIGHT); 831 832 first = line->crop.left / 2; 833 last = line->crop.left / 2 + line->crop.width / 2 - 1; 834 reg = (first << 16) | last; 835 writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_WIDTH); 836 837 first = line->crop.top; 838 last = line->crop.top + line->crop.height - 1; 839 if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21) { 840 first = line->crop.top / 2; 841 last = line->crop.top / 2 + line->crop.height / 2 - 1; 842 } 843 reg = (first << 16) | last; 844 writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_HEIGHT); 845 } 846 847 static void 
vfe_set_clamp_cfg(struct vfe_device *vfe) 848 { 849 u32 val = VFE_0_CLAMP_ENC_MAX_CFG_CH0 | 850 VFE_0_CLAMP_ENC_MAX_CFG_CH1 | 851 VFE_0_CLAMP_ENC_MAX_CFG_CH2; 852 853 writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MAX_CFG); 854 855 val = VFE_0_CLAMP_ENC_MIN_CFG_CH0 | 856 VFE_0_CLAMP_ENC_MIN_CFG_CH1 | 857 VFE_0_CLAMP_ENC_MIN_CFG_CH2; 858 859 writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MIN_CFG); 860 } 861 862 /* 863 * vfe_reset - Trigger reset on VFE module and wait to complete 864 * @vfe: VFE device 865 * 866 * Return 0 on success or a negative error code otherwise 867 */ 868 static int vfe_reset(struct vfe_device *vfe) 869 { 870 unsigned long time; 871 872 reinit_completion(&vfe->reset_complete); 873 874 vfe_global_reset(vfe); 875 876 time = wait_for_completion_timeout(&vfe->reset_complete, 877 msecs_to_jiffies(VFE_RESET_TIMEOUT_MS)); 878 if (!time) { 879 dev_err(to_device(vfe), "VFE reset timeout\n"); 880 return -EIO; 881 } 882 883 return 0; 884 } 885 886 /* 887 * vfe_halt - Trigger halt on VFE module and wait to complete 888 * @vfe: VFE device 889 * 890 * Return 0 on success or a negative error code otherwise 891 */ 892 static int vfe_halt(struct vfe_device *vfe) 893 { 894 unsigned long time; 895 896 reinit_completion(&vfe->halt_complete); 897 898 writel_relaxed(VFE_0_BUS_BDG_CMD_HALT_REQ, 899 vfe->base + VFE_0_BUS_BDG_CMD); 900 901 time = wait_for_completion_timeout(&vfe->halt_complete, 902 msecs_to_jiffies(VFE_HALT_TIMEOUT_MS)); 903 if (!time) { 904 dev_err(to_device(vfe), "VFE halt timeout\n"); 905 return -EIO; 906 } 907 908 return 0; 909 } 910 911 static void vfe_init_outputs(struct vfe_device *vfe) 912 { 913 int i; 914 915 for (i = 0; i < ARRAY_SIZE(vfe->line); i++) { 916 struct vfe_output *output = &vfe->line[i].output; 917 918 output->state = VFE_OUTPUT_OFF; 919 output->buf[0] = NULL; 920 output->buf[1] = NULL; 921 INIT_LIST_HEAD(&output->pending_bufs); 922 923 output->wm_num = 1; 924 if (vfe->line[i].id == VFE_LINE_PIX) 925 output->wm_num = 2; 926 } 
927 } 928 929 static void vfe_reset_output_maps(struct vfe_device *vfe) 930 { 931 int i; 932 933 for (i = 0; i < ARRAY_SIZE(vfe->wm_output_map); i++) 934 vfe->wm_output_map[i] = VFE_LINE_NONE; 935 } 936 937 static void vfe_set_qos(struct vfe_device *vfe) 938 { 939 u32 val = VFE_0_BUS_BDG_QOS_CFG_0_CFG; 940 u32 val7 = VFE_0_BUS_BDG_QOS_CFG_7_CFG; 941 942 writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_0); 943 writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_1); 944 writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_2); 945 writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_3); 946 writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_4); 947 writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_5); 948 writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_6); 949 writel_relaxed(val7, vfe->base + VFE_0_BUS_BDG_QOS_CFG_7); 950 } 951 952 static void vfe_set_cgc_override(struct vfe_device *vfe, u8 wm, u8 enable) 953 { 954 u32 val = VFE_0_CGC_OVERRIDE_1_IMAGE_Mx_CGC_OVERRIDE(wm); 955 956 if (enable) 957 vfe_reg_set(vfe, VFE_0_CGC_OVERRIDE_1, val); 958 else 959 vfe_reg_clr(vfe, VFE_0_CGC_OVERRIDE_1, val); 960 961 wmb(); 962 } 963 964 static void vfe_set_module_cfg(struct vfe_device *vfe, u8 enable) 965 { 966 u32 val = VFE_0_MODULE_CFG_DEMUX | 967 VFE_0_MODULE_CFG_CHROMA_UPSAMPLE | 968 VFE_0_MODULE_CFG_SCALE_ENC | 969 VFE_0_MODULE_CFG_CROP_ENC; 970 971 if (enable) 972 writel_relaxed(val, vfe->base + VFE_0_MODULE_CFG); 973 else 974 writel_relaxed(0x0, vfe->base + VFE_0_MODULE_CFG); 975 } 976 977 static void vfe_set_camif_cfg(struct vfe_device *vfe, struct vfe_line *line) 978 { 979 u32 val; 980 981 switch (line->fmt[MSM_VFE_PAD_SINK].code) { 982 case MEDIA_BUS_FMT_YUYV8_2X8: 983 val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR; 984 break; 985 case MEDIA_BUS_FMT_YVYU8_2X8: 986 val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB; 987 break; 988 case MEDIA_BUS_FMT_UYVY8_2X8: 989 default: 990 val = VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY; 991 break; 992 case MEDIA_BUS_FMT_VYUY8_2X8: 
993 val = VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY; 994 break; 995 } 996 997 writel_relaxed(val, vfe->base + VFE_0_CORE_CFG); 998 999 val = line->fmt[MSM_VFE_PAD_SINK].width * 2; 1000 val |= line->fmt[MSM_VFE_PAD_SINK].height << 16; 1001 writel_relaxed(val, vfe->base + VFE_0_CAMIF_FRAME_CFG); 1002 1003 val = line->fmt[MSM_VFE_PAD_SINK].width * 2 - 1; 1004 writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_WIDTH_CFG); 1005 1006 val = line->fmt[MSM_VFE_PAD_SINK].height - 1; 1007 writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_HEIGHT_CFG); 1008 1009 val = 0xffffffff; 1010 writel_relaxed(val, vfe->base + VFE_0_CAMIF_SUBSAMPLE_CFG_0); 1011 1012 val = 0xffffffff; 1013 writel_relaxed(val, vfe->base + VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN); 1014 1015 val = VFE_0_RDI_CFG_x_MIPI_EN_BITS; 1016 vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), val); 1017 1018 val = VFE_0_CAMIF_CFG_VFE_OUTPUT_EN; 1019 writel_relaxed(val, vfe->base + VFE_0_CAMIF_CFG); 1020 } 1021 1022 static void vfe_set_camif_cmd(struct vfe_device *vfe, u32 cmd) 1023 { 1024 writel_relaxed(VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS, 1025 vfe->base + VFE_0_CAMIF_CMD); 1026 1027 writel_relaxed(cmd, vfe->base + VFE_0_CAMIF_CMD); 1028 } 1029 1030 static int vfe_camif_wait_for_stop(struct vfe_device *vfe) 1031 { 1032 u32 val; 1033 int ret; 1034 1035 ret = readl_poll_timeout(vfe->base + VFE_0_CAMIF_STATUS, 1036 val, 1037 (val & VFE_0_CAMIF_STATUS_HALT), 1038 CAMIF_TIMEOUT_SLEEP_US, 1039 CAMIF_TIMEOUT_ALL_US); 1040 if (ret < 0) 1041 dev_err(to_device(vfe), "%s: camif stop timeout\n", __func__); 1042 1043 return ret; 1044 } 1045 1046 static void vfe_output_init_addrs(struct vfe_device *vfe, 1047 struct vfe_output *output, u8 sync) 1048 { 1049 u32 ping_addr; 1050 u32 pong_addr; 1051 unsigned int i; 1052 1053 output->active_buf = 0; 1054 1055 for (i = 0; i < output->wm_num; i++) { 1056 if (output->buf[0]) 1057 ping_addr = output->buf[0]->addr[i]; 1058 else 1059 ping_addr = 0; 1060 1061 if (output->buf[1]) 1062 pong_addr = output->buf[1]->addr[i]; 
1063 else 1064 pong_addr = ping_addr; 1065 1066 vfe_wm_set_ping_addr(vfe, output->wm_idx[i], ping_addr); 1067 vfe_wm_set_pong_addr(vfe, output->wm_idx[i], pong_addr); 1068 if (sync) 1069 vfe_bus_reload_wm(vfe, output->wm_idx[i]); 1070 } 1071 } 1072 1073 static void vfe_output_update_ping_addr(struct vfe_device *vfe, 1074 struct vfe_output *output, u8 sync) 1075 { 1076 u32 addr; 1077 unsigned int i; 1078 1079 for (i = 0; i < output->wm_num; i++) { 1080 if (output->buf[0]) 1081 addr = output->buf[0]->addr[i]; 1082 else 1083 addr = 0; 1084 1085 vfe_wm_set_ping_addr(vfe, output->wm_idx[i], addr); 1086 if (sync) 1087 vfe_bus_reload_wm(vfe, output->wm_idx[i]); 1088 } 1089 } 1090 1091 static void vfe_output_update_pong_addr(struct vfe_device *vfe, 1092 struct vfe_output *output, u8 sync) 1093 { 1094 u32 addr; 1095 unsigned int i; 1096 1097 for (i = 0; i < output->wm_num; i++) { 1098 if (output->buf[1]) 1099 addr = output->buf[1]->addr[i]; 1100 else 1101 addr = 0; 1102 1103 vfe_wm_set_pong_addr(vfe, output->wm_idx[i], addr); 1104 if (sync) 1105 vfe_bus_reload_wm(vfe, output->wm_idx[i]); 1106 } 1107 1108 } 1109 1110 static int vfe_reserve_wm(struct vfe_device *vfe, enum vfe_line_id line_id) 1111 { 1112 int ret = -EBUSY; 1113 int i; 1114 1115 for (i = 0; i < ARRAY_SIZE(vfe->wm_output_map); i++) { 1116 if (vfe->wm_output_map[i] == VFE_LINE_NONE) { 1117 vfe->wm_output_map[i] = line_id; 1118 ret = i; 1119 break; 1120 } 1121 } 1122 1123 return ret; 1124 } 1125 1126 static int vfe_release_wm(struct vfe_device *vfe, u8 wm) 1127 { 1128 if (wm >= ARRAY_SIZE(vfe->wm_output_map)) 1129 return -EINVAL; 1130 1131 vfe->wm_output_map[wm] = VFE_LINE_NONE; 1132 1133 return 0; 1134 } 1135 1136 static void vfe_output_frame_drop(struct vfe_device *vfe, 1137 struct vfe_output *output, 1138 u32 drop_pattern) 1139 { 1140 u8 drop_period; 1141 unsigned int i; 1142 1143 /* We need to toggle update period to be valid on next frame */ 1144 output->drop_update_idx++; 1145 output->drop_update_idx %= 
VFE_FRAME_DROP_UPDATES; 1146 drop_period = VFE_FRAME_DROP_VAL + output->drop_update_idx; 1147 1148 for (i = 0; i < output->wm_num; i++) { 1149 vfe_wm_set_framedrop_period(vfe, output->wm_idx[i], 1150 drop_period); 1151 vfe_wm_set_framedrop_pattern(vfe, output->wm_idx[i], 1152 drop_pattern); 1153 } 1154 vfe_reg_update(vfe, container_of(output, struct vfe_line, output)->id); 1155 } 1156 1157 static struct camss_buffer *vfe_buf_get_pending(struct vfe_output *output) 1158 { 1159 struct camss_buffer *buffer = NULL; 1160 1161 if (!list_empty(&output->pending_bufs)) { 1162 buffer = list_first_entry(&output->pending_bufs, 1163 struct camss_buffer, 1164 queue); 1165 list_del(&buffer->queue); 1166 } 1167 1168 return buffer; 1169 } 1170 1171 /* 1172 * vfe_buf_add_pending - Add output buffer to list of pending 1173 * @output: VFE output 1174 * @buffer: Video buffer 1175 */ 1176 static void vfe_buf_add_pending(struct vfe_output *output, 1177 struct camss_buffer *buffer) 1178 { 1179 INIT_LIST_HEAD(&buffer->queue); 1180 list_add_tail(&buffer->queue, &output->pending_bufs); 1181 } 1182 1183 /* 1184 * vfe_buf_flush_pending - Flush all pending buffers. 1185 * @output: VFE output 1186 * @state: vb2 buffer state 1187 */ 1188 static void vfe_buf_flush_pending(struct vfe_output *output, 1189 enum vb2_buffer_state state) 1190 { 1191 struct camss_buffer *buf; 1192 struct camss_buffer *t; 1193 1194 list_for_each_entry_safe(buf, t, &output->pending_bufs, queue) { 1195 vb2_buffer_done(&buf->vb.vb2_buf, state); 1196 list_del(&buf->queue); 1197 } 1198 } 1199 1200 static void vfe_buf_update_wm_on_next(struct vfe_device *vfe, 1201 struct vfe_output *output) 1202 { 1203 switch (output->state) { 1204 case VFE_OUTPUT_CONTINUOUS: 1205 vfe_output_frame_drop(vfe, output, 3); 1206 break; 1207 case VFE_OUTPUT_SINGLE: 1208 default: 1209 dev_err_ratelimited(to_device(vfe), 1210 "Next buf in wrong state! 
%d\n", 1211 output->state); 1212 break; 1213 } 1214 } 1215 1216 static void vfe_buf_update_wm_on_last(struct vfe_device *vfe, 1217 struct vfe_output *output) 1218 { 1219 switch (output->state) { 1220 case VFE_OUTPUT_CONTINUOUS: 1221 output->state = VFE_OUTPUT_SINGLE; 1222 vfe_output_frame_drop(vfe, output, 1); 1223 break; 1224 case VFE_OUTPUT_SINGLE: 1225 output->state = VFE_OUTPUT_STOPPING; 1226 vfe_output_frame_drop(vfe, output, 0); 1227 break; 1228 default: 1229 dev_err_ratelimited(to_device(vfe), 1230 "Last buff in wrong state! %d\n", 1231 output->state); 1232 break; 1233 } 1234 } 1235 1236 static void vfe_buf_update_wm_on_new(struct vfe_device *vfe, 1237 struct vfe_output *output, 1238 struct camss_buffer *new_buf) 1239 { 1240 int inactive_idx; 1241 1242 switch (output->state) { 1243 case VFE_OUTPUT_SINGLE: 1244 inactive_idx = !output->active_buf; 1245 1246 if (!output->buf[inactive_idx]) { 1247 output->buf[inactive_idx] = new_buf; 1248 1249 if (inactive_idx) 1250 vfe_output_update_pong_addr(vfe, output, 0); 1251 else 1252 vfe_output_update_ping_addr(vfe, output, 0); 1253 1254 vfe_output_frame_drop(vfe, output, 3); 1255 output->state = VFE_OUTPUT_CONTINUOUS; 1256 } else { 1257 vfe_buf_add_pending(output, new_buf); 1258 dev_err_ratelimited(to_device(vfe), 1259 "Inactive buffer is busy\n"); 1260 } 1261 break; 1262 1263 case VFE_OUTPUT_IDLE: 1264 if (!output->buf[0]) { 1265 output->buf[0] = new_buf; 1266 1267 vfe_output_init_addrs(vfe, output, 1); 1268 1269 vfe_output_frame_drop(vfe, output, 1); 1270 output->state = VFE_OUTPUT_SINGLE; 1271 } else { 1272 vfe_buf_add_pending(output, new_buf); 1273 dev_err_ratelimited(to_device(vfe), 1274 "Output idle with buffer set!\n"); 1275 } 1276 break; 1277 1278 case VFE_OUTPUT_CONTINUOUS: 1279 default: 1280 vfe_buf_add_pending(output, new_buf); 1281 break; 1282 } 1283 } 1284 1285 static int vfe_get_output(struct vfe_line *line) 1286 { 1287 struct vfe_device *vfe = to_vfe(line); 1288 struct vfe_output *output; 1289 unsigned 
long flags; 1290 int i; 1291 int wm_idx; 1292 1293 spin_lock_irqsave(&vfe->output_lock, flags); 1294 1295 output = &line->output; 1296 if (output->state != VFE_OUTPUT_OFF) { 1297 dev_err(to_device(vfe), "Output is running\n"); 1298 goto error; 1299 } 1300 output->state = VFE_OUTPUT_RESERVED; 1301 1302 output->active_buf = 0; 1303 1304 for (i = 0; i < output->wm_num; i++) { 1305 wm_idx = vfe_reserve_wm(vfe, line->id); 1306 if (wm_idx < 0) { 1307 dev_err(to_device(vfe), "Can not reserve wm\n"); 1308 goto error_get_wm; 1309 } 1310 output->wm_idx[i] = wm_idx; 1311 } 1312 1313 output->drop_update_idx = 0; 1314 1315 spin_unlock_irqrestore(&vfe->output_lock, flags); 1316 1317 return 0; 1318 1319 error_get_wm: 1320 for (i--; i >= 0; i--) 1321 vfe_release_wm(vfe, output->wm_idx[i]); 1322 output->state = VFE_OUTPUT_OFF; 1323 error: 1324 spin_unlock_irqrestore(&vfe->output_lock, flags); 1325 1326 return -EINVAL; 1327 } 1328 1329 static int vfe_put_output(struct vfe_line *line) 1330 { 1331 struct vfe_device *vfe = to_vfe(line); 1332 struct vfe_output *output = &line->output; 1333 unsigned long flags; 1334 unsigned int i; 1335 1336 spin_lock_irqsave(&vfe->output_lock, flags); 1337 1338 for (i = 0; i < output->wm_num; i++) 1339 vfe_release_wm(vfe, output->wm_idx[i]); 1340 1341 output->state = VFE_OUTPUT_OFF; 1342 1343 spin_unlock_irqrestore(&vfe->output_lock, flags); 1344 return 0; 1345 } 1346 1347 static int vfe_enable_output(struct vfe_line *line) 1348 { 1349 struct vfe_device *vfe = to_vfe(line); 1350 struct vfe_output *output = &line->output; 1351 unsigned long flags; 1352 unsigned int i; 1353 u16 ub_size; 1354 1355 switch (vfe->id) { 1356 case 0: 1357 ub_size = MSM_VFE_VFE0_UB_SIZE_RDI; 1358 break; 1359 case 1: 1360 ub_size = MSM_VFE_VFE1_UB_SIZE_RDI; 1361 break; 1362 default: 1363 return -EINVAL; 1364 } 1365 1366 spin_lock_irqsave(&vfe->output_lock, flags); 1367 1368 vfe->reg_update &= ~VFE_0_REG_UPDATE_line_n(line->id); 1369 1370 if (output->state != VFE_OUTPUT_RESERVED) 
{ 1371 dev_err(to_device(vfe), "Output is not in reserved state %d\n", 1372 output->state); 1373 spin_unlock_irqrestore(&vfe->output_lock, flags); 1374 return -EINVAL; 1375 } 1376 output->state = VFE_OUTPUT_IDLE; 1377 1378 output->buf[0] = vfe_buf_get_pending(output); 1379 output->buf[1] = vfe_buf_get_pending(output); 1380 1381 if (!output->buf[0] && output->buf[1]) { 1382 output->buf[0] = output->buf[1]; 1383 output->buf[1] = NULL; 1384 } 1385 1386 if (output->buf[0]) 1387 output->state = VFE_OUTPUT_SINGLE; 1388 1389 if (output->buf[1]) 1390 output->state = VFE_OUTPUT_CONTINUOUS; 1391 1392 switch (output->state) { 1393 case VFE_OUTPUT_SINGLE: 1394 vfe_output_frame_drop(vfe, output, 1); 1395 break; 1396 case VFE_OUTPUT_CONTINUOUS: 1397 vfe_output_frame_drop(vfe, output, 3); 1398 break; 1399 default: 1400 vfe_output_frame_drop(vfe, output, 0); 1401 break; 1402 } 1403 1404 output->sequence = 0; 1405 output->wait_sof = 0; 1406 output->wait_reg_update = 0; 1407 reinit_completion(&output->sof); 1408 reinit_completion(&output->reg_update); 1409 1410 vfe_output_init_addrs(vfe, output, 0); 1411 1412 if (line->id != VFE_LINE_PIX) { 1413 vfe_set_cgc_override(vfe, output->wm_idx[0], 1); 1414 vfe_enable_irq_wm_line(vfe, output->wm_idx[0], line->id, 1); 1415 vfe_bus_connect_wm_to_rdi(vfe, output->wm_idx[0], line->id); 1416 vfe_wm_set_subsample(vfe, output->wm_idx[0]); 1417 vfe_set_rdi_cid(vfe, line->id, 0); 1418 vfe_wm_set_ub_cfg(vfe, output->wm_idx[0], 1419 (ub_size + 1) * output->wm_idx[0], ub_size); 1420 vfe_wm_frame_based(vfe, output->wm_idx[0], 1); 1421 vfe_wm_enable(vfe, output->wm_idx[0], 1); 1422 vfe_bus_reload_wm(vfe, output->wm_idx[0]); 1423 } else { 1424 ub_size /= output->wm_num; 1425 for (i = 0; i < output->wm_num; i++) { 1426 vfe_set_cgc_override(vfe, output->wm_idx[i], 1); 1427 vfe_wm_set_subsample(vfe, output->wm_idx[i]); 1428 vfe_wm_set_ub_cfg(vfe, output->wm_idx[i], 1429 (ub_size + 1) * output->wm_idx[i], 1430 ub_size); 1431 vfe_wm_line_based(vfe, 
output->wm_idx[i], 1432 &line->video_out.active_fmt.fmt.pix_mp, 1433 i, 1); 1434 vfe_wm_enable(vfe, output->wm_idx[i], 1); 1435 vfe_bus_reload_wm(vfe, output->wm_idx[i]); 1436 } 1437 vfe_enable_irq_pix_line(vfe, 0, line->id, 1); 1438 vfe_set_module_cfg(vfe, 1); 1439 vfe_set_camif_cfg(vfe, line); 1440 vfe_set_xbar_cfg(vfe, output, 1); 1441 vfe_set_demux_cfg(vfe, line); 1442 vfe_set_scale_cfg(vfe, line); 1443 vfe_set_crop_cfg(vfe, line); 1444 vfe_set_clamp_cfg(vfe); 1445 vfe_set_camif_cmd(vfe, VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY); 1446 } 1447 1448 vfe_reg_update(vfe, line->id); 1449 1450 spin_unlock_irqrestore(&vfe->output_lock, flags); 1451 1452 return 0; 1453 } 1454 1455 static int vfe_disable_output(struct vfe_line *line) 1456 { 1457 struct vfe_device *vfe = to_vfe(line); 1458 struct vfe_output *output = &line->output; 1459 unsigned long flags; 1460 unsigned long time; 1461 unsigned int i; 1462 1463 spin_lock_irqsave(&vfe->output_lock, flags); 1464 1465 output->wait_sof = 1; 1466 spin_unlock_irqrestore(&vfe->output_lock, flags); 1467 1468 time = wait_for_completion_timeout(&output->sof, 1469 msecs_to_jiffies(VFE_NEXT_SOF_MS)); 1470 if (!time) 1471 dev_err(to_device(vfe), "VFE sof timeout\n"); 1472 1473 spin_lock_irqsave(&vfe->output_lock, flags); 1474 for (i = 0; i < output->wm_num; i++) 1475 vfe_wm_enable(vfe, output->wm_idx[i], 0); 1476 1477 vfe_reg_update(vfe, line->id); 1478 output->wait_reg_update = 1; 1479 spin_unlock_irqrestore(&vfe->output_lock, flags); 1480 1481 time = wait_for_completion_timeout(&output->reg_update, 1482 msecs_to_jiffies(VFE_NEXT_SOF_MS)); 1483 if (!time) 1484 dev_err(to_device(vfe), "VFE reg update timeout\n"); 1485 1486 spin_lock_irqsave(&vfe->output_lock, flags); 1487 1488 if (line->id != VFE_LINE_PIX) { 1489 vfe_wm_frame_based(vfe, output->wm_idx[0], 0); 1490 vfe_bus_disconnect_wm_from_rdi(vfe, output->wm_idx[0], line->id); 1491 vfe_enable_irq_wm_line(vfe, output->wm_idx[0], line->id, 0); 1492 vfe_set_cgc_override(vfe, 
output->wm_idx[0], 0); 1493 spin_unlock_irqrestore(&vfe->output_lock, flags); 1494 } else { 1495 for (i = 0; i < output->wm_num; i++) { 1496 vfe_wm_line_based(vfe, output->wm_idx[i], NULL, i, 0); 1497 vfe_set_cgc_override(vfe, output->wm_idx[i], 0); 1498 } 1499 1500 vfe_enable_irq_pix_line(vfe, 0, line->id, 0); 1501 vfe_set_module_cfg(vfe, 0); 1502 vfe_set_xbar_cfg(vfe, output, 0); 1503 1504 vfe_set_camif_cmd(vfe, VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY); 1505 spin_unlock_irqrestore(&vfe->output_lock, flags); 1506 1507 vfe_camif_wait_for_stop(vfe); 1508 } 1509 1510 return 0; 1511 } 1512 1513 /* 1514 * vfe_enable - Enable streaming on VFE line 1515 * @line: VFE line 1516 * 1517 * Return 0 on success or a negative error code otherwise 1518 */ 1519 static int vfe_enable(struct vfe_line *line) 1520 { 1521 struct vfe_device *vfe = to_vfe(line); 1522 int ret; 1523 1524 mutex_lock(&vfe->stream_lock); 1525 1526 if (!vfe->stream_count) { 1527 vfe_enable_irq_common(vfe); 1528 1529 vfe_bus_enable_wr_if(vfe, 1); 1530 1531 vfe_set_qos(vfe); 1532 } 1533 1534 vfe->stream_count++; 1535 1536 mutex_unlock(&vfe->stream_lock); 1537 1538 ret = vfe_get_output(line); 1539 if (ret < 0) 1540 goto error_get_output; 1541 1542 ret = vfe_enable_output(line); 1543 if (ret < 0) 1544 goto error_enable_output; 1545 1546 vfe->was_streaming = 1; 1547 1548 return 0; 1549 1550 1551 error_enable_output: 1552 vfe_put_output(line); 1553 1554 error_get_output: 1555 mutex_lock(&vfe->stream_lock); 1556 1557 if (vfe->stream_count == 1) 1558 vfe_bus_enable_wr_if(vfe, 0); 1559 1560 vfe->stream_count--; 1561 1562 mutex_unlock(&vfe->stream_lock); 1563 1564 return ret; 1565 } 1566 1567 /* 1568 * vfe_disable - Disable streaming on VFE line 1569 * @line: VFE line 1570 * 1571 * Return 0 on success or a negative error code otherwise 1572 */ 1573 static int vfe_disable(struct vfe_line *line) 1574 { 1575 struct vfe_device *vfe = to_vfe(line); 1576 1577 vfe_disable_output(line); 1578 1579 vfe_put_output(line); 1580 1581 
mutex_lock(&vfe->stream_lock); 1582 1583 if (vfe->stream_count == 1) 1584 vfe_bus_enable_wr_if(vfe, 0); 1585 1586 vfe->stream_count--; 1587 1588 mutex_unlock(&vfe->stream_lock); 1589 1590 return 0; 1591 } 1592 1593 /* 1594 * vfe_isr_sof - Process start of frame interrupt 1595 * @vfe: VFE Device 1596 * @line_id: VFE line 1597 */ 1598 static void vfe_isr_sof(struct vfe_device *vfe, enum vfe_line_id line_id) 1599 { 1600 struct vfe_output *output; 1601 unsigned long flags; 1602 1603 spin_lock_irqsave(&vfe->output_lock, flags); 1604 output = &vfe->line[line_id].output; 1605 if (output->wait_sof) { 1606 output->wait_sof = 0; 1607 complete(&output->sof); 1608 } 1609 spin_unlock_irqrestore(&vfe->output_lock, flags); 1610 } 1611 1612 /* 1613 * vfe_isr_reg_update - Process reg update interrupt 1614 * @vfe: VFE Device 1615 * @line_id: VFE line 1616 */ 1617 static void vfe_isr_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id) 1618 { 1619 struct vfe_output *output; 1620 unsigned long flags; 1621 1622 spin_lock_irqsave(&vfe->output_lock, flags); 1623 vfe->reg_update &= ~VFE_0_REG_UPDATE_line_n(line_id); 1624 1625 output = &vfe->line[line_id].output; 1626 1627 if (output->wait_reg_update) { 1628 output->wait_reg_update = 0; 1629 complete(&output->reg_update); 1630 spin_unlock_irqrestore(&vfe->output_lock, flags); 1631 return; 1632 } 1633 1634 if (output->state == VFE_OUTPUT_STOPPING) { 1635 /* Release last buffer when hw is idle */ 1636 if (output->last_buffer) { 1637 vb2_buffer_done(&output->last_buffer->vb.vb2_buf, 1638 VB2_BUF_STATE_DONE); 1639 output->last_buffer = NULL; 1640 } 1641 output->state = VFE_OUTPUT_IDLE; 1642 1643 /* Buffers received in stopping state are queued in */ 1644 /* dma pending queue, start next capture here */ 1645 1646 output->buf[0] = vfe_buf_get_pending(output); 1647 output->buf[1] = vfe_buf_get_pending(output); 1648 1649 if (!output->buf[0] && output->buf[1]) { 1650 output->buf[0] = output->buf[1]; 1651 output->buf[1] = NULL; 1652 } 1653 
1654 if (output->buf[0]) 1655 output->state = VFE_OUTPUT_SINGLE; 1656 1657 if (output->buf[1]) 1658 output->state = VFE_OUTPUT_CONTINUOUS; 1659 1660 switch (output->state) { 1661 case VFE_OUTPUT_SINGLE: 1662 vfe_output_frame_drop(vfe, output, 2); 1663 break; 1664 case VFE_OUTPUT_CONTINUOUS: 1665 vfe_output_frame_drop(vfe, output, 3); 1666 break; 1667 default: 1668 vfe_output_frame_drop(vfe, output, 0); 1669 break; 1670 } 1671 1672 vfe_output_init_addrs(vfe, output, 1); 1673 } 1674 1675 spin_unlock_irqrestore(&vfe->output_lock, flags); 1676 } 1677 1678 /* 1679 * vfe_isr_wm_done - Process write master done interrupt 1680 * @vfe: VFE Device 1681 * @wm: Write master id 1682 */ 1683 static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm) 1684 { 1685 struct camss_buffer *ready_buf; 1686 struct vfe_output *output; 1687 dma_addr_t *new_addr; 1688 unsigned long flags; 1689 u32 active_index; 1690 u64 ts = ktime_get_ns(); 1691 unsigned int i; 1692 1693 active_index = vfe_wm_get_ping_pong_status(vfe, wm); 1694 1695 spin_lock_irqsave(&vfe->output_lock, flags); 1696 1697 if (vfe->wm_output_map[wm] == VFE_LINE_NONE) { 1698 dev_err_ratelimited(to_device(vfe), 1699 "Received wm done for unmapped index\n"); 1700 goto out_unlock; 1701 } 1702 output = &vfe->line[vfe->wm_output_map[wm]].output; 1703 1704 if (output->active_buf == active_index) { 1705 dev_err_ratelimited(to_device(vfe), 1706 "Active buffer mismatch!\n"); 1707 goto out_unlock; 1708 } 1709 output->active_buf = active_index; 1710 1711 ready_buf = output->buf[!active_index]; 1712 if (!ready_buf) { 1713 dev_err_ratelimited(to_device(vfe), 1714 "Missing ready buf %d %d!\n", 1715 !active_index, output->state); 1716 goto out_unlock; 1717 } 1718 1719 ready_buf->vb.vb2_buf.timestamp = ts; 1720 ready_buf->vb.sequence = output->sequence++; 1721 1722 /* Get next buffer */ 1723 output->buf[!active_index] = vfe_buf_get_pending(output); 1724 if (!output->buf[!active_index]) { 1725 /* No next buffer - set same address */ 1726 
new_addr = ready_buf->addr; 1727 vfe_buf_update_wm_on_last(vfe, output); 1728 } else { 1729 new_addr = output->buf[!active_index]->addr; 1730 vfe_buf_update_wm_on_next(vfe, output); 1731 } 1732 1733 if (active_index) 1734 for (i = 0; i < output->wm_num; i++) 1735 vfe_wm_set_ping_addr(vfe, output->wm_idx[i], 1736 new_addr[i]); 1737 else 1738 for (i = 0; i < output->wm_num; i++) 1739 vfe_wm_set_pong_addr(vfe, output->wm_idx[i], 1740 new_addr[i]); 1741 1742 spin_unlock_irqrestore(&vfe->output_lock, flags); 1743 1744 if (output->state == VFE_OUTPUT_STOPPING) 1745 output->last_buffer = ready_buf; 1746 else 1747 vb2_buffer_done(&ready_buf->vb.vb2_buf, VB2_BUF_STATE_DONE); 1748 1749 return; 1750 1751 out_unlock: 1752 spin_unlock_irqrestore(&vfe->output_lock, flags); 1753 } 1754 1755 /* 1756 * vfe_isr_wm_done - Process composite image done interrupt 1757 * @vfe: VFE Device 1758 * @comp: Composite image id 1759 */ 1760 static void vfe_isr_comp_done(struct vfe_device *vfe, u8 comp) 1761 { 1762 unsigned int i; 1763 1764 for (i = 0; i < ARRAY_SIZE(vfe->wm_output_map); i++) 1765 if (vfe->wm_output_map[i] == VFE_LINE_PIX) { 1766 vfe_isr_wm_done(vfe, i); 1767 break; 1768 } 1769 } 1770 1771 /* 1772 * vfe_isr - ISPIF module interrupt handler 1773 * @irq: Interrupt line 1774 * @dev: VFE device 1775 * 1776 * Return IRQ_HANDLED on success 1777 */ 1778 static irqreturn_t vfe_isr(int irq, void *dev) 1779 { 1780 struct vfe_device *vfe = dev; 1781 u32 value0, value1; 1782 u32 violation; 1783 int i, j; 1784 1785 value0 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_0); 1786 value1 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_1); 1787 1788 writel_relaxed(value0, vfe->base + VFE_0_IRQ_CLEAR_0); 1789 writel_relaxed(value1, vfe->base + VFE_0_IRQ_CLEAR_1); 1790 1791 wmb(); 1792 writel_relaxed(VFE_0_IRQ_CMD_GLOBAL_CLEAR, vfe->base + VFE_0_IRQ_CMD); 1793 1794 if (value0 & VFE_0_IRQ_STATUS_0_RESET_ACK) 1795 complete(&vfe->reset_complete); 1796 1797 if (value1 & VFE_0_IRQ_STATUS_1_VIOLATION) { 1798 
violation = readl_relaxed(vfe->base + VFE_0_VIOLATION_STATUS); 1799 dev_err_ratelimited(to_device(vfe), 1800 "VFE: violation = 0x%08x\n", violation); 1801 } 1802 1803 if (value1 & VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK) { 1804 complete(&vfe->halt_complete); 1805 writel_relaxed(0x0, vfe->base + VFE_0_BUS_BDG_CMD); 1806 } 1807 1808 for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++) 1809 if (value0 & VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(i)) 1810 vfe_isr_reg_update(vfe, i); 1811 1812 if (value0 & VFE_0_IRQ_STATUS_0_CAMIF_SOF) 1813 vfe_isr_sof(vfe, VFE_LINE_PIX); 1814 1815 for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++) 1816 if (value1 & VFE_0_IRQ_STATUS_1_RDIn_SOF(i)) 1817 vfe_isr_sof(vfe, i); 1818 1819 for (i = 0; i < MSM_VFE_COMPOSITE_IRQ_NUM; i++) 1820 if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(i)) { 1821 vfe_isr_comp_done(vfe, i); 1822 for (j = 0; j < ARRAY_SIZE(vfe->wm_output_map); j++) 1823 if (vfe->wm_output_map[j] == VFE_LINE_PIX) 1824 value0 &= ~VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(j); 1825 } 1826 1827 for (i = 0; i < MSM_VFE_IMAGE_MASTERS_NUM; i++) 1828 if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(i)) 1829 vfe_isr_wm_done(vfe, i); 1830 1831 return IRQ_HANDLED; 1832 } 1833 1834 /* 1835 * vfe_set_clock_rates - Calculate and set clock rates on VFE module 1836 * @vfe: VFE device 1837 * 1838 * Return 0 on success or a negative error code otherwise 1839 */ 1840 static int vfe_set_clock_rates(struct vfe_device *vfe) 1841 { 1842 struct device *dev = to_device(vfe); 1843 u32 pixel_clock[MSM_VFE_LINE_NUM]; 1844 int i, j; 1845 int ret; 1846 1847 for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++) { 1848 ret = camss_get_pixel_clock(&vfe->line[i].subdev.entity, 1849 &pixel_clock[i]); 1850 if (ret) 1851 pixel_clock[i] = 0; 1852 } 1853 1854 for (i = 0; i < vfe->nclocks; i++) { 1855 struct camss_clock *clock = &vfe->clock[i]; 1856 1857 if (!strcmp(clock->name, "camss_vfe_vfe")) { 1858 u64 min_rate = 0; 1859 long rate; 1860 1861 for (j = VFE_LINE_RDI0; j 
<= VFE_LINE_PIX; j++) { 1862 u32 tmp; 1863 u8 bpp; 1864 1865 if (j == VFE_LINE_PIX) { 1866 tmp = pixel_clock[j]; 1867 } else { 1868 bpp = vfe_get_bpp(vfe->line[j]. 1869 fmt[MSM_VFE_PAD_SINK].code); 1870 tmp = pixel_clock[j] * bpp / 64; 1871 } 1872 1873 if (min_rate < tmp) 1874 min_rate = tmp; 1875 } 1876 1877 camss_add_clock_margin(&min_rate); 1878 1879 for (j = 0; j < clock->nfreqs; j++) 1880 if (min_rate < clock->freq[j]) 1881 break; 1882 1883 if (j == clock->nfreqs) { 1884 dev_err(dev, 1885 "Pixel clock is too high for VFE"); 1886 return -EINVAL; 1887 } 1888 1889 /* if sensor pixel clock is not available */ 1890 /* set highest possible VFE clock rate */ 1891 if (min_rate == 0) 1892 j = clock->nfreqs - 1; 1893 1894 rate = clk_round_rate(clock->clk, clock->freq[j]); 1895 if (rate < 0) { 1896 dev_err(dev, "clk round rate failed: %ld\n", 1897 rate); 1898 return -EINVAL; 1899 } 1900 1901 ret = clk_set_rate(clock->clk, rate); 1902 if (ret < 0) { 1903 dev_err(dev, "clk set rate failed: %d\n", ret); 1904 return ret; 1905 } 1906 } 1907 } 1908 1909 return 0; 1910 } 1911 1912 /* 1913 * vfe_check_clock_rates - Check current clock rates on VFE module 1914 * @vfe: VFE device 1915 * 1916 * Return 0 if current clock rates are suitable for a new pipeline 1917 * or a negative error code otherwise 1918 */ 1919 static int vfe_check_clock_rates(struct vfe_device *vfe) 1920 { 1921 u32 pixel_clock[MSM_VFE_LINE_NUM]; 1922 int i, j; 1923 int ret; 1924 1925 for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++) { 1926 ret = camss_get_pixel_clock(&vfe->line[i].subdev.entity, 1927 &pixel_clock[i]); 1928 if (ret) 1929 pixel_clock[i] = 0; 1930 } 1931 1932 for (i = 0; i < vfe->nclocks; i++) { 1933 struct camss_clock *clock = &vfe->clock[i]; 1934 1935 if (!strcmp(clock->name, "camss_vfe_vfe")) { 1936 u64 min_rate = 0; 1937 unsigned long rate; 1938 1939 for (j = VFE_LINE_RDI0; j <= VFE_LINE_PIX; j++) { 1940 u32 tmp; 1941 u8 bpp; 1942 1943 if (j == VFE_LINE_PIX) { 1944 tmp = pixel_clock[j]; 1945 } else 
{ 1946 bpp = vfe_get_bpp(vfe->line[j]. 1947 fmt[MSM_VFE_PAD_SINK].code); 1948 tmp = pixel_clock[j] * bpp / 64; 1949 } 1950 1951 if (min_rate < tmp) 1952 min_rate = tmp; 1953 } 1954 1955 camss_add_clock_margin(&min_rate); 1956 1957 rate = clk_get_rate(clock->clk); 1958 if (rate < min_rate) 1959 return -EBUSY; 1960 } 1961 } 1962 1963 return 0; 1964 } 1965 1966 /* 1967 * vfe_get - Power up and reset VFE module 1968 * @vfe: VFE Device 1969 * 1970 * Return 0 on success or a negative error code otherwise 1971 */ 1972 static int vfe_get(struct vfe_device *vfe) 1973 { 1974 int ret; 1975 1976 mutex_lock(&vfe->power_lock); 1977 1978 if (vfe->power_count == 0) { 1979 ret = vfe_set_clock_rates(vfe); 1980 if (ret < 0) 1981 goto error_clocks; 1982 1983 ret = camss_enable_clocks(vfe->nclocks, vfe->clock, 1984 to_device(vfe)); 1985 if (ret < 0) 1986 goto error_clocks; 1987 1988 ret = vfe_reset(vfe); 1989 if (ret < 0) 1990 goto error_reset; 1991 1992 vfe_reset_output_maps(vfe); 1993 1994 vfe_init_outputs(vfe); 1995 } else { 1996 ret = vfe_check_clock_rates(vfe); 1997 if (ret < 0) 1998 goto error_clocks; 1999 } 2000 vfe->power_count++; 2001 2002 mutex_unlock(&vfe->power_lock); 2003 2004 return 0; 2005 2006 error_reset: 2007 camss_disable_clocks(vfe->nclocks, vfe->clock); 2008 2009 error_clocks: 2010 mutex_unlock(&vfe->power_lock); 2011 2012 return ret; 2013 } 2014 2015 /* 2016 * vfe_put - Power down VFE module 2017 * @vfe: VFE Device 2018 */ 2019 static void vfe_put(struct vfe_device *vfe) 2020 { 2021 mutex_lock(&vfe->power_lock); 2022 2023 if (vfe->power_count == 0) { 2024 dev_err(to_device(vfe), "vfe power off on power_count == 0\n"); 2025 goto exit; 2026 } else if (vfe->power_count == 1) { 2027 if (vfe->was_streaming) { 2028 vfe->was_streaming = 0; 2029 vfe_halt(vfe); 2030 } 2031 camss_disable_clocks(vfe->nclocks, vfe->clock); 2032 } 2033 2034 vfe->power_count--; 2035 2036 exit: 2037 mutex_unlock(&vfe->power_lock); 2038 } 2039 2040 /* 2041 * vfe_video_pad_to_line - Get pointer to 
VFE line by media pad 2042 * @pad: Media pad 2043 * 2044 * Return pointer to vfe line structure 2045 */ 2046 static struct vfe_line *vfe_video_pad_to_line(struct media_pad *pad) 2047 { 2048 struct media_pad *vfe_pad; 2049 struct v4l2_subdev *subdev; 2050 2051 vfe_pad = media_entity_remote_pad(pad); 2052 if (vfe_pad == NULL) 2053 return NULL; 2054 2055 subdev = media_entity_to_v4l2_subdev(vfe_pad->entity); 2056 2057 return container_of(subdev, struct vfe_line, subdev); 2058 } 2059 2060 /* 2061 * vfe_queue_buffer - Add empty buffer 2062 * @vid: Video device structure 2063 * @buf: Buffer to be enqueued 2064 * 2065 * Add an empty buffer - depending on the current number of buffers it will be 2066 * put in pending buffer queue or directly given to the hardware to be filled. 2067 * 2068 * Return 0 on success or a negative error code otherwise 2069 */ 2070 static int vfe_queue_buffer(struct camss_video *vid, 2071 struct camss_buffer *buf) 2072 { 2073 struct vfe_device *vfe = &vid->camss->vfe; 2074 struct vfe_line *line; 2075 struct vfe_output *output; 2076 unsigned long flags; 2077 2078 line = vfe_video_pad_to_line(&vid->pad); 2079 if (!line) { 2080 dev_err(to_device(vfe), "Can not queue buffer\n"); 2081 return -1; 2082 } 2083 output = &line->output; 2084 2085 spin_lock_irqsave(&vfe->output_lock, flags); 2086 2087 vfe_buf_update_wm_on_new(vfe, output, buf); 2088 2089 spin_unlock_irqrestore(&vfe->output_lock, flags); 2090 2091 return 0; 2092 } 2093 2094 /* 2095 * vfe_flush_buffers - Return all vb2 buffers 2096 * @vid: Video device structure 2097 * @state: vb2 buffer state of the returned buffers 2098 * 2099 * Return all buffers to vb2. This includes queued pending buffers (still 2100 * unused) and any buffers given to the hardware but again still not used. 
2101 * 2102 * Return 0 on success or a negative error code otherwise 2103 */ 2104 static int vfe_flush_buffers(struct camss_video *vid, 2105 enum vb2_buffer_state state) 2106 { 2107 struct vfe_device *vfe = &vid->camss->vfe; 2108 struct vfe_line *line; 2109 struct vfe_output *output; 2110 unsigned long flags; 2111 2112 line = vfe_video_pad_to_line(&vid->pad); 2113 if (!line) { 2114 dev_err(to_device(vfe), "Can not flush buffers\n"); 2115 return -1; 2116 } 2117 output = &line->output; 2118 2119 spin_lock_irqsave(&vfe->output_lock, flags); 2120 2121 vfe_buf_flush_pending(output, state); 2122 2123 if (output->buf[0]) 2124 vb2_buffer_done(&output->buf[0]->vb.vb2_buf, state); 2125 2126 if (output->buf[1]) 2127 vb2_buffer_done(&output->buf[1]->vb.vb2_buf, state); 2128 2129 if (output->last_buffer) { 2130 vb2_buffer_done(&output->last_buffer->vb.vb2_buf, state); 2131 output->last_buffer = NULL; 2132 } 2133 2134 spin_unlock_irqrestore(&vfe->output_lock, flags); 2135 2136 return 0; 2137 } 2138 2139 /* 2140 * vfe_set_power - Power on/off VFE module 2141 * @sd: VFE V4L2 subdevice 2142 * @on: Requested power state 2143 * 2144 * Return 0 on success or a negative error code otherwise 2145 */ 2146 static int vfe_set_power(struct v4l2_subdev *sd, int on) 2147 { 2148 struct vfe_line *line = v4l2_get_subdevdata(sd); 2149 struct vfe_device *vfe = to_vfe(line); 2150 int ret; 2151 2152 if (on) { 2153 u32 hw_version; 2154 2155 ret = vfe_get(vfe); 2156 if (ret < 0) 2157 return ret; 2158 2159 hw_version = readl_relaxed(vfe->base + VFE_0_HW_VERSION); 2160 dev_dbg(to_device(vfe), 2161 "VFE HW Version = 0x%08x\n", hw_version); 2162 } else { 2163 vfe_put(vfe); 2164 } 2165 2166 return 0; 2167 } 2168 2169 /* 2170 * vfe_set_stream - Enable/disable streaming on VFE module 2171 * @sd: VFE V4L2 subdevice 2172 * @enable: Requested streaming state 2173 * 2174 * Main configuration of VFE module is triggered here. 
2175 * 2176 * Return 0 on success or a negative error code otherwise 2177 */ 2178 static int vfe_set_stream(struct v4l2_subdev *sd, int enable) 2179 { 2180 struct vfe_line *line = v4l2_get_subdevdata(sd); 2181 struct vfe_device *vfe = to_vfe(line); 2182 int ret; 2183 2184 if (enable) { 2185 ret = vfe_enable(line); 2186 if (ret < 0) 2187 dev_err(to_device(vfe), 2188 "Failed to enable vfe outputs\n"); 2189 } else { 2190 ret = vfe_disable(line); 2191 if (ret < 0) 2192 dev_err(to_device(vfe), 2193 "Failed to disable vfe outputs\n"); 2194 } 2195 2196 return ret; 2197 } 2198 2199 /* 2200 * __vfe_get_format - Get pointer to format structure 2201 * @line: VFE line 2202 * @cfg: V4L2 subdev pad configuration 2203 * @pad: pad from which format is requested 2204 * @which: TRY or ACTIVE format 2205 * 2206 * Return pointer to TRY or ACTIVE format structure 2207 */ 2208 static struct v4l2_mbus_framefmt * 2209 __vfe_get_format(struct vfe_line *line, 2210 struct v4l2_subdev_pad_config *cfg, 2211 unsigned int pad, 2212 enum v4l2_subdev_format_whence which) 2213 { 2214 if (which == V4L2_SUBDEV_FORMAT_TRY) 2215 return v4l2_subdev_get_try_format(&line->subdev, cfg, pad); 2216 2217 return &line->fmt[pad]; 2218 } 2219 2220 /* 2221 * __vfe_get_compose - Get pointer to compose selection structure 2222 * @line: VFE line 2223 * @cfg: V4L2 subdev pad configuration 2224 * @which: TRY or ACTIVE format 2225 * 2226 * Return pointer to TRY or ACTIVE compose rectangle structure 2227 */ 2228 static struct v4l2_rect * 2229 __vfe_get_compose(struct vfe_line *line, 2230 struct v4l2_subdev_pad_config *cfg, 2231 enum v4l2_subdev_format_whence which) 2232 { 2233 if (which == V4L2_SUBDEV_FORMAT_TRY) 2234 return v4l2_subdev_get_try_compose(&line->subdev, cfg, 2235 MSM_VFE_PAD_SINK); 2236 2237 return &line->compose; 2238 } 2239 2240 /* 2241 * __vfe_get_crop - Get pointer to crop selection structure 2242 * @line: VFE line 2243 * @cfg: V4L2 subdev pad configuration 2244 * @which: TRY or ACTIVE format 2245 * 
 * Return pointer to TRY or ACTIVE crop rectangle structure
 */
static struct v4l2_rect *
__vfe_get_crop(struct vfe_line *line,
	       struct v4l2_subdev_pad_config *cfg,
	       enum v4l2_subdev_format_whence which)
{
	/* TRY rectangles live in the per-filehandle pad config */
	if (which == V4L2_SUBDEV_FORMAT_TRY)
		return v4l2_subdev_get_try_crop(&line->subdev, cfg,
						MSM_VFE_PAD_SRC);

	/* ACTIVE rectangle is stored on the line itself */
	return &line->crop;
}

/*
 * vfe_try_format - Handle try format by pad subdev method
 * @line: VFE line
 * @cfg: V4L2 subdev pad configuration
 * @pad: pad on which format is requested
 * @fmt: pointer to v4l2 format structure
 * @which: wanted subdev format
 *
 * Adjust @fmt in place to the closest format the pad supports.
 */
static void vfe_try_format(struct vfe_line *line,
			   struct v4l2_subdev_pad_config *cfg,
			   unsigned int pad,
			   struct v4l2_mbus_framefmt *fmt,
			   enum v4l2_subdev_format_whence which)
{
	unsigned int i;
	u32 code;

	switch (pad) {
	case MSM_VFE_PAD_SINK:
		/* Set format on sink pad */

		for (i = 0; i < ARRAY_SIZE(vfe_formats); i++)
			if (fmt->code == vfe_formats[i].code)
				break;

		/* If not found, use UYVY as default */
		if (i >= ARRAY_SIZE(vfe_formats))
			fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;

		/* 8191 is the maximum dimension accepted here — presumably
		 * the hardware line/frame limit; TODO confirm against HW docs */
		fmt->width = clamp_t(u32, fmt->width, 1, 8191);
		fmt->height = clamp_t(u32, fmt->height, 1, 8191);

		fmt->field = V4L2_FIELD_NONE;
		fmt->colorspace = V4L2_COLORSPACE_SRGB;

		break;

	case MSM_VFE_PAD_SRC:
		/* Set and return a format same as sink pad */

		/* remember the caller's requested code before overwriting */
		code = fmt->code;

		*fmt = *__vfe_get_format(line, cfg, MSM_VFE_PAD_SINK,
					 which);

		if (line->id == VFE_LINE_PIX) {
			struct v4l2_rect *rect;

			/* PIX source resolution follows the crop rectangle */
			rect = __vfe_get_crop(line, cfg, which);

			fmt->width = rect->width;
			fmt->height = rect->height;

			/*
			 * For each 2X8 sink code, the source may keep it or
			 * switch to the matching 1_5X8 variant — but only if
			 * that exact variant was what the caller requested.
			 * Note: the default label is deliberately grouped
			 * with the UYVY case, so unknown codes fall back to
			 * the UYVY pair.
			 */
			switch (fmt->code) {
			case MEDIA_BUS_FMT_YUYV8_2X8:
				if (code == MEDIA_BUS_FMT_YUYV8_1_5X8)
					fmt->code = MEDIA_BUS_FMT_YUYV8_1_5X8;
				else
					fmt->code = MEDIA_BUS_FMT_YUYV8_2X8;
				break;
			case MEDIA_BUS_FMT_YVYU8_2X8:
				if (code == MEDIA_BUS_FMT_YVYU8_1_5X8)
					fmt->code = MEDIA_BUS_FMT_YVYU8_1_5X8;
				else
					fmt->code = MEDIA_BUS_FMT_YVYU8_2X8;
				break;
			case MEDIA_BUS_FMT_UYVY8_2X8:
			default:
				if (code == MEDIA_BUS_FMT_UYVY8_1_5X8)
					fmt->code = MEDIA_BUS_FMT_UYVY8_1_5X8;
				else
					fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
				break;
			case MEDIA_BUS_FMT_VYUY8_2X8:
				if (code == MEDIA_BUS_FMT_VYUY8_1_5X8)
					fmt->code = MEDIA_BUS_FMT_VYUY8_1_5X8;
				else
					fmt->code = MEDIA_BUS_FMT_VYUY8_2X8;
				break;
			}
		}

		break;
	}

	fmt->colorspace = V4L2_COLORSPACE_SRGB;
}

/*
 * vfe_try_compose - Handle try compose selection by pad subdev method
 * @line: VFE line
 * @cfg: V4L2 subdev pad configuration
 * @rect: pointer to v4l2 rect structure
 * @which: wanted subdev format
 *
 * Clamp @rect in place to a compose rectangle the scaler can produce
 * from the current sink format.
 */
static void vfe_try_compose(struct vfe_line *line,
			    struct v4l2_subdev_pad_config *cfg,
			    struct v4l2_rect *rect,
			    enum v4l2_subdev_format_whence which)
{
	struct v4l2_mbus_framefmt *fmt;

	fmt = __vfe_get_format(line, cfg, MSM_VFE_PAD_SINK, which);

	/* compose cannot be larger than the sink format (no upscaling) */
	if (rect->width > fmt->width)
		rect->width = fmt->width;

	if (rect->height > fmt->height)
		rect->height = fmt->height;

	/* enforce the maximum downscale ratio in each dimension */
	if (fmt->width > rect->width * SCALER_RATIO_MAX)
		rect->width = (fmt->width + SCALER_RATIO_MAX - 1) /
							SCALER_RATIO_MAX;

	/* force even width — presumably a scaler alignment requirement;
	 * TODO confirm */
	rect->width &= ~0x1;

	if (fmt->height > rect->height * SCALER_RATIO_MAX)
		rect->height = (fmt->height + SCALER_RATIO_MAX - 1) /
							SCALER_RATIO_MAX;

	/* minimum output size accepted here is 16x4 */
	if (rect->width < 16)
		rect->width = 16;

	if (rect->height < 4)
		rect->height = 4;
}

/*
 * vfe_try_crop - Handle try crop selection by pad subdev method
 * @line: VFE line
 * @cfg: V4L2 subdev pad configuration
 * @rect:
pointer to v4l2 rect structure
 * @which: wanted subdev format
 *
 * Clamp @rect in place so the crop stays inside the compose rectangle
 * and satisfies the write-master alignment constraints.
 */
static void vfe_try_crop(struct vfe_line *line,
			 struct v4l2_subdev_pad_config *cfg,
			 struct v4l2_rect *rect,
			 enum v4l2_subdev_format_whence which)
{
	struct v4l2_rect *compose;

	compose = __vfe_get_compose(line, cfg, which);

	/* crop must fit entirely within the compose rectangle */
	if (rect->width > compose->width)
		rect->width = compose->width;

	if (rect->width + rect->left > compose->width)
		rect->left = compose->width - rect->width;

	if (rect->height > compose->height)
		rect->height = compose->height;

	if (rect->height + rect->top > compose->height)
		rect->top = compose->height - rect->top > compose->height ?
			    compose->height - rect->height :
			    compose->height - rect->height;

	/* wm in line based mode writes multiple of 16 horizontally */
	/* shift left by half the truncated amount to keep the crop centred */
	rect->left += (rect->width & 0xf) >> 1;
	rect->width &= ~0xf;

	/* enforce the 16x4 minimum; reset offsets when forced up */
	if (rect->width < 16) {
		rect->left = 0;
		rect->width = 16;
	}

	if (rect->height < 4) {
		rect->top = 0;
		rect->height = 4;
	}
}

/*
 * vfe_enum_mbus_code - Handle pixel format enumeration
 * @sd: VFE V4L2 subdevice
 * @cfg: V4L2 subdev pad configuration
 * @code: pointer to v4l2_subdev_mbus_code_enum structure
 *
 * return -EINVAL or zero on success
 */
static int vfe_enum_mbus_code(struct v4l2_subdev *sd,
			      struct v4l2_subdev_pad_config *cfg,
			      struct v4l2_subdev_mbus_code_enum *code)
{
	struct vfe_line *line = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *format;

	if (code->pad == MSM_VFE_PAD_SINK) {
		/* sink enumerates every entry of the vfe_formats table */
		if (code->index >= ARRAY_SIZE(vfe_formats))
			return -EINVAL;

		code->code = vfe_formats[code->index].code;
	} else {
		/* source exposes exactly one code: whatever the sink has */
		if (code->index > 0)
			return -EINVAL;

		format = __vfe_get_format(line, cfg, MSM_VFE_PAD_SINK,
					  code->which);

		code->code = format->code;
	}

	return 0;
}

/*
 * vfe_enum_frame_size - Handle frame size
enumeration
 * @sd: VFE V4L2 subdevice
 * @cfg: V4L2 subdev pad configuration
 * @fse: pointer to v4l2_subdev_frame_size_enum structure
 *
 * Return -EINVAL or zero on success
 */
static int vfe_enum_frame_size(struct v4l2_subdev *sd,
			       struct v4l2_subdev_pad_config *cfg,
			       struct v4l2_subdev_frame_size_enum *fse)
{
	struct vfe_line *line = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt format;

	if (fse->index != 0)
		return -EINVAL;

	/* probe the minimum: ask try_format to fix up a 1x1 request */
	format.code = fse->code;
	format.width = 1;
	format.height = 1;
	vfe_try_format(line, cfg, fse->pad, &format, fse->which);
	fse->min_width = format.width;
	fse->min_height = format.height;

	/* if try_format changed the code, the requested code is unsupported */
	if (format.code != fse->code)
		return -EINVAL;

	/* probe the maximum: -1 wraps to the largest u32, which
	 * try_format clamps down to the supported limit */
	format.code = fse->code;
	format.width = -1;
	format.height = -1;
	vfe_try_format(line, cfg, fse->pad, &format, fse->which);
	fse->max_width = format.width;
	fse->max_height = format.height;

	return 0;
}

/*
 * vfe_get_format - Handle get format by pads subdev method
 * @sd: VFE V4L2 subdevice
 * @cfg: V4L2 subdev pad configuration
 * @fmt: pointer to v4l2 subdev format structure
 *
 * Return -EINVAL or zero on success
 */
static int vfe_get_format(struct v4l2_subdev *sd,
			  struct v4l2_subdev_pad_config *cfg,
			  struct v4l2_subdev_format *fmt)
{
	struct vfe_line *line = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *format;

	format = __vfe_get_format(line, cfg, fmt->pad, fmt->which);
	if (format == NULL)
		return -EINVAL;

	fmt->format = *format;

	return 0;
}

/* Forward declaration: vfe_set_format and vfe_set_selection call into
 * each other to propagate format/selection changes. */
static int vfe_set_selection(struct v4l2_subdev *sd,
			     struct v4l2_subdev_pad_config *cfg,
			     struct v4l2_subdev_selection *sel);

/*
 * vfe_set_format - Handle set format by pads subdev method
 * @sd: VFE V4L2 subdevice
 * @cfg: V4L2 subdev pad configuration
 * @fmt: pointer to v4l2 subdev format structure
 *
 * Return -EINVAL or zero on success
 */
static int vfe_set_format(struct v4l2_subdev *sd,
			  struct v4l2_subdev_pad_config *cfg,
			  struct v4l2_subdev_format *fmt)
{
	struct vfe_line *line = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *format;

	format = __vfe_get_format(line, cfg, fmt->pad, fmt->which);
	if (format == NULL)
		return -EINVAL;

	/* fix up the request, then store it on the addressed pad */
	vfe_try_format(line, cfg, fmt->pad, &fmt->format, fmt->which);
	*format = fmt->format;

	if (fmt->pad == MSM_VFE_PAD_SINK) {
		struct v4l2_subdev_selection sel = { 0 };
		int ret;

		/* Propagate the format from sink to source */
		format = __vfe_get_format(line, cfg, MSM_VFE_PAD_SRC,
					  fmt->which);

		*format = fmt->format;
		vfe_try_format(line, cfg, MSM_VFE_PAD_SRC, format,
			       fmt->which);

		/* RDI lines have no scaler/crop, nothing more to reset */
		if (line->id != VFE_LINE_PIX)
			return 0;

		/* Reset sink pad compose selection */
		sel.which = fmt->which;
		sel.pad = MSM_VFE_PAD_SINK;
		sel.target = V4L2_SEL_TGT_COMPOSE;
		sel.r.width = fmt->format.width;
		sel.r.height = fmt->format.height;
		ret = vfe_set_selection(sd, cfg, &sel);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/*
 * vfe_get_selection - Handle get selection by pads subdev method
 * @sd: VFE V4L2 subdevice
 * @cfg: V4L2 subdev pad configuration
 * @sel: pointer to v4l2 subdev selection structure
 *
 * Return -EINVAL or zero on success
 */
static int vfe_get_selection(struct v4l2_subdev *sd,
			     struct v4l2_subdev_pad_config *cfg,
			     struct v4l2_subdev_selection *sel)
{
	struct vfe_line *line = v4l2_get_subdevdata(sd);
	struct v4l2_subdev_format fmt = { 0 };
	struct v4l2_rect *rect;
	int ret;

	/* selections exist only on the PIX line (scaler + crop) */
	if (line->id != VFE_LINE_PIX)
		return -EINVAL;

	if (sel->pad == MSM_VFE_PAD_SINK)
		switch (sel->target) {
		case V4L2_SEL_TGT_COMPOSE_BOUNDS:
			/* compose bounds == full sink format */
			fmt.pad = sel->pad;
			fmt.which = sel->which;
			ret = vfe_get_format(sd, cfg, &fmt);
			if (ret < 0)
				return ret;

			sel->r.left = 0;
			sel->r.top = 0;
			sel->r.width = fmt.format.width;
			sel->r.height = fmt.format.height;
			break;
		case V4L2_SEL_TGT_COMPOSE:
			rect = __vfe_get_compose(line, cfg, sel->which);
			if (rect == NULL)
				return -EINVAL;

			sel->r = *rect;
			break;
		default:
			return -EINVAL;
		}
	else if (sel->pad == MSM_VFE_PAD_SRC)
		switch (sel->target) {
		case V4L2_SEL_TGT_CROP_BOUNDS:
			/* crop bounds == current compose rectangle */
			rect = __vfe_get_compose(line, cfg, sel->which);
			if (rect == NULL)
				return -EINVAL;

			sel->r.left = rect->left;
			sel->r.top = rect->top;
			sel->r.width = rect->width;
			sel->r.height = rect->height;
			break;
		case V4L2_SEL_TGT_CROP:
			rect = __vfe_get_crop(line, cfg, sel->which);
			if (rect == NULL)
				return -EINVAL;

			sel->r = *rect;
			break;
		default:
			return -EINVAL;
		}

	/* NOTE(review): a pad that is neither sink nor src falls through
	 * and returns 0 with sel->r untouched — matches existing behavior */
	return 0;
}

/*
 * vfe_set_selection - Handle set selection by pads subdev method
 * @sd: VFE V4L2 subdevice
 * @cfg: V4L2 subdev pad configuration
 * @sel: pointer to v4l2 subdev selection structure
 *
 * Return -EINVAL or zero on success
 */
static int vfe_set_selection(struct v4l2_subdev *sd,
			     struct v4l2_subdev_pad_config *cfg,
			     struct v4l2_subdev_selection *sel)
{
	struct vfe_line *line = v4l2_get_subdevdata(sd);
	struct v4l2_rect *rect;
	int ret;

	if (line->id != VFE_LINE_PIX)
		return -EINVAL;

	if (sel->target == V4L2_SEL_TGT_COMPOSE &&
	    sel->pad == MSM_VFE_PAD_SINK) {
		struct v4l2_subdev_selection crop = { 0 };

		rect = __vfe_get_compose(line, cfg, sel->which);
		if (rect == NULL)
			return -EINVAL;

		vfe_try_compose(line, cfg, &sel->r, sel->which);
		*rect = sel->r;

		/* Reset source crop selection */
		/* recursive call: this takes the CROP branch below */
		crop.which = sel->which;
		crop.pad = MSM_VFE_PAD_SRC;
		crop.target = V4L2_SEL_TGT_CROP;
		crop.r = *rect;
		ret = vfe_set_selection(sd, cfg, &crop);
	} else if (sel->target == V4L2_SEL_TGT_CROP &&
		   sel->pad == MSM_VFE_PAD_SRC) {
		struct v4l2_subdev_format fmt = { 0 };

		rect = __vfe_get_crop(line, cfg, sel->which);
		if (rect == NULL)
			return -EINVAL;

		vfe_try_crop(line, cfg, &sel->r, sel->which);
		*rect = sel->r;

		/* Reset source pad format width and height */
		fmt.which = sel->which;
		fmt.pad = MSM_VFE_PAD_SRC;
		ret = vfe_get_format(sd, cfg, &fmt);
		if (ret < 0)
			return ret;

		fmt.format.width = rect->width;
		fmt.format.height = rect->height;
		ret = vfe_set_format(sd, cfg, &fmt);
	} else {
		ret = -EINVAL;
	}

	return ret;
}

/*
 * vfe_init_formats - Initialize formats on all pads
 * @sd: VFE V4L2 subdevice
 * @fh: V4L2 subdev file handle
 *
 * Initialize all pad formats with default values.
 *
 * Return 0 on success or a negative error code otherwise
 */
static int vfe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	/* default: 1080p UYVY on the sink; vfe_set_format propagates
	 * this to the source pad and resets compose/crop */
	struct v4l2_subdev_format format = {
		.pad = MSM_VFE_PAD_SINK,
		.which = fh ? V4L2_SUBDEV_FORMAT_TRY :
			      V4L2_SUBDEV_FORMAT_ACTIVE,
		.format = {
			.code = MEDIA_BUS_FMT_UYVY8_2X8,
			.width = 1920,
			.height = 1080
		}
	};

	return vfe_set_format(sd, fh ? fh->pad : NULL, &format);
}

/*
 * msm_vfe_subdev_init - Initialize VFE device structure and resources
 * @vfe: VFE device
 * @res: VFE module resources table
 *
 * Return 0 on success or a negative error code otherwise
 */
int msm_vfe_subdev_init(struct vfe_device *vfe, const struct resources *res)
{
	struct device *dev = to_device(vfe);
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *r;
	struct camss *camss = to_camss(vfe);
	int i, j;
	int ret;

	/* Memory */

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[0]);
	vfe->base = devm_ioremap_resource(dev, r);
	if (IS_ERR(vfe->base)) {
		dev_err(dev, "could not map memory\n");
		return PTR_ERR(vfe->base);
	}

	/* Interrupt */

	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
					 res->interrupt[0]);
	if (!r) {
		dev_err(dev, "missing IRQ\n");
		return -EINVAL;
	}

	vfe->irq = r->start;
	snprintf(vfe->irq_name, sizeof(vfe->irq_name), "%s_%s%d",
		 dev_name(dev), MSM_VFE_NAME, vfe->id);
	ret = devm_request_irq(dev, vfe->irq, vfe_isr,
			       IRQF_TRIGGER_RISING, vfe->irq_name, vfe);
	if (ret < 0) {
		dev_err(dev, "request_irq failed: %d\n", ret);
		return ret;
	}

	/* Clocks */

	/* clock name table is NULL-terminated */
	vfe->nclocks = 0;
	while (res->clock[vfe->nclocks])
		vfe->nclocks++;

	vfe->clock = devm_kcalloc(dev, vfe->nclocks, sizeof(*vfe->clock),
				  GFP_KERNEL);
	if (!vfe->clock)
		return -ENOMEM;

	for (i = 0; i < vfe->nclocks; i++) {
		struct camss_clock *clock = &vfe->clock[i];

		clock->clk = devm_clk_get(dev, res->clock[i]);
		if (IS_ERR(clock->clk))
			return PTR_ERR(clock->clk);

		clock->name = res->clock[i];

		/* per-clock rate table is 0-terminated */
		clock->nfreqs = 0;
		while (res->clock_rate[i][clock->nfreqs])
			clock->nfreqs++;

		if (!clock->nfreqs) {
			clock->freq = NULL;
			continue;
		}

		clock->freq = devm_kcalloc(dev,
					   clock->nfreqs,
					   sizeof(*clock->freq),
					   GFP_KERNEL);
		if (!clock->freq)
			return -ENOMEM;

		for (j = 0; j < clock->nfreqs; j++)
			clock->freq[j] = res->clock_rate[i][j];
	}

	mutex_init(&vfe->power_lock);
	vfe->power_count = 0;

	mutex_init(&vfe->stream_lock);
	vfe->stream_count = 0;

	spin_lock_init(&vfe->output_lock);

	vfe->id = 0;
	vfe->reg_update = 0;

	/* one video output and completion pair per line (RDI0..PIX) */
	for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++) {
		vfe->line[i].video_out.type =
					V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
		vfe->line[i].video_out.camss = camss;
		vfe->line[i].id = i;
		init_completion(&vfe->line[i].output.sof);
		init_completion(&vfe->line[i].output.reg_update);
	}

	init_completion(&vfe->reset_complete);
	init_completion(&vfe->halt_complete);

	return 0;
}

/*
 * msm_vfe_get_vfe_id - Get VFE HW module id
 * @entity: Pointer to VFE media entity structure
 * @id: Return VFE HW module id here
 */
void msm_vfe_get_vfe_id(struct media_entity *entity, u8 *id)
{
	struct v4l2_subdev *sd;
	struct vfe_line *line;
	struct vfe_device *vfe;

	sd = media_entity_to_v4l2_subdev(entity);
	line = v4l2_get_subdevdata(sd);
	vfe = to_vfe(line);

	*id = vfe->id;
}

/*
 * msm_vfe_get_vfe_line_id - Get VFE line id by media entity
 * @entity: Pointer to VFE media entity structure
 * @id: Return VFE line id here
 */
void msm_vfe_get_vfe_line_id(struct media_entity *entity, enum vfe_line_id *id)
{
	struct v4l2_subdev *sd;
	struct vfe_line *line;

	sd = media_entity_to_v4l2_subdev(entity);
	line = v4l2_get_subdevdata(sd);

	*id = line->id;
}

/*
 * vfe_link_setup - Setup VFE connections
 * @entity: Pointer to media entity structure
 * @local: Pointer to local pad
 * @remote: Pointer to remote pad
 * @flags: Link flags
 *
 * Return 0 on success
 */
static int vfe_link_setup(struct media_entity *entity,
			  const struct media_pad *local,
			  const struct media_pad *remote, u32 flags)
{
	/* refuse to enable a link when the local pad is already connected */
	if (flags & MEDIA_LNK_FL_ENABLED)
		if (media_entity_remote_pad(local))
			return -EBUSY;

	return 0;
}

/* v4l2 subdev operation tables */

static const struct v4l2_subdev_core_ops vfe_core_ops = {
	.s_power = vfe_set_power,
};

static const struct v4l2_subdev_video_ops vfe_video_ops = {
	.s_stream = vfe_set_stream,
};

static const struct v4l2_subdev_pad_ops vfe_pad_ops = {
	.enum_mbus_code = vfe_enum_mbus_code,
	.enum_frame_size = vfe_enum_frame_size,
	.get_fmt = vfe_get_format,
	.set_fmt = vfe_set_format,
	.get_selection = vfe_get_selection,
	.set_selection = vfe_set_selection,
};

static const struct v4l2_subdev_ops vfe_v4l2_ops = {
	.core = &vfe_core_ops,
	.video = &vfe_video_ops,
	.pad = &vfe_pad_ops,
};

/* .open initializes default TRY formats for each new file handle */
static const struct v4l2_subdev_internal_ops vfe_v4l2_internal_ops = {
	.open = vfe_init_formats,
};

static const struct media_entity_operations vfe_media_ops = {
	.link_setup = vfe_link_setup,
	.link_validate = v4l2_subdev_link_validate,
};

static const struct camss_video_ops camss_vfe_video_ops = {
	.queue_buffer = vfe_queue_buffer,
	.flush_buffers = vfe_flush_buffers,
};

/*
 * msm_vfe_stop_streaming - Stop streaming on all VFE line video nodes
 * @vfe: VFE device
 */
void msm_vfe_stop_streaming(struct vfe_device *vfe)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vfe->line); i++)
		msm_video_stop_streaming(&vfe->line[i].video_out);
}

/*
 * msm_vfe_register_entities - Register subdev node for VFE module
 * @vfe: VFE device
 * @v4l2_dev: V4L2 device
 *
 * Initialize and register a subdev node for the VFE module.
Then
 * call msm_video_register() to register the video device node which
 * will be connected to this subdev node. Then actually create the
 * media link between them.
 *
 * Return 0 on success or a negative error code otherwise
 */
int msm_vfe_register_entities(struct vfe_device *vfe,
			      struct v4l2_device *v4l2_dev)
{
	struct device *dev = to_device(vfe);
	struct v4l2_subdev *sd;
	struct media_pad *pads;
	struct camss_video *video_out;
	int ret;
	int i;

	for (i = 0; i < ARRAY_SIZE(vfe->line); i++) {
		char name[32];

		sd = &vfe->line[i].subdev;
		pads = vfe->line[i].pads;
		video_out = &vfe->line[i].video_out;

		v4l2_subdev_init(sd, &vfe_v4l2_ops);
		sd->internal_ops = &vfe_v4l2_internal_ops;
		sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
		/* PIX line is named "<vfe>N_pix", RDI lines "<vfe>N_rdiM" */
		if (i == VFE_LINE_PIX)
			snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d_%s",
				 MSM_VFE_NAME, vfe->id, "pix");
		else
			snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d_%s%d",
				 MSM_VFE_NAME, vfe->id, "rdi", i);

		v4l2_set_subdevdata(sd, &vfe->line[i]);

		/* set ACTIVE default formats (fh == NULL) */
		ret = vfe_init_formats(sd, NULL);
		if (ret < 0) {
			dev_err(dev, "Failed to init format: %d\n", ret);
			goto error_init;
		}

		pads[MSM_VFE_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
		pads[MSM_VFE_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE;

		sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
		sd->entity.ops = &vfe_media_ops;
		ret = media_entity_pads_init(&sd->entity, MSM_VFE_PADS_NUM,
					     pads);
		if (ret < 0) {
			dev_err(dev, "Failed to init media entity: %d\n", ret);
			goto error_init;
		}

		ret = v4l2_device_register_subdev(v4l2_dev, sd);
		if (ret < 0) {
			dev_err(dev, "Failed to register subdev: %d\n", ret);
			goto error_reg_subdev;
		}

		/* PIX output is line based with 16-byte bpl alignment */
		video_out->ops = &camss_vfe_video_ops;
		video_out->bpl_alignment = 8;
		video_out->line_based = 0;
		if (i == VFE_LINE_PIX) {
			video_out->bpl_alignment = 16;
			video_out->line_based = 1;
		}
		snprintf(name, ARRAY_SIZE(name), "%s%d_%s%d",
			 MSM_VFE_NAME, vfe->id, "video", i);
		ret = msm_video_register(video_out, v4l2_dev, name,
					 i == VFE_LINE_PIX ? 1 : 0);
		if (ret < 0) {
			dev_err(dev, "Failed to register video node: %d\n",
				ret);
			goto error_reg_video;
		}

		ret = media_create_pad_link(
				&sd->entity, MSM_VFE_PAD_SRC,
				&video_out->vdev.entity, 0,
				MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
		if (ret < 0) {
			dev_err(dev, "Failed to link %s->%s entities: %d\n",
				sd->entity.name, video_out->vdev.entity.name,
				ret);
			goto error_link;
		}
	}

	return 0;

	/*
	 * Unwind: the labels below first tear down the partially set up
	 * line i (deepest failure first), then the loop under error_init
	 * tears down every fully registered earlier line.
	 */
error_link:
	msm_video_unregister(video_out);

error_reg_video:
	v4l2_device_unregister_subdev(sd);

error_reg_subdev:
	media_entity_cleanup(&sd->entity);

error_init:
	for (i--; i >= 0; i--) {
		sd = &vfe->line[i].subdev;
		video_out = &vfe->line[i].video_out;

		msm_video_unregister(video_out);
		v4l2_device_unregister_subdev(sd);
		media_entity_cleanup(&sd->entity);
	}

	return ret;
}

/*
 * msm_vfe_unregister_entities - Unregister VFE module subdev node
 * @vfe: VFE device
 */
void msm_vfe_unregister_entities(struct vfe_device *vfe)
{
	int i;

	mutex_destroy(&vfe->power_lock);
	mutex_destroy(&vfe->stream_lock);

	for (i = 0; i < ARRAY_SIZE(vfe->line); i++) {
		struct v4l2_subdev *sd = &vfe->line[i].subdev;
		struct camss_video *video_out = &vfe->line[i].video_out;

		msm_video_unregister(video_out);
		v4l2_device_unregister_subdev(sd);
		media_entity_cleanup(&sd->entity);
	}
}