/*
 * Copyright (C) STMicroelectronics SA 2014
 * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
 *          Fabien Dessenne <fabien.dessenne@st.com>
 *          for STMicroelectronics.
 * License terms:  GNU General Public License (GPL), version 2
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>

#include "sti_gdp.h"
#include "sti_layer.h"
#include "sti_vtg.h"

/* GAM_GDP_CTL bits */
#define ENA_COLOR_FILL          BIT(8)
#define WAIT_NEXT_VSYNC         BIT(31)

/* GDP color formats (values programmed in GAM_GDP_CTL) */
#define GDP_RGB565              0x00
#define GDP_RGB888              0x01
#define GDP_RGB888_32           0x02
#define GDP_ARGB8565            0x04
#define GDP_ARGB8888            0x05
#define GDP_ARGB1555            0x06
#define GDP_ARGB4444            0x07
#define GDP_CLUT8               0x0B
#define GDP_YCBR888             0x10
#define GDP_YCBR422R            0x12
#define GDP_AYCBR8888           0x15

/* GDP register offsets, relative to the GDP instance base (layer->regs) */
#define GAM_GDP_CTL_OFFSET      0x00
#define GAM_GDP_AGC_OFFSET      0x04
#define GAM_GDP_VPO_OFFSET      0x0C
#define GAM_GDP_VPS_OFFSET      0x10
#define GAM_GDP_PML_OFFSET      0x14
#define GAM_GDP_PMP_OFFSET      0x18
#define GAM_GDP_SIZE_OFFSET     0x1C
#define GAM_GDP_NVN_OFFSET      0x24
#define GAM_GDP_KEY1_OFFSET     0x28
#define GAM_GDP_KEY2_OFFSET     0x2C
#define GAM_GDP_PPT_OFFSET      0x34
#define GAM_GDP_CML_OFFSET      0x3C
#define GAM_GDP_MST_OFFSET      0x68

#define GAM_GDP_ALPHARANGE_255  BIT(5)
#define GAM_GDP_AGC_FULL_RANGE  0x00808080
#define GAM_GDP_PPT_IGNORE      (BIT(1) | BIT(0))
#define GAM_GDP_SIZE_MAX        0x7FF

/* 2 node lists (double buffering); each list has a top and a bottom node */
#define GDP_NODE_NB_BANK        2
#define GDP_NODE_PER_FIELD      2

/*
 * GDP node: in-memory image of the GDP registers, fetched by the HW through
 * the NVN (next node) chaining mechanism.
 * NOTE(review): the field layout mirrors the GAM_GDP_*_OFFSET values above
 * (ctl@0x00 ... cml@0x3C) and is consumed by the hardware via DMA - do not
 * reorder or resize the members.
 */
struct sti_gdp_node {
	u32 gam_gdp_ctl;	/* 0x00: control (color format, WAIT_NEXT_VSYNC...) */
	u32 gam_gdp_agc;	/* 0x04: alpha gain control */
	u32 reserved1;		/* 0x08 */
	u32 gam_gdp_vpo;	/* 0x0C: viewport origin (ydo << 16 | xdo) */
	u32 gam_gdp_vps;	/* 0x10: viewport stop (yds << 16 | xds) */
	u32 gam_gdp_pml;	/* 0x14: pixel memory location (DMA address) */
	u32 gam_gdp_pmp;	/* 0x18: pixel memory pitch */
	u32 gam_gdp_size;	/* 0x1C: source size (height << 16 | width) */
	u32 reserved2;		/* 0x20 */
	u32 gam_gdp_nvn;	/* 0x24: next node (DMA address) */
	u32 gam_gdp_key1;	/* 0x28: color key 1 */
	u32 gam_gdp_key2;	/* 0x2C: color key 2 */
	u32 reserved3;		/* 0x30 */
	u32 gam_gdp_ppt;	/* 0x34: GAM_GDP_PPT_IGNORE bits disable the node */
	u32 reserved4;		/* 0x38 */
	u32 gam_gdp_cml;	/* 0x3C: CLUT memory location */
};

/* One bank: the pair of nodes (top field + bottom field) for one frame */
struct sti_gdp_node_list {
	struct sti_gdp_node *top_field;
	struct sti_gdp_node *btm_field;
};

/**
 * STI GDP structure
 *
 * @layer: layer structure
 * @clk_pix: pixel clock for the current gdp (NULL when the GDP has none)
 * @vtg_field_nb: callback for VTG FIELD (top or bottom) notification
 * @is_curr_top: true if the current node processed is the top field
 * @node_list: array of node lists (double-buffered banks)
 */
struct sti_gdp {
	struct sti_layer layer;
	struct clk *clk_pix;
	struct notifier_block vtg_field_nb;
	bool is_curr_top;
	struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK];
};

#define to_sti_gdp(x) container_of(x, struct sti_gdp, layer)

/* DRM fourcc formats supported by the GDP hardware */
static const uint32_t gdp_supported_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ARGB4444,
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_AYUV,
	DRM_FORMAT_YUV444,
	DRM_FORMAT_VYUY,
	DRM_FORMAT_C8,
};

/* sti_layer_funcs .get_formats: return the supported DRM format table */
static const uint32_t *sti_gdp_get_formats(struct sti_layer *layer)
{
	return gdp_supported_formats;
}

/* sti_layer_funcs .get_nb_formats: number of entries in the format table */
static unsigned int sti_gdp_get_nb_formats(struct sti_layer *layer)
{
	return ARRAY_SIZE(gdp_supported_formats);
}

/* Convert a DRM fourcc to the matching GDP color format, -1 if unsupported */
static int sti_gdp_fourcc2format(int fourcc)
{
	switch (fourcc) {
	case DRM_FORMAT_XRGB8888:
		return GDP_RGB888_32;
	case DRM_FORMAT_ARGB8888:
		return GDP_ARGB8888;
	case DRM_FORMAT_ARGB4444:
		return GDP_ARGB4444;
	case DRM_FORMAT_ARGB1555:
		return GDP_ARGB1555;
	case DRM_FORMAT_RGB565:
		return GDP_RGB565;
	case DRM_FORMAT_RGB888:
		return GDP_RGB888;
	case DRM_FORMAT_AYUV:
		return GDP_AYCBR8888;
	case DRM_FORMAT_YUV444:
		return GDP_YCBR888;
	case DRM_FORMAT_VYUY:
		return GDP_YCBR422R;
	case DRM_FORMAT_C8:
		return GDP_CLUT8;
	}
	return -1;
}

/* Full 0-255 alpha range for the formats carrying an 8-bit alpha component */
static int sti_gdp_get_alpharange(int format)
{
	switch (format) {
	case GDP_ARGB8565:
	case GDP_ARGB8888:
	case GDP_AYCBR8888:
		return GAM_GDP_ALPHARANGE_255;
	}
	return 0;
}

/**
 * sti_gdp_get_free_nodes
 * @layer: gdp layer
 *
 * Look for a
GDP node list that is not currently read by the HW. 163 * 164 * RETURNS: 165 * Pointer to the free GDP node list 166 */ 167 static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer) 168 { 169 int hw_nvn; 170 void *virt_nvn; 171 struct sti_gdp *gdp = to_sti_gdp(layer); 172 unsigned int i; 173 174 hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET); 175 if (!hw_nvn) 176 goto end; 177 178 virt_nvn = dma_to_virt(layer->dev, (dma_addr_t) hw_nvn); 179 180 for (i = 0; i < GDP_NODE_NB_BANK; i++) 181 if ((virt_nvn != gdp->node_list[i].btm_field) && 182 (virt_nvn != gdp->node_list[i].top_field)) 183 return &gdp->node_list[i]; 184 185 end: 186 return &gdp->node_list[0]; 187 } 188 189 /** 190 * sti_gdp_get_current_nodes 191 * @layer: GDP layer 192 * 193 * Look for GDP nodes that are currently read by the HW. 194 * 195 * RETURNS: 196 * Pointer to the current GDP node list 197 */ 198 static 199 struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer) 200 { 201 int hw_nvn; 202 void *virt_nvn; 203 struct sti_gdp *gdp = to_sti_gdp(layer); 204 unsigned int i; 205 206 hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET); 207 if (!hw_nvn) 208 goto end; 209 210 virt_nvn = dma_to_virt(layer->dev, (dma_addr_t) hw_nvn); 211 212 for (i = 0; i < GDP_NODE_NB_BANK; i++) 213 if ((virt_nvn == gdp->node_list[i].btm_field) || 214 (virt_nvn == gdp->node_list[i].top_field)) 215 return &gdp->node_list[i]; 216 217 end: 218 return NULL; 219 } 220 221 /** 222 * sti_gdp_prepare_layer 223 * @lay: gdp layer 224 * @first_prepare: true if it is the first time this function is called 225 * 226 * Update the free GDP node list according to the layer properties. 227 * 228 * RETURNS: 229 * 0 on success. 
 */
static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
{
	struct sti_gdp_node_list *list;
	struct sti_gdp_node *top_field, *btm_field;
	struct drm_display_mode *mode = layer->mode;
	struct device *dev = layer->dev;
	struct sti_gdp *gdp = to_sti_gdp(layer);
	int format;
	unsigned int depth, bpp;
	int rate = mode->clock * 1000;
	int res;
	u32 ydo, xdo, yds, xds;

	/* Program the bank the HW is NOT currently reading */
	list = sti_gdp_get_free_nodes(layer);
	top_field = list->top_field;
	btm_field = list->btm_field;

	/* Build the top field from layer params */
	top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
	top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
	format = sti_gdp_fourcc2format(layer->format);
	if (format == -1) {
		DRM_ERROR("Format not supported by GDP %.4s\n",
			  (char *)&layer->format);
		return 1;
	}
	top_field->gam_gdp_ctl |= format;
	top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
	/* Clear the 'ignore' bits so the mixer displays this node */
	top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;

	/* Pixel memory location: fb base plus offset of pixel (src_x, src_y) */
	drm_fb_get_bpp_depth(layer->format, &depth, &bpp);
	top_field->gam_gdp_pml = (u32) layer->paddr + layer->offsets[0];
	top_field->gam_gdp_pml += layer->src_x * (bpp >> 3);
	top_field->gam_gdp_pml += layer->src_y * layer->pitches[0];

	/* Input parameters: pitch and source size (height << 16 | width) */
	top_field->gam_gdp_pmp = layer->pitches[0];
	top_field->gam_gdp_size =
	    clamp_val(layer->src_h, 0, GAM_GDP_SIZE_MAX) << 16 |
	    clamp_val(layer->src_w, 0, GAM_GDP_SIZE_MAX);

	/* Output parameters: destination viewport in VTG line/pixel numbers */
	ydo = sti_vtg_get_line_number(*mode, layer->dst_y);
	yds = sti_vtg_get_line_number(*mode, layer->dst_y + layer->dst_h - 1);
	xdo = sti_vtg_get_pixel_number(*mode, layer->dst_x);
	xds = sti_vtg_get_pixel_number(*mode, layer->dst_x + layer->dst_w - 1);
	top_field->gam_gdp_vpo = (ydo << 16) | xdo;
	top_field->gam_gdp_vps = (yds << 16) | xds;

	/* Same content and chained together (top -> btm -> top ...) */
	memcpy(btm_field, top_field, sizeof(*btm_field));
	top_field->gam_gdp_nvn = virt_to_dma(dev, btm_field);
	btm_field->gam_gdp_nvn = virt_to_dma(dev, top_field);

	/* Interlaced mode: bottom field starts one line below the top field */
	if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE)
		btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
					 layer->pitches[0];

	if (first_prepare) {
		/* Set and enable gdp clock (only GDPs that own one) */
		if (gdp->clk_pix) {
			res = clk_set_rate(gdp->clk_pix, rate);
			if (res < 0) {
				DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
					  rate);
				return 1;
			}

			if (clk_prepare_enable(gdp->clk_pix)) {
				DRM_ERROR("Failed to prepare/enable gdp\n");
				return 1;
			}
		}
	}

	return 0;
}

/**
 * sti_gdp_commit_layer
 * @lay: gdp layer
 *
 * Update the NVN field of the 'right' field of the current GDP node (being
 * used by the HW) with the address of the updated ('free') top field GDP node.
 * - In interlaced mode the 'right' field is the bottom field as we update
 *   frames starting from their top field
 * - In progressive mode, we update both bottom and top fields which are
 *   equal nodes.
 * At the next VSYNC, the updated node list will be used by the HW.
 *
 * RETURNS:
 * 0 on success.
325 */ 326 static int sti_gdp_commit_layer(struct sti_layer *layer) 327 { 328 struct sti_gdp_node_list *updated_list = sti_gdp_get_free_nodes(layer); 329 struct sti_gdp_node *updated_top_node = updated_list->top_field; 330 struct sti_gdp_node *updated_btm_node = updated_list->btm_field; 331 struct sti_gdp *gdp = to_sti_gdp(layer); 332 u32 dma_updated_top = virt_to_dma(layer->dev, updated_top_node); 333 u32 dma_updated_btm = virt_to_dma(layer->dev, updated_btm_node); 334 struct sti_gdp_node_list *curr_list = sti_gdp_get_current_nodes(layer); 335 336 dev_dbg(layer->dev, "Current NVN:0x%X\n", 337 readl(layer->regs + GAM_GDP_NVN_OFFSET)); 338 dev_dbg(layer->dev, "Posted buff: %lx current buff: %x\n", 339 (unsigned long)layer->paddr, 340 readl(layer->regs + GAM_GDP_PML_OFFSET)); 341 342 if (curr_list == NULL) { 343 /* First update or invalid node should directly write in the 344 * hw register */ 345 writel(gdp->is_curr_top == true ? 346 dma_updated_btm : dma_updated_top, 347 layer->regs + GAM_GDP_NVN_OFFSET); 348 return 0; 349 } 350 351 if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE) { 352 if (gdp->is_curr_top == true) { 353 /* Do not update in the middle of the frame, but 354 * postpone the update after the bottom field has 355 * been displayed */ 356 curr_list->btm_field->gam_gdp_nvn = dma_updated_top; 357 } else { 358 /* Direct update to avoid one frame delay */ 359 writel(dma_updated_top, 360 layer->regs + GAM_GDP_NVN_OFFSET); 361 } 362 } else { 363 /* Direct update for progressive to avoid one frame delay */ 364 writel(dma_updated_top, layer->regs + GAM_GDP_NVN_OFFSET); 365 } 366 367 return 0; 368 } 369 370 /** 371 * sti_gdp_disable_layer 372 * @lay: gdp layer 373 * 374 * Disable a GDP. 375 * 376 * RETURNS: 377 * 0 on success. 
378 */ 379 static int sti_gdp_disable_layer(struct sti_layer *layer) 380 { 381 unsigned int i; 382 struct sti_gdp *gdp = to_sti_gdp(layer); 383 384 /* Set the nodes as 'to be ignored on mixer' */ 385 for (i = 0; i < GDP_NODE_NB_BANK; i++) { 386 gdp->node_list[i].top_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE; 387 gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE; 388 } 389 390 if (gdp->clk_pix) 391 clk_disable_unprepare(gdp->clk_pix); 392 393 return 0; 394 } 395 396 /** 397 * sti_gdp_field_cb 398 * @nb: notifier block 399 * @event: event message 400 * @data: private data 401 * 402 * Handle VTG top field and bottom field event. 403 * 404 * RETURNS: 405 * 0 on success. 406 */ 407 int sti_gdp_field_cb(struct notifier_block *nb, 408 unsigned long event, void *data) 409 { 410 struct sti_gdp *gdp = container_of(nb, struct sti_gdp, vtg_field_nb); 411 412 switch (event) { 413 case VTG_TOP_FIELD_EVENT: 414 gdp->is_curr_top = true; 415 break; 416 case VTG_BOTTOM_FIELD_EVENT: 417 gdp->is_curr_top = false; 418 break; 419 default: 420 DRM_ERROR("unsupported event: %lu\n", event); 421 break; 422 } 423 424 return 0; 425 } 426 427 static void sti_gdp_init(struct sti_layer *layer) 428 { 429 struct sti_gdp *gdp = to_sti_gdp(layer); 430 struct device_node *np = layer->dev->of_node; 431 dma_addr_t dma; 432 void *base; 433 unsigned int i, size; 434 435 /* Allocate all the nodes within a single memory page */ 436 size = sizeof(struct sti_gdp_node) * 437 GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK; 438 439 base = dma_alloc_writecombine(layer->dev, 440 size, &dma, GFP_KERNEL | GFP_DMA); 441 if (!base) { 442 DRM_ERROR("Failed to allocate memory for GDP node\n"); 443 return; 444 } 445 memset(base, 0, size); 446 447 for (i = 0; i < GDP_NODE_NB_BANK; i++) { 448 if (virt_to_dma(layer->dev, base) & 0xF) { 449 DRM_ERROR("Mem alignment failed\n"); 450 return; 451 } 452 gdp->node_list[i].top_field = base; 453 DRM_DEBUG_DRIVER("node[%d].top_field=%p\n", i, base); 454 base += sizeof(struct 
sti_gdp_node); 455 456 if (virt_to_dma(layer->dev, base) & 0xF) { 457 DRM_ERROR("Mem alignment failed\n"); 458 return; 459 } 460 gdp->node_list[i].btm_field = base; 461 DRM_DEBUG_DRIVER("node[%d].btm_field=%p\n", i, base); 462 base += sizeof(struct sti_gdp_node); 463 } 464 465 if (of_device_is_compatible(np, "st,stih407-compositor")) { 466 /* GDP of STiH407 chip have its own pixel clock */ 467 char *clk_name; 468 469 switch (layer->desc) { 470 case STI_GDP_0: 471 clk_name = "pix_gdp1"; 472 break; 473 case STI_GDP_1: 474 clk_name = "pix_gdp2"; 475 break; 476 case STI_GDP_2: 477 clk_name = "pix_gdp3"; 478 break; 479 case STI_GDP_3: 480 clk_name = "pix_gdp4"; 481 break; 482 default: 483 DRM_ERROR("GDP id not recognized\n"); 484 return; 485 } 486 487 gdp->clk_pix = devm_clk_get(layer->dev, clk_name); 488 if (IS_ERR(gdp->clk_pix)) 489 DRM_ERROR("Cannot get %s clock\n", clk_name); 490 } 491 } 492 493 static const struct sti_layer_funcs gdp_ops = { 494 .get_formats = sti_gdp_get_formats, 495 .get_nb_formats = sti_gdp_get_nb_formats, 496 .init = sti_gdp_init, 497 .prepare = sti_gdp_prepare_layer, 498 .commit = sti_gdp_commit_layer, 499 .disable = sti_gdp_disable_layer, 500 }; 501 502 struct sti_layer *sti_gdp_create(struct device *dev, int id) 503 { 504 struct sti_gdp *gdp; 505 506 gdp = devm_kzalloc(dev, sizeof(*gdp), GFP_KERNEL); 507 if (!gdp) { 508 DRM_ERROR("Failed to allocate memory for GDP\n"); 509 return NULL; 510 } 511 512 gdp->layer.ops = &gdp_ops; 513 gdp->vtg_field_nb.notifier_call = sti_gdp_field_cb; 514 515 return (struct sti_layer *)gdp; 516 } 517