/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: atomic plane helpers
 *
 * The functions here are used by the atomic plane helper functions to
 * implement legacy plane updates (i.e., drm_plane->update_plane() and
 * drm_plane->disable_plane()). This allows plane updates to use the
 * atomic state infrastructure and perform plane updates as separate
 * prepare/check/commit/cleanup steps.
 */

#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>

#include "intel_atomic_plane.h"
#include "intel_drv.h"
#include "intel_pm.h"
#include "intel_sprite.h"

struct intel_plane *intel_plane_alloc(void)
{
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;

	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return ERR_PTR(-ENOMEM);

	plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
	if (!plane_state) {
		kfree(plane);
		return ERR_PTR(-ENOMEM);
	}

	__drm_atomic_helper_plane_reset(&plane->base, &plane_state->base);
	plane_state->scaler_id = -1;

	return plane;
}

void intel_plane_free(struct intel_plane *plane)
{
	intel_plane_destroy_state(&plane->base, plane->base.state);
	kfree(plane);
}

/**
 * intel_plane_duplicate_state - duplicate plane state
 * @plane: drm plane
 *
 * Allocates and returns a copy of the plane state (both common and
 * Intel-specific) for the specified plane.
 *
 * Returns: The newly allocated plane state, or NULL on failure.
 */
struct drm_plane_state *
intel_plane_duplicate_state(struct drm_plane *plane)
{
	struct drm_plane_state *state;
	struct intel_plane_state *intel_state;

	intel_state = kmemdup(plane->state, sizeof(*intel_state), GFP_KERNEL);

	if (!intel_state)
		return NULL;

	state = &intel_state->base;

	__drm_atomic_helper_plane_duplicate_state(plane, state);

	intel_state->vma = NULL;
	intel_state->flags = 0;

	return state;
}

/**
 * intel_plane_destroy_state - destroy plane state
 * @plane: drm plane
 * @state: state object to destroy
 *
 * Destroys the plane state (both common and Intel-specific) for the
 * specified plane.
 */
void
intel_plane_destroy_state(struct drm_plane *plane,
			  struct drm_plane_state *state)
{
	WARN_ON(to_intel_plane_state(state)->vma);

	drm_atomic_helper_plane_destroy_state(plane, state);
}

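/*
 * intel_plane_data_rate() below returns the plane's contribution to the
 * pipe data rate: the framebuffer's bytes per pixel (scaled up for
 * multi-planar YUV formats, see the HSD note in the body) multiplied by
 * the pipe's pixel rate, or zero if the plane is not visible.
 */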
unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
				   const struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int cpp;

	if (!plane_state->base.visible)
		return 0;

	cpp = fb->format->cpp[0];

	/*
	 * Based on HSD#:1408715493
	 * NV12 cpp == 4, P010 cpp == 8
	 *
	 * FIXME what is the logic behind this?
	 */
	if (fb->format->is_yuv && fb->format->num_planes > 1)
		cpp *= 4;

	return cpp * crtc_state->pixel_rate;
}

int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
					struct intel_crtc_state *new_crtc_state,
					const struct intel_plane_state *old_plane_state,
					struct intel_plane_state *new_plane_state)
{
	struct intel_plane *plane = to_intel_plane(new_plane_state->base.plane);
	int ret;

	new_crtc_state->active_planes &= ~BIT(plane->id);
	new_crtc_state->nv12_planes &= ~BIT(plane->id);
	new_crtc_state->c8_planes &= ~BIT(plane->id);
	new_crtc_state->data_rate[plane->id] = 0;
	new_plane_state->base.visible = false;

	if (!new_plane_state->base.crtc && !old_plane_state->base.crtc)
		return 0;

	ret = plane->check_plane(new_crtc_state, new_plane_state);
	if (ret)
		return ret;

	/* FIXME pre-g4x don't work like this */
	if (new_plane_state->base.visible)
		new_crtc_state->active_planes |= BIT(plane->id);

	if (new_plane_state->base.visible &&
	    is_planar_yuv_format(new_plane_state->base.fb->format->format))
		new_crtc_state->nv12_planes |= BIT(plane->id);

	if (new_plane_state->base.visible &&
	    new_plane_state->base.fb->format->format == DRM_FORMAT_C8)
		new_crtc_state->c8_planes |= BIT(plane->id);

	if (new_plane_state->base.visible || old_plane_state->base.visible)
		new_crtc_state->update_planes |= BIT(plane->id);

	new_crtc_state->data_rate[plane->id] =
		intel_plane_data_rate(new_crtc_state, new_plane_state);

	return intel_plane_atomic_calc_changes(old_crtc_state, new_crtc_state,
					       old_plane_state, new_plane_state);
}

static struct intel_crtc *
get_crtc_from_states(const struct intel_plane_state *old_plane_state,
		     const struct intel_plane_state *new_plane_state)
{
	if (new_plane_state->base.crtc)
		return to_intel_crtc(new_plane_state->base.crtc);

	if (old_plane_state->base.crtc)
		return to_intel_crtc(old_plane_state->base.crtc);

	return NULL;
}

static int intel_plane_atomic_check(struct drm_plane *_plane,
				    struct drm_plane_state *_new_plane_state)
{
	struct intel_plane *plane = to_intel_plane(_plane);
	struct intel_atomic_state *state =
		to_intel_atomic_state(_new_plane_state->state);
	struct intel_plane_state *new_plane_state =
		to_intel_plane_state(_new_plane_state);
	const struct intel_plane_state *old_plane_state =
		intel_atomic_get_old_plane_state(state, plane);
	struct intel_crtc *crtc =
		get_crtc_from_states(old_plane_state, new_plane_state);
	const struct intel_crtc_state *old_crtc_state;
	struct intel_crtc_state *new_crtc_state;

	new_plane_state->base.visible = false;
	if (!crtc)
		return 0;

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
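/*
 * skl_next_plane_to_commit() below picks the next plane that can be
 * committed on @crtc: a plane whose new ddb allocations (Y and UV) don't
 * overlap any of the allocations still tracked in @entries_y/@entries_uv.
 * The selected plane is removed from @update_mask and its new allocations
 * replace its old ones in the entry arrays.
 */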
	new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

	return intel_plane_atomic_check_with_state(old_crtc_state,
						   new_crtc_state,
						   old_plane_state,
						   new_plane_state);
}

static struct intel_plane *
skl_next_plane_to_commit(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct skl_ddb_entry entries_y[I915_MAX_PLANES],
			 struct skl_ddb_entry entries_uv[I915_MAX_PLANES],
			 unsigned int *update_mask)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	int i;

	if (*update_mask == 0)
		return NULL;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		enum plane_id plane_id = plane->id;

		if (crtc->pipe != plane->pipe ||
		    !(*update_mask & BIT(plane_id)))
			continue;

		if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_y[plane_id],
						entries_y,
						I915_MAX_PLANES, plane_id) ||
		    skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_uv[plane_id],
						entries_uv,
						I915_MAX_PLANES, plane_id))
			continue;

		*update_mask &= ~BIT(plane_id);
		entries_y[plane_id] = crtc_state->wm.skl.plane_ddb_y[plane_id];
		entries_uv[plane_id] = crtc_state->wm.skl.plane_ddb_uv[plane_id];

		return plane;
	}

	/* should never happen */
	WARN_ON(1);

	return NULL;
}

void intel_update_plane(struct intel_plane *plane,
			const struct intel_crtc_state *crtc_state,
			const struct intel_plane_state *plane_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

	trace_intel_update_plane(&plane->base, crtc);
	plane->update_plane(plane, crtc_state, plane_state);
}

void intel_update_slave(struct intel_plane *plane,
			const struct intel_crtc_state *crtc_state,
			const struct intel_plane_state *plane_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

	trace_intel_update_plane(&plane->base, crtc);
	plane->update_slave(plane, crtc_state, plane_state);
}

void intel_disable_plane(struct intel_plane *plane,
			 const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

	trace_intel_disable_plane(&plane->base, crtc);
	plane->disable_plane(plane, crtc_state);
}

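/*
 * skl_update_planes_on_crtc() below commits the plane updates for @crtc
 * one plane at a time, in an order chosen so that a plane's new ddb
 * allocation never overlaps the ddb allocations still in use by planes
 * that haven't been updated yet.
 */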
void skl_update_planes_on_crtc(struct intel_atomic_state *state,
			       struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct skl_ddb_entry entries_y[I915_MAX_PLANES];
	struct skl_ddb_entry entries_uv[I915_MAX_PLANES];
	u32 update_mask = new_crtc_state->update_planes;
	struct intel_plane *plane;

	memcpy(entries_y, old_crtc_state->wm.skl.plane_ddb_y,
	       sizeof(old_crtc_state->wm.skl.plane_ddb_y));
	memcpy(entries_uv, old_crtc_state->wm.skl.plane_ddb_uv,
	       sizeof(old_crtc_state->wm.skl.plane_ddb_uv));

	while ((plane = skl_next_plane_to_commit(state, crtc,
						 entries_y, entries_uv,
						 &update_mask))) {
		struct intel_plane_state *new_plane_state =
			intel_atomic_get_new_plane_state(state, plane);

		if (new_plane_state->base.visible) {
			intel_update_plane(plane, new_crtc_state, new_plane_state);
		} else if (new_plane_state->slave) {
			struct intel_plane *master =
				new_plane_state->linked_plane;

			/*
			 * We update the slave plane from this function because
			 * programming it from the master plane's update_plane
			 * callback runs into issues when the Y plane is
			 * reassigned, disabled or used by a different plane.
			 *
			 * The slave plane is updated with the master plane's
			 * plane_state.
			 */
			new_plane_state =
				intel_atomic_get_new_plane_state(state, master);

			intel_update_slave(plane, new_crtc_state, new_plane_state);
		} else {
			intel_disable_plane(plane, new_crtc_state);
		}
	}
}

void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
{
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u32 update_mask = new_crtc_state->update_planes;
	struct intel_plane_state *new_plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
		if (crtc->pipe != plane->pipe ||
		    !(update_mask & BIT(plane->id)))
			continue;

		if (new_plane_state->base.visible)
			intel_update_plane(plane, new_crtc_state, new_plane_state);
		else
			intel_disable_plane(plane, new_crtc_state);
	}
}

const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
	.prepare_fb = intel_prepare_plane_fb,
	.cleanup_fb = intel_cleanup_plane_fb,
	.atomic_check = intel_plane_atomic_check,
};