/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: atomic plane helpers
 *
 * The functions here are used by the atomic plane helper functions to
 * implement legacy plane updates (i.e., drm_plane->update_plane() and
 * drm_plane->disable_plane()).  This allows plane updates to use the
 * atomic state infrastructure and perform plane updates as separate
 * prepare/check/commit/cleanup steps.
 */

#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>

#include "intel_atomic_plane.h"
#include "intel_drv.h"
#include "intel_pm.h"
#include "intel_sprite.h"

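/**
 * intel_plane_alloc - allocate an Intel display plane
 *
 * Allocates a new plane together with its initial plane state, resets the
 * state with __drm_atomic_helper_plane_reset() and marks the plane as not
 * using a scaler (scaler_id == -1).
 *
 * Returns: The new plane, or an ERR_PTR() on allocation failure.
 */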
struct intel_plane *intel_plane_alloc(void)
{
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;

	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return ERR_PTR(-ENOMEM);

	plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
	if (!plane_state) {
		kfree(plane);
		return ERR_PTR(-ENOMEM);
	}

	__drm_atomic_helper_plane_reset(&plane->base, &plane_state->base);
	plane_state->scaler_id = -1;

	return plane;
}

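/**
 * intel_plane_free - free an Intel display plane
 * @plane: plane to free
 *
 * Frees a plane allocated with intel_plane_alloc(), including its current
 * plane state.
 */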
void intel_plane_free(struct intel_plane *plane)
{
	intel_plane_destroy_state(&plane->base, plane->base.state);
	kfree(plane);
}

/**
 * intel_plane_duplicate_state - duplicate plane state
 * @plane: drm plane
 *
 * Allocates and returns a copy of the plane state (both common and
 * Intel-specific) for the specified plane.
 *
 * Returns: The newly allocated plane state, or NULL on failure.
 */
struct drm_plane_state *
intel_plane_duplicate_state(struct drm_plane *plane)
{
	struct drm_plane_state *state;
	struct intel_plane_state *intel_state;

	intel_state = kmemdup(plane->state, sizeof(*intel_state), GFP_KERNEL);

	if (!intel_state)
		return NULL;

	state = &intel_state->base;

	__drm_atomic_helper_plane_duplicate_state(plane, state);

	intel_state->vma = NULL;
	intel_state->flags = 0;

	return state;
}

/**
 * intel_plane_destroy_state - destroy plane state
 * @plane: drm plane
 * @state: state object to destroy
 *
 * Destroys the plane state (both common and Intel-specific) for the
 * specified plane.
 */
void
intel_plane_destroy_state(struct drm_plane *plane,
			  struct drm_plane_state *state)
{
	WARN_ON(to_intel_plane_state(state)->vma);

	drm_atomic_helper_plane_destroy_state(plane, state);
}

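/*
 * Estimate the plane's data rate as the framebuffer's bytes per pixel
 * (with an adjustment for planar YUV formats) multiplied by the CRTC
 * pixel rate. Planes that are not visible contribute no data rate.
 */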
unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
				   const struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int cpp;

	if (!plane_state->base.visible)
		return 0;

	cpp = fb->format->cpp[0];

	/*
	 * Based on HSD#:1408715493
	 * NV12 cpp == 4, P010 cpp == 8
	 *
	 * FIXME what is the logic behind this?
	 */
	if (fb->format->is_yuv && fb->format->num_planes > 1)
		cpp *= 4;

	return cpp * crtc_state->pixel_rate;
}

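/*
 * Check a plane update against explicitly supplied old/new CRTC and plane
 * states: clear the plane's bits in the CRTC bookkeeping (active_planes,
 * nv12_planes, c8_planes, data_rate), run the hardware specific
 * ->check_plane() hook, and then rebuild that bookkeeping from the
 * resulting visibility and framebuffer format before computing the
 * derived CRTC-level changes via intel_plane_atomic_calc_changes().
 */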
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
					struct intel_crtc_state *new_crtc_state,
					const struct intel_plane_state *old_plane_state,
					struct intel_plane_state *new_plane_state)
{
	struct intel_plane *plane = to_intel_plane(new_plane_state->base.plane);
	int ret;

	new_crtc_state->active_planes &= ~BIT(plane->id);
	new_crtc_state->nv12_planes &= ~BIT(plane->id);
	new_crtc_state->c8_planes &= ~BIT(plane->id);
	new_crtc_state->data_rate[plane->id] = 0;
	new_plane_state->base.visible = false;

	if (!new_plane_state->base.crtc && !old_plane_state->base.crtc)
		return 0;

	ret = plane->check_plane(new_crtc_state, new_plane_state);
	if (ret)
		return ret;

	/* FIXME pre-g4x don't work like this */
	if (new_plane_state->base.visible)
		new_crtc_state->active_planes |= BIT(plane->id);

	if (new_plane_state->base.visible &&
	    is_planar_yuv_format(new_plane_state->base.fb->format->format))
		new_crtc_state->nv12_planes |= BIT(plane->id);

	if (new_plane_state->base.visible &&
	    new_plane_state->base.fb->format->format == DRM_FORMAT_C8)
		new_crtc_state->c8_planes |= BIT(plane->id);

	if (new_plane_state->base.visible || old_plane_state->base.visible)
		new_crtc_state->update_planes |= BIT(plane->id);

	new_crtc_state->data_rate[plane->id] =
		intel_plane_data_rate(new_crtc_state, new_plane_state);

	return intel_plane_atomic_calc_changes(old_crtc_state,
					       &new_crtc_state->base,
					       old_plane_state,
					       &new_plane_state->base);
}

static int intel_plane_atomic_check(struct drm_plane *plane,
				    struct drm_plane_state *new_plane_state)
{
	struct drm_atomic_state *state = new_plane_state->state;
	const struct drm_plane_state *old_plane_state =
		drm_atomic_get_old_plane_state(state, plane);
	struct drm_crtc *crtc = new_plane_state->crtc ?: old_plane_state->crtc;
	const struct drm_crtc_state *old_crtc_state;
	struct drm_crtc_state *new_crtc_state;

	new_plane_state->visible = false;
	if (!crtc)
		return 0;

	old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
	new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);

	return intel_plane_atomic_check_with_state(to_intel_crtc_state(old_crtc_state),
						   to_intel_crtc_state(new_crtc_state),
						   to_intel_plane_state(old_plane_state),
						   to_intel_plane_state(new_plane_state));
}

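/*
 * Pick the next plane that can safely be committed: its new DDB allocation
 * must not overlap any allocation that may still be live in the hardware
 * for another plane on this pipe. Once a plane is picked, its entries are
 * recorded in the tracking arrays so that later picks see the updated
 * allocations.
 */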
static struct intel_plane *
skl_next_plane_to_commit(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct skl_ddb_entry entries_y[I915_MAX_PLANES],
			 struct skl_ddb_entry entries_uv[I915_MAX_PLANES],
			 unsigned int *update_mask)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	int i;

	if (*update_mask == 0)
		return NULL;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		enum plane_id plane_id = plane->id;

		if (crtc->pipe != plane->pipe ||
		    !(*update_mask & BIT(plane_id)))
			continue;

		if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_y[plane_id],
						entries_y,
						I915_MAX_PLANES, plane_id) ||
		    skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_uv[plane_id],
						entries_uv,
						I915_MAX_PLANES, plane_id))
			continue;

		*update_mask &= ~BIT(plane_id);
		entries_y[plane_id] = crtc_state->wm.skl.plane_ddb_y[plane_id];
		entries_uv[plane_id] = crtc_state->wm.skl.plane_ddb_uv[plane_id];

		return plane;
	}

	/* should never happen */
	WARN_ON(1);

	return NULL;
}

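/*
 * Thin wrappers around the hardware specific plane hooks
 * (->update_plane(), ->update_slave() and ->disable_plane()) that also
 * emit the corresponding tracepoints.
 */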
void intel_update_plane(struct intel_plane *plane,
			const struct intel_crtc_state *crtc_state,
			const struct intel_plane_state *plane_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

	trace_intel_update_plane(&plane->base, crtc);
	plane->update_plane(plane, crtc_state, plane_state);
}

void intel_update_slave(struct intel_plane *plane,
			const struct intel_crtc_state *crtc_state,
			const struct intel_plane_state *plane_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

	trace_intel_update_plane(&plane->base, crtc);
	plane->update_slave(plane, crtc_state, plane_state);
}

void intel_disable_plane(struct intel_plane *plane,
			 const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

	trace_intel_disable_plane(&plane->base, crtc);
	plane->disable_plane(plane, crtc_state);
}

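/*
 * Commit all plane updates for a skl+ CRTC in an order chosen by
 * skl_next_plane_to_commit(), so that a plane is only programmed once its
 * new DDB allocation no longer overlaps what other planes may still be
 * using. Slave planes of a planar YUV configuration are programmed with
 * the master plane's state.
 */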
void skl_update_planes_on_crtc(struct intel_atomic_state *state,
			       struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct skl_ddb_entry entries_y[I915_MAX_PLANES];
	struct skl_ddb_entry entries_uv[I915_MAX_PLANES];
	u32 update_mask = new_crtc_state->update_planes;
	struct intel_plane *plane;

	memcpy(entries_y, old_crtc_state->wm.skl.plane_ddb_y,
	       sizeof(old_crtc_state->wm.skl.plane_ddb_y));
	memcpy(entries_uv, old_crtc_state->wm.skl.plane_ddb_uv,
	       sizeof(old_crtc_state->wm.skl.plane_ddb_uv));

	while ((plane = skl_next_plane_to_commit(state, crtc,
						 entries_y, entries_uv,
						 &update_mask))) {
		struct intel_plane_state *new_plane_state =
			intel_atomic_get_new_plane_state(state, plane);

		if (new_plane_state->base.visible) {
			intel_update_plane(plane, new_crtc_state, new_plane_state);
		} else if (new_plane_state->slave) {
			struct intel_plane *master =
				new_plane_state->linked_plane;

			/*
			 * We update the slave plane from this function because
			 * programming it from the master plane's update_plane
			 * callback runs into issues when the Y plane is
			 * reassigned, disabled or used by a different plane.
			 *
			 * The slave plane is updated with the master plane's
			 * plane_state.
			 */
			new_plane_state =
				intel_atomic_get_new_plane_state(state, master);

			intel_update_slave(plane, new_crtc_state, new_plane_state);
		} else {
			intel_disable_plane(plane, new_crtc_state);
		}
	}
}

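/*
 * Commit all plane updates for a pre-skl CRTC. No DDB tracking is needed
 * here, so planes are simply updated or disabled in atomic state order.
 */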
void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
{
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u32 update_mask = new_crtc_state->update_planes;
	struct intel_plane_state *new_plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
		if (crtc->pipe != plane->pipe ||
		    !(update_mask & BIT(plane->id)))
			continue;

		if (new_plane_state->base.visible)
			intel_update_plane(plane, new_crtc_state, new_plane_state);
		else
			intel_disable_plane(plane, new_crtc_state);
	}
}

const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
	.prepare_fb = intel_prepare_plane_fb,
	.cleanup_fb = intel_cleanup_plane_fb,
	.atomic_check = intel_plane_atomic_check,
};