/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "wndw.h"
#include "wimm.h"

#include <nvif/class.h>
#include <nvif/cl0002.h>

#include <drm/drm_atomic_helper.h>
#include "nouveau_bo.h"

static void
nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma)
{
	nvif_object_fini(&ctxdma->object);
	list_del(&ctxdma->head);
	kfree(ctxdma);
}

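/* Look up (or create) a DMA context object describing the framebuffer's
 * VRAM aperture.  Objects are cached on the window's ctxdma list, keyed
 * by a handle that encodes the buffer's memory kind in its low byte, so
 * each kind is only ever instantiated once per window.
 */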
static struct nv50_wndw_ctxdma *
nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct nouveau_framebuffer *fb)
{
	struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
	struct nv50_wndw_ctxdma *ctxdma;
	const u8    kind = fb->nvbo->kind;
	const u32 handle = 0xfb000000 | kind;
	struct {
		struct nv_dma_v0 base;
		union {
			struct nv50_dma_v0 nv50;
			struct gf100_dma_v0 gf100;
			struct gf119_dma_v0 gf119;
		};
	} args = {};
	u32 argc = sizeof(args.base);
	int ret;

	list_for_each_entry(ctxdma, &wndw->ctxdma.list, head) {
		if (ctxdma->object.handle == handle)
			return ctxdma;
	}

	if (!(ctxdma = kzalloc(sizeof(*ctxdma), GFP_KERNEL)))
		return ERR_PTR(-ENOMEM);
	list_add(&ctxdma->head, &wndw->ctxdma.list);

	args.base.target = NV_DMA_V0_TARGET_VRAM;
	args.base.access = NV_DMA_V0_ACCESS_RDWR;
	args.base.start  = 0;
	args.base.limit  = drm->client.device.info.ram_user - 1;

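	/* Each GPU generation expects a different argument layout.  Note
	 * that chipsets prior to 0x80 have no kind field to fill in, and
	 * that 0xd0 and newer use the gf119 layout, which additionally
	 * selects large pages for VRAM.
	 */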
	if (drm->client.device.info.chipset < 0x80) {
		args.nv50.part = NV50_DMA_V0_PART_256;
		argc += sizeof(args.nv50);
	} else
	if (drm->client.device.info.chipset < 0xc0) {
		args.nv50.part = NV50_DMA_V0_PART_256;
		args.nv50.kind = kind;
		argc += sizeof(args.nv50);
	} else
	if (drm->client.device.info.chipset < 0xd0) {
		args.gf100.kind = kind;
		argc += sizeof(args.gf100);
	} else {
		args.gf119.page = GF119_DMA_V0_PAGE_LP;
		args.gf119.kind = kind;
		argc += sizeof(args.gf119);
	}

	ret = nvif_object_init(wndw->ctxdma.parent, handle, NV_DMA_IN_MEMORY,
			       &args, argc, &ctxdma->object);
	if (ret) {
		nv50_wndw_ctxdma_del(ctxdma);
		return ERR_PTR(ret);
	}

	return ctxdma;
}

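/* If a notifier was armed for this update, block until it reports that
 * the hardware has begun processing the new state.
 */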
int
nv50_wndw_wait_armed(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
	if (asyw->set.ntfy) {
		return wndw->func->ntfy_wait_begun(disp->sync,
						   asyw->ntfy.offset,
						   wndw->wndw.base.device);
	}
	return 0;
}

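/* Push methods that detach flagged resources (semaphore, notifier, LUT,
 * image), and accumulate this window's interlock bits so the caller can
 * synchronise the update with the other channels involved.
 */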
void
nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 *interlock, bool flush,
		    struct nv50_wndw_atom *asyw)
{
	union nv50_wndw_atom_mask clr = {
		.mask = asyw->clr.mask & ~(flush ? 0 : asyw->set.mask),
	};
	if (clr.sema ) wndw->func-> sema_clr(wndw);
	if (clr.ntfy ) wndw->func-> ntfy_clr(wndw);
	if (clr.xlut ) wndw->func-> xlut_clr(wndw);
	if (clr.image) wndw->func->image_clr(wndw);

	interlock[wndw->interlock.type] |= wndw->interlock.data;
}

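/* Push the window's pending state to the hardware.  When the core channel
 * is also being updated, the image update is forced to be synchronous
 * (mode 0, interval 1) so it can be interlocked with the core update.
 */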
void
nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
		    struct nv50_wndw_atom *asyw)
{
	if (interlock[NV50_DISP_INTERLOCK_CORE]) {
		asyw->image.mode = 0;
		asyw->image.interval = 1;
	}

	if (asyw->set.sema ) wndw->func->sema_set (wndw, asyw);
	if (asyw->set.ntfy ) wndw->func->ntfy_set (wndw, asyw);
	if (asyw->set.image) wndw->func->image_set(wndw, asyw);

	if (asyw->set.xlut ) {
		if (asyw->ilut) {
			asyw->xlut.i.offset =
				nv50_lut_load(&wndw->ilut,
					      asyw->xlut.i.mode <= 1,
					      asyw->xlut.i.buffer,
					      asyw->ilut);
		}
		wndw->func->xlut_set(wndw, asyw);
	}

	if (asyw->set.scale) wndw->func->scale_set(wndw, asyw);
	if (asyw->set.point) {
		asyw->set.point = false;
		if (asyw->set.mask)
			interlock[wndw->interlock.type] |= wndw->interlock.data;
		interlock[NV50_DISP_INTERLOCK_WIMM] |= wndw->interlock.data;

		wndw->immd->point(wndw, asyw);
		wndw->immd->update(wndw, interlock);
	} else {
		interlock[wndw->interlock.type] |= wndw->interlock.data;
	}
}

void
nv50_wndw_ntfy_enable(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nv50_disp *disp = nv50_disp(wndw->plane.dev);

	asyw->ntfy.handle = wndw->wndw.sync.handle;
	asyw->ntfy.offset = wndw->ntfy;
	asyw->ntfy.awaken = false;
	asyw->set.ntfy = true;

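	/* Toggle between two notifier slots in the sync buffer, so the
	 * notifier armed here can't clobber one that an earlier update
	 * may still be waiting on.
	 */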
	wndw->func->ntfy_reset(disp->sync, wndw->ntfy);
	wndw->ntfy ^= 0x10;
}

static void
nv50_wndw_atomic_check_release(struct nv50_wndw *wndw,
			       struct nv50_wndw_atom *asyw,
			       struct nv50_head_atom *asyh)
{
	struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
	NV_ATOMIC(drm, "%s release\n", wndw->plane.name);
	wndw->func->release(wndw, asyw, asyh);
	asyw->ntfy.handle = 0;
	asyw->sema.handle = 0;
}

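/* The image.format values below are raw hardware surface-format
 * enumerants rather than DRM fourccs; colorspace 1 selects YUV and
 * 0 selects RGB.
 */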
static int
nv50_wndw_atomic_check_acquire_yuv(struct nv50_wndw_atom *asyw)
{
	switch (asyw->state.fb->format->format) {
	case DRM_FORMAT_YUYV: asyw->image.format = 0x28; break;
	case DRM_FORMAT_UYVY: asyw->image.format = 0x29; break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}
	asyw->image.colorspace = 1;
	return 0;
}

static int
nv50_wndw_atomic_check_acquire_rgb(struct nv50_wndw_atom *asyw)
{
	switch (asyw->state.fb->format->format) {
	case DRM_FORMAT_C8         : asyw->image.format = 0x1e; break;
	case DRM_FORMAT_XRGB8888   :
	case DRM_FORMAT_ARGB8888   : asyw->image.format = 0xcf; break;
	case DRM_FORMAT_RGB565     : asyw->image.format = 0xe8; break;
	case DRM_FORMAT_XRGB1555   :
	case DRM_FORMAT_ARGB1555   : asyw->image.format = 0xe9; break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010: asyw->image.format = 0xd1; break;
	case DRM_FORMAT_XBGR8888   :
	case DRM_FORMAT_ABGR8888   : asyw->image.format = 0xd5; break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010: asyw->image.format = 0xdf; break;
	default:
		return -EINVAL;
	}
	asyw->image.colorspace = 0;
	return 0;
}

static int
nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
			       struct nv50_wndw_atom *armw,
			       struct nv50_wndw_atom *asyw,
			       struct nv50_head_atom *asyh)
{
	struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb);
	struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
	int ret;

	NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);

	if (asyw->state.fb != armw->state.fb || !armw->visible || modeset) {
		asyw->image.w = fb->base.width;
		asyw->image.h = fb->base.height;
		asyw->image.kind = fb->nvbo->kind;

		ret = nv50_wndw_atomic_check_acquire_rgb(asyw);
		if (ret) {
			ret = nv50_wndw_atomic_check_acquire_yuv(asyw);
			if (ret)
				return ret;
		}

		if (asyw->image.kind) {
			asyw->image.layout = 0;
			if (drm->client.device.info.chipset >= 0xc0)
				asyw->image.blockh = fb->nvbo->mode >> 4;
			else
				asyw->image.blockh = fb->nvbo->mode;
			asyw->image.blocks[0] = fb->base.pitches[0] / 64;
			asyw->image.pitch[0] = 0;
		} else {
			asyw->image.layout = 1;
			asyw->image.blockh = 0;
			asyw->image.blocks[0] = 0;
			asyw->image.pitch[0] = fb->base.pitches[0];
		}

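		/* An async page flip requests interval 0 (don't wait for
		 * vblank) with mode 1; a normal flip waits one vblank and
		 * uses mode 0, which appears to be the synchronous update
		 * mode.
		 */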
		if (!(asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC))
			asyw->image.interval = 1;
		else
			asyw->image.interval = 0;
		asyw->image.mode = asyw->image.interval ? 0 : 1;
		asyw->set.image = wndw->func->image_set != NULL;
	}

	if (wndw->func->scale_set) {
		asyw->scale.sx = asyw->state.src_x >> 16;
		asyw->scale.sy = asyw->state.src_y >> 16;
		asyw->scale.sw = asyw->state.src_w >> 16;
		asyw->scale.sh = asyw->state.src_h >> 16;
		asyw->scale.dw = asyw->state.crtc_w;
		asyw->scale.dh = asyw->state.crtc_h;
		if (memcmp(&armw->scale, &asyw->scale, sizeof(asyw->scale)))
			asyw->set.scale = true;
	}

	if (wndw->immd) {
		asyw->point.x = asyw->state.crtc_x;
		asyw->point.y = asyw->state.crtc_y;
		if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point)))
			asyw->set.point = true;
	}

	return wndw->func->acquire(wndw, asyw, asyh);
}

static void
nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
			   struct nv50_wndw_atom *armw,
			   struct nv50_wndw_atom *asyw,
			   struct nv50_head_atom *asyh)
{
	struct drm_property_blob *ilut = asyh->state.degamma_lut;

	/* I8 format without an input LUT makes no sense, and the
	 * HW error-checks for this.
	 *
	 * In order to handle legacy gamma, when there's no input
	 * LUT we need to steal the output LUT and use it instead.
	 */
	if (!ilut && asyw->state.fb->format->format == DRM_FORMAT_C8) {
		/* This should be an error, but there are legacy clients
		 * that do a modeset before providing a gamma table.
		 *
		 * We keep the window disabled to avoid angering HW.
		 */
		if (!(ilut = asyh->state.gamma_lut)) {
			asyw->visible = false;
			return;
		}

		if (wndw->func->ilut)
			asyh->wndw.olut |= BIT(wndw->id);
	} else {
		asyh->wndw.olut &= ~BIT(wndw->id);
	}

	/* Recalculate LUT state. */
	memset(&asyw->xlut, 0x00, sizeof(asyw->xlut));
	if ((asyw->ilut = wndw->func->ilut ? ilut : NULL)) {
		wndw->func->ilut(wndw, asyw);
		asyw->xlut.handle = wndw->wndw.vram.handle;
		asyw->xlut.i.buffer = !asyw->xlut.i.buffer;
		asyw->set.xlut = true;
	}

	/* Handle setting base SET_OUTPUT_LUT_LO_ENABLE_USE_CORE_LUT. */
	if (wndw->func->olut_core &&
	    (!armw->visible || (armw->xlut.handle && !asyw->xlut.handle)))
		asyw->set.xlut = true;

	/* Can't do an immediate flip while changing the LUT. */
	asyh->state.pageflip_flags &= ~DRM_MODE_PAGE_FLIP_ASYNC;
}

static int
nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
{
	struct nouveau_drm *drm = nouveau_drm(plane->dev);
	struct nv50_wndw *wndw = nv50_wndw(plane);
	struct nv50_wndw_atom *armw = nv50_wndw_atom(wndw->plane.state);
	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
	struct nv50_head_atom *harm = NULL, *asyh = NULL;
	bool modeset = false;
	int ret;

	NV_ATOMIC(drm, "%s atomic_check\n", plane->name);

	/* Fetch the assembly state for the head the window will belong to,
	 * and determine whether the window will be visible.
	 */
	if (asyw->state.crtc) {
		asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
		if (IS_ERR(asyh))
			return PTR_ERR(asyh);
		modeset = drm_atomic_crtc_needs_modeset(&asyh->state);
		asyw->visible = asyh->state.active;
	} else {
		asyw->visible = false;
	}

	/* Fetch assembly state for the head the window used to belong to. */
	if (armw->state.crtc) {
		harm = nv50_head_atom_get(asyw->state.state, armw->state.crtc);
		if (IS_ERR(harm))
			return PTR_ERR(harm);
	}

	/* LUT configuration can potentially cause the window to be disabled. */
	if (asyw->visible && wndw->func->xlut_set &&
	    (!armw->visible ||
	     asyh->state.color_mgmt_changed ||
	     asyw->state.fb->format->format !=
	     armw->state.fb->format->format))
		nv50_wndw_atomic_check_lut(wndw, armw, asyw, asyh);

	/* Calculate new window state. */
	if (asyw->visible) {
		ret = nv50_wndw_atomic_check_acquire(wndw, modeset,
						     armw, asyw, asyh);
		if (ret)
			return ret;

		asyh->wndw.mask |= BIT(wndw->id);
	} else
	if (armw->visible) {
		nv50_wndw_atomic_check_release(wndw, asyw, harm);
		harm->wndw.mask &= ~BIT(wndw->id);
	} else {
		return 0;
	}

	/* Aside from the obvious case where the window is actively being
	 * disabled, we might also need to temporarily disable the window
	 * when performing certain modeset operations.
	 */
	if (!asyw->visible || modeset) {
		asyw->clr.ntfy = armw->ntfy.handle != 0;
		asyw->clr.sema = armw->sema.handle != 0;
		asyw->clr.xlut = armw->xlut.handle != 0;
		if (wndw->func->image_clr)
			asyw->clr.image = armw->image.handle[0] != 0;
	}

	return 0;
}

static void
nv50_wndw_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state)
{
	struct nouveau_framebuffer *fb = nouveau_framebuffer(old_state->fb);
	struct nouveau_drm *drm = nouveau_drm(plane->dev);

	NV_ATOMIC(drm, "%s cleanup: %p\n", plane->name, old_state->fb);
	if (!old_state->fb)
		return;

	nouveau_bo_unpin(fb->nvbo);
}

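/* Pin the new framebuffer into VRAM, record the exclusive fence the
 * commit must wait on, and resolve the ctxdma handle and offset that
 * the window methods will reference.
 */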
static int
nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
{
	struct nouveau_framebuffer *fb = nouveau_framebuffer(state->fb);
	struct nouveau_drm *drm = nouveau_drm(plane->dev);
	struct nv50_wndw *wndw = nv50_wndw(plane);
	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
	struct nv50_head_atom *asyh;
	struct nv50_wndw_ctxdma *ctxdma;
	int ret;

	NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, state->fb);
	if (!asyw->state.fb)
		return 0;

	ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM, true);
	if (ret)
		return ret;

	if (wndw->ctxdma.parent) {
		ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
		if (IS_ERR(ctxdma)) {
			nouveau_bo_unpin(fb->nvbo);
			return PTR_ERR(ctxdma);
		}

		asyw->image.handle[0] = ctxdma->object.handle;
	}

	asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv);
	asyw->image.offset[0] = fb->nvbo->bo.offset;

	if (wndw->func->prepare) {
		asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
		if (IS_ERR(asyh))
			return PTR_ERR(asyh);

		wndw->func->prepare(wndw, asyh, asyw);
	}

	return 0;
}

static const struct drm_plane_helper_funcs
nv50_wndw_helper = {
	.prepare_fb = nv50_wndw_prepare_fb,
	.cleanup_fb = nv50_wndw_cleanup_fb,
	.atomic_check = nv50_wndw_atomic_check,
};

static void
nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
			       struct drm_plane_state *state)
{
	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
	__drm_atomic_helper_plane_destroy_state(&asyw->state);
	kfree(asyw);
}

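/* Duplicate the currently-armed state as the starting point for the new
 * atomic state, clearing the set/clr method masks so that only genuine
 * changes get flushed to the hardware.
 */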
static struct drm_plane_state *
nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
{
	struct nv50_wndw_atom *armw = nv50_wndw_atom(plane->state);
	struct nv50_wndw_atom *asyw;
	if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
		return NULL;
	__drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
	asyw->sema = armw->sema;
	asyw->ntfy = armw->ntfy;
	asyw->ilut = NULL;
	asyw->xlut = armw->xlut;
	asyw->image = armw->image;
	asyw->point = armw->point;
	asyw->clr.mask = 0;
	asyw->set.mask = 0;
	return &asyw->state;
}

static void
nv50_wndw_reset(struct drm_plane *plane)
{
	struct nv50_wndw_atom *asyw;

	if (WARN_ON(!(asyw = kzalloc(sizeof(*asyw), GFP_KERNEL))))
		return;

	if (plane->state)
		plane->funcs->atomic_destroy_state(plane, plane->state);
	plane->state = &asyw->state;
	plane->state->plane = plane;
	plane->state->rotation = DRM_MODE_ROTATE_0;
}

static void
nv50_wndw_destroy(struct drm_plane *plane)
{
	struct nv50_wndw *wndw = nv50_wndw(plane);
	struct nv50_wndw_ctxdma *ctxdma, *ctxtmp;

	list_for_each_entry_safe(ctxdma, ctxtmp, &wndw->ctxdma.list, head) {
		nv50_wndw_ctxdma_del(ctxdma);
	}

	nvif_notify_fini(&wndw->notify);
	nv50_dmac_destroy(&wndw->wimm);
	nv50_dmac_destroy(&wndw->wndw);

	nv50_lut_fini(&wndw->ilut);

	drm_plane_cleanup(&wndw->plane);
	kfree(wndw);
}

const struct drm_plane_funcs
nv50_wndw = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = nv50_wndw_destroy,
	.reset = nv50_wndw_reset,
	.atomic_duplicate_state = nv50_wndw_atomic_duplicate_state,
	.atomic_destroy_state = nv50_wndw_atomic_destroy_state,
};

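/* Nothing needs handling when the notifier fires yet; just keep the
 * notifier active.
 */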
static int
nv50_wndw_notify(struct nvif_notify *notify)
{
	return NVIF_NOTIFY_KEEP;
}

void
nv50_wndw_fini(struct nv50_wndw *wndw)
{
	nvif_notify_put(&wndw->notify);
}

void
nv50_wndw_init(struct nv50_wndw *wndw)
{
	nvif_notify_get(&wndw->notify);
}

int
nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
	       enum drm_plane_type type, const char *name, int index,
	       const u32 *format, u32 heads,
	       enum nv50_disp_interlock_type interlock_type, u32 interlock_data,
	       struct nv50_wndw **pwndw)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvif_mmu *mmu = &drm->client.mmu;
	struct nv50_disp *disp = nv50_disp(dev);
	struct nv50_wndw *wndw;
	int nformat;
	int ret;

	if (!(wndw = *pwndw = kzalloc(sizeof(*wndw), GFP_KERNEL)))
		return -ENOMEM;
	wndw->func = func;
	wndw->id = index;
	wndw->interlock.type = interlock_type;
	wndw->interlock.data = interlock_data;
	wndw->ctxdma.parent = &wndw->wndw.base.user;
	INIT_LIST_HEAD(&wndw->ctxdma.list);

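	/* The format array is zero-terminated; count its entries for
	 * drm_universal_plane_init().
	 */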
	for (nformat = 0; format[nformat]; nformat++);

	ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw,
				       format, nformat, NULL,
				       type, "%s-%d", name, index);
	if (ret) {
		kfree(*pwndw);
		*pwndw = NULL;
		return ret;
	}

	drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper);

	if (wndw->func->ilut) {
		ret = nv50_lut_init(disp, mmu, &wndw->ilut);
		if (ret)
			return ret;
	}

	wndw->notify.func = nv50_wndw_notify;
	return 0;
}

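/* Pick the first window channel class in the list below that the display
 * object supports, then create the window and its immediate channel.
 */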
int
nv50_wndw_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
	      struct nv50_wndw **pwndw)
{
	struct {
		s32 oclass;
		int version;
		int (*new)(struct nouveau_drm *, enum drm_plane_type,
			   int, s32, struct nv50_wndw **);
	} wndws[] = {
		{ GV100_DISP_WINDOW_CHANNEL_DMA, 0, wndwc37e_new },
		{}
	};
	struct nv50_disp *disp = nv50_disp(drm->dev);
	int cid, ret;

	cid = nvif_mclass(&disp->disp->object, wndws);
	if (cid < 0) {
		NV_ERROR(drm, "No supported window class\n");
		return cid;
	}

	ret = wndws[cid].new(drm, type, index, wndws[cid].oclass, pwndw);
	if (ret)
		return ret;

	return nv50_wimm_init(drm, *pwndw);
}