// SPDX-License-Identifier: GPL-2.0-only
/**************************************************************************
 * Copyright (c) 2007-2011, Intel Corporation.
 * All Rights Reserved.
 *
 **************************************************************************/

#include <linux/console.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tty.h>

#include <drm/drm.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>

#include "framebuffer.h"
#include "gtt.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"

static const struct drm_framebuffer_funcs psb_fb_funcs = {
	.destroy = drm_gem_fb_destroy,
	.create_handle = drm_gem_fb_create_handle,
};

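/*
 * Scale a 16-bit fbdev colour component down to the width of the hardware
 * colour field, e.g. CMAP_TOHW(0xffff, 5) == 0x1f and CMAP_TOHW(0, 5) == 0.
 */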
#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)

static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
			   unsigned blue, unsigned transp,
			   struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;
	struct drm_framebuffer *fb = fb_helper->fb;
	uint32_t v;

	if (!fb)
		return -ENOMEM;

	if (regno > 255)
		return 1;

	red = CMAP_TOHW(red, info->var.red.length);
	blue = CMAP_TOHW(blue, info->var.blue.length);
	green = CMAP_TOHW(green, info->var.green.length);
	transp = CMAP_TOHW(transp, info->var.transp.length);

	v = (red << info->var.red.offset) |
	    (green << info->var.green.offset) |
	    (blue << info->var.blue.offset) |
	    (transp << info->var.transp.offset);

	if (regno < 16) {
		switch (fb->format->cpp[0] * 8) {
		case 16:
			((uint32_t *) info->pseudo_palette)[regno] = v;
			break;
		case 24:
		case 32:
			((uint32_t *) info->pseudo_palette)[regno] = v;
			break;
		}
	}

	return 0;
}

static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;
	struct drm_framebuffer *fb = fb_helper->fb;
	struct drm_device *dev = fb->dev;
	struct gtt_range *gtt = to_gtt_range(fb->obj[0]);

	/*
	 *	We have to poke our nose in here. The core fb code assumes
	 *	panning is part of the hardware that can be invoked before
	 *	the actual fb is mapped. In our case that isn't quite true.
	 */
	if (gtt->npage) {
		/* GTT roll shifts in 4K pages, we need to shift the right
		   number of pages */
		int pages = info->fix.line_length >> 12;
		psb_gtt_roll(dev, gtt, var->yoffset * pages);
	}
	return 0;
}

static vm_fault_t psbfb_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_framebuffer *fb = vma->vm_private_data;
	struct drm_device *dev = fb->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gtt_range *gtt = to_gtt_range(fb->obj[0]);
	int page_num;
	int i;
	unsigned long address;
	vm_fault_t ret = VM_FAULT_SIGBUS;
	unsigned long pfn;
	unsigned long phys_addr = (unsigned long)dev_priv->stolen_base +
				  gtt->offset;

	page_num = vma_pages(vma);
	address = vmf->address - (vmf->pgoff << PAGE_SHIFT);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

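	/*
	 * The framebuffer lives in stolen memory, so on the first fault we
	 * insert every page of the mapping rather than just the page that
	 * faulted.
	 */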
	for (i = 0; i < page_num; i++) {
		pfn = (phys_addr >> PAGE_SHIFT);

		ret = vmf_insert_mixed(vma, address,
				__pfn_to_pfn_t(pfn, PFN_DEV));
		if (unlikely(ret & VM_FAULT_ERROR))
			break;
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
	}
	return ret;
}

static void psbfb_vm_open(struct vm_area_struct *vma)
{
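	/* nothing to do */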
}

static void psbfb_vm_close(struct vm_area_struct *vma)
{
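	/* nothing to do */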
}

static const struct vm_operations_struct psbfb_vm_ops = {
	.fault	= psbfb_vm_fault,
	.open	= psbfb_vm_open,
	.close	= psbfb_vm_close
};

static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct drm_fb_helper *fb_helper = info->par;
	struct drm_framebuffer *fb = fb_helper->fb;

	if (vma->vm_pgoff != 0)
		return -EINVAL;
	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
		return -EINVAL;

	/*
	 * If this is a GEM object then info->screen_base is the virtual
	 * kernel remapping of the object. FIXME: Review if this is
	 * suitable for our mmap work
	 */
	vma->vm_ops = &psbfb_vm_ops;
	vma->vm_private_data = (void *)fb;
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}

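/*
 * Three sets of fbdev ops are provided: one that uses the 2D engine for
 * copyarea/sync, one that adds GTT-roll based panning, and a plain
 * unaccelerated set for when neither is available.
 */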
static struct fb_ops psbfb_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_setcolreg = psbfb_setcolreg,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = psbfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
	.fb_mmap = psbfb_mmap,
	.fb_sync = psbfb_sync,
};

static struct fb_ops psbfb_roll_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_setcolreg = psbfb_setcolreg,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = drm_fb_helper_cfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
	.fb_pan_display = psbfb_pan,
	.fb_mmap = psbfb_mmap,
};

static struct fb_ops psbfb_unaccel_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_setcolreg = psbfb_setcolreg,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = drm_fb_helper_cfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
	.fb_mmap = psbfb_mmap,
};

/**
 *	psb_framebuffer_init	-	initialize a framebuffer
 *	@dev: our DRM device
 *	@fb: framebuffer to set up
 *	@mode_cmd: mode description
 *	@obj: backing GEM object
 *
 *	Configure and fill in the boilerplate for our framebuffer. Return
 *	0 on success or an error code if we fail.
 */
static int psb_framebuffer_init(struct drm_device *dev,
					struct drm_framebuffer *fb,
					const struct drm_mode_fb_cmd2 *mode_cmd,
					struct drm_gem_object *obj)
{
	const struct drm_format_info *info;
	int ret;

	/*
	 * Reject unknown formats, YUV formats, and formats with more than
	 * 4 bytes per pixel.
	 */
	info = drm_get_format_info(dev, mode_cmd);
	if (!info || !info->depth || info->cpp[0] > 4)
		return -EINVAL;

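	/* The display hardware requires the pitch to be 64 byte aligned */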
	if (mode_cmd->pitches[0] & 63)
		return -EINVAL;

	drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
	fb->obj[0] = obj;
	ret = drm_framebuffer_init(dev, fb, &psb_fb_funcs);
	if (ret) {
		dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
		return ret;
	}
	return 0;
}

/**
 *	psb_framebuffer_create	-	create a framebuffer backed by a GEM object
 *	@dev: our DRM device
 *	@mode_cmd: the description of the requested mode
 *	@obj: the backing GEM object
 *
 *	Create a framebuffer object backed by the GEM object, and fill in the
 *	boilerplate required.
 *
 *	TODO: review object references
 */

static struct drm_framebuffer *psb_framebuffer_create
			(struct drm_device *dev,
			 const struct drm_mode_fb_cmd2 *mode_cmd,
			 struct drm_gem_object *obj)
{
	struct drm_framebuffer *fb;
	int ret;

	fb = kzalloc(sizeof(*fb), GFP_KERNEL);
	if (!fb)
		return ERR_PTR(-ENOMEM);

	ret = psb_framebuffer_init(dev, fb, mode_cmd, obj);
	if (ret) {
		kfree(fb);
		return ERR_PTR(ret);
	}
	return fb;
}

/**
 *	psbfb_alloc		-	allocate frame buffer memory
 *	@dev: the DRM device
 *	@aligned_size: space needed
 *
 *	Allocate the frame buffer. In the usual case we get a GTT range that
 *	is stolen memory backed and life is simple. If there isn't sufficient
 *	stolen memory we fail, as we don't have the virtual mapping space to
 *	vmap a fallback and the kernel console code can't handle non-linear
 *	framebuffers.
 *
 *	Revisit this if and when the framebuffer layer grows that ability.
 */
static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
{
	struct gtt_range *backing;
	/* Begin by trying to use stolen memory backing */
	backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1, PAGE_SIZE);
	if (backing) {
		drm_gem_private_object_init(dev, &backing->gem, aligned_size);
		return backing;
	}
	return NULL;
}

/**
 *	psbfb_create		-	create a framebuffer
 *	@fb_helper: the fbdev helper
 *	@sizes: specification of the layout
 *
 *	Create a framebuffer to the specifications provided.
 */
static int psbfb_create(struct drm_fb_helper *fb_helper,
				struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = fb_helper->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd2 mode_cmd;
	int size;
	int ret;
	struct gtt_range *backing;
	u32 bpp, depth;
	int gtt_roll = 0;
	int pitch_lines = 0;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	bpp = sizes->surface_bpp;
	depth = sizes->surface_depth;

	/* No 24bit packed */
	if (bpp == 24)
		bpp = 32;

	do {
		/*
		 * Acceleration via the GTT requires pitch to be
		 * power of two aligned. Preferably page but less
		 * is ok with some fonts
		 */
		mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 4096 >> pitch_lines);

		size = mode_cmd.pitches[0] * mode_cmd.height;
		size = ALIGN(size, PAGE_SIZE);

		/* Allocate the fb in the GTT with stolen page backing */
		backing = psbfb_alloc(dev, size);

		if (pitch_lines)
			pitch_lines *= 2;
		else
			pitch_lines = 1;
		gtt_roll++;
	} while (backing == NULL && pitch_lines <= 16);

	/* The final pitch we accepted if we succeeded */
	pitch_lines /= 2;

	if (backing == NULL) {
		/*
		 *	We couldn't get the space we wanted, fall back to the
		 *	display engine requirement instead.  The HW requires
		 *	the pitch to be 64 byte aligned
		 */

		gtt_roll = 0;	/* Don't use GTT accelerated scrolling */
		pitch_lines = 64;

		mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 64);

		size = mode_cmd.pitches[0] * mode_cmd.height;
		size = ALIGN(size, PAGE_SIZE);

		/* Allocate the framebuffer in the GTT with stolen page backing */
		backing = psbfb_alloc(dev, size);
		if (backing == NULL)
			return -ENOMEM;
	}

	memset(dev_priv->vram_addr + backing->offset, 0, size);

	info = drm_fb_helper_alloc_fbi(fb_helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}

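	/* Turn the legacy bpp/depth pair into a fourcc for the KMS framebuffer */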
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	fb = psb_framebuffer_create(dev, &mode_cmd, &backing->gem);
	if (IS_ERR(fb)) {
		ret = PTR_ERR(fb);
		goto out;
	}

	fb_helper->fb = fb;

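	/* Pick the fb_ops that match the acceleration this allocation allows */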
	if (dev_priv->ops->accel_2d && pitch_lines > 8)	/* 2D engine */
		info->fbops = &psbfb_ops;
	else if (gtt_roll) {	/* GTT rolling seems best */
		info->fbops = &psbfb_roll_ops;
		info->flags |= FBINFO_HWACCEL_YPAN;
	} else	/* Software */
		info->fbops = &psbfb_unaccel_ops;

	info->fix.smem_start = dev->mode_config.fb_base;
	info->fix.smem_len = size;
	info->fix.ywrapstep = gtt_roll;
	info->fix.ypanstep = 0;

	/* Access stolen memory directly */
	info->screen_base = dev_priv->vram_addr + backing->offset;
	info->screen_size = size;

	if (dev_priv->gtt.stolen_size) {
		info->apertures->ranges[0].base = dev->mode_config.fb_base;
		info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
	}

	drm_fb_helper_fill_info(info, fb_helper, sizes);

	info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
	info->fix.mmio_len = pci_resource_len(dev->pdev, 0);

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	dev_dbg(dev->dev, "allocated %dx%d fb\n", fb->width, fb->height);

	return 0;
out:
	psb_gtt_free_range(dev, backing);
	return ret;
}

/**
 *	psb_user_framebuffer_create	-	create framebuffer
 *	@dev: our DRM device
 *	@filp: client file
 *	@cmd: mode request
 *
 *	Create a new framebuffer backed by a userspace GEM object
 */
static struct drm_framebuffer *psb_user_framebuffer_create
			(struct drm_device *dev, struct drm_file *filp,
			 const struct drm_mode_fb_cmd2 *cmd)
{
	struct drm_gem_object *obj;

	/*
	 *	Find the GEM object and thus the gtt range object that is
	 *	to back this space
	 */
	obj = drm_gem_object_lookup(filp, cmd->handles[0]);
	if (obj == NULL)
		return ERR_PTR(-ENOENT);

	/* Let the core code do all the work */
	return psb_framebuffer_create(dev, cmd, obj);
}

static int psbfb_probe(struct drm_fb_helper *fb_helper,
				struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = fb_helper->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned int fb_size;
	int bytespp;

	bytespp = sizes->surface_bpp / 8;
	if (bytespp == 3)	/* no 24bit packed */
		bytespp = 4;

	/* If the mode will not fit in 32bit then switch to 16bit to get
	   a console on full resolution. The X mode setting server will
	   allocate its own 32bit GEM framebuffer */
	fb_size = ALIGN(sizes->surface_width * bytespp, 64) *
		  sizes->surface_height;
	fb_size = ALIGN(fb_size, PAGE_SIZE);

	if (fb_size > dev_priv->vram_stolen_size) {
		sizes->surface_bpp = 16;
		sizes->surface_depth = 16;
	}

	return psbfb_create(fb_helper, sizes);
}

static const struct drm_fb_helper_funcs psb_fb_helper_funcs = {
	.fb_probe = psbfb_probe,
};

static int psb_fbdev_destroy(struct drm_device *dev,
			     struct drm_fb_helper *fb_helper)
{
	struct drm_framebuffer *fb = fb_helper->fb;

	drm_fb_helper_unregister_fbi(fb_helper);

	drm_fb_helper_fini(fb_helper);
	drm_framebuffer_unregister_private(fb);
	drm_framebuffer_cleanup(fb);

	if (fb->obj[0])
		drm_gem_object_put_unlocked(fb->obj[0]);
	kfree(fb);

	return 0;
}

int psb_fbdev_init(struct drm_device *dev)
{
	struct drm_fb_helper *fb_helper;
	struct drm_psb_private *dev_priv = dev->dev_private;
	int ret;

	fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
	if (!fb_helper) {
		dev_err(dev->dev, "no memory\n");
		return -ENOMEM;
	}

	dev_priv->fb_helper = fb_helper;

	drm_fb_helper_prepare(dev, fb_helper, &psb_fb_helper_funcs);

	ret = drm_fb_helper_init(dev, fb_helper, INTELFB_CONN_LIMIT);
	if (ret)
		goto free;

	ret = drm_fb_helper_single_add_all_connectors(fb_helper);
	if (ret)
		goto fini;

	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(dev);

	ret = drm_fb_helper_initial_config(fb_helper, 32);
	if (ret)
		goto fini;

	return 0;

fini:
	drm_fb_helper_fini(fb_helper);
free:
	kfree(fb_helper);
	return ret;
}

static void psb_fbdev_fini(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;

	if (!dev_priv->fb_helper)
		return;

	psb_fbdev_destroy(dev, dev_priv->fb_helper);
	kfree(dev_priv->fb_helper);
	dev_priv->fb_helper = NULL;
}

static const struct drm_mode_config_funcs psb_mode_funcs = {
	.fb_create = psb_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
};

static void psb_setup_outputs(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct drm_connector *connector;

	drm_mode_create_scaling_mode_property(dev);

	/* It is ok for this to fail - we just don't get backlight control */
	if (!dev_priv->backlight_property)
		dev_priv->backlight_property = drm_property_create_range(dev, 0,
							"backlight", 0, 100);
	dev_priv->ops->output_init(dev);

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    head) {
		struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
		struct drm_encoder *encoder = &gma_encoder->base;
		int crtc_mask = 0, clone_mask = 0;

		/*
		 * Valid CRTCs for this output type and the output types
		 * it may be cloned with
		 */
		switch (gma_encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			crtc_mask = (1 << 0);
			clone_mask = (1 << INTEL_OUTPUT_ANALOG);
			break;
		case INTEL_OUTPUT_SDVO:
			crtc_mask = dev_priv->ops->sdvo_mask;
			clone_mask = (1 << INTEL_OUTPUT_SDVO);
			break;
		case INTEL_OUTPUT_LVDS:
			crtc_mask = dev_priv->ops->lvds_mask;
			clone_mask = (1 << INTEL_OUTPUT_LVDS);
			break;
		case INTEL_OUTPUT_MIPI:
			crtc_mask = (1 << 0);
			clone_mask = (1 << INTEL_OUTPUT_MIPI);
			break;
		case INTEL_OUTPUT_MIPI2:
			crtc_mask = (1 << 2);
			clone_mask = (1 << INTEL_OUTPUT_MIPI2);
			break;
		case INTEL_OUTPUT_HDMI:
			crtc_mask = dev_priv->ops->hdmi_mask;
			clone_mask = (1 << INTEL_OUTPUT_HDMI);
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
			crtc_mask = (1 << 0) | (1 << 1);
			clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT);
			break;
		case INTEL_OUTPUT_EDP:
			crtc_mask = (1 << 1);
			clone_mask = (1 << INTEL_OUTPUT_EDP);
			break;
		}
		encoder->possible_crtcs = crtc_mask;
		encoder->possible_clones =
		    gma_connector_clones(dev, clone_mask);
	}
}


void psb_modeset_init(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
	int i;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.funcs = &psb_mode_funcs;

	/* set memory base */
	/* Oaktrail and Poulsbo should use BAR 2 */
	pci_read_config_dword(dev->pdev, PSB_BSM, (u32 *)
					&(dev->mode_config.fb_base));

	/* num pipes is 2 for PSB but 1 for Mrst */
	for (i = 0; i < dev_priv->num_pipe; i++)
		psb_intel_crtc_init(dev, i, mode_dev);

	dev->mode_config.max_width = 4096;
	dev->mode_config.max_height = 4096;

	psb_setup_outputs(dev);

	if (dev_priv->ops->errata)
		dev_priv->ops->errata(dev);

	dev_priv->modeset = true;
}

void psb_modeset_cleanup(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;

	if (dev_priv->modeset) {
		drm_kms_helper_poll_fini(dev);
		psb_fbdev_fini(dev);
		drm_mode_config_cleanup(dev);
	}
}