/*
 * Copyright (C) 2008 Maarten Maathuis.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <acpi/video.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "nouveau_crtc.h"
#include "nouveau_gem.h"
#include "nouveau_connector.h"
#include "nv50_display.h"

#include <nvif/class.h>
#include <nvif/if0011.h>
#include <nvif/if0013.h>
#include <dispnv50/crc.h>

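/* Allow/block delivery of the per-head nvif vblank event.  These helpers
 * back nouveau's DRM enable_vblank()/disable_vblank() hooks.
 */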
int
nouveau_display_vblank_enable(struct drm_crtc *crtc)
{
	struct nouveau_crtc *nv_crtc;

	nv_crtc = nouveau_crtc(crtc);
	nvif_event_allow(&nv_crtc->vblank);

	return 0;
}

void
nouveau_display_vblank_disable(struct drm_crtc *crtc)
{
	struct nouveau_crtc *nv_crtc;

	nv_crtc = nouveau_crtc(crtc);
	nvif_event_block(&nv_crtc->vblank);
}

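/* Fold the raw scanline counter into the negative range while the line is
 * inside the vertical blanking interval, which is the convention the DRM
 * scanout-position code expects; both orderings of vblank start/end are
 * handled.
 */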
static inline int
calc(int blanks, int blanke, int total, int line)
{
	if (blanke >= blanks) {
		if (line >= blanks)
			line -= total;
	} else {
		if (line >= blanks)
			line -= total;
		line -= blanke + 1;
	}
	return line;
}

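/* Query the scanout position through the NVIF_HEAD_V0_SCANOUTPOS method,
 * retrying for up to 20 scanlines while the returned vline is still zero,
 * then convert the result into the vpos/hpos/timestamps used by the DRM
 * vblank timestamping helpers.
 */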
static bool
nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
				ktime_t *stime, ktime_t *etime)
{
	struct drm_vblank_crtc *vblank = &crtc->dev->vblank[drm_crtc_index(crtc)];
	struct nvif_head *head = &nouveau_crtc(crtc)->head;
	struct nvif_head_scanoutpos_v0 args;
	int retry = 20;
	bool ret = false;

	args.version = 0;

	do {
		ret = nvif_mthd(&head->object, NVIF_HEAD_V0_SCANOUTPOS, &args, sizeof(args));
		if (ret != 0)
			return false;

		if (args.vline) {
			ret = true;
			break;
		}

		if (retry) ndelay(vblank->linedur_ns);
	} while (retry--);

	*hpos = args.hline;
	*vpos = calc(args.vblanks, args.vblanke, args.vtotal, args.vline);
	if (stime) *stime = ns_to_ktime(args.time[0]);
	if (etime) *etime = ns_to_ktime(args.time[1]);

	return ret;
}

bool
nouveau_display_scanoutpos(struct drm_crtc *crtc,
			   bool in_vblank_irq, int *vpos, int *hpos,
			   ktime_t *stime, ktime_t *etime,
			   const struct drm_display_mode *mode)
{
	return nouveau_display_scanoutpos_head(crtc, vpos, hpos,
					       stime, etime);
}

static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
	.destroy = drm_gem_fb_destroy,
	.create_handle = drm_gem_fb_create_handle,
};

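/* Translate a DRM format modifier into the tile_mode/kind pair used by the
 * hardware and the rest of nouveau.  The modifier must already have been
 * validated; this helper only decodes it.
 */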
static void
nouveau_decode_mod(struct nouveau_drm *drm,
		   uint64_t modifier,
		   uint32_t *tile_mode,
		   uint8_t *kind)
{
	struct nouveau_display *disp = nouveau_display(drm->dev);
	BUG_ON(!tile_mode || !kind);

	if (modifier == DRM_FORMAT_MOD_LINEAR) {
		/* tile_mode will not be used in this case */
		*tile_mode = 0;
		*kind = 0;
	} else {
		/*
		 * Extract the block height and kind from the corresponding
		 * modifier fields.  See drm_fourcc.h for details.
		 */

		if ((modifier & (0xffull << 12)) == 0ull) {
			/* Legacy modifier.  Translate to this dev's 'kind.' */
			modifier |= disp->format_modifiers[0] & (0xffull << 12);
		}

		*tile_mode = (uint32_t)(modifier & 0xF);
		*kind = (uint8_t)((modifier >> 12) & 0xFF);

		if (drm->client.device.info.chipset >= 0xc0)
			*tile_mode <<= 4;
	}
}

void
nouveau_framebuffer_get_layout(struct drm_framebuffer *fb,
			       uint32_t *tile_mode,
			       uint8_t *kind)
{
	if (fb->flags & DRM_MODE_FB_MODIFIERS) {
		struct nouveau_drm *drm = nouveau_drm(fb->dev);

		nouveau_decode_mod(drm, fb->modifier, tile_mode, kind);
	} else {
		const struct nouveau_bo *nvbo = nouveau_gem_object(fb->obj[0]);

		*tile_mode = nvbo->mode;
		*kind = nvbo->kind;
	}
}

static const u64 legacy_modifiers[] = {
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
	DRM_FORMAT_MOD_INVALID
};

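/* Check that the modifier is one the display engine advertised (or one of
 * the legacy 16Bx2 block modifiers still accepted for compatibility) before
 * decoding it.
 */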
static int
nouveau_validate_decode_mod(struct nouveau_drm *drm,
			    uint64_t modifier,
			    uint32_t *tile_mode,
			    uint8_t *kind)
{
	struct nouveau_display *disp = nouveau_display(drm->dev);
	int mod;

	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		return -EINVAL;
	}

	BUG_ON(!disp->format_modifiers);

	for (mod = 0;
	     (disp->format_modifiers[mod] != DRM_FORMAT_MOD_INVALID) &&
	     (disp->format_modifiers[mod] != modifier);
	     mod++);

	if (disp->format_modifiers[mod] == DRM_FORMAT_MOD_INVALID) {
		for (mod = 0;
		     (legacy_modifiers[mod] != DRM_FORMAT_MOD_INVALID) &&
		     (legacy_modifiers[mod] != modifier);
		     mod++);
		if (legacy_modifiers[mod] == DRM_FORMAT_MOD_INVALID)
			return -EINVAL;
	}

	nouveau_decode_mod(drm, modifier, tile_mode, kind);

	return 0;
}

static inline uint32_t
nouveau_get_width_in_blocks(uint32_t stride)
{
	/* GOBs per block in the x direction is always one, and GOBs are
	 * 64 bytes wide
	 */
	static const uint32_t log_block_width = 6;

	return (stride + (1 << log_block_width) - 1) >> log_block_width;
}

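/* A GOB is 4 scanlines tall before Fermi and 8 scanlines tall from Fermi
 * onwards; a block is 2^log_block_height_in_gobs GOBs tall.
 */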
static inline uint32_t
nouveau_get_height_in_blocks(struct nouveau_drm *drm,
			     uint32_t height,
			     uint32_t log_block_height_in_gobs)
{
	uint32_t log_gob_height;
	uint32_t log_block_height;

	BUG_ON(drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA);

	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI)
		log_gob_height = 2;
	else
		log_gob_height = 3;

	log_block_height = log_block_height_in_gobs + log_gob_height;

	return (height + (1 << log_block_height) - 1) >> log_block_height;
}

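/* Verify that a block-linear plane with the given tile_mode fits inside the
 * backing buffer object, taking the per-generation GOB size and block height
 * into account.
 */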
static int
nouveau_check_bl_size(struct nouveau_drm *drm, struct nouveau_bo *nvbo,
		      uint32_t offset, uint32_t stride, uint32_t h,
		      uint32_t tile_mode)
{
	uint32_t gob_size, bw, bh;
	uint64_t bl_size;

	BUG_ON(drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA);

	if (drm->client.device.info.chipset >= 0xc0) {
		if (tile_mode & 0xF)
			return -EINVAL;
		tile_mode >>= 4;
	}

	if (tile_mode & 0xFFFFFFF0)
		return -EINVAL;

	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI)
		gob_size = 256;
	else
		gob_size = 512;

	bw = nouveau_get_width_in_blocks(stride);
	bh = nouveau_get_height_in_blocks(drm, h, tile_mode);

	bl_size = bw * bh * (1 << tile_mode) * gob_size;

	DRM_DEBUG_KMS("offset=%u stride=%u h=%u tile_mode=0x%02x bw=%u bh=%u gob_size=%u bl_size=%llu size=%zu\n",
		      offset, stride, h, tile_mode, bw, bh, gob_size, bl_size,
		      nvbo->bo.base.size);

	if (bl_size + offset > nvbo->bo.base.size)
		return -ERANGE;

	return 0;
}

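/* Validate a framebuffer request (pre-NV50 YUV constraints, format modifier,
 * per-plane size checks) and wrap the GEM object in a drm_framebuffer.
 */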
int
nouveau_framebuffer_new(struct drm_device *dev,
			const struct drm_mode_fb_cmd2 *mode_cmd,
			struct drm_gem_object *gem,
			struct drm_framebuffer **pfb)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct drm_framebuffer *fb;
	const struct drm_format_info *info;
	unsigned int height, i;
	uint32_t tile_mode;
	uint8_t kind;
	int ret;

	/* YUV overlays have special requirements pre-NV50 */
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA &&
	    (mode_cmd->pixel_format == DRM_FORMAT_YUYV ||
	     mode_cmd->pixel_format == DRM_FORMAT_UYVY ||
	     mode_cmd->pixel_format == DRM_FORMAT_NV12 ||
	     mode_cmd->pixel_format == DRM_FORMAT_NV21) &&
	    (mode_cmd->pitches[0] & 0x3f || /* align 64 */
	     mode_cmd->pitches[0] >= 0x10000 || /* at most 64k pitch */
	     (mode_cmd->pitches[1] && /* pitches for planes must match */
	      mode_cmd->pitches[0] != mode_cmd->pitches[1]))) {
		DRM_DEBUG_KMS("Unsuitable framebuffer: format: %p4cc; pitches: 0x%x 0x%x\n",
			      &mode_cmd->pixel_format,
			      mode_cmd->pitches[0], mode_cmd->pitches[1]);
		return -EINVAL;
	}

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		if (nouveau_validate_decode_mod(drm, mode_cmd->modifier[0],
						&tile_mode, &kind)) {
			DRM_DEBUG_KMS("Unsupported modifier: 0x%llx\n",
				      mode_cmd->modifier[0]);
			return -EINVAL;
		}
	} else {
		tile_mode = nvbo->mode;
		kind = nvbo->kind;
	}

	info = drm_get_format_info(dev, mode_cmd);

	for (i = 0; i < info->num_planes; i++) {
		height = drm_format_info_plane_height(info,
						      mode_cmd->height,
						      i);

		if (kind) {
			ret = nouveau_check_bl_size(drm, nvbo,
						    mode_cmd->offsets[i],
						    mode_cmd->pitches[i],
						    height, tile_mode);
			if (ret)
				return ret;
		} else {
			uint32_t size = mode_cmd->pitches[i] * height;

			if (size + mode_cmd->offsets[i] > nvbo->bo.base.size)
				return -ERANGE;
		}
	}

	if (!(fb = *pfb = kzalloc(sizeof(*fb), GFP_KERNEL)))
		return -ENOMEM;

	drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
	fb->obj[0] = gem;

	ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
	if (ret)
		kfree(fb);
	return ret;
}

struct drm_framebuffer *
nouveau_user_framebuffer_create(struct drm_device *dev,
				struct drm_file *file_priv,
				const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_framebuffer *fb;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
	if (!gem)
		return ERR_PTR(-ENOENT);

	ret = nouveau_framebuffer_new(dev, mode_cmd, gem, &fb);
	if (ret == 0)
		return fb;

	drm_gem_object_put(gem);
	return ERR_PTR(ret);
}

static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
	.fb_create = nouveau_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
};


struct nouveau_drm_prop_enum_list {
	u8 gen_mask;
	int type;
	char *name;
};

static struct nouveau_drm_prop_enum_list underscan[] = {
	{ 6, UNDERSCAN_AUTO, "auto" },
	{ 6, UNDERSCAN_OFF, "off" },
	{ 6, UNDERSCAN_ON, "on" },
	{}
};

static struct nouveau_drm_prop_enum_list dither_mode[] = {
	{ 7, DITHERING_MODE_AUTO, "auto" },
	{ 7, DITHERING_MODE_OFF, "off" },
	{ 1, DITHERING_MODE_ON, "on" },
	{ 6, DITHERING_MODE_STATIC2X2, "static 2x2" },
	{ 6, DITHERING_MODE_DYNAMIC2X2, "dynamic 2x2" },
	{ 4, DITHERING_MODE_TEMPORAL, "temporal" },
	{}
};

static struct nouveau_drm_prop_enum_list dither_depth[] = {
	{ 6, DITHERING_DEPTH_AUTO, "auto" },
	{ 6, DITHERING_DEPTH_6BPC, "6 bpc" },
	{ 6, DITHERING_DEPTH_8BPC, "8 bpc" },
	{}
};

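/* Create an enum property containing only the entries whose gen_mask has the
 * bit for the given display generation set, e.g.
 *
 *	PROP_ENUM(disp->dithering_mode, gen, "dithering mode", dither_mode);
 */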
#define PROP_ENUM(p,gen,n,list) do {                                           \
	struct nouveau_drm_prop_enum_list *l = (list);                         \
	int c = 0;                                                             \
	while (l->gen_mask) {                                                  \
		if (l->gen_mask & (1 << (gen)))                                \
			c++;                                                   \
		l++;                                                           \
	}                                                                      \
	if (c) {                                                               \
		p = drm_property_create(dev, DRM_MODE_PROP_ENUM, n, c);        \
		l = (list);                                                    \
		while (p && l->gen_mask) {                                     \
			if (l->gen_mask & (1 << (gen))) {                      \
				drm_property_add_enum(p, l->type, l->name);    \
			}                                                      \
			l++;                                                   \
		}                                                              \
	}                                                                      \
} while(0)

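/* Re-run hotplug detection on all connectors (e.g. after resume) by marking
 * every connector pending and kicking the HPD worker.
 */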
void
nouveau_display_hpd_resume(struct drm_device *dev)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	spin_lock_irq(&drm->hpd_lock);
	drm->hpd_pending = ~0;
	spin_unlock_irq(&drm->hpd_lock);

	schedule_work(&drm->hpd_work);
}

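/* Deferred hotplug handler: re-probe every connector with a pending event and
 * send hotplug uevents for those whose state actually changed.
 */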
static void
nouveau_display_hpd_work(struct work_struct *work)
{
	struct nouveau_drm *drm = container_of(work, typeof(*drm), hpd_work);
	struct drm_device *dev = drm->dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	u32 pending;
	int changed = 0;
	struct drm_connector *first_changed_connector = NULL;

	pm_runtime_get_sync(dev->dev);

	spin_lock_irq(&drm->hpd_lock);
	pending = drm->hpd_pending;
	drm->hpd_pending = 0;
	spin_unlock_irq(&drm->hpd_lock);

	/* Nothing to do, exit early without updating the last busy counter */
	if (!pending)
		goto noop;

	mutex_lock(&dev->mode_config.mutex);
	drm_connector_list_iter_begin(dev, &conn_iter);

	nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
		struct nouveau_connector *nv_connector = nouveau_connector(connector);
		enum drm_connector_status old_status = connector->status;
		u64 bits, old_epoch_counter = connector->epoch_counter;

		if (!(pending & drm_connector_mask(connector)))
			continue;

		spin_lock_irq(&drm->hpd_lock);
		bits = nv_connector->hpd_pending;
		nv_connector->hpd_pending = 0;
		spin_unlock_irq(&drm->hpd_lock);

		drm_dbg_kms(dev, "[CONNECTOR:%d:%s] plug:%d unplug:%d irq:%d\n",
			    connector->base.id, connector->name,
			    !!(bits & NVIF_CONN_EVENT_V0_PLUG),
			    !!(bits & NVIF_CONN_EVENT_V0_UNPLUG),
			    !!(bits & NVIF_CONN_EVENT_V0_IRQ));

		if (bits & NVIF_CONN_EVENT_V0_IRQ) {
			if (nouveau_dp_link_check(nv_connector))
				continue;
		}

		connector->status = drm_helper_probe_detect(connector, NULL, false);
		if (old_epoch_counter == connector->epoch_counter)
			continue;

		changed++;
		if (!first_changed_connector) {
			drm_connector_get(connector);
			first_changed_connector = connector;
		}

		drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n",
			    connector->base.id, connector->name,
			    drm_get_connector_status_name(old_status),
			    drm_get_connector_status_name(connector->status),
			    old_epoch_counter, connector->epoch_counter);
	}

	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);

	if (changed == 1)
		drm_kms_helper_connector_hotplug_event(first_changed_connector);
	else if (changed > 0)
		drm_kms_helper_hotplug_event(dev);

	if (first_changed_connector)
		drm_connector_put(first_changed_connector);

	pm_runtime_mark_last_busy(drm->dev->dev);
noop:
	pm_runtime_put_autosuspend(dev->dev);
}

#ifdef CONFIG_ACPI

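/* ACPI video notifier: on ACPI_VIDEO_NOTIFY_PROBE, make sure the GPU is (or
 * is becoming) awake so it re-probes its outputs itself, and return
 * NOTIFY_BAD so acpi-video doesn't also emit a keypress for the event.
 */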
static int
nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val,
			  void *data)
{
	struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb);
	struct acpi_bus_event *info = data;
	int ret;

	if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) {
		if (info->type == ACPI_VIDEO_NOTIFY_PROBE) {
			ret = pm_runtime_get(drm->dev->dev);
			if (ret == 1 || ret == -EACCES) {
				/* If the GPU is already awake, or in a state
				 * where we can't wake it up, it can handle
				 * its own hotplug events.
				 */
				pm_runtime_put_autosuspend(drm->dev->dev);
			} else if (ret == 0 || ret == -EINPROGRESS) {
				/* We've started resuming the GPU already, so
				 * it will handle scheduling a full reprobe
				 * itself
				 */
				NV_DEBUG(drm, "ACPI requested connector reprobe\n");
				pm_runtime_put_noidle(drm->dev->dev);
			} else {
				NV_WARN(drm, "Dropped ACPI reprobe event due to RPM error: %d\n",
					ret);
			}

			/* acpi-video should not generate keypresses for this */
			return NOTIFY_BAD;
		}
	}

	return NOTIFY_DONE;
}
#endif

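/* Bring the display up on load or resume: re-enable per-connector hotplug
 * events, call the backend's init hook, then start polling for connectors
 * without HPD support.
 */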
int
nouveau_display_init(struct drm_device *dev, bool resume, bool runtime)
{
	struct nouveau_display *disp = nouveau_display(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	int ret;

	/*
	 * Enable hotplug interrupts (done as early as possible, since we need
	 * them for MST)
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
		struct nouveau_connector *conn = nouveau_connector(connector);
		nvif_event_allow(&conn->hpd);
		nvif_event_allow(&conn->irq);
	}
	drm_connector_list_iter_end(&conn_iter);

	ret = disp->init(dev, resume, runtime);
	if (ret)
		return ret;

	/* enable connector detection and polling for connectors without HPD
	 * support
	 */
	drm_kms_helper_poll_enable(dev);

	return ret;
}

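/* Tear the display down on unload or suspend: disable all outputs unless we
 * are suspending, block hotplug events, flush the HPD worker and stop
 * connector polling before calling the backend's fini hook.
 */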
void
nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime)
{
	struct nouveau_display *disp = nouveau_display(dev);
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	if (!suspend) {
		if (drm_drv_uses_atomic_modeset(dev))
			drm_atomic_helper_shutdown(dev);
		else
			drm_helper_force_disable_all(dev);
	}

	/* disable hotplug interrupts */
	drm_connector_list_iter_begin(dev, &conn_iter);
	nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
		struct nouveau_connector *conn = nouveau_connector(connector);
		nvif_event_block(&conn->irq);
		nvif_event_block(&conn->hpd);
	}
	drm_connector_list_iter_end(&conn_iter);

	if (!runtime)
		cancel_work_sync(&drm->hpd_work);

	drm_kms_helper_poll_disable(dev);
	disp->fini(dev, runtime, suspend);
}

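/* Create nouveau's connector properties; which ones are exposed depends on
 * the display generation (pre-NV50, NV50, GF110+).
 */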
static void
nouveau_display_create_properties(struct drm_device *dev)
{
	struct nouveau_display *disp = nouveau_display(dev);
	int gen;

	if (disp->disp.object.oclass < NV50_DISP)
		gen = 0;
	else
	if (disp->disp.object.oclass < GF110_DISP)
		gen = 1;
	else
		gen = 2;

	PROP_ENUM(disp->dithering_mode, gen, "dithering mode", dither_mode);
	PROP_ENUM(disp->dithering_depth, gen, "dithering depth", dither_depth);
	PROP_ENUM(disp->underscan_property, gen, "underscan", underscan);

	disp->underscan_hborder_property =
		drm_property_create_range(dev, 0, "underscan hborder", 0, 128);

	disp->underscan_vborder_property =
		drm_property_create_range(dev, 0, "underscan vborder", 0, 128);

	if (gen < 1)
		return;

	/* -90..+90 */
	disp->vibrant_hue_property =
		drm_property_create_range(dev, 0, "vibrant hue", 0, 180);

	/* -100..+100 */
	disp->color_vibrance_property =
		drm_property_create_range(dev, 0, "color vibrance", 0, 200);
}

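/* One-time KMS setup: per-generation mode_config limits, the nvif display
 * object and the nv04/nv50 backend, vblank/CRC support, and the HPD worker
 * plus the ACPI notifier.
 */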
int
nouveau_display_create(struct drm_device *dev)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_display *disp;
	int ret;

	disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL);
	if (!disp)
		return -ENOMEM;

	drm_mode_config_init(dev);
	drm_mode_create_scaling_mode_property(dev);
	drm_mode_create_dvi_i_properties(dev);

	dev->mode_config.funcs = &nouveau_mode_config_funcs;

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_CELSIUS) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI) {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	} else {
		dev->mode_config.max_width = 16384;
		dev->mode_config.max_height = 16384;
	}

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	if (drm->client.device.info.chipset < 0x11)
		dev->mode_config.async_page_flip = false;
	else
		dev->mode_config.async_page_flip = true;

	drm_kms_helper_poll_init(dev);
	drm_kms_helper_poll_disable(dev);

	if (nouveau_modeset != 2 && drm->vbios.dcb.entries) {
		ret = nvif_disp_ctor(&drm->client.device, "kmsDisp", 0,
				     &disp->disp);
		if (ret == 0) {
			nouveau_display_create_properties(dev);
			if (disp->disp.object.oclass < NV50_DISP) {
				dev->mode_config.fb_modifiers_not_supported = true;
				ret = nv04_display_create(dev);
			} else {
				ret = nv50_display_create(dev);
			}
		}
	} else {
		ret = 0;
	}

	if (ret)
		goto disp_create_err;

	drm_mode_config_reset(dev);

	if (dev->mode_config.num_crtc) {
		ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
		if (ret)
			goto vblank_err;

		if (disp->disp.object.oclass >= NV50_DISP)
			nv50_crc_init(dev);
	}

	INIT_WORK(&drm->hpd_work, nouveau_display_hpd_work);
	spin_lock_init(&drm->hpd_lock);
#ifdef CONFIG_ACPI
	drm->acpi_nb.notifier_call = nouveau_display_acpi_ntfy;
	register_acpi_notifier(&drm->acpi_nb);
#endif

	return 0;

vblank_err:
	disp->dtor(dev);
disp_create_err:
	drm_kms_helper_poll_fini(dev);
	drm_mode_config_cleanup(dev);
	return ret;
}

void
nouveau_display_destroy(struct drm_device *dev)
{
	struct nouveau_display *disp = nouveau_display(dev);
	struct nouveau_drm *drm = nouveau_drm(dev);

#ifdef CONFIG_ACPI
	unregister_acpi_notifier(&drm->acpi_nb);
#endif

	drm_kms_helper_poll_fini(dev);
	drm_mode_config_cleanup(dev);

	if (disp->dtor)
		disp->dtor(dev);

	nvif_disp_dtor(&disp->disp);

	drm->display = NULL;
	kfree(disp);
}

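/* System/runtime suspend and resume: the fbdev console is suspended around
 * the transition, and for atomic modesetting the full display state is saved
 * on (non-runtime) suspend and restored on resume.
 */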
int
nouveau_display_suspend(struct drm_device *dev, bool runtime)
{
	struct nouveau_display *disp = nouveau_display(dev);

	/* Disable console. */
	drm_fb_helper_set_suspend_unlocked(dev->fb_helper, true);

	if (drm_drv_uses_atomic_modeset(dev)) {
		if (!runtime) {
			disp->suspend = drm_atomic_helper_suspend(dev);
			if (IS_ERR(disp->suspend)) {
				int ret = PTR_ERR(disp->suspend);
				disp->suspend = NULL;
				return ret;
			}
		}
	}

	nouveau_display_fini(dev, true, runtime);
	return 0;
}

void
nouveau_display_resume(struct drm_device *dev, bool runtime)
{
	struct nouveau_display *disp = nouveau_display(dev);

	nouveau_display_init(dev, true, runtime);

	if (drm_drv_uses_atomic_modeset(dev)) {
		if (disp->suspend) {
			drm_atomic_helper_resume(dev, disp->suspend);
			disp->suspend = NULL;
		}
	}

	/* Enable console. */
	drm_fb_helper_set_suspend_unlocked(dev->fb_helper, false);
}

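/* DUMB buffer allocation: the pitch is aligned to 256 bytes, the size to the
 * page size, and the buffer is placed in VRAM when the device has any,
 * otherwise in GART.
 */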
int
nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *bo;
	uint32_t domain;
	int ret;

	args->pitch = roundup(args->width * (args->bpp / 8), 256);
	args->size = args->pitch * args->height;
	args->size = roundup(args->size, PAGE_SIZE);

	/* Use VRAM if there is any; otherwise fall back to system memory */
	if (nouveau_drm(dev)->client.device.info.ram_size != 0)
		domain = NOUVEAU_GEM_DOMAIN_VRAM;
	else
		domain = NOUVEAU_GEM_DOMAIN_GART;

	ret = nouveau_gem_new(cli, args->size, 0, domain, 0, 0, &bo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, &bo->bo.base, &args->handle);
	drm_gem_object_put(&bo->bo.base);
	return ret;
}