/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vga_switcheroo.h>

#include <drm/drm_agpsupport.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/radeon_drm.h>

#include "radeon.h"
#include "radeon_asic.h"

#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_has_atpx(void);
#else
static inline bool radeon_has_atpx(void) { return false; }
#endif

/**
 * radeon_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 * It calls radeon_modeset_fini() to tear down the
 * displays, and radeon_device_fini() to tear down
 * the rest of the device (CP, writeback, etc.).
 */
void radeon_driver_unload_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	if (rdev == NULL)
		return;

	if (rdev->rmmio == NULL)
		goto done_free;

	if (radeon_is_px(dev)) {
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
	}

	radeon_acpi_fini(rdev);

	radeon_modeset_fini(rdev);
	radeon_device_fini(rdev);

	if (dev->agp)
		arch_phys_wc_del(dev->agp->agp_mtrr);
	kfree(dev->agp);
	dev->agp = NULL;

done_free:
	kfree(rdev);
	dev->dev_private = NULL;
}

/**
 * radeon_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * It calls radeon_device_init() to set up the non-display
 * parts of the chip (asic init, CP, writeback, etc.), and
 * radeon_modeset_init() to set up the display parts
 * (crtcs, encoders, hotplug detect, etc.).
 * Returns 0 on success, error on failure.
 */
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct radeon_device *rdev;
	int r, acpi_status;

	rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
	if (rdev == NULL) {
		return -ENOMEM;
	}
	dev->dev_private = (void *)rdev;

	/* update BUS flag */
	if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP)) {
		flags |= RADEON_IS_AGP;
	} else if (pci_is_pcie(dev->pdev)) {
		flags |= RADEON_IS_PCIE;
	} else {
		flags |= RADEON_IS_PCI;
	}

	if ((radeon_runtime_pm != 0) &&
	    radeon_has_atpx() &&
	    ((flags & RADEON_IS_IGP) == 0) &&
	    !pci_is_thunderbolt_attached(dev->pdev))
		flags |= RADEON_IS_PX;

	/* radeon_device_init() should report only fatal errors
	 * (memory allocation, iomapping or memory manager
	 * initialization failures); it must properly initialize
	 * the GPU MC controller and permit VRAM allocation.
	 */
	r = radeon_device_init(rdev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Again, modeset_init() should fail only on fatal errors;
	 * otherwise it should provide enough functionality for
	 * shadowfb to run.
	 */
	r = radeon_modeset_init(rdev);
	if (r)
		dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");

	/* Call ACPI methods: require modeset init
	 * but failure is not fatal
	 */
	if (!r) {
		acpi_status = radeon_acpi_init(rdev);
		if (acpi_status)
			dev_dbg(&dev->pdev->dev,
				"Error during ACPI methods call\n");
	}

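	/* PX (hybrid graphics) parts: enable runtime PM so the dGPU can be
	 * powered down when idle, with a 5 second autosuspend delay.
	 */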
	if (radeon_is_px(dev)) {
		dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

out:
	if (r)
		radeon_driver_unload_kms(dev);

	return r;
}

/**
 * radeon_set_filp_rights - Set filp rights.
 *
 * @dev: drm dev pointer
 * @owner: pointer to the drm file that currently owns the rights
 * @applier: drm file requesting or revoking the rights
 * @value: 1 to request the rights, 0 to revoke them; on return, 1 if
 *	   @applier owns the rights, 0 otherwise
 *
 * Sets the filp rights for the device (all asics).
 */
static void radeon_set_filp_rights(struct drm_device *dev,
				   struct drm_file **owner,
				   struct drm_file *applier,
				   uint32_t *value)
{
	struct radeon_device *rdev = dev->dev_private;

	mutex_lock(&rdev->gem.mutex);
	if (*value == 1) {
		/* wants rights */
		if (!*owner)
			*owner = applier;
	} else if (*value == 0) {
		/* revokes rights */
		if (*owner == applier)
			*owner = NULL;
	}
	*value = *owner == applier ? 1 : 0;
	mutex_unlock(&rdev->gem.mutex);
}
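
/*
 * For illustration only (not driver code): a userspace client requests or
 * revokes Hyper-Z ownership through the RADEON_INFO_WANT_HYPERZ request of
 * the info ioctl below (RADEON_INFO_WANT_CMASK works the same way).  A
 * minimal libdrm sketch, assuming fd is an open radeon DRM file descriptor;
 * passing 1 requests the rights, 0 revokes them, and on return the value
 * is 1 iff this fd now owns them:
 *
 *	uint32_t want = 1;
 *	struct drm_radeon_info info = {0};
 *
 *	info.request = RADEON_INFO_WANT_HYPERZ;
 *	info.value = (uintptr_t)&want;
 *	drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
 */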

/*
 * Userspace get information ioctl
 */
/**
 * radeon_info_ioctl - answer a device specific request.
 *
 * @dev: drm device pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers.  Examples include: pci device id, pipeline parms, tiling params,
 * etc. (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_info *info = data;
	struct radeon_mode_info *minfo = &rdev->mode_info;
	uint32_t *value, value_tmp, *value_ptr, value_size;
	uint64_t value64;
	struct drm_crtc *crtc;
	int i, found;

	value_ptr = (uint32_t *)((unsigned long)info->value);
	value = &value_tmp;
	value_size = sizeof(uint32_t);

	switch (info->request) {
	case RADEON_INFO_DEVICE_ID:
		*value = dev->pdev->device;
		break;
	case RADEON_INFO_NUM_GB_PIPES:
		*value = rdev->num_gb_pipes;
		break;
	case RADEON_INFO_NUM_Z_PIPES:
		*value = rdev->num_z_pipes;
		break;
	case RADEON_INFO_ACCEL_WORKING:
		/* xf86-video-ati 6.13.0 relies on this being false for evergreen */
		if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
			*value = false;
		else
			*value = rdev->accel_working;
		break;
	case RADEON_INFO_CRTC_FROM_ID:
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		for (i = 0, found = 0; i < rdev->num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == *value) {
				struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
				*value = radeon_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", *value);
			return -EINVAL;
		}
		break;
	case RADEON_INFO_ACCEL_WORKING2:
		if (rdev->family == CHIP_HAWAII) {
			if (rdev->accel_working) {
				if (rdev->new_fw)
					*value = 3;
				else
					*value = 2;
			} else {
				*value = 0;
			}
		} else {
			*value = rdev->accel_working;
		}
		break;
	case RADEON_INFO_TILING_CONFIG:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.tile_config;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.tile_config;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.tile_config;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.tile_config;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.tile_config;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.tile_config;
		else {
			DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_WANT_HYPERZ:
		/* The "value" here is both an input and output parameter.
		 * If the input value is 1, filp requests hyper-z access.
		 * If the input value is 0, filp revokes its hyper-z access.
		 *
		 * When returning, the value is 1 if filp owns hyper-z access,
		 * 0 otherwise. */
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		if (*value >= 2) {
			DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", *value);
			return -EINVAL;
		}
		radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, value);
		break;
	case RADEON_INFO_WANT_CMASK:
		/* The same logic as Hyper-Z. */
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		if (*value >= 2) {
			DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", *value);
			return -EINVAL;
		}
		radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, value);
		break;
	case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
		/* return clock value in KHz */
		if (rdev->asic->get_xclk)
			*value = radeon_get_xclk(rdev) * 10;
		else
			*value = rdev->clock.spll.reference_freq * 10;
		break;
	case RADEON_INFO_NUM_BACKENDS:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_backends_per_se *
				rdev->config.cik.max_shader_engines;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_backends_per_se *
				rdev->config.si.max_shader_engines;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_backends_per_se *
				rdev->config.cayman.max_shader_engines;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.max_backends;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.max_backends;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.max_backends;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_NUM_TILE_PIPES:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_tile_pipes;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_tile_pipes;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_tile_pipes;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.max_tile_pipes;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.max_tile_pipes;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.max_tile_pipes;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_FUSION_GART_WORKING:
		*value = 1;
		break;
	case RADEON_INFO_BACKEND_MAP:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.backend_map;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.backend_map;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.backend_map;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.backend_map;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.backend_map;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.backend_map;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_VA_START:
		/* this is where we report if vm is supported or not */
		if (rdev->family < CHIP_CAYMAN)
			return -EINVAL;
		*value = RADEON_VA_RESERVED_SIZE;
		break;
	case RADEON_INFO_IB_VM_MAX_SIZE:
		/* this is where we report if vm is supported or not */
		if (rdev->family < CHIP_CAYMAN)
			return -EINVAL;
		*value = RADEON_IB_VM_MAX_SIZE;
		break;
	case RADEON_INFO_MAX_PIPES:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_cu_per_sh;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_cu_per_sh;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_pipes_per_simd;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.max_pipes;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.max_pipes;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.max_pipes;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_TIMESTAMP:
		if (rdev->family < CHIP_R600) {
			DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
			return -EINVAL;
		}
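		/* 64-bit result: point the final copy_to_user() at value64 */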
		value = (uint32_t*)&value64;
		value_size = sizeof(uint64_t);
		value64 = radeon_get_gpu_clock_counter(rdev);
		break;
	case RADEON_INFO_MAX_SE:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_shader_engines;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_shader_engines;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_shader_engines;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.num_ses;
		else
			*value = 1;
		break;
	case RADEON_INFO_MAX_SH_PER_SE:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_sh_per_se;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_sh_per_se;
		else
			return -EINVAL;
		break;
	case RADEON_INFO_FASTFB_WORKING:
		*value = rdev->fastfb_working;
		break;
	case RADEON_INFO_RING_WORKING:
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		switch (*value) {
		case RADEON_CS_RING_GFX:
		case RADEON_CS_RING_COMPUTE:
			*value = rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready;
			break;
		case RADEON_CS_RING_DMA:
			*value = rdev->ring[R600_RING_TYPE_DMA_INDEX].ready;
			*value |= rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready;
			break;
		case RADEON_CS_RING_UVD:
			*value = rdev->ring[R600_RING_TYPE_UVD_INDEX].ready;
			break;
		case RADEON_CS_RING_VCE:
			*value = rdev->ring[TN_RING_TYPE_VCE1_INDEX].ready;
			break;
		default:
			return -EINVAL;
		}
		break;
	case RADEON_INFO_SI_TILE_MODE_ARRAY:
		if (rdev->family >= CHIP_BONAIRE) {
			value = rdev->config.cik.tile_mode_array;
			value_size = sizeof(uint32_t)*32;
		} else if (rdev->family >= CHIP_TAHITI) {
			value = rdev->config.si.tile_mode_array;
			value_size = sizeof(uint32_t)*32;
		} else {
			DRM_DEBUG_KMS("tile mode array is si+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_CIK_MACROTILE_MODE_ARRAY:
		if (rdev->family >= CHIP_BONAIRE) {
			value = rdev->config.cik.macrotile_mode_array;
			value_size = sizeof(uint32_t)*16;
		} else {
			DRM_DEBUG_KMS("macrotile mode array is cik+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_SI_CP_DMA_COMPUTE:
		*value = 1;
		break;
	case RADEON_INFO_SI_BACKEND_ENABLED_MASK:
		if (rdev->family >= CHIP_BONAIRE) {
			*value = rdev->config.cik.backend_enable_mask;
		} else if (rdev->family >= CHIP_TAHITI) {
			*value = rdev->config.si.backend_enable_mask;
		} else {
			DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
			/* don't copy an uninitialized value back to userspace */
			return -EINVAL;
		}
		break;
	case RADEON_INFO_MAX_SCLK:
		if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
		    rdev->pm.dpm_enabled)
			*value = rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10;
		else
			*value = rdev->pm.default_sclk * 10;
		break;
	case RADEON_INFO_VCE_FW_VERSION:
		*value = rdev->vce.fw_version;
		break;
	case RADEON_INFO_VCE_FB_VERSION:
		*value = rdev->vce.fb_version;
		break;
	case RADEON_INFO_NUM_BYTES_MOVED:
		value = (uint32_t*)&value64;
		value_size = sizeof(uint64_t);
		value64 = atomic64_read(&rdev->num_bytes_moved);
		break;
	case RADEON_INFO_VRAM_USAGE:
		value = (uint32_t*)&value64;
		value_size = sizeof(uint64_t);
		value64 = atomic64_read(&rdev->vram_usage);
		break;
	case RADEON_INFO_GTT_USAGE:
		value = (uint32_t*)&value64;
		value_size = sizeof(uint64_t);
		value64 = atomic64_read(&rdev->gtt_usage);
		break;
	case RADEON_INFO_ACTIVE_CU_COUNT:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.active_cus;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.active_cus;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.active_simds;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.active_simds;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.active_simds;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.active_simds;
		else
			*value = 1;
		break;
	case RADEON_INFO_CURRENT_GPU_TEMP:
		/* get temperature in millidegrees C */
		if (rdev->asic->pm.get_temperature)
			*value = radeon_get_temperature(rdev);
		else
			*value = 0;
		break;
	case RADEON_INFO_CURRENT_GPU_SCLK:
		/* get sclk in Mhz */
		if (rdev->pm.dpm_enabled)
			*value = radeon_dpm_get_current_sclk(rdev) / 100;
		else
			*value = rdev->pm.current_sclk / 100;
		break;
	case RADEON_INFO_CURRENT_GPU_MCLK:
		/* get mclk in Mhz */
		if (rdev->pm.dpm_enabled)
			*value = radeon_dpm_get_current_mclk(rdev) / 100;
		else
			*value = rdev->pm.current_mclk / 100;
		break;
	case RADEON_INFO_READ_REG:
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		if (radeon_get_allowed_info_register(rdev, *value, value))
			return -EINVAL;
		break;
	case RADEON_INFO_VA_UNMAP_WORKING:
		*value = true;
		break;
	case RADEON_INFO_GPU_RESET_COUNTER:
		*value = atomic_read(&rdev->gpu_reset_counter);
		break;
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->request);
		return -EINVAL;
	}
	if (copy_to_user(value_ptr, (char*)value, value_size)) {
		DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
		return -EFAULT;
	}
	return 0;
}
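
/*
 * For illustration only (not part of this driver): a minimal userspace
 * sketch of how the info ioctl above is typically driven through libdrm.
 * It assumes fd is a file descriptor opened on a radeon card or render
 * node; radeon_query_device_id() is just a hypothetical helper name.
 *
 *	#include <stdint.h>
 *	#include <xf86drm.h>
 *	#include <radeon_drm.h>
 *
 *	static int radeon_query_device_id(int fd, uint32_t *dev_id)
 *	{
 *		struct drm_radeon_info info = {0};
 *
 *		info.request = RADEON_INFO_DEVICE_ID;
 *		info.value = (uintptr_t)dev_id;
 *		return drmCommandWriteRead(fd, DRM_RADEON_INFO,
 *					   &info, sizeof(info));
 *	}
 */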

/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * radeon_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga_switcheroo state after last close (all asics).
 */
void radeon_driver_lastclose_kms(struct drm_device *dev)
{
	drm_fb_helper_lastclose(dev);
	vga_switcheroo_process_delayed_switch();
}

/**
 * radeon_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;
	int r;

	file_priv->driver_priv = NULL;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		/* pm_runtime_get_sync() bumps the usage count even on failure */
		pm_runtime_put_autosuspend(dev->dev);
		return r;
	}

	/* new gpu have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN) {
		struct radeon_fpriv *fpriv;
		struct radeon_vm *vm;

		fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
		if (unlikely(!fpriv)) {
			r = -ENOMEM;
			goto out_suspend;
		}

		if (rdev->accel_working) {
			vm = &fpriv->vm;
			r = radeon_vm_init(rdev, vm);
			if (r) {
				kfree(fpriv);
				goto out_suspend;
			}

			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
			if (r) {
				radeon_vm_fini(rdev, vm);
				kfree(fpriv);
				goto out_suspend;
			}

			/* map the ib pool buffer read only into
			 * virtual address space */
			vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
							rdev->ring_tmp_bo.bo);
			r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
						  RADEON_VA_IB_OFFSET,
						  RADEON_VM_PAGE_READABLE |
						  RADEON_VM_PAGE_SNOOPED);
			if (r) {
				radeon_vm_fini(rdev, vm);
				kfree(fpriv);
				goto out_suspend;
			}
		}
		file_priv->driver_priv = fpriv;
	}

out_suspend:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
	return r;
}

/**
 * radeon_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device close, tear down hyperz and cmask filps on r1xx-r5xx
 * (all asics).  And tear down vm on cayman+ (all asics).
 */
void radeon_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	pm_runtime_get_sync(dev->dev);

	mutex_lock(&rdev->gem.mutex);
	if (rdev->hyperz_filp == file_priv)
		rdev->hyperz_filp = NULL;
	if (rdev->cmask_filp == file_priv)
		rdev->cmask_filp = NULL;
	mutex_unlock(&rdev->gem.mutex);

	radeon_uvd_free_handles(rdev, file_priv);
	radeon_vce_free_handles(rdev, file_priv);

	/* new gpu have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
		struct radeon_fpriv *fpriv = file_priv->driver_priv;
		struct radeon_vm *vm = &fpriv->vm;
		int r;

		if (rdev->accel_working) {
			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
			if (!r) {
				if (vm->ib_bo_va)
					radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
				radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
			}
			radeon_vm_fini(rdev, vm);
		}

		kfree(fpriv);
		file_priv->driver_priv = NULL;
	}
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}

/*
 * VBlank related functions.
 */
/**
 * radeon_get_vblank_counter_kms - get frame count
 *
 * @crtc: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 radeon_get_vblank_counter_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	int vpos, hpos, stat;
	u32 count;
	struct radeon_device *rdev = dev->dev_private;

	if (pipe >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* The hw increments its frame counter at start of vsync, not at start
	 * of vblank, as is required by DRM core vblank counter handling.
	 * Cook the hw count here to make it appear to the caller as if it
	 * incremented at start of vblank. We measure distance to start of
	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
	 * result by 1 to give the proper appearance to caller.
	 */
	if (rdev->mode_info.crtcs[pipe]) {
		/* Repeat readout if needed to provide stable result if
		 * we cross start of vsync during the queries.
		 */
		do {
			count = radeon_get_vblank_counter(rdev, pipe);
			/* Ask radeon_get_crtc_scanoutpos to return vpos as
			 * distance to start of vblank, instead of regular
			 * vertical scanout pos.
			 */
			stat = radeon_get_crtc_scanoutpos(
				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
				&vpos, &hpos, NULL, NULL,
				&rdev->mode_info.crtcs[pipe]->base.hwmode);
		} while (count != radeon_get_vblank_counter(rdev, pipe));

		if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) {
			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
		} else {
			DRM_DEBUG_VBL("crtc %u: dist from vblank start %d\n",
				      pipe, vpos);

			/* Bump counter if we are at >= leading edge of vblank,
			 * but before vsync where vpos would turn negative and
			 * the hw counter really increments.
			 */
			if (vpos >= 0)
				count++;
		}
	} else {
		/* Fallback to use value as is. */
		count = radeon_get_vblank_counter(rdev, pipe);
		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
	}

	return count;
}

/**
 * radeon_enable_vblank_kms - enable vblank interrupt
 *
 * @crtc: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int radeon_enable_vblank_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct radeon_device *rdev = dev->dev_private;
	unsigned long irqflags;
	int r;

	if (pipe >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.crtc_vblank_int[pipe] = true;
	r = radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	return r;
}

/**
 * radeon_disable_vblank_kms - disable vblank interrupt
 *
 * @crtc: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void radeon_disable_vblank_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct radeon_device *rdev = dev->dev_private;
	unsigned long irqflags;

	if (pipe >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return;
	}

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.crtc_vblank_int[pipe] = false;
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}

const struct drm_ioctl_desc radeon_ioctls_kms[] = {
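	/* Legacy pre-KMS (UMS) ioctls: no longer supported, all rejected via drm_invalid_op */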
	DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_START, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_RESET, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SWAP, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CLEAR, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_VERTEX, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INDICES, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FLIP, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_ALLOC, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FREE, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, drm_invalid_op, DRM_AUTH),
	/* KMS */
	DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_USERPTR, radeon_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms);