/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "drm_sarea.h"
#include "radeon.h"
#include "radeon_drm.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>

int radeon_driver_unload_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	if (rdev == NULL)
		return 0;
	radeon_modeset_fini(rdev);
	radeon_device_fini(rdev);
	kfree(rdev);
	dev->dev_private = NULL;
	return 0;
}

int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct radeon_device *rdev;
	int r, acpi_status;

	rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
	if (rdev == NULL) {
		return -ENOMEM;
	}
	dev->dev_private = (void *)rdev;

	pci_set_master(dev->pdev);

	/* update BUS flag */
	if (drm_pci_device_is_agp(dev)) {
		flags |= RADEON_IS_AGP;
	} else if (pci_is_pcie(dev->pdev)) {
		flags |= RADEON_IS_PCIE;
	} else {
		flags |= RADEON_IS_PCI;
	}

	/* radeon_device_init() should report only fatal errors
	 * (memory allocation, iomapping, or memory manager
	 * initialization failure); it must properly initialize the
	 * GPU MC controller and permit VRAM allocation.
	 */
	r = radeon_device_init(rdev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Call ACPI methods */
	acpi_status = radeon_acpi_init(rdev);
	if (acpi_status)
		dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n");

	/* Again, radeon_modeset_init() should fail only on a fatal error;
	 * otherwise it should provide enough functionality for shadowfb
	 * to run.
	 */
	r = radeon_modeset_init(rdev);
	if (r)
		dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");
out:
	if (r)
		radeon_driver_unload_kms(dev);
	return r;
}

static void radeon_set_filp_rights(struct drm_device *dev,
				   struct drm_file **owner,
				   struct drm_file *applier,
				   uint32_t *value)
{
	mutex_lock(&dev->struct_mutex);
	if (*value == 1) {
		/* wants rights */
		if (!*owner)
			*owner = applier;
	} else if (*value == 0) {
		/* revokes rights */
		if (*owner == applier)
			*owner = NULL;
	}
	*value = *owner == applier ? 1 : 0;
	mutex_unlock(&dev->struct_mutex);
}

/*
 * Userspace get information ioctl
 */
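/*
 * Illustrative sketch only (not compiled here): userspace normally reaches
 * this ioctl through libdrm, roughly as follows, assuming the usual
 * xf86drm.h and radeon_drm.h headers and an authenticated DRM fd:
 *
 *	struct drm_radeon_info info = {};
 *	uint32_t value = 0;
 *
 *	info.request = RADEON_INFO_NUM_GB_PIPES;
 *	info.value = (uintptr_t)&value;
 *	if (drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info)) == 0)
 *		printf("GB pipes: %u\n", value);
 *
 * The kernel reads the current contents of *value (some requests, such as
 * RADEON_INFO_WANT_HYPERZ, use it as an input as well) and writes the
 * result back to the same user pointer.
 */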
int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_info *info;
	struct radeon_mode_info *minfo = &rdev->mode_info;
	uint32_t *value_ptr;
	uint32_t value;
	struct drm_crtc *crtc;
	int i, found;

	info = data;
	value_ptr = (uint32_t *)((unsigned long)info->value);
	if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value)))
		return -EFAULT;

	switch (info->request) {
	case RADEON_INFO_DEVICE_ID:
		value = dev->pci_device;
		break;
	case RADEON_INFO_NUM_GB_PIPES:
		value = rdev->num_gb_pipes;
		break;
	case RADEON_INFO_NUM_Z_PIPES:
		value = rdev->num_z_pipes;
		break;
	case RADEON_INFO_ACCEL_WORKING:
		/* xf86-video-ati 6.13.0 relies on this being false for evergreen */
		if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
			value = false;
		else
			value = rdev->accel_working;
		break;
	case RADEON_INFO_CRTC_FROM_ID:
		for (i = 0, found = 0; i < rdev->num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == value) {
				struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
				value = radeon_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", value);
			return -EINVAL;
		}
		break;
	case RADEON_INFO_ACCEL_WORKING2:
		value = rdev->accel_working;
		break;
	case RADEON_INFO_TILING_CONFIG:
		if (rdev->family >= CHIP_TAHITI)
			value = rdev->config.si.tile_config;
		else if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.tile_config;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.tile_config;
		else if (rdev->family >= CHIP_RV770)
			value = rdev->config.rv770.tile_config;
		else if (rdev->family >= CHIP_R600)
			value = rdev->config.r600.tile_config;
		else {
			DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_WANT_HYPERZ:
		/* The "value" here is both an input and output parameter.
		 * If the input value is 1, filp requests hyper-z access.
		 * If the input value is 0, filp revokes its hyper-z access.
		 *
		 * When returning, the value is 1 if filp owns hyper-z access,
		 * 0 otherwise. */
		if (value >= 2) {
			DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", value);
			return -EINVAL;
		}
		radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, &value);
		break;
	case RADEON_INFO_WANT_CMASK:
		/* The same logic as Hyper-Z. */
		if (value >= 2) {
			DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", value);
			return -EINVAL;
		}
		radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value);
		break;
	case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
		/* return clock value in KHz */
		value = rdev->clock.spll.reference_freq * 10;
		break;
	case RADEON_INFO_NUM_BACKENDS:
		if (rdev->family >= CHIP_TAHITI)
			value = rdev->config.si.max_backends_per_se *
				rdev->config.si.max_shader_engines;
		else if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.max_backends_per_se *
				rdev->config.cayman.max_shader_engines;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.max_backends;
		else if (rdev->family >= CHIP_RV770)
			value = rdev->config.rv770.max_backends;
		else if (rdev->family >= CHIP_R600)
			value = rdev->config.r600.max_backends;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_NUM_TILE_PIPES:
		if (rdev->family >= CHIP_TAHITI)
			value = rdev->config.si.max_tile_pipes;
		else if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.max_tile_pipes;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.max_tile_pipes;
		else if (rdev->family >= CHIP_RV770)
			value = rdev->config.rv770.max_tile_pipes;
		else if (rdev->family >= CHIP_R600)
			value = rdev->config.r600.max_tile_pipes;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_FUSION_GART_WORKING:
		value = 1;
		break;
	case RADEON_INFO_BACKEND_MAP:
		if (rdev->family >= CHIP_TAHITI)
			value = rdev->config.si.backend_map;
		else if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.backend_map;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.backend_map;
		else if (rdev->family >= CHIP_RV770)
			value = rdev->config.rv770.backend_map;
		else if (rdev->family >= CHIP_R600)
			value = rdev->config.r600.backend_map;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_VA_START:
		/* this is where we report if vm is supported or not */
		if (rdev->family < CHIP_CAYMAN)
			return -EINVAL;
		value = RADEON_VA_RESERVED_SIZE;
		break;
	case RADEON_INFO_IB_VM_MAX_SIZE:
		/* this is where we report if vm is supported or not */
		if (rdev->family < CHIP_CAYMAN)
			return -EINVAL;
		value = RADEON_IB_VM_MAX_SIZE;
		break;
	case RADEON_INFO_MAX_PIPES:
		if (rdev->family >= CHIP_TAHITI)
			value = rdev->config.si.max_pipes_per_simd;
		else if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.max_pipes_per_simd;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.max_pipes;
		else if (rdev->family >= CHIP_RV770)
			value = rdev->config.rv770.max_pipes;
		else if (rdev->family >= CHIP_R600)
			value = rdev->config.r600.max_pipes;
		else {
			return -EINVAL;
		}
		break;
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->request);
		return -EINVAL;
	}
	if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}
	return 0;
}


/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
int radeon_driver_firstopen_kms(struct drm_device *dev)
{
	return 0;
}

void radeon_driver_lastclose_kms(struct drm_device *dev)
{
	vga_switcheroo_process_delayed_switch();
}
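/*
 * Per-open driver state: on VM-capable chips (CAYMAN and newer) each DRM
 * file gets its own GPU virtual address space, created here and released
 * again in radeon_driver_postclose_kms().
 */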
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	file_priv->driver_priv = NULL;

	/* newer GPUs have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN) {
		struct radeon_fpriv *fpriv;
		int r;

		fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
		if (unlikely(!fpriv)) {
			return -ENOMEM;
		}

		r = radeon_vm_init(rdev, &fpriv->vm);
		if (r) {
			radeon_vm_fini(rdev, &fpriv->vm);
			kfree(fpriv);
			return r;
		}

		file_priv->driver_priv = fpriv;
	}
	return 0;
}

void radeon_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	/* newer GPUs have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
		struct radeon_fpriv *fpriv = file_priv->driver_priv;

		radeon_vm_fini(rdev, &fpriv->vm);
		kfree(fpriv);
		file_priv->driver_priv = NULL;
	}
}

void radeon_driver_preclose_kms(struct drm_device *dev,
				struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	if (rdev->hyperz_filp == file_priv)
		rdev->hyperz_filp = NULL;
	if (rdev->cmask_filp == file_priv)
		rdev->cmask_filp = NULL;
}

/*
 * VBlank related functions.
 */
u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
{
	struct radeon_device *rdev = dev->dev_private;

	if (crtc < 0 || crtc >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	return radeon_get_vblank_counter(rdev, crtc);
}

int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
{
	struct radeon_device *rdev = dev->dev_private;

	if (crtc < 0 || crtc >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	rdev->irq.crtc_vblank_int[crtc] = true;

	return radeon_irq_set(rdev);
}

void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
{
	struct radeon_device *rdev = dev->dev_private;

	if (crtc < 0 || crtc >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return;
	}

	rdev->irq.crtc_vblank_int[crtc] = false;

	radeon_irq_set(rdev);
}

int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
				    int *max_error,
				    struct timeval *vblank_time,
				    unsigned flags)
{
	struct drm_crtc *drmcrtc;
	struct radeon_device *rdev = dev->dev_private;

	if (crtc < 0 || crtc >= dev->num_crtcs) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	/* Get associated drm_crtc: */
	drmcrtc = &rdev->mode_info.crtcs[crtc]->base;

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
						     vblank_time, flags,
						     drmcrtc);
}

/*
 * IOCTL.
 */
int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	/* Not valid in KMS. */
	return -EINVAL;
}
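/*
 * The legacy (pre-KMS/UMS) ioctls are stubbed out below: each stub simply
 * logs the call and returns -EINVAL so that old user space fails cleanly
 * when run against the KMS driver.
 */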
#define KMS_INVALID_IOCTL(name)						\
int name(struct drm_device *dev, void *data, struct drm_file *file_priv)\
{									\
	DRM_ERROR("invalid ioctl with kms %s\n", __func__);		\
	return -EINVAL;							\
}

/*
 * All these ioctls are invalid in the KMS world.
 */
KMS_INVALID_IOCTL(radeon_cp_init_kms)
KMS_INVALID_IOCTL(radeon_cp_start_kms)
KMS_INVALID_IOCTL(radeon_cp_stop_kms)
KMS_INVALID_IOCTL(radeon_cp_reset_kms)
KMS_INVALID_IOCTL(radeon_cp_idle_kms)
KMS_INVALID_IOCTL(radeon_cp_resume_kms)
KMS_INVALID_IOCTL(radeon_engine_reset_kms)
KMS_INVALID_IOCTL(radeon_fullscreen_kms)
KMS_INVALID_IOCTL(radeon_cp_swap_kms)
KMS_INVALID_IOCTL(radeon_cp_clear_kms)
KMS_INVALID_IOCTL(radeon_cp_vertex_kms)
KMS_INVALID_IOCTL(radeon_cp_indices_kms)
KMS_INVALID_IOCTL(radeon_cp_texture_kms)
KMS_INVALID_IOCTL(radeon_cp_stipple_kms)
KMS_INVALID_IOCTL(radeon_cp_indirect_kms)
KMS_INVALID_IOCTL(radeon_cp_vertex2_kms)
KMS_INVALID_IOCTL(radeon_cp_cmdbuf_kms)
KMS_INVALID_IOCTL(radeon_cp_getparam_kms)
KMS_INVALID_IOCTL(radeon_cp_flip_kms)
KMS_INVALID_IOCTL(radeon_mem_alloc_kms)
KMS_INVALID_IOCTL(radeon_mem_free_kms)
KMS_INVALID_IOCTL(radeon_mem_init_heap_kms)
KMS_INVALID_IOCTL(radeon_irq_emit_kms)
KMS_INVALID_IOCTL(radeon_irq_wait_kms)
KMS_INVALID_IOCTL(radeon_cp_setparam_kms)
KMS_INVALID_IOCTL(radeon_surface_alloc_kms)
KMS_INVALID_IOCTL(radeon_surface_free_kms)

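/*
 * Access flags used in the table below: DRM_AUTH requires an authenticated
 * DRM client, DRM_MASTER restricts the ioctl to the current DRM master,
 * DRM_ROOT_ONLY additionally requires CAP_SYS_ADMIN, and DRM_UNLOCKED lets
 * the ioctl run without taking the global DRM lock.
 */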
struct drm_ioctl_desc radeon_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
	/* KMS */
	DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED),
};
int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
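/*
 * Note: the KMS drm_driver declared in radeon_drv.c is expected to point
 * its .ioctls field at radeon_ioctls_kms and to set num_ioctls from
 * radeon_max_kms_ioctl at module init time.
 */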