/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include "radeon.h"
#include <drm/radeon_drm.h>
#include "radeon_asic.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>

/**
 * radeon_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 * It calls radeon_modeset_fini() to tear down the
 * displays, and radeon_device_fini() to tear down
 * the rest of the device (CP, writeback, etc.).
 * Returns 0 on success.
 */
int radeon_driver_unload_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	if (rdev == NULL)
		return 0;
	radeon_acpi_fini(rdev);
	radeon_modeset_fini(rdev);
	radeon_device_fini(rdev);
	kfree(rdev);
	dev->dev_private = NULL;
	return 0;
}

/**
 * radeon_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * It calls radeon_device_init() to set up the non-display
 * parts of the chip (asic init, CP, writeback, etc.), and
 * radeon_modeset_init() to set up the display parts
 * (crtcs, encoders, hotplug detect, etc.).
 * Returns 0 on success, error on failure.
 */
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct radeon_device *rdev;
	int r, acpi_status;

	rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
	if (rdev == NULL) {
		return -ENOMEM;
	}
	dev->dev_private = (void *)rdev;

	/* update BUS flag */
	if (drm_pci_device_is_agp(dev)) {
		flags |= RADEON_IS_AGP;
	} else if (pci_is_pcie(dev->pdev)) {
		flags |= RADEON_IS_PCIE;
	} else {
		flags |= RADEON_IS_PCI;
	}

	/* radeon_device_init should report only fatal error
	 * like memory allocation failure or iomapping failure,
	 * or memory manager initialization failure, it must
	 * properly initialize the GPU MC controller and permit
	 * VRAM allocation
	 */
	r = radeon_device_init(rdev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Again modeset_init should fail only on fatal error
	 * otherwise it should provide enough functionalities
	 * for shadowfb to run
	 */
	r = radeon_modeset_init(rdev);
	if (r)
		dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");

	/* Call ACPI methods: require modeset init
	 * but failure is not fatal
	 */
	if (!r) {
		acpi_status = radeon_acpi_init(rdev);
		if (acpi_status)
			dev_dbg(&dev->pdev->dev,
				"Error during ACPI methods call\n");
	}

out:
	if (r)
		radeon_driver_unload_kms(dev);
	return r;
}

/**
 * radeon_set_filp_rights - Set filp rights.
 *
 * @dev: drm dev pointer
 * @owner: drm file
 * @applier: drm file
 * @value: value
 *
 * Sets the filp rights for the device (all asics).
 */
static void radeon_set_filp_rights(struct drm_device *dev,
				   struct drm_file **owner,
				   struct drm_file *applier,
				   uint32_t *value)
{
	mutex_lock(&dev->struct_mutex);
	if (*value == 1) {
		/* wants rights */
		if (!*owner)
			*owner = applier;
	} else if (*value == 0) {
		/* revokes rights */
		if (*owner == applier)
			*owner = NULL;
	}
	*value = *owner == applier ? 1 : 0;
	mutex_unlock(&dev->struct_mutex);
}

/*
 * Userspace get information ioctl
 */
/**
 * radeon_info_ioctl - answer a device specific request.
 *
 * @dev: drm dev pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers. Examples include: pci device id, pipeline parms, tiling params,
 * etc. (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_info *info = data;
	struct radeon_mode_info *minfo = &rdev->mode_info;
	uint32_t value, *value_ptr;
	uint64_t value64, *value_ptr64;
	struct drm_crtc *crtc;
	int i, found;

	/* TIMESTAMP is a 64-bit value, needs special handling.
	 */
	if (info->request == RADEON_INFO_TIMESTAMP) {
		if (rdev->family >= CHIP_R600) {
			value_ptr64 = (uint64_t*)((unsigned long)info->value);
			value64 = radeon_get_gpu_clock_counter(rdev);

			if (DRM_COPY_TO_USER(value_ptr64, &value64, sizeof(value64))) {
				DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
				return -EFAULT;
			}
			return 0;
		} else {
			DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
			return -EINVAL;
		}
	}

	value_ptr = (uint32_t *)((unsigned long)info->value);
	if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value))) {
		DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
		return -EFAULT;
	}

	switch (info->request) {
	case RADEON_INFO_DEVICE_ID:
		value = dev->pci_device;
		break;
	case RADEON_INFO_NUM_GB_PIPES:
		value = rdev->num_gb_pipes;
		break;
	case RADEON_INFO_NUM_Z_PIPES:
		value = rdev->num_z_pipes;
		break;
	case RADEON_INFO_ACCEL_WORKING:
		/* xf86-video-ati 6.13.0 relies on this being false for evergreen */
		if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
			value = false;
		else
			value = rdev->accel_working;
		break;
	case RADEON_INFO_CRTC_FROM_ID:
		for (i = 0, found = 0; i < rdev->num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == value) {
				struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
				value = radeon_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", value);
			return -EINVAL;
		}
		break;
	case RADEON_INFO_ACCEL_WORKING2:
		value = rdev->accel_working;
		break;
	case RADEON_INFO_TILING_CONFIG:
		if (rdev->family >= CHIP_TAHITI)
			value = rdev->config.si.tile_config;
		else if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.tile_config;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.tile_config;
		else if (rdev->family >= CHIP_RV770)
			value = rdev->config.rv770.tile_config;
		else if (rdev->family >= CHIP_R600)
			value = rdev->config.r600.tile_config;
		else {
			DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_WANT_HYPERZ:
		/* The "value" here is both an input and output parameter.
		 * If the input value is 1, filp requests hyper-z access.
		 * If the input value is 0, filp revokes its hyper-z access.
		 *
		 * When returning, the value is 1 if filp owns hyper-z access,
		 * 0 otherwise. */
		if (value >= 2) {
			DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", value);
			return -EINVAL;
		}
		radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, &value);
		break;
	case RADEON_INFO_WANT_CMASK:
		/* The same logic as Hyper-Z.
		 */
		if (value >= 2) {
			DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", value);
			return -EINVAL;
		}
		radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value);
		break;
	case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
		/* return clock value in KHz */
		if (rdev->asic->get_xclk)
			value = radeon_get_xclk(rdev) * 10;
		else
			value = rdev->clock.spll.reference_freq * 10;
		break;
	case RADEON_INFO_NUM_BACKENDS:
		if (rdev->family >= CHIP_TAHITI)
			value = rdev->config.si.max_backends_per_se *
				rdev->config.si.max_shader_engines;
		else if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.max_backends_per_se *
				rdev->config.cayman.max_shader_engines;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.max_backends;
		else if (rdev->family >= CHIP_RV770)
			value = rdev->config.rv770.max_backends;
		else if (rdev->family >= CHIP_R600)
			value = rdev->config.r600.max_backends;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_NUM_TILE_PIPES:
		if (rdev->family >= CHIP_TAHITI)
			value = rdev->config.si.max_tile_pipes;
		else if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.max_tile_pipes;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.max_tile_pipes;
		else if (rdev->family >= CHIP_RV770)
			value = rdev->config.rv770.max_tile_pipes;
		else if (rdev->family >= CHIP_R600)
			value = rdev->config.r600.max_tile_pipes;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_FUSION_GART_WORKING:
		value = 1;
		break;
	case RADEON_INFO_BACKEND_MAP:
		if (rdev->family >= CHIP_TAHITI)
			value = rdev->config.si.backend_map;
		else if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.backend_map;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.backend_map;
		else if (rdev->family >= CHIP_RV770)
			value = rdev->config.rv770.backend_map;
		else if (rdev->family >= CHIP_R600)
			value = rdev->config.r600.backend_map;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_VA_START:
		/* this is where we report if vm is supported or not */
		if (rdev->family < CHIP_CAYMAN)
			return -EINVAL;
		value = RADEON_VA_RESERVED_SIZE;
		break;
	case RADEON_INFO_IB_VM_MAX_SIZE:
		/* this is where we report if vm is supported or not */
		if (rdev->family < CHIP_CAYMAN)
			return -EINVAL;
		value = RADEON_IB_VM_MAX_SIZE;
		break;
	case RADEON_INFO_MAX_PIPES:
		if (rdev->family >= CHIP_TAHITI)
			value = rdev->config.si.max_cu_per_sh;
		else if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.max_pipes_per_simd;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.max_pipes;
		else if (rdev->family >= CHIP_RV770)
			value = rdev->config.rv770.max_pipes;
		else if (rdev->family >= CHIP_R600)
			value = rdev->config.r600.max_pipes;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_MAX_SE:
		if (rdev->family >= CHIP_TAHITI)
			value = rdev->config.si.max_shader_engines;
		else if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.max_shader_engines;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.num_ses;
		else
			value = 1;
		break;
	case RADEON_INFO_MAX_SH_PER_SE:
		if (rdev->family >= CHIP_TAHITI)
			value = rdev->config.si.max_sh_per_se;
		else
			return -EINVAL;
		break;
	default:
		DRM_DEBUG_KMS("Invalid request %d\n",
			      info->request);
		return -EINVAL;
	}
	if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) {
		DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
		return -EFAULT;
	}
	return 0;
}


/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * radeon_driver_firstopen_kms - drm callback for first open
 *
 * @dev: drm dev pointer
 *
 * Nothing to be done for KMS (all asics).
 * Returns 0 on success.
 */
int radeon_driver_firstopen_kms(struct drm_device *dev)
{
	return 0;
}

/**
 * radeon_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga switcheroo state after last close (all asics).
 */
void radeon_driver_lastclose_kms(struct drm_device *dev)
{
	vga_switcheroo_process_delayed_switch();
}

/**
 * radeon_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	file_priv->driver_priv = NULL;

	/* newer GPUs have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN) {
		struct radeon_fpriv *fpriv;
		struct radeon_bo_va *bo_va;
		int r;

		fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
		if (unlikely(!fpriv)) {
			return -ENOMEM;
		}

		radeon_vm_init(rdev, &fpriv->vm);

		/* map the ib pool buffer read only into
		 * virtual address space */
		bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
					 rdev->ring_tmp_bo.bo);
		r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
					  RADEON_VM_PAGE_READABLE |
					  RADEON_VM_PAGE_SNOOPED);
		if (r) {
			radeon_vm_fini(rdev, &fpriv->vm);
			kfree(fpriv);
			return r;
		}

		file_priv->driver_priv = fpriv;
	}
	return 0;
}

/**
 * radeon_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void radeon_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	/* newer GPUs have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
		struct radeon_fpriv *fpriv = file_priv->driver_priv;
		struct radeon_bo_va *bo_va;
		int r;

		r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
		if (!r) {
			bo_va = radeon_vm_bo_find(&fpriv->vm,
						  rdev->ring_tmp_bo.bo);
			if (bo_va)
				radeon_vm_bo_rmv(rdev, bo_va);
			radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
		}

		radeon_vm_fini(rdev, &fpriv->vm);
		kfree(fpriv);
		file_priv->driver_priv = NULL;
	}
}

/**
 * radeon_driver_preclose_kms - drm callback for pre close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device pre close, tear down hyperz and cmask filps on r1xx-r5xx
 * (all asics).
 */
void radeon_driver_preclose_kms(struct drm_device *dev,
				struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;
	if (rdev->hyperz_filp == file_priv)
		rdev->hyperz_filp = NULL;
	if (rdev->cmask_filp == file_priv)
		rdev->cmask_filp = NULL;
}

/*
 * VBlank related functions.
 */
/**
 * radeon_get_vblank_counter_kms - get frame count
 *
 * @dev: drm dev pointer
 * @crtc: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
{
	struct radeon_device *rdev = dev->dev_private;

	if (crtc < 0 || crtc >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	return radeon_get_vblank_counter(rdev, crtc);
}

/**
 * radeon_enable_vblank_kms - enable vblank interrupt
 *
 * @dev: drm dev pointer
 * @crtc: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
{
	struct radeon_device *rdev = dev->dev_private;
	unsigned long irqflags;
	int r;

	if (crtc < 0 || crtc >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.crtc_vblank_int[crtc] = true;
	r = radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	return r;
}

/**
 * radeon_disable_vblank_kms - disable vblank interrupt
 *
 * @dev: drm dev pointer
 * @crtc: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
{
	struct radeon_device *rdev = dev->dev_private;
	unsigned long irqflags;

	if (crtc < 0 || crtc >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return;
	}

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.crtc_vblank_int[crtc] = false;
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}

/**
 * radeon_get_vblank_timestamp_kms - get vblank timestamp
 *
 * @dev: drm dev pointer
 * @crtc: crtc to get the timestamp for
 * @max_error: max error
 * @vblank_time: time value
 * @flags: flags passed to the driver
 *
 * Gets the timestamp on the requested crtc based on the
 * scanout position. (all asics).
 * Returns positive status flags on success, negative error on failure.
 */
int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
				    int *max_error,
				    struct timeval *vblank_time,
				    unsigned flags)
{
	struct drm_crtc *drmcrtc;
	struct radeon_device *rdev = dev->dev_private;

	if (crtc < 0 || crtc >= dev->num_crtcs) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	/* Get associated drm_crtc: */
	drmcrtc = &rdev->mode_info.crtcs[crtc]->base;

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
						     vblank_time, flags,
						     drmcrtc);
}

/*
 * IOCTL.
 */
int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	/* Not valid in KMS. */
	return -EINVAL;
}

#define KMS_INVALID_IOCTL(name)						\
int name(struct drm_device *dev, void *data, struct drm_file *file_priv)\
{									\
	DRM_ERROR("invalid ioctl with kms %s\n", __func__);		\
	return -EINVAL;							\
}

/*
 * All these ioctls are invalid in kms world.
 */
KMS_INVALID_IOCTL(radeon_cp_init_kms)
KMS_INVALID_IOCTL(radeon_cp_start_kms)
KMS_INVALID_IOCTL(radeon_cp_stop_kms)
KMS_INVALID_IOCTL(radeon_cp_reset_kms)
KMS_INVALID_IOCTL(radeon_cp_idle_kms)
KMS_INVALID_IOCTL(radeon_cp_resume_kms)
KMS_INVALID_IOCTL(radeon_engine_reset_kms)
KMS_INVALID_IOCTL(radeon_fullscreen_kms)
KMS_INVALID_IOCTL(radeon_cp_swap_kms)
KMS_INVALID_IOCTL(radeon_cp_clear_kms)
KMS_INVALID_IOCTL(radeon_cp_vertex_kms)
KMS_INVALID_IOCTL(radeon_cp_indices_kms)
KMS_INVALID_IOCTL(radeon_cp_texture_kms)
KMS_INVALID_IOCTL(radeon_cp_stipple_kms)
KMS_INVALID_IOCTL(radeon_cp_indirect_kms)
KMS_INVALID_IOCTL(radeon_cp_vertex2_kms)
KMS_INVALID_IOCTL(radeon_cp_cmdbuf_kms)
KMS_INVALID_IOCTL(radeon_cp_getparam_kms)
KMS_INVALID_IOCTL(radeon_cp_flip_kms)
KMS_INVALID_IOCTL(radeon_mem_alloc_kms)
KMS_INVALID_IOCTL(radeon_mem_free_kms)
KMS_INVALID_IOCTL(radeon_mem_init_heap_kms)
KMS_INVALID_IOCTL(radeon_irq_emit_kms)
KMS_INVALID_IOCTL(radeon_irq_wait_kms)
KMS_INVALID_IOCTL(radeon_cp_setparam_kms)
KMS_INVALID_IOCTL(radeon_surface_alloc_kms)
KMS_INVALID_IOCTL(radeon_surface_free_kms)


struct drm_ioctl_desc radeon_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
	/* KMS */
	DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED),
};
int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
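
/*
 * Illustrative sketch (not part of the driver): one way userspace might
 * exercise radeon_info_ioctl() above through libdrm's drmCommandWriteRead()
 * helper and the uapi definitions from radeon_drm.h.  The device node path
 * is only an example; the snippet is a minimal sketch under those
 * assumptions, not a reference implementation.
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <xf86drm.h>
 *	#include <drm/radeon_drm.h>
 *
 *	int main(void)
 *	{
 *		uint32_t dev_id = 0;
 *		int fd = open("/dev/dri/card0", O_RDWR);  // example node
 *		struct drm_radeon_info info = {
 *			.request = RADEON_INFO_DEVICE_ID,
 *			.value = (uintptr_t)&dev_id,  // kernel copies the result here
 *		};
 *
 *		if (fd < 0)
 *			return 1;
 *		if (drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info)) == 0)
 *			printf("PCI device id: 0x%04x\n", dev_id);
 *		close(fd);
 *		return 0;
 *	}
 */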