/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"

void r100_cs_dump_packet(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt);

static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
	struct drm_device *ddev = p->rdev->ddev;
	struct radeon_cs_chunk *chunk;
	unsigned i, j;
	bool duplicate;

	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
	chunk = &p->chunks[p->chunk_relocs_idx];
	p->dma_reloc_idx = 0;
	/* FIXME: we assume that each reloc uses 4 dwords */
	p->nrelocs = chunk->length_dw / 4;
	p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
	if (p->relocs_ptr == NULL) {
		return -ENOMEM;
	}
	p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nrelocs; i++) {
		struct drm_radeon_cs_reloc *r;

		duplicate = false;
		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
		for (j = 0; j < i; j++) {
			if (r->handle == p->relocs[j].handle) {
				p->relocs_ptr[i] = &p->relocs[j];
				duplicate = true;
				break;
			}
		}
		if (!duplicate) {
			p->relocs[i].gobj = drm_gem_object_lookup(ddev,
								  p->filp,
								  r->handle);
			if (p->relocs[i].gobj == NULL) {
				DRM_ERROR("gem object lookup failed 0x%x\n",
					  r->handle);
				return -ENOENT;
			}
			p->relocs_ptr[i] = &p->relocs[i];
			p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
			p->relocs[i].lobj.bo = p->relocs[i].robj;
			p->relocs[i].lobj.wdomain = r->write_domain;
			p->relocs[i].lobj.rdomain = r->read_domains;
			p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
			p->relocs[i].handle = r->handle;
			p->relocs[i].flags = r->flags;
			radeon_bo_list_add_object(&p->relocs[i].lobj,
						  &p->validated);
		} else
			p->relocs[i].handle = 0;
	}
	return radeon_bo_list_validate(&p->validated);
}

static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
{
	p->priority = priority;

	switch (ring) {
	default:
		DRM_ERROR("unknown ring id: %d\n", ring);
		return -EINVAL;
	case RADEON_CS_RING_GFX:
		p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_COMPUTE:
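		/* SI and newer have dedicated compute rings and pick between
		 * them based on the priority passed in by userspace; older
		 * ASICs submit compute work on the GFX ring.
		 */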
		if (p->rdev->family >= CHIP_TAHITI) {
			if (p->priority > 0)
				p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
		} else
			p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_DMA:
		if (p->rdev->family >= CHIP_CAYMAN) {
			if (p->priority > 0)
				p->ring = R600_RING_TYPE_DMA_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
		} else if (p->rdev->family >= CHIP_R600) {
			p->ring = R600_RING_TYPE_DMA_INDEX;
		} else {
			return -EINVAL;
		}
		break;
	}
	return 0;
}

static void radeon_cs_sync_to(struct radeon_cs_parser *p,
			      struct radeon_fence *fence)
{
	struct radeon_fence *other;

	if (!fence)
		return;

	other = p->ib.sync_to[fence->ring];
	p->ib.sync_to[fence->ring] = radeon_fence_later(fence, other);
}

static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
	int i;

	for (i = 0; i < p->nrelocs; i++) {
		if (!p->relocs[i].robj)
			continue;

		radeon_cs_sync_to(p, p->relocs[i].robj->tbo.sync_obj);
	}
}

/* XXX: note that this is called from the legacy UMS CS ioctl as well */
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
	struct drm_radeon_cs *cs = data;
	uint64_t *chunk_array_ptr;
	unsigned size, i;
	u32 ring = RADEON_CS_RING_GFX;
	s32 priority = 0;

	if (!cs->num_chunks) {
		return 0;
	}
	/* get chunks */
	INIT_LIST_HEAD(&p->validated);
	p->idx = 0;
	p->ib.sa_bo = NULL;
	p->ib.semaphore = NULL;
	p->const_ib.sa_bo = NULL;
	p->const_ib.semaphore = NULL;
	p->chunk_ib_idx = -1;
	p->chunk_relocs_idx = -1;
	p->chunk_flags_idx = -1;
	p->chunk_const_ib_idx = -1;
	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (p->chunks_array == NULL) {
		return -ENOMEM;
	}
	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
	if (DRM_COPY_FROM_USER(p->chunks_array, chunk_array_ptr,
			       sizeof(uint64_t)*cs->num_chunks)) {
		return -EFAULT;
	}
	p->cs_flags = 0;
	p->nchunks = cs->num_chunks;
	p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
	if (p->chunks == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nchunks; i++) {
		struct drm_radeon_cs_chunk __user *chunk_ptr;
		struct drm_radeon_cs_chunk user_chunk;

		chunk_ptr = (void __user *)(unsigned long)p->chunks_array[i];
		if (DRM_COPY_FROM_USER(&user_chunk, chunk_ptr,
				       sizeof(struct drm_radeon_cs_chunk))) {
			return -EFAULT;
		}
		p->chunks[i].length_dw = user_chunk.length_dw;
		p->chunks[i].kdata = NULL;
		p->chunks[i].chunk_id = user_chunk.chunk_id;

		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
			p->chunk_relocs_idx = i;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
			p->chunk_ib_idx = i;
			/* zero length IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) {
			p->chunk_const_ib_idx = i;
			/* zero length CONST IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->chunk_flags_idx = i;
			/* zero length flags aren't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}

		p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
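		/* Reloc and flags chunks are needed up front, so copy them
		 * into kernel memory right away.  IB chunks are left in user
		 * memory and paged in on demand, see radeon_cs_update_pages().
		 */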
		if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ||
		    (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) {
			size = p->chunks[i].length_dw * sizeof(uint32_t);
			p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
			if (p->chunks[i].kdata == NULL) {
				return -ENOMEM;
			}
			if (DRM_COPY_FROM_USER(p->chunks[i].kdata,
					       p->chunks[i].user_ptr, size)) {
				return -EFAULT;
			}
			if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
				p->cs_flags = p->chunks[i].kdata[0];
				if (p->chunks[i].length_dw > 1)
					ring = p->chunks[i].kdata[1];
				if (p->chunks[i].length_dw > 2)
					priority = (s32)p->chunks[i].kdata[2];
			}
		}
	}

	/* these are KMS only */
	if (p->rdev) {
		if ((p->cs_flags & RADEON_CS_USE_VM) &&
		    !p->rdev->vm_manager.enabled) {
			DRM_ERROR("VM not active on asic!\n");
			return -EINVAL;
		}

		/* we only support VM on SI+ */
		if ((p->rdev->family >= CHIP_TAHITI) &&
		    ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
			DRM_ERROR("VM required on SI+!\n");
			return -EINVAL;
		}

		if (radeon_cs_get_ring(p, ring, priority))
			return -EINVAL;
	}

	/* deal with non-vm */
	if ((p->chunk_ib_idx != -1) &&
	    ((p->cs_flags & RADEON_CS_USE_VM) == 0) &&
	    (p->chunks[p->chunk_ib_idx].chunk_id == RADEON_CHUNK_ID_IB)) {
		if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
			DRM_ERROR("cs IB too big: %d\n",
				  p->chunks[p->chunk_ib_idx].length_dw);
			return -EINVAL;
		}
		if (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) {
			p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
			p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
			if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
			    p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
				kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
				kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
				p->chunks[p->chunk_ib_idx].kpage[0] = NULL;
				p->chunks[p->chunk_ib_idx].kpage[1] = NULL;
				return -ENOMEM;
			}
		}
		p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1;
		p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1;
		p->chunks[p->chunk_ib_idx].last_copied_page = -1;
		p->chunks[p->chunk_ib_idx].last_page_index =
			((p->chunks[p->chunk_ib_idx].length_dw * 4) - 1) / PAGE_SIZE;
	}

	return 0;
}
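/*
 * A CS submission flows through radeon_cs_ioctl() as follows:
 *   radeon_cs_parser_init()   - copy in the chunk headers (above)
 *   radeon_cs_parser_relocs() - look up and validate the referenced BOs
 *   radeon_cs_ib_chunk()      - parse and schedule a non-VM IB
 *   radeon_cs_ib_vm_chunk()   - copy, parse and schedule a VM IB
 *   radeon_cs_parser_fini()   - fence the BOs on success, back off their
 *                               reservations on error, then free everything
 */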
/**
 * radeon_cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 *
 * If error is set, then unvalidate the buffers, otherwise just free the
 * memory used by the parsing context.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
	unsigned i;

	if (!error) {
		ttm_eu_fence_buffer_objects(&parser->validated,
					    parser->ib.fence);
	} else {
		ttm_eu_backoff_reservation(&parser->validated);
	}

	if (parser->relocs != NULL) {
		for (i = 0; i < parser->nrelocs; i++) {
			if (parser->relocs[i].gobj)
				drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
		}
	}
	kfree(parser->track);
	kfree(parser->relocs);
	kfree(parser->relocs_ptr);
	for (i = 0; i < parser->nchunks; i++) {
		kfree(parser->chunks[i].kdata);
		if ((parser->rdev->flags & RADEON_IS_AGP)) {
			kfree(parser->chunks[i].kpage[0]);
			kfree(parser->chunks[i].kpage[1]);
		}
	}
	kfree(parser->chunks);
	kfree(parser->chunks_array);
	radeon_ib_free(parser->rdev, &parser->ib);
	radeon_ib_free(parser->rdev, &parser->const_ib);
}

static int radeon_cs_ib_chunk(struct radeon_device *rdev,
			      struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM)
		return 0;

	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
	/* Copy the packet into the IB, the parser will read from the
	 * input memory (cached) and write to the IB (which can be
	 * uncached).
	 */
	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  NULL, ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}
	parser->ib.length_dw = ib_chunk->length_dw;
	r = radeon_cs_parse(rdev, parser->ring, parser);
	if (r || parser->parser_error) {
		DRM_ERROR("Invalid command stream !\n");
		return r;
	}
	r = radeon_cs_finish_pages(parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		return r;
	}
	radeon_cs_sync_rings(parser);
	r = radeon_ib_schedule(rdev, &parser->ib, NULL);
	if (r) {
		DRM_ERROR("Failed to schedule IB !\n");
	}
	return r;
}

/* update the page tables: the ring scratch BO first, then every BO on the
 * validated list */
static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
				   struct radeon_vm *vm)
{
	struct radeon_device *rdev = parser->rdev;
	struct radeon_bo_list *lobj;
	struct radeon_bo *bo;
	int r;

	r = radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem);
	if (r) {
		return r;
	}
	list_for_each_entry(lobj, &parser->validated, tv.head) {
		bo = lobj->bo;
		r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem);
		if (r) {
			return r;
		}
	}
	return 0;
}

static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
				 struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;
	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
		return 0;
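	/* On SI an optional CONST IB may accompany the main IB.  It is
	 * copied in and parsed just like the main IB, and the two are
	 * handed to radeon_ib_schedule() together below.
	 */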
	if ((rdev->family >= CHIP_TAHITI) &&
	    (parser->chunk_const_ib_idx != -1)) {
		ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
			DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
			return -EINVAL;
		}
		r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
				  vm, ib_chunk->length_dw * 4);
		if (r) {
			DRM_ERROR("Failed to get const ib !\n");
			return r;
		}
		parser->const_ib.is_const_ib = true;
		parser->const_ib.length_dw = ib_chunk->length_dw;
		/* Copy the packet into the IB */
		if (DRM_COPY_FROM_USER(parser->const_ib.ptr, ib_chunk->user_ptr,
				       ib_chunk->length_dw * 4)) {
			return -EFAULT;
		}
		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
		if (r) {
			return r;
		}
	}

	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
	if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
		DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
		return -EINVAL;
	}
	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  vm, ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}
	parser->ib.length_dw = ib_chunk->length_dw;
	/* Copy the packet into the IB */
	if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr,
			       ib_chunk->length_dw * 4)) {
		return -EFAULT;
	}
	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
	if (r) {
		return r;
	}

	mutex_lock(&rdev->vm_manager.lock);
	mutex_lock(&vm->mutex);
	r = radeon_vm_alloc_pt(rdev, vm);
	if (r) {
		goto out;
	}
	r = radeon_bo_vm_update_pte(parser, vm);
	if (r) {
		goto out;
	}
	radeon_cs_sync_rings(parser);
	radeon_cs_sync_to(parser, vm->fence);
	radeon_cs_sync_to(parser, radeon_vm_grab_id(rdev, vm, parser->ring));

	if ((rdev->family >= CHIP_TAHITI) &&
	    (parser->chunk_const_ib_idx != -1)) {
		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib);
	} else {
		r = radeon_ib_schedule(rdev, &parser->ib, NULL);
	}

	if (!r) {
		radeon_vm_fence(rdev, vm, parser->ib.fence);
	}

out:
	radeon_vm_add_to_lru(rdev, vm);
	mutex_unlock(&vm->mutex);
	mutex_unlock(&rdev->vm_manager.lock);
	return r;
}

static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_cs_parser parser;
	int r;

	down_read(&rdev->exclusive_lock);
	if (!rdev->accel_working) {
		up_read(&rdev->exclusive_lock);
		return -EBUSY;
	}
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.rdev = rdev;
	parser.dev = rdev->dev;
	parser.family = rdev->family;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		radeon_cs_parser_fini(&parser, r);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}
	r = radeon_cs_parser_relocs(&parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to parse relocation %d!\n", r);
		radeon_cs_parser_fini(&parser, r);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}
	r = radeon_cs_ib_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
	r = radeon_cs_ib_vm_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
out:
	radeon_cs_parser_fini(&parser, r);
	up_read(&rdev->exclusive_lock);
	r = radeon_cs_handle_lockup(rdev, r);
	return r;
}
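/*
 * IB paging: when the IB chunk is left in user memory, it is copied in
 * one PAGE_SIZE piece at a time.  Each chunk keeps two kernel page slots
 * (kpage[0]/kpage[1]) and the page indices they currently hold
 * (kpage_idx[]); on AGP these slots are separate bounce buffers, otherwise
 * they point straight into the IB.  radeon_get_ib_value() reads through
 * this two-slot cache and radeon_cs_update_pages() refills whichever slot
 * holds the older page on a miss.
 */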
int radeon_cs_finish_pages(struct radeon_cs_parser *p)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	int i;
	int size = PAGE_SIZE;

	for (i = ibc->last_copied_page + 1; i <= ibc->last_page_index; i++) {
		/* only copy what is left on the last page */
		if (i == ibc->last_page_index) {
			size = (ibc->length_dw * 4) % PAGE_SIZE;
			if (size == 0)
				size = PAGE_SIZE;
		}

		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
				       ibc->user_ptr + (i * PAGE_SIZE),
				       size))
			return -EFAULT;
	}
	return 0;
}

static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
{
	int new_page;
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	int i;
	int size = PAGE_SIZE;
	/* without AGP the kpage slots point straight into the IB and a
	 * single copy suffices; with AGP we stage through bounce pages */
	bool copy1 = (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) ?
		false : true;

	/* pages between the last copied one and the requested one are
	 * copied straight into the IB without being cached */
	for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
				       ibc->user_ptr + (i * PAGE_SIZE),
				       PAGE_SIZE)) {
			p->parser_error = -EFAULT;
			return 0;
		}
	}

	if (pg_idx == ibc->last_page_index) {
		size = (ibc->length_dw * 4) % PAGE_SIZE;
		if (size == 0)
			size = PAGE_SIZE;
	}

	/* evict the slot that holds the older page */
	new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
	if (copy1)
		ibc->kpage[new_page] = p->ib.ptr + (pg_idx * (PAGE_SIZE / 4));

	if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
			       ibc->user_ptr + (pg_idx * PAGE_SIZE),
			       size)) {
		p->parser_error = -EFAULT;
		return 0;
	}

	/* AGP: the data was staged in the bounce page, copy it into the IB */
	if (!copy1)
		memcpy((void *)(p->ib.ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);

	ibc->last_copied_page = pg_idx;
	ibc->kpage_idx[new_page] = pg_idx;

	return new_page;
}

/* fetch dword idx of the IB, faulting pages in through the cache as needed */
u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	u32 pg_idx, pg_offset;
	u32 idx_value = 0;
	int new_page;

	pg_idx = (idx * 4) / PAGE_SIZE;
	pg_offset = (idx * 4) % PAGE_SIZE;

	if (ibc->kpage_idx[0] == pg_idx)
		return ibc->kpage[0][pg_offset/4];
	if (ibc->kpage_idx[1] == pg_idx)
		return ibc->kpage[1][pg_offset/4];

	new_page = radeon_cs_update_pages(p, pg_idx);
	if (new_page < 0) {
		p->parser_error = new_page;
		return 0;
	}

	idx_value = ibc->kpage[new_page][pg_offset/4];
	return idx_value;
}