/*
 * channel program interfaces
 *
 * Copyright IBM Corp. 2017
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/vfio.h>
#include <asm/idals.h>

#include "vfio_ccw_cp.h"

/*
 * Max length for ccw chain.
 * XXX: Limit to 256, need to check more?
 */
#define CCWCHAIN_LEN_MAX	256

struct pfn_array {
	unsigned long	pa_iova;
	unsigned long	*pa_iova_pfn;
	unsigned long	*pa_pfn;
	int		pa_nr;
};

struct pfn_array_table {
	struct pfn_array	*pat_pa;
	int			pat_nr;
};

struct ccwchain {
	struct list_head	next;
	struct ccw1		*ch_ccw;
	/* Guest physical address of the current chain. */
	u64			ch_iova;
	/* Count of the valid ccws in chain. */
	int			ch_len;
	/* Pinned PAGEs for the original data. */
	struct pfn_array_table	*ch_pat;
};

/*
 * pfn_array_pin() - pin user pages in memory
 * @pa: pfn_array on which to perform the operation
 * @mdev: the mediated device to perform pin/unpin operations
 *
 * Attempt to pin user pages in memory.
 *
 * Usage of pfn_array:
 * @pa->pa_iova     starting guest physical I/O address. Assigned by caller.
 * @pa->pa_iova_pfn array that stores PFNs of the pages that need to be
 *                  pinned. Allocated by caller.
 * @pa->pa_pfn      array that receives PFNs of the pages pinned. Allocated
 *                  by caller.
 * @pa->pa_nr       number of pages from @pa->pa_iova to pin. Assigned by
 *                  caller.
 *                  number of pages pinned. Assigned by callee.
 *
 * Returns:
 *   Number of pages pinned on success.
 *   If @pa->pa_nr is 0 or negative, returns 0.
 *   If no pages were pinned, returns -errno.
 */
static int pfn_array_pin(struct pfn_array *pa, struct device *mdev)
{
	int i, ret;

	if (pa->pa_nr <= 0) {
		pa->pa_nr = 0;
		return 0;
	}

	pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
	for (i = 1; i < pa->pa_nr; i++)
		pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;

	ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
			     IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);

	if (ret > 0 && ret != pa->pa_nr) {
		/* Partial pin: roll back so the array stays consistent. */
		vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret);
		pa->pa_nr = 0;
		return 0;
	}

	return ret;
}

/* Unpin the pages before releasing the memory. */
static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev)
{
	vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
	pa->pa_nr = 0;
	kfree(pa->pa_iova_pfn);
}

/* Alloc memory for PFNs, then pin pages with them. */
static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
			       u64 iova, unsigned int len)
{
	int ret = 0;

	if (!len || pa->pa_nr)
		return -EINVAL;

	pa->pa_iova = iova;

	pa->pa_nr = ((iova & ~PAGE_MASK) + len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (!pa->pa_nr)
		return -EINVAL;

	/* One allocation serves both arrays; pa_pfn points at the tail. */
	pa->pa_iova_pfn = kcalloc(pa->pa_nr,
				  sizeof(*pa->pa_iova_pfn) +
				  sizeof(*pa->pa_pfn),
				  GFP_KERNEL);
	if (unlikely(!pa->pa_iova_pfn))
		return -ENOMEM;
	pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;

	ret = pfn_array_pin(pa, mdev);

	if (ret > 0)
		return ret;
	else if (!ret)
		ret = -EINVAL;

	kfree(pa->pa_iova_pfn);

	return ret;
}
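/*
 * Illustration (not part of the interface): after a successful
 * pfn_array_alloc_pin() of 0x1800 bytes at iova 0x10400, with 4K
 * pages, the pfn_array looks like this:
 *
 *	pa_iova     = 0x10400
 *	pa_nr       = 2
 *	pa_iova_pfn = { 0x10, 0x11 }	guest PFNs, computed in pfn_array_pin()
 *	pa_pfn      = { X, Y }		host PFNs filled by vfio_pin_pages()
 *
 * Since pa_iova_pfn and pa_pfn share one allocation, kfree()ing
 * pa_iova_pfn in pfn_array_unpin_free() releases both arrays.
 */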
static int pfn_array_table_init(struct pfn_array_table *pat, int nr)
{
	pat->pat_pa = kcalloc(nr, sizeof(*pat->pat_pa), GFP_KERNEL);
	if (unlikely(ZERO_OR_NULL_PTR(pat->pat_pa))) {
		pat->pat_nr = 0;
		return -ENOMEM;
	}

	pat->pat_nr = nr;

	return 0;
}

static void pfn_array_table_unpin_free(struct pfn_array_table *pat,
				       struct device *mdev)
{
	int i;

	for (i = 0; i < pat->pat_nr; i++)
		pfn_array_unpin_free(pat->pat_pa + i, mdev);

	if (pat->pat_nr) {
		kfree(pat->pat_pa);
		pat->pat_pa = NULL;
		pat->pat_nr = 0;
	}
}

static bool pfn_array_table_iova_pinned(struct pfn_array_table *pat,
					unsigned long iova)
{
	struct pfn_array *pa = pat->pat_pa;
	unsigned long iova_pfn = iova >> PAGE_SHIFT;
	int i, j;

	for (i = 0; i < pat->pat_nr; i++, pa++)
		for (j = 0; j < pa->pa_nr; j++)
			if (pa->pa_iova_pfn[j] == iova_pfn)
				return true;

	return false;
}

/* Create the list of idal words for a pfn_array_table. */
static inline void pfn_array_table_idal_create_words(
	struct pfn_array_table *pat,
	unsigned long *idaws)
{
	struct pfn_array *pa;
	int i, j, k;

	/*
	 * Idal words (except the first one) rely on the memory being 4k
	 * aligned. If a user virtual address is 4K aligned, then its
	 * corresponding kernel physical address will also be 4K aligned.
	 * Thus there is no problem here with simply using the phys to
	 * create an idaw.
	 */
	k = 0;
	for (i = 0; i < pat->pat_nr; i++) {
		pa = pat->pat_pa + i;
		for (j = 0; j < pa->pa_nr; j++) {
			idaws[k] = pa->pa_pfn[j] << PAGE_SHIFT;
			if (k == 0)
				idaws[k] += pa->pa_iova & (PAGE_SIZE - 1);
			k++;
		}
	}
}
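/*
 * Example (illustrative only): for the two-page pin sketched earlier
 * (0x1800 bytes at iova 0x10400), the loop in
 * pfn_array_table_idal_create_words() emits:
 *
 *	idaws[0] = (pa_pfn[0] << PAGE_SHIFT) + 0x400;	keeps the sub-page offset
 *	idaws[1] =  pa_pfn[1] << PAGE_SHIFT;		page aligned
 */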
/*
 * Within the domain (@mdev), copy @n bytes from a guest physical
 * address (@iova) to a host physical address (@to).
 */
static long copy_from_iova(struct device *mdev,
			   void *to, u64 iova,
			   unsigned long n)
{
	struct pfn_array pa = {0};
	u64 from;
	int i, ret;
	unsigned long l, m;

	ret = pfn_array_alloc_pin(&pa, mdev, iova, n);
	if (ret <= 0)
		return ret;

	l = n;
	for (i = 0; i < pa.pa_nr; i++) {
		from = pa.pa_pfn[i] << PAGE_SHIFT;
		m = PAGE_SIZE;
		if (i == 0) {
			from += iova & (PAGE_SIZE - 1);
			m -= iova & (PAGE_SIZE - 1);
		}

		m = min(l, m);
		memcpy(to + (n - l), (void *)from, m);

		l -= m;
		if (l == 0)
			break;
	}

	pfn_array_unpin_free(&pa, mdev);

	return l;
}

static long copy_ccw_from_iova(struct channel_program *cp,
			       struct ccw1 *to, u64 iova,
			       unsigned long len)
{
	struct ccw0 ccw0;
	struct ccw1 *pccw1;
	int ret;
	int i;

	ret = copy_from_iova(cp->mdev, to, iova, len * sizeof(struct ccw1));
	if (ret)
		return ret;

	if (!cp->orb.cmd.fmt) {
		pccw1 = to;
		for (i = 0; i < len; i++) {
			ccw0 = *(struct ccw0 *)pccw1;
			if ((pccw1->cmd_code & 0x0f) == CCW_CMD_TIC) {
				pccw1->cmd_code = CCW_CMD_TIC;
				pccw1->flags = 0;
				pccw1->count = 0;
			} else {
				pccw1->cmd_code = ccw0.cmd_code;
				pccw1->flags = ccw0.flags;
				pccw1->count = ccw0.count;
			}
			pccw1->cda = ccw0.cda;
			pccw1++;
		}
	}

	return ret;
}
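/*
 * Sketch of the format conversion in copy_ccw_from_iova() above, for
 * illustration (field layout as in struct ccw0/ccw1): a format-0
 * write ccw such as
 *
 *	cmd_code = 0x01, cda = 0x123456 (24-bit), flags = CC, count = 0x10
 *
 * is rewritten in place to a format-1 ccw with the same
 * cmd_code/flags/count and the cda widened to 31 bits. For TICs the
 * flags and count are zeroed, as they are unused by that command.
 */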
/*
 * Helpers to operate ccwchain.
 */
#define ccw_is_test(_ccw) (((_ccw)->cmd_code & 0x0F) == 0)

#define ccw_is_noop(_ccw) ((_ccw)->cmd_code == CCW_CMD_NOOP)

#define ccw_is_tic(_ccw) ((_ccw)->cmd_code == CCW_CMD_TIC)

#define ccw_is_idal(_ccw) ((_ccw)->flags & CCW_FLAG_IDA)

#define ccw_is_chain(_ccw) ((_ccw)->flags & (CCW_FLAG_CC | CCW_FLAG_DC))

static struct ccwchain *ccwchain_alloc(struct channel_program *cp, int len)
{
	struct ccwchain *chain;
	void *data;
	size_t size;

	/* Make ccw address aligned to 8. */
	size = ((sizeof(*chain) + 7L) & -8L) +
	       sizeof(*chain->ch_ccw) * len +
	       sizeof(*chain->ch_pat) * len;
	chain = kzalloc(size, GFP_DMA | GFP_KERNEL);
	if (!chain)
		return NULL;

	data = (u8 *)chain + ((sizeof(*chain) + 7L) & -8L);
	chain->ch_ccw = (struct ccw1 *)data;

	data = (u8 *)(chain->ch_ccw) + sizeof(*chain->ch_ccw) * len;
	chain->ch_pat = (struct pfn_array_table *)data;

	chain->ch_len = len;

	list_add_tail(&chain->next, &cp->ccwchain_list);

	return chain;
}
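/*
 * Layout of the single ccwchain_alloc() allocation (illustrative):
 *
 *	+---------------------------+ <- chain
 *	| struct ccwchain           |
 *	+---------------------------+ <- rounded up to 8 bytes
 *	| struct ccw1 [len]         |    chain->ch_ccw
 *	+---------------------------+
 *	| struct pfn_array_table    |    chain->ch_pat
 *	|            [len]          |
 *	+---------------------------+
 *
 * GFP_DMA keeps the allocation below 2G on s390, so the 31-bit cda
 * of a TIC or an orb can address the ccws.
 */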
static void ccwchain_free(struct ccwchain *chain)
{
	list_del(&chain->next);
	kfree(chain);
}

/* Free resources for a ccw that allocated memory for its cda. */
static void ccwchain_cda_free(struct ccwchain *chain, int idx)
{
	struct ccw1 *ccw = chain->ch_ccw + idx;

	if (!ccw->count)
		return;

	kfree((void *)(u64)ccw->cda);
}

/* Unpin the pages then free the memory resources. */
static void cp_unpin_free(struct channel_program *cp)
{
	struct ccwchain *chain, *temp;
	int i;

	list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) {
		for (i = 0; i < chain->ch_len; i++) {
			pfn_array_table_unpin_free(chain->ch_pat + i,
						   cp->mdev);
			ccwchain_cda_free(chain, i);
		}
		ccwchain_free(chain);
	}
}

/**
 * ccwchain_calc_length - calculate the length of the ccw chain.
 * @iova: guest physical address of the target ccw chain
 * @cp: channel_program on which to perform the operation
 *
 * This is the chain length, not considering any TICs.
 * You need to do a new round for each TIC target.
 *
 * Returns: the length of the ccw chain or -errno.
 */
static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
{
	struct ccw1 *ccw, *p;
	int cnt;

	/*
	 * Copy current chain from guest to host kernel.
	 * Currently the chain length is limited to CCWCHAIN_LEN_MAX (256).
	 * So copying 2K is enough (safe).
	 */
	p = ccw = kcalloc(CCWCHAIN_LEN_MAX, sizeof(*ccw), GFP_KERNEL);
	if (!ccw)
		return -ENOMEM;

	cnt = copy_ccw_from_iova(cp, ccw, iova, CCWCHAIN_LEN_MAX);
	if (cnt) {
		kfree(ccw);
		return cnt;
	}

	/* The chain ends at the first ccw that neither chains nor TICs. */
	cnt = 0;
	do {
		cnt++;

		if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw)))
			break;

		ccw++;
	} while (cnt < CCWCHAIN_LEN_MAX + 1);

	if (cnt == CCWCHAIN_LEN_MAX + 1)
		cnt = -EINVAL;

	kfree(p);
	return cnt;
}

static int tic_target_chain_exists(struct ccw1 *tic, struct channel_program *cp)
{
	struct ccwchain *chain;
	u32 ccw_head, ccw_tail;

	list_for_each_entry(chain, &cp->ccwchain_list, next) {
		ccw_head = chain->ch_iova;
		ccw_tail = ccw_head + (chain->ch_len - 1) * sizeof(struct ccw1);

		if ((ccw_head <= tic->cda) && (tic->cda <= ccw_tail))
			return 1;
	}

	return 0;
}

static int ccwchain_loop_tic(struct ccwchain *chain,
			     struct channel_program *cp);

static int ccwchain_handle_tic(struct ccw1 *tic, struct channel_program *cp)
{
	struct ccwchain *chain;
	int len, ret;

	/* May transfer to an existing chain. */
	if (tic_target_chain_exists(tic, cp))
		return 0;

	/* Get chain length. */
	len = ccwchain_calc_length(tic->cda, cp);
	if (len < 0)
		return len;

	/* Need to allocate a new chain for this one. */
	chain = ccwchain_alloc(cp, len);
	if (!chain)
		return -ENOMEM;
	chain->ch_iova = tic->cda;

	/* Copy the new chain from guest. */
	ret = copy_ccw_from_iova(cp, chain->ch_ccw, tic->cda, len);
	if (ret) {
		ccwchain_free(chain);
		return ret;
	}

	/* Loop for tics on this new chain. */
	return ccwchain_loop_tic(chain, cp);
}
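/*
 * Illustration of the recursion above (hypothetical guest program):
 *
 *	chain A: ccw0 (CC) -> ccw1 (CC) -> tic ---+
 *	                                          |
 *	chain B: ccw0 (CC) -> ccw1 (end)  <-------+
 *
 * Handling the tic in chain A finds no existing chain covering its
 * cda, so chain B is measured, allocated, copied from the guest, and
 * then scanned for TICs of its own. For a TIC that points back into
 * an already-copied chain (a loop), tic_target_chain_exists()
 * terminates the recursion.
 */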
/* Loop for TICs. */
static int ccwchain_loop_tic(struct ccwchain *chain, struct channel_program *cp)
{
	struct ccw1 *tic;
	int i, ret;

	for (i = 0; i < chain->ch_len; i++) {
		tic = chain->ch_ccw + i;

		if (!ccw_is_tic(tic))
			continue;

		ret = ccwchain_handle_tic(tic, cp);
		if (ret)
			return ret;
	}

	return 0;
}

static int ccwchain_fetch_tic(struct ccwchain *chain,
			      int idx,
			      struct channel_program *cp)
{
	struct ccw1 *ccw = chain->ch_ccw + idx;
	struct ccwchain *iter;
	u32 ccw_head, ccw_tail;

	list_for_each_entry(iter, &cp->ccwchain_list, next) {
		ccw_head = iter->ch_iova;
		ccw_tail = ccw_head + (iter->ch_len - 1) * sizeof(struct ccw1);

		if ((ccw_head <= ccw->cda) && (ccw->cda <= ccw_tail)) {
			/* Redirect the guest cda into the host copy. */
			ccw->cda = (__u32) (addr_t) (((char *)iter->ch_ccw) +
						     (ccw->cda - ccw_head));
			return 0;
		}
	}

	return -EFAULT;
}

static int ccwchain_fetch_direct(struct ccwchain *chain,
				 int idx,
				 struct channel_program *cp)
{
	struct ccw1 *ccw;
	struct pfn_array_table *pat;
	unsigned long *idaws;
	int idaw_nr;

	ccw = chain->ch_ccw + idx;

	/*
	 * Pin data page(s) in memory.
	 * The number of pages pinned is the count of the idaws that will
	 * be needed when translating a direct ccw to an idal ccw.
	 */
	pat = chain->ch_pat + idx;
	if (pfn_array_table_init(pat, 1))
		return -ENOMEM;
	idaw_nr = pfn_array_alloc_pin(pat->pat_pa, cp->mdev,
				      ccw->cda, ccw->count);
	if (idaw_nr < 0)
		return idaw_nr;

	/* Translate this direct ccw to an idal ccw. */
	idaws = kcalloc(idaw_nr, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
	if (!idaws) {
		pfn_array_table_unpin_free(pat, cp->mdev);
		return -ENOMEM;
	}
	ccw->cda = (__u32) virt_to_phys(idaws);
	ccw->flags |= CCW_FLAG_IDA;

	pfn_array_table_idal_create_words(pat, idaws);

	return 0;
}
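/*
 * Before/after sketch of the direct-ccw translation above
 * (illustrative):
 *
 *	direct:	cda -> guest buffer, possibly above 2G once pinned
 *	idal:	cda -> idaws[0] -> host page 0 (+ sub-page offset)
 *		       idaws[1] -> host page 1
 *		       ...
 *
 * The idaw array itself is GFP_DMA kernel memory, so the 31-bit cda
 * of the format-1 ccw can reach it, while each 64-bit idaw can point
 * anywhere in host memory.
 */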
static int ccwchain_fetch_idal(struct ccwchain *chain,
			       int idx,
			       struct channel_program *cp)
{
	struct ccw1 *ccw;
	struct pfn_array_table *pat;
	unsigned long *idaws;
	u64 idaw_iova;
	unsigned int idaw_nr, idaw_len;
	int i, ret;

	ccw = chain->ch_ccw + idx;

	/* Calculate size of idaws. */
	ret = copy_from_iova(cp->mdev, &idaw_iova, ccw->cda, sizeof(idaw_iova));
	if (ret)
		return ret;
	idaw_nr = idal_nr_words((void *)(idaw_iova), ccw->count);
	idaw_len = idaw_nr * sizeof(*idaws);

	/* Pin data page(s) in memory. */
	pat = chain->ch_pat + idx;
	ret = pfn_array_table_init(pat, idaw_nr);
	if (ret)
		return ret;

	/* Translate the idal ccw to use newly allocated idaws. */
	idaws = kzalloc(idaw_len, GFP_DMA | GFP_KERNEL);
	if (!idaws) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	ret = copy_from_iova(cp->mdev, idaws, ccw->cda, idaw_len);
	if (ret)
		goto out_free_idaws;

	ccw->cda = virt_to_phys(idaws);

	for (i = 0; i < idaw_nr; i++) {
		idaw_iova = *(idaws + i);
		if (IS_ERR_VALUE(idaw_iova)) {
			ret = -EFAULT;
			goto out_free_idaws;
		}

		ret = pfn_array_alloc_pin(pat->pat_pa + i, cp->mdev,
					  idaw_iova, 1);
		if (ret < 0)
			goto out_free_idaws;
	}

	pfn_array_table_idal_create_words(pat, idaws);

	return 0;

out_free_idaws:
	kfree(idaws);
out_unpin:
	pfn_array_table_unpin_free(pat, cp->mdev);
	return ret;
}

/*
 * Fetch one ccw.
 * To reduce memory copy, we'll pin the cda page in memory,
 * and to get rid of the cda 2G limitation of ccw1, we'll translate
 * direct ccws to idal ccws.
 */
static int ccwchain_fetch_one(struct ccwchain *chain,
			      int idx,
			      struct channel_program *cp)
{
	struct ccw1 *ccw = chain->ch_ccw + idx;

	if (ccw_is_test(ccw) || ccw_is_noop(ccw))
		return 0;

	if (ccw_is_tic(ccw))
		return ccwchain_fetch_tic(chain, idx, cp);

	if (ccw_is_idal(ccw))
		return ccwchain_fetch_idal(chain, idx, cp);

	return ccwchain_fetch_direct(chain, idx, cp);
}

/**
 * cp_init() - allocate ccwchains for a channel program.
 * @cp: channel_program on which to perform the operation
 * @mdev: the mediated device to perform pin/unpin operations
 * @orb: control block for the channel program from the guest
 *
 * This creates one or more ccwchain(s), and copies the raw data of
 * the target channel program from @orb->cmd.iova to the new ccwchain(s).
 *
 * Limitations:
 * 1. Supports only prefetch enabled mode.
 * 2. Supports idal(c64) ccw chaining.
 * 3. Supports 4k idaw.
 *
 * Returns:
 *   %0 on success and a negative error value on failure.
 */
int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
{
	u64 iova = orb->cmd.cpa;
	struct ccwchain *chain;
	int len, ret;

	/*
	 * XXX:
	 * Only support prefetch enable mode now.
	 * Only support 64bit addressing idal.
	 * Only support 4k IDAW.
	 */
	if (!orb->cmd.pfch || !orb->cmd.c64 || orb->cmd.i2k)
		return -EOPNOTSUPP;

	INIT_LIST_HEAD(&cp->ccwchain_list);
	memcpy(&cp->orb, orb, sizeof(*orb));
	cp->mdev = mdev;

	/* Get chain length. */
	len = ccwchain_calc_length(iova, cp);
	if (len < 0)
		return len;

	/* Alloc mem for the head chain. */
	chain = ccwchain_alloc(cp, len);
	if (!chain)
		return -ENOMEM;
	chain->ch_iova = iova;

	/* Copy the head chain from guest. */
	ret = copy_ccw_from_iova(cp, chain->ch_ccw, iova, len);
	if (ret) {
		ccwchain_free(chain);
		return ret;
	}

	/* Now loop for its TICs. */
	ret = ccwchain_loop_tic(chain, cp);
	if (ret)
		cp_unpin_free(cp);

	return ret;
}

/**
 * cp_free() - free resources for channel program.
 * @cp: channel_program on which to perform the operation
 *
 * This unpins the memory pages and frees the memory space occupied by
 * @cp, which must have been returned by a previous call to cp_init().
 * Otherwise, undefined behavior occurs.
 */
void cp_free(struct channel_program *cp)
{
	cp_unpin_free(cp);
}
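/*
 * Typical calling sequence (hypothetical caller; the real users of
 * this API are the vfio-ccw state machine routines):
 *
 *	ret = cp_init(cp, mdev, orb);		copy program from guest
 *	if (!ret)
 *		ret = cp_prefetch(cp);		pin pages, translate addresses
 *	if (!ret)
 *		ret = ssch(schid, cp_get_orb(cp, intparm, lpm));
 *	...					I/O interrupt delivers the scsw
 *	cp_update_scsw(cp, &irb->scsw);		host cpa -> guest cpa
 *	cp_free(cp);				unpin and release
 */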
/**
 * cp_prefetch() - translate a guest physical address channel program to
 *                 a real-device runnable channel program.
 * @cp: channel_program on which to perform the operation
 *
 * This function translates the guest-physical-address channel program
 * and stores the result to ccwchain list. @cp must have been
 * initialized by a previous call with cp_init(). Otherwise, undefined
 * behavior occurs.
 *
 * The S/390 CCW Translation APIs (prefixed by 'cp_') are introduced
 * as helpers to do ccw chain translation inside the kernel. Basically
 * they accept a channel program issued by a virtual machine, and
 * translate the channel program to a real-device runnable channel
 * program.
 *
 * These APIs will copy the ccws into kernel-space buffers, and update
 * the guest physical addresses with their corresponding host physical
 * addresses. Then channel I/O device drivers could issue the
 * translated channel program to real devices to perform an I/O
 * operation.
 *
 * These interfaces are designed to support translation only for
 * channel programs, which are generated and formatted by a
 * guest. Thus this will make it possible for things like VFIO to
 * leverage the interfaces to pass a channel I/O mediated device
 * through to QEMU.
 *
 * We support direct ccw chaining by translating them to idal ccws.
 *
 * Returns:
 *   %0 on success and a negative error value on failure.
 */
int cp_prefetch(struct channel_program *cp)
{
	struct ccwchain *chain;
	int len, idx, ret;

	list_for_each_entry(chain, &cp->ccwchain_list, next) {
		len = chain->ch_len;
		for (idx = 0; idx < len; idx++) {
			ret = ccwchain_fetch_one(chain, idx, cp);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/**
 * cp_get_orb() - get the orb of the channel program
 * @cp: channel_program on which to perform the operation
 * @intparm: new intparm for the returned orb
 * @lpm: candidate value of the logical-path mask for the returned orb
 *
 * This function returns the address of the updated orb of the channel
 * program. Channel I/O device drivers could use this orb to issue a
 * ssch.
 */
union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm)
{
	union orb *orb;
	struct ccwchain *chain;
	struct ccw1 *cpa;

	orb = &cp->orb;

	orb->cmd.intparm = intparm;
	orb->cmd.fmt = 1;
	orb->cmd.key = PAGE_DEFAULT_KEY >> 4;

	if (orb->cmd.lpm == 0)
		orb->cmd.lpm = lpm;

	chain = list_first_entry(&cp->ccwchain_list, struct ccwchain, next);
	cpa = chain->ch_ccw;
	orb->cmd.cpa = (__u32) __pa(cpa);

	return orb;
}

/**
 * cp_update_scsw() - update scsw for a channel program.
 * @cp: channel_program on which to perform the operation
 * @scsw: I/O results of the channel program and also the target to be
 *        updated
 *
 * @scsw contains the I/O results of the channel program pointed to
 * by @cp. However, what @scsw->cpa stores is a host physical
 * address, which is meaningless for the guest, which is waiting for
 * the I/O results.
 *
 * This function updates @scsw->cpa to its corresponding guest physical
 * address.
 */
void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
{
	struct ccwchain *chain;
	u32 cpa = scsw->cmd.cpa;
	u32 ccw_head, ccw_tail;

	/*
	 * LATER:
	 * For now, only update the cmd.cpa part. We may need to deal with
	 * other portions of the schib as well, even if we don't return them
	 * in the ioctl directly. Path status changes etc.
	 */
	list_for_each_entry(chain, &cp->ccwchain_list, next) {
		ccw_head = (u32)(u64)chain->ch_ccw;
		ccw_tail = (u32)(u64)(chain->ch_ccw + chain->ch_len - 1);

		if ((ccw_head <= cpa) && (cpa <= ccw_tail)) {
			/*
			 * (cpa - ccw_head) is the offset value of the host
			 * physical ccw to its chain head.
			 * Adding this value to the guest physical ccw chain
			 * head gets us the guest cpa.
			 */
			cpa = chain->ch_iova + (cpa - ccw_head);
			break;
		}
	}

	scsw->cmd.cpa = cpa;
}

/**
 * cp_iova_pinned() - check if an iova is pinned for a ccw chain.
 * @cp: channel_program on which to perform the operation
 * @iova: the iova to check
 *
 * If the @iova is currently pinned for the ccw chain, return true;
 * else return false.
 */
bool cp_iova_pinned(struct channel_program *cp, u64 iova)
{
	struct ccwchain *chain;
	int i;

	list_for_each_entry(chain, &cp->ccwchain_list, next) {
		for (i = 0; i < chain->ch_len; i++)
			if (pfn_array_table_iova_pinned(chain->ch_pat + i,
							iova))
				return true;
	}

	return false;
}