// SPDX-License-Identifier: GPL-2.0
/*
 * channel program interfaces
 *
 * Copyright IBM Corp. 2017
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/vfio.h>
#include <asm/idals.h>

#include "vfio_ccw_cp.h"

/*
 * Max length for ccw chain.
 * XXX: Limit to 256, need to check more?
 */
#define CCWCHAIN_LEN_MAX	256

struct pfn_array {
	/* Starting guest physical I/O address. */
	unsigned long		pa_iova;
	/* Array of PFNs of the guest pages to pin. */
	unsigned long		*pa_iova_pfn;
	/* Array of host PFNs of the pinned pages. */
	unsigned long		*pa_pfn;
	/* Number of pages to pin, and of pages pinned. */
	int			pa_nr;
};

struct pfn_array_table {
	struct pfn_array	*pat_pa;
	int			pat_nr;
};

struct ccwchain {
	struct list_head	next;
	struct ccw1		*ch_ccw;
	/* Guest physical address of the current chain. */
	u64			ch_iova;
	/* Count of the valid ccws in chain. */
	int			ch_len;
	/* Pinned PAGEs for the original data. */
	struct pfn_array_table	*ch_pat;
};

/*
 * pfn_array_pin() - pin user pages in memory
 * @pa: pfn_array on which to perform the operation
 * @mdev: the mediated device to perform pin/unpin operations
 *
 * Attempt to pin user pages in memory.
 *
 * Usage of pfn_array:
 * @pa->pa_iova     starting guest physical I/O address. Assigned by caller.
 * @pa->pa_iova_pfn array that stores PFNs of the pages to pin. Allocated by
 *                  caller.
 * @pa->pa_pfn     array that receives PFNs of the pages pinned. Allocated by
 *                  caller.
 * @pa->pa_nr      number of pages from @pa->pa_iova to pin. Assigned by
 *                  caller. On return, the number of pages pinned, assigned
 *                  by callee.
 *
 * Returns:
 *   Number of pages pinned on success.
 *   If @pa->pa_nr is 0 or negative, returns 0.
 *   If no pages were pinned, returns -errno.
 */
static int pfn_array_pin(struct pfn_array *pa, struct device *mdev)
{
	int i, ret;

	if (pa->pa_nr <= 0) {
		pa->pa_nr = 0;
		return 0;
	}

	pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
	for (i = 1; i < pa->pa_nr; i++)
		pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;

	ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
			     IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);

	if (ret > 0 && ret != pa->pa_nr) {
		/* Partial pin: undo it and report nothing pinned. */
		vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret);
		pa->pa_nr = 0;
		return 0;
	}

	return ret;
}

/* Unpin the pages before releasing the memory. */
static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev)
{
	vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
	pa->pa_nr = 0;
	kfree(pa->pa_iova_pfn);
}
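/*
 * Illustrative sketch only, not called anywhere in this driver: pinning
 * a single guest page with pfn_array_pin(). Per the contract above, the
 * caller provides the arrays; here they live on the stack, so we unpin
 * with vfio_unpin_pages() directly rather than pfn_array_unpin_free(),
 * which would kfree() the array. @mdev and @iova are hypothetical.
 */
static int __maybe_unused pfn_array_pin_example(struct device *mdev, u64 iova)
{
	unsigned long iova_pfn, host_pfn;
	struct pfn_array pa = {
		.pa_iova	= iova,
		.pa_iova_pfn	= &iova_pfn,
		.pa_pfn		= &host_pfn,
		.pa_nr		= 1,
	};
	int ret;

	ret = pfn_array_pin(&pa, mdev);
	if (ret <= 0)
		return ret;

	/* host_pfn now holds the host PFN backing the guest page. */

	vfio_unpin_pages(mdev, pa.pa_iova_pfn, pa.pa_nr);
	return 0;
}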
/* Alloc memory for PFNs, then pin pages with them. */
static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
			       u64 iova, unsigned int len)
{
	int ret = 0;

	if (!len)
		return 0;

	if (pa->pa_nr)
		return -EINVAL;

	pa->pa_iova = iova;

	pa->pa_nr = ((iova & ~PAGE_MASK) + len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (!pa->pa_nr)
		return -EINVAL;

	/* One allocation serves both arrays: iova PFNs, then host PFNs. */
	pa->pa_iova_pfn = kcalloc(pa->pa_nr,
				  sizeof(*pa->pa_iova_pfn) +
				  sizeof(*pa->pa_pfn),
				  GFP_KERNEL);
	if (unlikely(!pa->pa_iova_pfn))
		return -ENOMEM;
	pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;

	ret = pfn_array_pin(pa, mdev);

	if (ret > 0)
		return ret;
	else if (!ret)
		ret = -EINVAL;

	kfree(pa->pa_iova_pfn);

	return ret;
}

static int pfn_array_table_init(struct pfn_array_table *pat, int nr)
{
	pat->pat_pa = kcalloc(nr, sizeof(*pat->pat_pa), GFP_KERNEL);
	if (unlikely(ZERO_OR_NULL_PTR(pat->pat_pa))) {
		pat->pat_nr = 0;
		return -ENOMEM;
	}

	pat->pat_nr = nr;

	return 0;
}

static void pfn_array_table_unpin_free(struct pfn_array_table *pat,
				       struct device *mdev)
{
	int i;

	for (i = 0; i < pat->pat_nr; i++)
		pfn_array_unpin_free(pat->pat_pa + i, mdev);

	if (pat->pat_nr) {
		kfree(pat->pat_pa);
		pat->pat_pa = NULL;
		pat->pat_nr = 0;
	}
}

static bool pfn_array_table_iova_pinned(struct pfn_array_table *pat,
					unsigned long iova)
{
	struct pfn_array *pa = pat->pat_pa;
	unsigned long iova_pfn = iova >> PAGE_SHIFT;
	int i, j;

	for (i = 0; i < pat->pat_nr; i++, pa++)
		for (j = 0; j < pa->pa_nr; j++)
			if (pa->pa_iova_pfn[j] == iova_pfn)
				return true;

	return false;
}

/* Create the list of idal words for a pfn_array_table. */
static inline void pfn_array_table_idal_create_words(
	struct pfn_array_table *pat,
	unsigned long *idaws)
{
	struct pfn_array *pa;
	int i, j, k;

	/*
	 * Idal words (except the first one) rely on the memory being 4k
	 * aligned. If a user virtual address is 4K aligned, then its
	 * corresponding kernel physical address will also be 4K aligned.
	 * Thus there will be no problem here to simply use the phys to
	 * create an idaw.
	 */
	k = 0;
	for (i = 0; i < pat->pat_nr; i++) {
		pa = pat->pat_pa + i;
		for (j = 0; j < pa->pa_nr; j++) {
			idaws[k] = pa->pa_pfn[j] << PAGE_SHIFT;
			if (k == 0)
				idaws[k] += pa->pa_iova & (PAGE_SIZE - 1);
			k++;
		}
	}
}
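/*
 * Worked example for the helper above, with hypothetical numbers: a
 * guest buffer starting at iova 0x21800 that spans two pinned pages
 * with host PFNs 0x500 and 0x9a0 yields
 *
 *   idaws[0] = 0x500800	(keeps the intra-page offset 0x800)
 *   idaws[1] = 0x9a0000	(subsequent idaws are 4K aligned)
 */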
/*
 * Within the domain (@mdev), copy @n bytes from a guest physical
 * address (@iova) to a host kernel buffer (@to).
 *
 * Returns a negative errno on failure, otherwise the number of bytes
 * that could not be copied (so 0 on success).
 */
static long copy_from_iova(struct device *mdev,
			   void *to, u64 iova,
			   unsigned long n)
{
	struct pfn_array pa = {0};
	u64 from;
	int i, ret;
	unsigned long l, m;

	ret = pfn_array_alloc_pin(&pa, mdev, iova, n);
	if (ret <= 0)
		return ret;

	l = n;
	for (i = 0; i < pa.pa_nr; i++) {
		from = pa.pa_pfn[i] << PAGE_SHIFT;
		m = PAGE_SIZE;
		if (i == 0) {
			/* Start at the offset within the first page. */
			from += iova & (PAGE_SIZE - 1);
			m -= iova & (PAGE_SIZE - 1);
		}

		m = min(l, m);
		memcpy(to + (n - l), (void *)from, m);

		l -= m;
		if (l == 0)
			break;
	}

	pfn_array_unpin_free(&pa, mdev);

	return l;
}

/*
 * Copy a chain of up to @len ccws from guest memory at @iova into @to.
 * If the orb indicates a format-0 channel program, convert each ccw to
 * format-1 on the fly.
 */
static long copy_ccw_from_iova(struct channel_program *cp,
			       struct ccw1 *to, u64 iova,
			       unsigned long len)
{
	struct ccw0 ccw0;
	struct ccw1 *pccw1;
	int ret;
	int i;

	ret = copy_from_iova(cp->mdev, to, iova, len * sizeof(struct ccw1));
	if (ret)
		return ret;

	if (!cp->orb.cmd.fmt) {
		/* Convert each format-0 ccw to format-1 in place. */
		pccw1 = to;
		for (i = 0; i < len; i++) {
			ccw0 = *(struct ccw0 *)pccw1;
			if ((pccw1->cmd_code & 0x0f) == CCW_CMD_TIC) {
				pccw1->cmd_code = CCW_CMD_TIC;
				pccw1->flags = 0;
				pccw1->count = 0;
			} else {
				pccw1->cmd_code = ccw0.cmd_code;
				pccw1->flags = ccw0.flags;
				pccw1->count = ccw0.count;
			}
			pccw1->cda = ccw0.cda;
			pccw1++;
		}
	}

	return ret;
}
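/*
 * For reference, the two ccw layouts that copy_ccw_from_iova() deals
 * with (see the s390 Principles of Operation; struct ccw0 and struct
 * ccw1 mirror these):
 *
 *   format-0:  cmd_code(8) | cda(24) | flags(8) | reserved(8) | count(16)
 *   format-1:  cmd_code(8) | flags(8) | count(16) | cda(32)
 *
 * E.g. a hypothetical format-0 ccw with cmd_code 0x02, cda 0x1000 and
 * count 0x200 keeps the same field values as format-1; only the layout
 * is rearranged as shown above.
 */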
/*
 * Helpers to operate ccwchain.
 */
#define ccw_is_test(_ccw) (((_ccw)->cmd_code & 0x0F) == 0)

#define ccw_is_noop(_ccw) ((_ccw)->cmd_code == CCW_CMD_NOOP)

#define ccw_is_tic(_ccw) ((_ccw)->cmd_code == CCW_CMD_TIC)

#define ccw_is_idal(_ccw) ((_ccw)->flags & CCW_FLAG_IDA)

#define ccw_is_chain(_ccw) ((_ccw)->flags & (CCW_FLAG_CC | CCW_FLAG_DC))

static struct ccwchain *ccwchain_alloc(struct channel_program *cp, int len)
{
	struct ccwchain *chain;
	void *data;
	size_t size;

	/* Make ccw address aligned to 8. */
	size = ((sizeof(*chain) + 7L) & -8L) +
		sizeof(*chain->ch_ccw) * len +
		sizeof(*chain->ch_pat) * len;
	chain = kzalloc(size, GFP_DMA | GFP_KERNEL);
	if (!chain)
		return NULL;

	data = (u8 *)chain + ((sizeof(*chain) + 7L) & -8L);
	chain->ch_ccw = (struct ccw1 *)data;

	data = (u8 *)(chain->ch_ccw) + sizeof(*chain->ch_ccw) * len;
	chain->ch_pat = (struct pfn_array_table *)data;

	chain->ch_len = len;

	list_add_tail(&chain->next, &cp->ccwchain_list);

	return chain;
}

static void ccwchain_free(struct ccwchain *chain)
{
	list_del(&chain->next);
	kfree(chain);
}

/* Free resources for a ccw that allocated memory for its cda. */
static void ccwchain_cda_free(struct ccwchain *chain, int idx)
{
	struct ccw1 *ccw = chain->ch_ccw + idx;

	if (ccw_is_test(ccw) || ccw_is_noop(ccw) || ccw_is_tic(ccw))
		return;
	if (!ccw->count)
		return;

	kfree((void *)(u64)ccw->cda);
}

/* Unpin the pages then free the memory resources. */
static void cp_unpin_free(struct channel_program *cp)
{
	struct ccwchain *chain, *temp;
	int i;

	list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) {
		for (i = 0; i < chain->ch_len; i++) {
			pfn_array_table_unpin_free(chain->ch_pat + i,
						   cp->mdev);
			ccwchain_cda_free(chain, i);
		}
		ccwchain_free(chain);
	}
}

/**
 * ccwchain_calc_length - calculate the length of the ccw chain.
 * @iova: guest physical address of the target ccw chain
 * @cp: channel_program on which to perform the operation
 *
 * This is the chain length, not considering any TICs.
 * You need to do a new round for each TIC target.
 *
 * Returns: the length of the ccw chain or -errno.
 */
static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
{
	struct ccw1 *ccw, *p;
	int cnt;

	/*
	 * Copy current chain from guest to host kernel.
	 * Currently the chain length is limited to CCWCHAIN_LEN_MAX (256).
	 * So copying 2K is enough (safe).
	 */
	p = ccw = kcalloc(CCWCHAIN_LEN_MAX, sizeof(*ccw), GFP_KERNEL);
	if (!ccw)
		return -ENOMEM;

	cnt = copy_ccw_from_iova(cp, ccw, iova, CCWCHAIN_LEN_MAX);
	if (cnt) {
		kfree(ccw);
		return cnt;
	}

	cnt = 0;
	do {
		cnt++;

		/*
		 * The chain ends at the first ccw that neither chains
		 * on nor transfers control.
		 */
		if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw)))
			break;

		ccw++;
	} while (cnt < CCWCHAIN_LEN_MAX + 1);

	if (cnt == CCWCHAIN_LEN_MAX + 1)
		cnt = -EINVAL;

	kfree(p);
	return cnt;
}

static int tic_target_chain_exists(struct ccw1 *tic, struct channel_program *cp)
{
	struct ccwchain *chain;
	u32 ccw_head, ccw_tail;

	list_for_each_entry(chain, &cp->ccwchain_list, next) {
		ccw_head = chain->ch_iova;
		ccw_tail = ccw_head + (chain->ch_len - 1) * sizeof(struct ccw1);

		if ((ccw_head <= tic->cda) && (tic->cda <= ccw_tail))
			return 1;
	}

	return 0;
}

static int ccwchain_loop_tic(struct ccwchain *chain,
			     struct channel_program *cp);

static int ccwchain_handle_tic(struct ccw1 *tic, struct channel_program *cp)
{
	struct ccwchain *chain;
	int len, ret;

	/* May transfer to an existing chain. */
	if (tic_target_chain_exists(tic, cp))
		return 0;

	/* Get chain length. */
	len = ccwchain_calc_length(tic->cda, cp);
	if (len < 0)
		return len;

	/* Need to allocate a new chain for this one. */
	chain = ccwchain_alloc(cp, len);
	if (!chain)
		return -ENOMEM;
	chain->ch_iova = tic->cda;

	/* Copy the new chain from guest. */
	ret = copy_ccw_from_iova(cp, chain->ch_ccw, tic->cda, len);
	if (ret) {
		ccwchain_free(chain);
		return ret;
	}

	/* Loop for tics on this new chain. */
	return ccwchain_loop_tic(chain, cp);
}
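/*
 * Illustrative sketch of what the TIC handling above builds, with
 * hypothetical guest addresses. Given a head chain whose last ccw is a
 * TIC to iova 0x3000:
 *
 *   chain A (ch_iova 0x2000): ccw, ccw, TIC -> 0x3000
 *   chain B (ch_iova 0x3000): ccw, ccw (end of chain)
 *
 * ccwchain_loop_tic() finds the TIC in chain A, sees that no existing
 * chain covers iova 0x3000, and allocates and copies chain B; the
 * recursion via ccwchain_handle_tic() then scans chain B for further
 * TICs in the same way.
 */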
/* Loop for TICs. */
static int ccwchain_loop_tic(struct ccwchain *chain, struct channel_program *cp)
{
	struct ccw1 *tic;
	int i, ret;

	for (i = 0; i < chain->ch_len; i++) {
		tic = chain->ch_ccw + i;

		if (!ccw_is_tic(tic))
			continue;

		ret = ccwchain_handle_tic(tic, cp);
		if (ret)
			return ret;
	}

	return 0;
}

/* Rewrite a TIC's cda to point at the host copy of its target chain. */
static int ccwchain_fetch_tic(struct ccwchain *chain,
			      int idx,
			      struct channel_program *cp)
{
	struct ccw1 *ccw = chain->ch_ccw + idx;
	struct ccwchain *iter;
	u32 ccw_head, ccw_tail;

	list_for_each_entry(iter, &cp->ccwchain_list, next) {
		ccw_head = iter->ch_iova;
		ccw_tail = ccw_head + (iter->ch_len - 1) * sizeof(struct ccw1);

		if ((ccw_head <= ccw->cda) && (ccw->cda <= ccw_tail)) {
			ccw->cda = (__u32) (addr_t) (((char *)iter->ch_ccw) +
						     (ccw->cda - ccw_head));
			return 0;
		}
	}

	return -EFAULT;
}

static int ccwchain_fetch_direct(struct ccwchain *chain,
				 int idx,
				 struct channel_program *cp)
{
	struct ccw1 *ccw;
	struct pfn_array_table *pat;
	unsigned long *idaws;
	int idaw_nr;

	ccw = chain->ch_ccw + idx;

	if (!ccw->count) {
		/*
		 * We just want the translation result of any direct ccw
		 * to be an IDA ccw, so let's add the IDA flag for it.
		 * Although the flag will be ignored by firmware.
		 */
		ccw->flags |= CCW_FLAG_IDA;
		return 0;
	}

	/*
	 * Pin data page(s) in memory.
	 * The number of pages is actually the count of the idaws
	 * needed when translating a direct ccw to an idal ccw.
	 */
	pat = chain->ch_pat + idx;
	if (pfn_array_table_init(pat, 1))
		return -ENOMEM;
	idaw_nr = pfn_array_alloc_pin(pat->pat_pa, cp->mdev,
				      ccw->cda, ccw->count);
	if (idaw_nr < 0)
		return idaw_nr;

	/* Translate this direct ccw to an idal ccw. */
	idaws = kcalloc(idaw_nr, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
	if (!idaws) {
		pfn_array_table_unpin_free(pat, cp->mdev);
		return -ENOMEM;
	}
	ccw->cda = (__u32) virt_to_phys(idaws);
	ccw->flags |= CCW_FLAG_IDA;

	pfn_array_table_idal_create_words(pat, idaws);

	return 0;
}
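/*
 * Worked example for ccwchain_fetch_direct(), with hypothetical values:
 * a direct ccw with cda 0x4f00 and count 0x300 touches bytes
 * 0x4f00..0x51ff, i.e. two guest pages, so two pages are pinned and two
 * idaws are built (the first one keeping the 0xf00 offset). The ccw is
 * then rewritten to carry CCW_FLAG_IDA, with its cda pointing at the
 * idaw list.
 */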
static int ccwchain_fetch_idal(struct ccwchain *chain,
			       int idx,
			       struct channel_program *cp)
{
	struct ccw1 *ccw;
	struct pfn_array_table *pat;
	unsigned long *idaws;
	u64 idaw_iova;
	unsigned int idaw_nr, idaw_len;
	int i, ret;

	ccw = chain->ch_ccw + idx;

	if (!ccw->count)
		return 0;

	/* Calculate size of idaws. */
	ret = copy_from_iova(cp->mdev, &idaw_iova, ccw->cda, sizeof(idaw_iova));
	if (ret)
		return ret;
	idaw_nr = idal_nr_words((void *)(idaw_iova), ccw->count);
	idaw_len = idaw_nr * sizeof(*idaws);

	/* Pin data page(s) in memory. */
	pat = chain->ch_pat + idx;
	ret = pfn_array_table_init(pat, idaw_nr);
	if (ret)
		return ret;

	/* Translate idal ccw to use newly allocated idaws. */
	idaws = kzalloc(idaw_len, GFP_DMA | GFP_KERNEL);
	if (!idaws) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	/* Copy the guest's idaw list, then pin each page it references. */
	ret = copy_from_iova(cp->mdev, idaws, ccw->cda, idaw_len);
	if (ret)
		goto out_free_idaws;

	ccw->cda = virt_to_phys(idaws);

	for (i = 0; i < idaw_nr; i++) {
		idaw_iova = *(idaws + i);

		ret = pfn_array_alloc_pin(pat->pat_pa + i, cp->mdev,
					  idaw_iova, 1);
		if (ret < 0)
			goto out_free_idaws;
	}

	/* Overwrite the guest idaws with the host physical ones. */
	pfn_array_table_idal_create_words(pat, idaws);

	return 0;

out_free_idaws:
	kfree(idaws);
out_unpin:
	pfn_array_table_unpin_free(pat, cp->mdev);
	return ret;
}

/*
 * Fetch one ccw.
 * To reduce memory copy, we'll pin the cda page in memory,
 * and to get rid of the cda 2G limitation of ccw1, we'll translate
 * direct ccws to idal ccws.
 */
static int ccwchain_fetch_one(struct ccwchain *chain,
			      int idx,
			      struct channel_program *cp)
{
	struct ccw1 *ccw = chain->ch_ccw + idx;

	if (ccw_is_test(ccw) || ccw_is_noop(ccw))
		return 0;

	if (ccw_is_tic(ccw))
		return ccwchain_fetch_tic(chain, idx, cp);

	if (ccw_is_idal(ccw))
		return ccwchain_fetch_idal(chain, idx, cp);

	return ccwchain_fetch_direct(chain, idx, cp);
}

/**
 * cp_init() - allocate ccwchains for a channel program.
 * @cp: channel_program on which to perform the operation
 * @mdev: the mediated device to perform pin/unpin operations
 * @orb: control block for the channel program from the guest
 *
 * This creates one or more ccwchain(s), and copies the raw data of
 * the target channel program from @orb->cmd.cpa to the new ccwchain(s).
 *
 * Limitations:
 * 1. Supports only prefetch enabled mode.
 * 2. Supports idal(c64) ccw chaining.
 * 3. Supports 4k idaw.
 *
 * Returns:
 *   %0 on success and a negative error value on failure.
 */
int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
{
	u64 iova = orb->cmd.cpa;
	struct ccwchain *chain;
	int len, ret;

	/*
	 * XXX:
	 * Only support prefetch enabled mode now.
	 * Only support 64bit addressing idal.
	 * Only support 4k IDAW.
	 */
	if (!orb->cmd.pfch || !orb->cmd.c64 || orb->cmd.i2k)
		return -EOPNOTSUPP;

	INIT_LIST_HEAD(&cp->ccwchain_list);
	memcpy(&cp->orb, orb, sizeof(*orb));
	cp->mdev = mdev;

	/* Get chain length. */
	len = ccwchain_calc_length(iova, cp);
	if (len < 0)
		return len;

	/* Alloc mem for the head chain. */
	chain = ccwchain_alloc(cp, len);
	if (!chain)
		return -ENOMEM;
	chain->ch_iova = iova;

	/* Copy the head chain from guest. */
	ret = copy_ccw_from_iova(cp, chain->ch_ccw, iova, len);
	if (ret) {
		ccwchain_free(chain);
		return ret;
	}

	/* Now loop for its TICs. */
	ret = ccwchain_loop_tic(chain, cp);
	if (ret)
		cp_unpin_free(cp);

	return ret;
}

/**
 * cp_free() - free resources for channel program.
 * @cp: channel_program on which to perform the operation
 *
 * This unpins the memory pages and frees the memory space occupied by
 * @cp, which must have been returned by a previous call to cp_init().
 * Otherwise, undefined behavior occurs.
 */
void cp_free(struct channel_program *cp)
{
	cp_unpin_free(cp);
}
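/*
 * Illustrative sketch only, not called by the driver: the intended
 * lifecycle of the cp_* API from a caller's point of view (vfio-ccw's
 * fsm code does the real version of this). The @mdev and @guest_orb
 * arguments and the intparm/lpm values are hypothetical; the actual
 * ssch() submission happens elsewhere in the css code.
 */
static __maybe_unused union orb *cp_prepare_example(struct channel_program *cp,
						    struct device *mdev,
						    union orb *guest_orb)
{
	int ret;

	/* Copy and check the guest channel program. */
	ret = cp_init(cp, mdev, guest_orb);
	if (ret)
		return ERR_PTR(ret);

	/* Translate guest physical ccws into host runnable ones. */
	ret = cp_prefetch(cp);
	if (ret) {
		cp_free(cp);
		return ERR_PTR(ret);
	}

	/*
	 * The returned orb can be handed to ssch(). Once the I/O
	 * completes, call cp_update_scsw() on the result, and finally
	 * cp_free().
	 */
	return cp_get_orb(cp, (u32)(addr_t)cp, 0xff);
}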
/**
 * cp_prefetch() - translate a guest physical address channel program to
 *                 a real-device runnable channel program.
 * @cp: channel_program on which to perform the operation
 *
 * This function translates the guest-physical-address channel program
 * and stores the result to ccwchain list. @cp must have been
 * initialized by a previous call with cp_init(). Otherwise, undefined
 * behavior occurs.
 *
 * The S/390 CCW Translation APIs (prefixed by 'cp_') are introduced
 * as helpers to do ccw chain translation inside the kernel. Basically
 * they accept a channel program issued by a virtual machine, and
 * translate the channel program to a real-device runnable channel
 * program.
 *
 * These APIs will copy the ccws into kernel-space buffers, and update
 * the guest physical addresses with their corresponding host physical
 * addresses. Then channel I/O device drivers could issue the
 * translated channel program to real devices to perform an I/O
 * operation.
 *
 * These interfaces are designed to support translation only for
 * channel programs, which are generated and formatted by a
 * guest. Thus this will make it possible for things like VFIO to
 * leverage the interfaces to passthrough a channel I/O mediated
 * device in QEMU.
 *
 * We support direct ccw chaining by translating them to idal ccws.
 *
 * Returns:
 *   %0 on success and a negative error value on failure.
 */
int cp_prefetch(struct channel_program *cp)
{
	struct ccwchain *chain;
	int len, idx, ret;

	list_for_each_entry(chain, &cp->ccwchain_list, next) {
		len = chain->ch_len;
		for (idx = 0; idx < len; idx++) {
			ret = ccwchain_fetch_one(chain, idx, cp);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/**
 * cp_get_orb() - get the orb of the channel program
 * @cp: channel_program on which to perform the operation
 * @intparm: new intparm for the returned orb
 * @lpm: candidate value of the logical-path mask for the returned orb
 *
 * This function returns the address of the updated orb of the channel
 * program. Channel I/O device drivers could use this orb to issue a
 * ssch.
 */
union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm)
{
	union orb *orb;
	struct ccwchain *chain;
	struct ccw1 *cpa;

	orb = &cp->orb;

	orb->cmd.intparm = intparm;
	orb->cmd.fmt = 1;
	orb->cmd.key = PAGE_DEFAULT_KEY >> 4;

	if (orb->cmd.lpm == 0)
		orb->cmd.lpm = lpm;

	chain = list_first_entry(&cp->ccwchain_list, struct ccwchain, next);
	cpa = chain->ch_ccw;
	orb->cmd.cpa = (__u32) __pa(cpa);

	return orb;
}

/**
 * cp_update_scsw() - update scsw for a channel program.
 * @cp: channel_program on which to perform the operation
 * @scsw: I/O results of the channel program and also the target to be
 *        updated
 *
 * @scsw contains the I/O results of the channel program that is
 * pointed to by @cp. However, what @scsw->cpa stores is a host
 * physical address, which is meaningless for the guest, which is
 * waiting for the I/O results.
 *
 * This function updates @scsw->cpa to its corresponding guest physical
 * address.
 */
void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
{
	struct ccwchain *chain;
	u32 cpa = scsw->cmd.cpa;
	u32 ccw_head, ccw_tail;

	/*
	 * LATER:
	 * For now, only update the cmd.cpa part. We may need to deal with
	 * other portions of the schib as well, even if we don't return them
	 * in the ioctl directly. Path status changes etc.
	 */
	list_for_each_entry(chain, &cp->ccwchain_list, next) {
		ccw_head = (u32)(u64)chain->ch_ccw;
		ccw_tail = (u32)(u64)(chain->ch_ccw + chain->ch_len - 1);

		if ((ccw_head <= cpa) && (cpa <= ccw_tail)) {
			/*
			 * (cpa - ccw_head) is the offset value of the host
			 * physical ccw to its chain head.
			 * Adding this value to the guest physical ccw chain
			 * head gets us the guest cpa.
			 */
			cpa = chain->ch_iova + (cpa - ccw_head);
			break;
		}
	}

	scsw->cmd.cpa = cpa;
}
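/*
 * Worked example for cp_update_scsw(), with hypothetical addresses: if
 * a chain's host copy lives at 0x7f000 (ccw_head) and its guest chain
 * head (ch_iova) is 0x2000, a reported scsw cpa of 0x7f010 (two ccws,
 * 16 bytes past the head) is rewritten to 0x2000 + 0x10 = 0x2010.
 */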
/**
 * cp_iova_pinned() - check if an iova is pinned for a ccw chain.
 * @cp: channel_program on which to perform the operation
 * @iova: the iova to check
 *
 * If the @iova is currently pinned for the ccw chain, return true;
 * else return false.
 */
bool cp_iova_pinned(struct channel_program *cp, u64 iova)
{
	struct ccwchain *chain;
	int i;

	list_for_each_entry(chain, &cp->ccwchain_list, next) {
		for (i = 0; i < chain->ch_len; i++)
			if (pfn_array_table_iova_pinned(chain->ch_pat + i,
							iova))
				return true;
	}

	return false;
}
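/*
 * Usage note (a sketch of the intended caller, not code in this file):
 * vfio-ccw's DMA-unmap notifier can consult cp_iova_pinned() to learn
 * whether a guest range about to be unmapped is still in use by an
 * inflight channel program, along the lines of:
 *
 *	if (!cp_iova_pinned(&private->cp, unmap->iova))
 *		return NOTIFY_OK;
 *	(quiesce the device before the pages go away)
 */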