xref: /openbmc/linux/drivers/s390/cio/vfio_ccw_cp.c (revision 465191d6)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * channel program interfaces
4  *
5  * Copyright IBM Corp. 2017
6  *
7  * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
8  *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
9  */
10 
11 #include <linux/ratelimit.h>
12 #include <linux/mm.h>
13 #include <linux/slab.h>
14 #include <linux/iommu.h>
15 #include <linux/vfio.h>
16 #include <asm/idals.h>
17 
18 #include "vfio_ccw_cp.h"
19 #include "vfio_ccw_private.h"
20 
21 struct pfn_array {
22 	/* Starting guest physical I/O address. */
23 	unsigned long		pa_iova;
24 	/* Array that stores PFNs of the pages that need to be pinned. */
25 	unsigned long		*pa_iova_pfn;
26 	/* Array that receives PFNs of the pages pinned. */
27 	unsigned long		*pa_pfn;
28 	/* Number of pages pinned from @pa_iova. */
29 	int			pa_nr;
30 };
31 
32 struct ccwchain {
33 	struct list_head	next;
34 	struct ccw1		*ch_ccw;
35 	/* Guest physical address of the current chain. */
36 	u64			ch_iova;
37 	/* Count of the valid ccws in chain. */
38 	int			ch_len;
39 	/* Pinned pages for the original data. */
40 	struct pfn_array	*ch_pa;
41 };
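
/*
 * Each ccwchain describes one contiguous run of CCWs copied from the
 * guest: ch_ccw holds the host copy of the CCWs, ch_iova records where
 * the chain lives in guest memory, and ch_pa[i] tracks the guest pages
 * pinned for ch_ccw[i]. All chains of a channel program are linked on
 * channel_program->ccwchain_list via @next.
 */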
42 
43 /*
44  * pfn_array_alloc() - alloc memory for PFNs
45  * @pa: pfn_array on which to perform the operation
46  * @iova: target guest physical address
47  * @len: number of bytes that should be pinned from @iova
48  *
49  * Attempt to allocate memory for PFNs.
50  *
51  * Usage of pfn_array:
52  * We expect (pa_nr == 0) and (pa_iova_pfn == NULL); all fields in
53  * this structure will be filled in by this function.
54  *
55  * Returns:
56  *         0 if PFNs are allocated
57  *   -EINVAL if pa->pa_nr is not initially zero, or pa->pa_iova_pfn is not NULL
58  *   -ENOMEM if alloc failed
59  */
60 static int pfn_array_alloc(struct pfn_array *pa, u64 iova, unsigned int len)
61 {
62 	int i;
63 
64 	if (pa->pa_nr || pa->pa_iova_pfn)
65 		return -EINVAL;
66 
67 	pa->pa_iova = iova;
68 
69 	pa->pa_nr = ((iova & ~PAGE_MASK) + len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
70 	if (!pa->pa_nr)
71 		return -EINVAL;
72 
73 	pa->pa_iova_pfn = kcalloc(pa->pa_nr,
74 				  sizeof(*pa->pa_iova_pfn) +
75 				  sizeof(*pa->pa_pfn),
76 				  GFP_KERNEL);
77 	if (unlikely(!pa->pa_iova_pfn)) {
78 		pa->pa_nr = 0;
79 		return -ENOMEM;
80 	}
81 	pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
82 
83 	pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
84 	pa->pa_pfn[0] = -1ULL;
85 	for (i = 1; i < pa->pa_nr; i++) {
86 		pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;
87 		pa->pa_pfn[i] = -1ULL;
88 	}
89 
90 	return 0;
91 }
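
/*
 * Example of the page-count math above (a sketch, assuming 4K pages):
 * for iova = 0x10F00 and len = 0x300, the transfer touches bytes
 * 0xF00..0xFFF of the first page and 0x000..0x1FF of the next, so
 * pa_nr = (0xF00 + 0x300 + 0xFFF) >> 12 = 2. Note that pa_iova_pfn[]
 * and pa_pfn[] share the single allocation made above; pa_pfn[] is
 * filled in later when the pages are actually pinned.
 */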
92 
93 /*
94  * pfn_array_pin() - Pin user pages in memory
95  * @pa: pfn_array on which to perform the operation
96  * @vdev: the vfio device to perform pin operations
97  *
98  * Returns number of pages pinned upon success.
99  * If the pin request partially succeeds, or fails completely,
100  * all pages are left unpinned and a negative error value is returned.
101  */
102 static int pfn_array_pin(struct pfn_array *pa, struct vfio_device *vdev)
103 {
104 	int ret = 0;
105 
106 	ret = vfio_pin_pages(vdev, pa->pa_iova_pfn, pa->pa_nr,
107 			     IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);
108 
109 	if (ret < 0) {
110 		goto err_out;
111 	} else if (ret > 0 && ret != pa->pa_nr) {
112 		vfio_unpin_pages(vdev, pa->pa_iova_pfn, ret);
113 		ret = -EINVAL;
114 		goto err_out;
115 	}
116 
117 	return ret;
118 
119 err_out:
120 	pa->pa_nr = 0;
121 
122 	return ret;
123 }
124 
125 /* Unpin the pages before releasing the memory. */
126 static void pfn_array_unpin_free(struct pfn_array *pa, struct vfio_device *vdev)
127 {
128 	/* Only unpin if any pages were pinned to begin with */
129 	if (pa->pa_nr)
130 		vfio_unpin_pages(vdev, pa->pa_iova_pfn, pa->pa_nr);
131 	pa->pa_nr = 0;
132 	kfree(pa->pa_iova_pfn);
133 }
134 
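/*
 * pfn_array_iova_pinned() - check whether an address is covered by a pfn_array
 * @pa: pfn_array on which to perform the operation
 * @iova: target guest physical address
 *
 * Returns true if the page containing @iova is one of the pages tracked
 * by @pa, false otherwise.
 */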
135 static bool pfn_array_iova_pinned(struct pfn_array *pa, unsigned long iova)
136 {
137 	unsigned long iova_pfn = iova >> PAGE_SHIFT;
138 	int i;
139 
140 	for (i = 0; i < pa->pa_nr; i++)
141 		if (pa->pa_iova_pfn[i] == iova_pfn)
142 			return true;
143 
144 	return false;
145 }

146 /* Create the list of IDAL words for a pfn_array. */
147 static inline void pfn_array_idal_create_words(
148 	struct pfn_array *pa,
149 	unsigned long *idaws)
150 {
151 	int i;
152 
153 	/*
154 	 * IDAL words (except the first one) rely on the memory being 4K
155 	 * aligned. If a user virtual address is 4K aligned, then its
156 	 * corresponding kernel physical address will also be 4K aligned.
157 	 * Thus it is safe here to simply use the physical address to
158 	 * create an IDAW.
159 	 */
160 
161 	for (i = 0; i < pa->pa_nr; i++)
162 		idaws[i] = pa->pa_pfn[i] << PAGE_SHIFT;
163 
164 	/* Adjust the first IDAW, since it may not start on a page boundary */
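	/*
	 * For example (a sketch, assuming 4K pages): a pinned pfn of
	 * 0x12345 and an IOVA offset of 0x800 into the first page yield
	 * idaws[0] = (0x12345 << PAGE_SHIFT) + 0x800 = 0x12345800.
	 */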
165 	idaws[0] += pa->pa_iova & (PAGE_SIZE - 1);
166 }
167 
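/*
 * Convert a run of format-0 CCWs to format-1 CCWs in place. A format-0
 * CCW carries the command code and a 24-bit data address in its first
 * word and the flags and count in its second word; a format-1 CCW
 * carries the command code, flags and count in its first word and a
 * 31-bit data address in its second word. TIC CCWs are special-cased:
 * their flags and count fields are not used by the channel, so they are
 * cleared rather than copied.
 */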
168 static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len)
169 {
170 	struct ccw0 ccw0;
171 	struct ccw1 *pccw1 = source;
172 	int i;
173 
174 	for (i = 0; i < len; i++) {
175 		ccw0 = *(struct ccw0 *)pccw1;
176 		if ((pccw1->cmd_code & 0x0f) == CCW_CMD_TIC) {
177 			pccw1->cmd_code = CCW_CMD_TIC;
178 			pccw1->flags = 0;
179 			pccw1->count = 0;
180 		} else {
181 			pccw1->cmd_code = ccw0.cmd_code;
182 			pccw1->flags = ccw0.flags;
183 			pccw1->count = ccw0.count;
184 		}
185 		pccw1->cda = ccw0.cda;
186 		pccw1++;
187 	}
188 }
189 
190 /*
191  * Within the domain of the vfio device (@vdev), copy @n bytes from a
192  * guest physical address (@iova) to a host address (@to). Returns the
 * number of bytes that could not be copied (0 on success), or a negative
 * error value.
193  */
194 static long copy_from_iova(struct vfio_device *vdev, void *to, u64 iova,
195 			   unsigned long n)
196 {
197 	struct pfn_array pa = {0};
198 	u64 from;
199 	int i, ret;
200 	unsigned long l, m;
201 
202 	ret = pfn_array_alloc(&pa, iova, n);
203 	if (ret < 0)
204 		return ret;
205 
206 	ret = pfn_array_pin(&pa, vdev);
207 	if (ret < 0) {
208 		pfn_array_unpin_free(&pa, vdev);
209 		return ret;
210 	}
211 
212 	l = n;
213 	for (i = 0; i < pa.pa_nr; i++) {
214 		from = pa.pa_pfn[i] << PAGE_SHIFT;
215 		m = PAGE_SIZE;
216 		if (i == 0) {
217 			from += iova & (PAGE_SIZE - 1);
218 			m -= iova & (PAGE_SIZE - 1);
219 		}
220 
221 		m = min(l, m);
222 		memcpy(to + (n - l), (void *)from, m);
223 
224 		l -= m;
225 		if (l == 0)
226 			break;
227 	}
228 
229 	pfn_array_unpin_free(&pa, vdev);
230 
231 	return l;
232 }
233 
234 /*
235  * Helpers to operate ccwchain.
236  */
237 #define ccw_is_read(_ccw) (((_ccw)->cmd_code & 0x03) == 0x02)
238 #define ccw_is_read_backward(_ccw) (((_ccw)->cmd_code & 0x0F) == 0x0C)
239 #define ccw_is_sense(_ccw) (((_ccw)->cmd_code & 0x0F) == CCW_CMD_BASIC_SENSE)
240 
241 #define ccw_is_noop(_ccw) ((_ccw)->cmd_code == CCW_CMD_NOOP)
242 
243 #define ccw_is_tic(_ccw) ((_ccw)->cmd_code == CCW_CMD_TIC)
244 
245 #define ccw_is_idal(_ccw) ((_ccw)->flags & CCW_FLAG_IDA)
246 #define ccw_is_skip(_ccw) ((_ccw)->flags & CCW_FLAG_SKIP)
247 
248 #define ccw_is_chain(_ccw) ((_ccw)->flags & (CCW_FLAG_CC | CCW_FLAG_DC))
249 
250 /*
251  * ccw_does_data_transfer()
252  *
253  * Determine whether a CCW will move any data, such that the guest pages
254  * would need to be pinned before performing the I/O.
255  *
256  * Returns 1 if yes, 0 if no.
257  */
258 static inline int ccw_does_data_transfer(struct ccw1 *ccw)
259 {
260 	/* If the count field is zero, then no data will be transferred */
261 	if (ccw->count == 0)
262 		return 0;
263 
264 	/* If the command is a NOP, then no data will be transferred */
265 	if (ccw_is_noop(ccw))
266 		return 0;
267 
268 	/* If the skip flag is off, then data will be transferred */
269 	if (!ccw_is_skip(ccw))
270 		return 1;
271 
272 	/*
273 	 * If the skip flag is on, it is only meaningful if the command
274 	 * code is a read, read backward, sense, or sense ID.  In those
275 	 * cases, no data will be transferred.
276 	 */
277 	if (ccw_is_read(ccw) || ccw_is_read_backward(ccw))
278 		return 0;
279 
280 	if (ccw_is_sense(ccw))
281 		return 0;
282 
283 	/* The skip flag is on, but it is ignored for this command code. */
284 	return 1;
285 }
286 
287 /*
288  * is_cpa_within_range()
289  *
290  * @cpa: channel program address being questioned
291  * @head: address of the beginning of a CCW chain
292  * @len: number of CCWs within the chain
293  *
294  * Determine whether the address of a CCW (whether a new chain,
295  * or the target of a TIC) falls within a range (including the end points).
296  *
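 * For example, with @head == 0x1000 and @len == 4, the tail is
 * 0x1000 + 3 * sizeof(struct ccw1) = 0x1018, and any @cpa in the
 * inclusive range [0x1000, 0x1018] is considered within the range.
 *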
297  * Returns 1 if yes, 0 if no.
298  */
299 static inline int is_cpa_within_range(u32 cpa, u32 head, int len)
300 {
301 	u32 tail = head + (len - 1) * sizeof(struct ccw1);
302 
303 	return (head <= cpa && cpa <= tail);
304 }
305 
306 static inline int is_tic_within_range(struct ccw1 *ccw, u32 head, int len)
307 {
308 	if (!ccw_is_tic(ccw))
309 		return 0;
310 
311 	return is_cpa_within_range(ccw->cda, head, len);
312 }
313 
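/*
 * Allocate a ccwchain and link it onto the channel program's chain list.
 * The chain header, the CCW area and the pfn_array area share a single
 * allocation:
 *
 *	| ccwchain (padded to 8 bytes) | len * ccw1 | len * pfn_array |
 *
 * The padding keeps ch_ccw doubleword aligned, as the channel subsystem
 * requires for CCWs, and GFP_DMA keeps the CCWs below 2G so that a
 * 31-bit format-1 cda can reference them.
 */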
314 static struct ccwchain *ccwchain_alloc(struct channel_program *cp, int len)
315 {
316 	struct ccwchain *chain;
317 	void *data;
318 	size_t size;
319 
320 	/* Make ccw address aligned to 8. */
321 	size = ((sizeof(*chain) + 7L) & -8L) +
322 		sizeof(*chain->ch_ccw) * len +
323 		sizeof(*chain->ch_pa) * len;
324 	chain = kzalloc(size, GFP_DMA | GFP_KERNEL);
325 	if (!chain)
326 		return NULL;
327 
328 	data = (u8 *)chain + ((sizeof(*chain) + 7L) & -8L);
329 	chain->ch_ccw = (struct ccw1 *)data;
330 
331 	data = (u8 *)(chain->ch_ccw) + sizeof(*chain->ch_ccw) * len;
332 	chain->ch_pa = (struct pfn_array *)data;
333 
334 	chain->ch_len = len;
335 
336 	list_add_tail(&chain->next, &cp->ccwchain_list);
337 
338 	return chain;
339 }
340 
341 static void ccwchain_free(struct ccwchain *chain)
342 {
343 	list_del(&chain->next);
344 	kfree(chain);
345 }
346 
347 /* Free resource for a ccw that allocated memory for its cda. */
348 static void ccwchain_cda_free(struct ccwchain *chain, int idx)
349 {
350 	struct ccw1 *ccw = chain->ch_ccw + idx;
351 
352 	if (ccw_is_tic(ccw))
353 		return;
354 
355 	kfree((void *)(u64)ccw->cda);
356 }
357 
358 /**
359  * ccwchain_calc_length - calculate the length of the ccw chain.
360  * @iova: guest physical address of the target ccw chain
361  * @cp: channel_program on which to perform the operation
362  *
363  * This is the chain length not considering any TICs.
364  * You need to do a new round for each TIC target.
365  *
366  * The program is also validated for absence of not yet supported
367  * indirect data addressing scenarios.
368  *
369  * Returns: the length of the ccw chain or -errno.
370  */
371 static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
372 {
373 	struct ccw1 *ccw = cp->guest_cp;
374 	int cnt = 0;
375 
376 	do {
377 		cnt++;
378 
379 		/*
380 		 * As we don't want to fail direct addressing even if the
381 		 * orb specified one of the unsupported formats, we defer
382 		 * checking for IDAWs in unsupported formats to here.
383 		 */
384 		if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw))
385 			return -EOPNOTSUPP;
386 
387 		/*
388 		 * We want to keep counting if the current CCW has the
389 		 * command-chaining flag enabled, or if it is a TIC CCW
390 		 * that loops back into the current chain.  The latter
391 		 * is used for device orientation, where the CCW PRIOR to
392 		 * the TIC can either jump to the TIC or a CCW immediately
393 		 * after the TIC, depending on the results of its operation.
394 		 */
395 		if (!ccw_is_chain(ccw) && !is_tic_within_range(ccw, iova, cnt))
396 			break;
397 
398 		ccw++;
399 	} while (cnt < CCWCHAIN_LEN_MAX + 1);
400 
401 	if (cnt == CCWCHAIN_LEN_MAX + 1)
402 		cnt = -EINVAL;
403 
404 	return cnt;
405 }
406 
407 static int tic_target_chain_exists(struct ccw1 *tic, struct channel_program *cp)
408 {
409 	struct ccwchain *chain;
410 	u32 ccw_head;
411 
412 	list_for_each_entry(chain, &cp->ccwchain_list, next) {
413 		ccw_head = chain->ch_iova;
414 		if (is_cpa_within_range(tic->cda, ccw_head, chain->ch_len))
415 			return 1;
416 	}
417 
418 	return 0;
419 }
420 
421 static int ccwchain_loop_tic(struct ccwchain *chain,
422 			     struct channel_program *cp);
423 
424 static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp)
425 {
426 	struct vfio_device *vdev =
427 		&container_of(cp, struct vfio_ccw_private, cp)->vdev;
428 	struct ccwchain *chain;
429 	int len, ret;
430 
431 	/* Copy 2K (the most we support today) of possible CCWs */
432 	len = copy_from_iova(vdev, cp->guest_cp, cda,
433 			     CCWCHAIN_LEN_MAX * sizeof(struct ccw1));
434 	if (len)
435 		return len;
436 
437 	/* Convert any Format-0 CCWs to Format-1 */
438 	if (!cp->orb.cmd.fmt)
439 		convert_ccw0_to_ccw1(cp->guest_cp, CCWCHAIN_LEN_MAX);
440 
441 	/* Count the CCWs in the current chain */
442 	len = ccwchain_calc_length(cda, cp);
443 	if (len < 0)
444 		return len;
445 
446 	/* Need to allocate a new chain for this one. */
447 	chain = ccwchain_alloc(cp, len);
448 	if (!chain)
449 		return -ENOMEM;
450 	chain->ch_iova = cda;
451 
452 	/* Copy the actual CCWs into the new chain */
453 	memcpy(chain->ch_ccw, cp->guest_cp, len * sizeof(struct ccw1));
454 
455 	/* Loop for tics on this new chain. */
456 	ret = ccwchain_loop_tic(chain, cp);
457 
458 	if (ret)
459 		ccwchain_free(chain);
460 
461 	return ret;
462 }
463 
464 /* Loop for TICs. */
465 static int ccwchain_loop_tic(struct ccwchain *chain, struct channel_program *cp)
466 {
467 	struct ccw1 *tic;
468 	int i, ret;
469 
470 	for (i = 0; i < chain->ch_len; i++) {
471 		tic = chain->ch_ccw + i;
472 
473 		if (!ccw_is_tic(tic))
474 			continue;
475 
476 		/* May transfer to an existing chain. */
477 		if (tic_target_chain_exists(tic, cp))
478 			continue;
479 
480 		/* Build a ccwchain for the next segment */
481 		ret = ccwchain_handle_ccw(tic->cda, cp);
482 		if (ret)
483 			return ret;
484 	}
485 
486 	return 0;
487 }
488 
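/*
 * Translate a TIC CCW: rewrite its cda from the guest address of the
 * branch target to the host address of the copied CCW inside the
 * matching ccwchain, so that the channel subsystem can follow the
 * branch in host memory. Returns -EFAULT if no chain covers the target.
 */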
489 static int ccwchain_fetch_tic(struct ccwchain *chain,
490 			      int idx,
491 			      struct channel_program *cp)
492 {
493 	struct ccw1 *ccw = chain->ch_ccw + idx;
494 	struct ccwchain *iter;
495 	u32 ccw_head;
496 
497 	list_for_each_entry(iter, &cp->ccwchain_list, next) {
498 		ccw_head = iter->ch_iova;
499 		if (is_cpa_within_range(ccw->cda, ccw_head, iter->ch_len)) {
500 			ccw->cda = (__u32) (addr_t) (((char *)iter->ch_ccw) +
501 						     (ccw->cda - ccw_head));
502 			return 0;
503 		}
504 	}
505 
506 	return -EFAULT;
507 }
508 
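/*
 * Translate a non-TIC CCW: allocate a host IDAL, pin the guest pages
 * backing the data area (whether the guest used a direct address or an
 * IDAL of its own), fill the host IDAL with the pinned addresses, and
 * point the CCW's cda at the host IDAL with CCW_FLAG_IDA set.
 */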
509 static int ccwchain_fetch_direct(struct ccwchain *chain,
510 				 int idx,
511 				 struct channel_program *cp)
512 {
513 	struct vfio_device *vdev =
514 		&container_of(cp, struct vfio_ccw_private, cp)->vdev;
515 	struct ccw1 *ccw;
516 	struct pfn_array *pa;
517 	u64 iova;
518 	unsigned long *idaws;
519 	int ret;
520 	int bytes = 1;
521 	int idaw_nr, idal_len;
522 	int i;
523 
524 	ccw = chain->ch_ccw + idx;
525 
526 	if (ccw->count)
527 		bytes = ccw->count;
528 
529 	/* Calculate size of IDAL */
530 	if (ccw_is_idal(ccw)) {
531 		/* Read first IDAW to see if it's 4K-aligned or not. */
532 		/* All subsequent IDAWs will be 4K-aligned. */
533 		ret = copy_from_iova(vdev, &iova, ccw->cda, sizeof(iova));
534 		if (ret)
535 			return ret;
536 	} else {
537 		iova = ccw->cda;
538 	}
539 	idaw_nr = idal_nr_words((void *)iova, bytes);
540 	idal_len = idaw_nr * sizeof(*idaws);
541 
542 	/* Allocate an IDAL from host storage */
543 	idaws = kcalloc(idaw_nr, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
544 	if (!idaws) {
545 		ret = -ENOMEM;
546 		goto out_init;
547 	}
548 
549 	/*
550 	 * Allocate an array of pfn's for pages to pin/translate.
551 	 * The number of pages is actually the count of the idaws
552 	 * required for the data transfer, since we only support
553 	 * 4K IDAWs today.
554 	 */
555 	pa = chain->ch_pa + idx;
556 	ret = pfn_array_alloc(pa, iova, bytes);
557 	if (ret < 0)
558 		goto out_free_idaws;
559 
560 	if (ccw_is_idal(ccw)) {
561 		/* Copy guest IDAL into host IDAL */
562 		ret = copy_from_iova(vdev, idaws, ccw->cda, idal_len);
563 		if (ret)
564 			goto out_unpin;
565 
566 		/*
567 		 * Copy guest IDAWs into pfn_array, in case the memory they
568 		 * occupy is not contiguous.
569 		 */
570 		for (i = 0; i < idaw_nr; i++)
571 			pa->pa_iova_pfn[i] = idaws[i] >> PAGE_SHIFT;
572 	} else {
573 		/*
574 		 * No action is required here; the iova addresses in pfn_array
575 		 * were initialized sequentially in pfn_array_alloc() beginning
576 		 * with the contents of ccw->cda.
577 		 */
578 	}
579 
580 	if (ccw_does_data_transfer(ccw)) {
581 		ret = pfn_array_pin(pa, vdev);
582 		if (ret < 0)
583 			goto out_unpin;
584 	} else {
585 		pa->pa_nr = 0;
586 	}
587 
588 	ccw->cda = (__u32) virt_to_phys(idaws);
589 	ccw->flags |= CCW_FLAG_IDA;
590 
591 	/* Populate the IDAL with pinned/translated addresses from pfn */
592 	pfn_array_idal_create_words(pa, idaws);
593 
594 	return 0;
595 
596 out_unpin:
597 	pfn_array_unpin_free(pa, vdev);
598 out_free_idaws:
599 	kfree(idaws);
600 out_init:
601 	ccw->cda = 0;
602 	return ret;
603 }
604 
605 /*
606  * Fetch one ccw.
607  * To reduce memory copy, we'll pin the cda page in memory,
608  * and to get rid of the cda 2G limitation of ccw1, we'll translate
609  * direct ccws to idal ccws.
610  */
611 static int ccwchain_fetch_one(struct ccwchain *chain,
612 			      int idx,
613 			      struct channel_program *cp)
614 {
615 	struct ccw1 *ccw = chain->ch_ccw + idx;
616 
617 	if (ccw_is_tic(ccw))
618 		return ccwchain_fetch_tic(chain, idx, cp);
619 
620 	return ccwchain_fetch_direct(chain, idx, cp);
621 }
622 
623 /**
624  * cp_init() - allocate ccwchains for a channel program.
625  * @cp: channel_program on which to perform the operation
627  * @orb: control block for the channel program from the guest
628  *
629  * This creates one or more ccwchain(s), and copies the raw data of
630  * the target channel program from @orb->cmd.cpa to the new ccwchain(s).
631  *
632  * Limitations:
633  * 1. Only idal(c64) ccw chaining is supported.
634  * 2. Only 4k idaws are supported.
635  *
636  * Returns:
637  *   %0 on success and a negative error value on failure.
638  */
639 int cp_init(struct channel_program *cp, union orb *orb)
640 {
641 	struct vfio_device *vdev =
642 		&container_of(cp, struct vfio_ccw_private, cp)->vdev;
643 	/* custom ratelimit used to avoid flood during guest IPL */
644 	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 1);
645 	int ret;
646 
647 	/* this is an error in the caller */
648 	if (cp->initialized)
649 		return -EBUSY;
650 
651 	/*
652 	 * We only support prefetching the channel program. We assume all channel
653 	 * programs executed by supported guests likewise support prefetching.
654 	 * Executing a channel program that does not specify prefetching will
655 	 * typically not cause an error, but a warning is issued to help identify
656 	 * the problem if something does break.
657 	 */
658 	if (!orb->cmd.pfch && __ratelimit(&ratelimit_state))
659 		dev_warn(
660 			vdev->dev,
661 			"Prefetching channel program even though prefetch not specified in ORB");
662 
663 	INIT_LIST_HEAD(&cp->ccwchain_list);
664 	memcpy(&cp->orb, orb, sizeof(*orb));
665 
666 	/* Build a ccwchain for the first CCW segment */
667 	ret = ccwchain_handle_ccw(orb->cmd.cpa, cp);
668 
669 	if (!ret) {
670 		cp->initialized = true;
671 
672 		/* It is safe to force: if it was not set but IDALs were used,
673 		 * ccwchain_calc_length would have returned an error.
674 		 */
675 		cp->orb.cmd.c64 = 1;
676 	}
677 
678 	return ret;
679 }
680 
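/*
 * Typical use of the cp_* interfaces (a sketch; the real call sites live
 * in the vfio-ccw state machine and may differ in detail):
 *
 *	ret = cp_init(&private->cp, orb);
 *	if (!ret)
 *		ret = cp_prefetch(&private->cp);
 *	if (!ret)
 *		ccode = ssch(sch->schid,
 *			     cp_get_orb(&private->cp, intparm, lpm));
 *
 * and later, once the interrupt for this I/O arrives:
 *
 *	cp_update_scsw(&private->cp, &irb->scsw);
 *	cp_free(&private->cp);
 */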
681 
682 /**
683  * cp_free() - free resources for channel program.
684  * @cp: channel_program on which to perform the operation
685  *
686  * This unpins the memory pages and frees the memory space occupied by
687  * @cp, which must have been initialized by a previous call to cp_init().
688  * Otherwise, undefined behavior occurs.
689  */
690 void cp_free(struct channel_program *cp)
691 {
692 	struct vfio_device *vdev =
693 		&container_of(cp, struct vfio_ccw_private, cp)->vdev;
694 	struct ccwchain *chain, *temp;
695 	int i;
696 
697 	if (!cp->initialized)
698 		return;
699 
700 	cp->initialized = false;
701 	list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) {
702 		for (i = 0; i < chain->ch_len; i++) {
703 			pfn_array_unpin_free(chain->ch_pa + i, vdev);
704 			ccwchain_cda_free(chain, i);
705 		}
706 		ccwchain_free(chain);
707 	}
708 }
709 
710 /**
711  * cp_prefetch() - translate a guest physical address channel program to
712  *                 a real-device runnable channel program.
713  * @cp: channel_program on which to perform the operation
714  *
715  * This function translates the guest-physical-address channel program
716  * and stores the result to ccwchain list. @cp must have been
717  * initialized by a previous call with cp_init(). Otherwise, undefined
718  * behavior occurs.
719  * For each chain composing the channel program:
720  * - On entry ch_len holds the count of CCWs to be translated.
721  * - On exit ch_len is adjusted to the count of successfully translated CCWs.
722  * This allows cp_free to find in ch_len the count of CCWs to free in a chain.
723  *
724  * The S/390 CCW Translation APIs (prefixed by 'cp_') are introduced
725  * as helpers to do ccw chain translation inside the kernel. Basically
726  * they accept a channel program issued by a virtual machine, and
727  * translate the channel program to a real-device runnable channel
728  * program.
729  *
730  * These APIs will copy the ccws into kernel-space buffers, and update
731  * the guest physical addresses with their corresponding host physical
732  * addresses.  Then channel I/O device drivers could issue the
733  * translated channel program to real devices to perform an I/O
734  * operation.
735  *
736  * These interfaces are designed to support translation only for
737  * channel programs, which are generated and formatted by a
738  * guest. Thus this will make it possible for things like VFIO to
739  * leverage the interfaces to passthrough a channel I/O mediated
740  * device in QEMU.
741  *
742  * We support direct ccw chaining by translating them to idal ccws.
743  *
744  * Returns:
745  *   %0 on success and a negative error value on failure.
746  */
747 int cp_prefetch(struct channel_program *cp)
748 {
749 	struct ccwchain *chain;
750 	int len, idx, ret;
751 
752 	/* this is an error in the caller */
753 	if (!cp->initialized)
754 		return -EINVAL;
755 
756 	list_for_each_entry(chain, &cp->ccwchain_list, next) {
757 		len = chain->ch_len;
758 		for (idx = 0; idx < len; idx++) {
759 			ret = ccwchain_fetch_one(chain, idx, cp);
760 			if (ret)
761 				goto out_err;
762 		}
763 	}
764 
765 	return 0;
766 out_err:
767 	/* Only cleanup the chain elements that were actually translated. */
768 	chain->ch_len = idx;
769 	list_for_each_entry_continue(chain, &cp->ccwchain_list, next) {
770 		chain->ch_len = 0;
771 	}
772 	return ret;
773 }
774 
775 /**
776  * cp_get_orb() - get the orb of the channel program
777  * @cp: channel_program on which to perform the operation
778  * @intparm: new intparm for the returned orb
779  * @lpm: candidate value of the logical-path mask for the returned orb
780  *
781  * This function returns the address of the updated orb of the channel
782  * program. Channel I/O device drivers could use this orb to issue a
783  * ssch.
784  */
785 union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm)
786 {
787 	union orb *orb;
788 	struct ccwchain *chain;
789 	struct ccw1 *cpa;
790 
791 	/* this is an error in the caller */
792 	if (!cp->initialized)
793 		return NULL;
794 
795 	orb = &cp->orb;
796 
797 	orb->cmd.intparm = intparm;
798 	orb->cmd.fmt = 1;
799 	orb->cmd.key = PAGE_DEFAULT_KEY >> 4;
800 
801 	if (orb->cmd.lpm == 0)
802 		orb->cmd.lpm = lpm;
803 
804 	chain = list_first_entry(&cp->ccwchain_list, struct ccwchain, next);
805 	cpa = chain->ch_ccw;
806 	orb->cmd.cpa = (__u32) __pa(cpa);
807 
808 	return orb;
809 }
810 
811 /**
812  * cp_update_scsw() - update scsw for a channel program.
813  * @cp: channel_program on which to perform the operation
814  * @scsw: I/O results of the channel program and also the target to be
815  *        updated
816  *
817  * @scsw contains the I/O results of the channel program pointed to
818  * by @cp. However, what @scsw->cpa stores is a host physical
819  * address, which is meaningless to the guest that is waiting for
820  * the I/O results.
821  *
822  * This function updates @scsw->cpa to its corresponding guest physical
823  * address.
824  */
825 void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
826 {
827 	struct ccwchain *chain;
828 	u32 cpa = scsw->cmd.cpa;
829 	u32 ccw_head;
830 
831 	if (!cp->initialized)
832 		return;
833 
834 	/*
835 	 * LATER:
836 	 * For now, only update the cmd.cpa part. We may need to deal with
837 	 * other portions of the schib as well, even if we don't return them
838 	 * in the ioctl directly. Path status changes etc.
839 	 */
840 	list_for_each_entry(chain, &cp->ccwchain_list, next) {
841 		ccw_head = (u32)(u64)chain->ch_ccw;
842 		/*
843 		 * On successful execution, cpa points just beyond the end
844 		 * of the chain.
845 		 */
846 		if (is_cpa_within_range(cpa, ccw_head, chain->ch_len + 1)) {
847 			/*
848 			 * (cpa - ccw_head) is the offset value of the host
849 			 * physical ccw to its chain head.
850 			 * Adding this value to the guest physical ccw chain
851 			 * head gets us the guest cpa.
852 			 */
853 			cpa = chain->ch_iova + (cpa - ccw_head);
854 			break;
855 		}
856 	}
857 
858 	scsw->cmd.cpa = cpa;
859 }
860 
861 /**
862  * cp_iova_pinned() - check if an iova is pinned for a ccw chain.
863  * @cp: channel_program on which to perform the operation
864  * @iova: the iova to check
865  *
866  * If the @iova is currently pinned for the ccw chain, return true;
867  * else return false.
868  */
869 bool cp_iova_pinned(struct channel_program *cp, u64 iova)
870 {
871 	struct ccwchain *chain;
872 	int i;
873 
874 	if (!cp->initialized)
875 		return false;
876 
877 	list_for_each_entry(chain, &cp->ccwchain_list, next) {
878 		for (i = 0; i < chain->ch_len; i++)
879 			if (pfn_array_iova_pinned(chain->ch_pa + i, iova))
880 				return true;
881 	}
882 
883 	return false;
884 }
885