xref: /openbmc/linux/drivers/scsi/cxlflash/vlun.c (revision 4da722ca)
1 /*
2  * CXL Flash Device Driver
3  *
4  * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
5  *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
6  *
7  * Copyright (C) 2015 IBM Corporation
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License
11  * as published by the Free Software Foundation; either version
12  * 2 of the License, or (at your option) any later version.
13  */
14 
15 #include <linux/syscalls.h>
16 #include <misc/cxl.h>
17 #include <asm/unaligned.h>
18 #include <asm/bitsperlong.h>
19 
20 #include <scsi/scsi_cmnd.h>
21 #include <scsi/scsi_host.h>
22 #include <uapi/scsi/cxlflash_ioctl.h>
23 
24 #include "sislite.h"
25 #include "common.h"
26 #include "vlun.h"
27 #include "superpipe.h"
28 
29 /**
30  * marshal_virt_to_resize() - translate uvirtual to resize structure
31  * @virt:	Source structure from which to translate/copy.
32  * @resize:	Destination structure for the translate/copy.
33  */
34 static void marshal_virt_to_resize(struct dk_cxlflash_uvirtual *virt,
35 				   struct dk_cxlflash_resize *resize)
36 {
37 	resize->hdr = virt->hdr;
38 	resize->context_id = virt->context_id;
39 	resize->rsrc_handle = virt->rsrc_handle;
40 	resize->req_size = virt->lun_size;
41 	resize->last_lba = virt->last_lba;
42 }
43 
44 /**
45  * marshal_clone_to_rele() - translate clone to release structure
46  * @clone:	Source structure from which to translate/copy.
47  * @release:	Destination structure for the translate/copy.
48  */
49 static void marshal_clone_to_rele(struct dk_cxlflash_clone *clone,
50 				  struct dk_cxlflash_release *release)
51 {
52 	release->hdr = clone->hdr;
53 	release->context_id = clone->context_id_dst;
54 }
55 
56 /**
57  * ba_init() - initializes a block allocator
58  * @ba_lun:	Block allocator to initialize.
59  *
60  * Return: 0 on success, -errno on failure
61  */
62 static int ba_init(struct ba_lun *ba_lun)
63 {
64 	struct ba_lun_info *bali = NULL;
65 	int lun_size_au = 0, i = 0;
66 	int last_word_underflow = 0;
67 	u64 *lam;
68 
69 	pr_debug("%s: Initializing LUN: lun_id=%016llx "
70 		 "ba_lun->lsize=%lx ba_lun->au_size=%lX\n",
71 		__func__, ba_lun->lun_id, ba_lun->lsize, ba_lun->au_size);
72 
73 	/* Calculate bit map size */
74 	lun_size_au = ba_lun->lsize / ba_lun->au_size;
75 	if (lun_size_au == 0) {
76 		pr_debug("%s: Requested LUN size of 0!\n", __func__);
77 		return -EINVAL;
78 	}
79 
80 	/* Allocate lun information container */
81 	bali = kzalloc(sizeof(struct ba_lun_info), GFP_KERNEL);
82 	if (unlikely(!bali)) {
83 		pr_err("%s: Failed to allocate lun_info lun_id=%016llx\n",
84 		       __func__, ba_lun->lun_id);
85 		return -ENOMEM;
86 	}
87 
88 	bali->total_aus = lun_size_au;
89 	bali->lun_bmap_size = lun_size_au / BITS_PER_LONG;
90 
91 	if (lun_size_au % BITS_PER_LONG)
92 		bali->lun_bmap_size++;
93 
94 	/* Allocate bitmap space */
95 	bali->lun_alloc_map = kzalloc((bali->lun_bmap_size * sizeof(u64)),
96 				      GFP_KERNEL);
97 	if (unlikely(!bali->lun_alloc_map)) {
98 		pr_err("%s: Failed to allocate lun allocation map: "
99 		       "lun_id=%016llx\n", __func__, ba_lun->lun_id);
100 		kfree(bali);
101 		return -ENOMEM;
102 	}
103 
104 	/* Initialize the bit map size and set all bits to '1' */
105 	bali->free_aun_cnt = lun_size_au;
106 
107 	for (i = 0; i < bali->lun_bmap_size; i++)
108 		bali->lun_alloc_map[i] = 0xFFFFFFFFFFFFFFFFULL;
109 
110 	/* If the last word is not fully utilized, mark extra bits as allocated */
111 	last_word_underflow = (bali->lun_bmap_size * BITS_PER_LONG);
112 	last_word_underflow -= bali->free_aun_cnt;
113 	if (last_word_underflow > 0) {
114 		lam = &bali->lun_alloc_map[bali->lun_bmap_size - 1];
115 		for (i = (HIBIT - last_word_underflow + 1);
116 		     i < BITS_PER_LONG;
117 		     i++)
118 			clear_bit(i, (ulong *)lam);
119 	}
120 
121 	/* Initialize high elevator index, low/curr already at 0 from kzalloc */
122 	bali->free_high_idx = bali->lun_bmap_size;
123 
124 	/* Allocate clone map */
125 	bali->aun_clone_map = kzalloc((bali->total_aus * sizeof(u8)),
126 				      GFP_KERNEL);
127 	if (unlikely(!bali->aun_clone_map)) {
128 		pr_err("%s: Failed to allocate clone map: lun_id=%016llx\n",
129 		       __func__, ba_lun->lun_id);
130 		kfree(bali->lun_alloc_map);
131 		kfree(bali);
132 		return -ENOMEM;
133 	}
134 
135 	/* Pass the allocated LUN info as a handle to the user */
136 	ba_lun->ba_lun_handle = bali;
137 
138 	pr_debug("%s: Successfully initialized the LUN: "
139 		 "lun_id=%016llx bitmap size=%x, free_aun_cnt=%llx\n",
140 		__func__, ba_lun->lun_id, bali->lun_bmap_size,
141 		bali->free_aun_cnt);
142 	return 0;
143 }
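
/*
 * Worked example of the sizing math above (illustrative only; it assumes a
 * 64-bit BITS_PER_LONG, HIBIT as the most-significant bit index 63, and a
 * hypothetical LUN of 1,000,000 blocks with an allocation unit of 256
 * blocks):
 *
 *	lun_size_au         = 1000000 / 256      = 3906 allocation units
 *	lun_bmap_size       = 3906 / 64 + 1      = 62 words
 *	last_word_underflow = 62 * 64 - 3906     = 62 bits
 *
 * The clear_bit() loop then runs from bit (63 - 62 + 1) = 2 through 63 of
 * the final map word, leaving only bits 0 and 1 (the two valid allocation
 * units in that word) marked free.
 */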
144 
145 /**
146  * find_free_range() - locates a free bit within the block allocator
147  * @low:	First word in block allocator to start search.
148  * @high:	Last word in block allocator to search (exclusive).
149  * @bali:	LUN information structure owning the block allocator to search.
150  * @bit_word:	Passes back the word in the block allocator owning the free bit.
151  *
152  * Return: The bit position within the passed back word, -1 on failure
153  */
154 static int find_free_range(u32 low,
155 			   u32 high,
156 			   struct ba_lun_info *bali, int *bit_word)
157 {
158 	int i;
159 	u64 bit_pos = -1;
160 	ulong *lam, num_bits;
161 
162 	for (i = low; i < high; i++)
163 		if (bali->lun_alloc_map[i] != 0) {
164 			lam = (ulong *)&bali->lun_alloc_map[i];
165 			num_bits = (sizeof(*lam) * BITS_PER_BYTE);
166 			bit_pos = find_first_bit(lam, num_bits);
167 
168 			pr_devel("%s: Found free bit %llu in LUN "
169 				 "map entry %016llx at bitmap index = %d\n",
170 				 __func__, bit_pos, bali->lun_alloc_map[i], i);
171 
172 			*bit_word = i;
173 			bali->free_aun_cnt--;
174 			clear_bit(bit_pos, lam);
175 			break;
176 		}
177 
178 	return bit_pos;
179 }
180 
181 /**
182  * ba_alloc() - allocates a block from the block allocator
183  * @ba_lun:	Block allocator from which to allocate a block.
184  *
185  * Return: The allocated block, -1 on failure
186  */
187 static u64 ba_alloc(struct ba_lun *ba_lun)
188 {
189 	u64 bit_pos = -1;
190 	int bit_word = 0;
191 	struct ba_lun_info *bali = NULL;
192 
193 	bali = ba_lun->ba_lun_handle;
194 
195 	pr_debug("%s: Received block allocation request: "
196 		 "lun_id=%016llx free_aun_cnt=%llx\n",
197 		 __func__, ba_lun->lun_id, bali->free_aun_cnt);
198 
199 	if (bali->free_aun_cnt == 0) {
200 		pr_debug("%s: No space left on LUN: lun_id=%016llx\n",
201 			 __func__, ba_lun->lun_id);
202 		return -1ULL;
203 	}
204 
205 	/* Search to find a free entry, curr->high then low->curr */
206 	bit_pos = find_free_range(bali->free_curr_idx,
207 				  bali->free_high_idx, bali, &bit_word);
208 	if (bit_pos == -1) {
209 		bit_pos = find_free_range(bali->free_low_idx,
210 					  bali->free_curr_idx,
211 					  bali, &bit_word);
212 		if (bit_pos == -1) {
213 			pr_debug("%s: Could not find an allocation unit on LUN:"
214 				 " lun_id=%016llx\n", __func__, ba_lun->lun_id);
215 			return -1ULL;
216 		}
217 	}
218 
219 	/* Update the free_curr_idx */
220 	if (bit_pos == HIBIT)
221 		bali->free_curr_idx = bit_word + 1;
222 	else
223 		bali->free_curr_idx = bit_word;
224 
225 	pr_debug("%s: Allocating AU number=%llx lun_id=%016llx "
226 		 "free_aun_cnt=%llx\n", __func__,
227 		 ((bit_word * BITS_PER_LONG) + bit_pos), ba_lun->lun_id,
228 		 bali->free_aun_cnt);
229 
230 	return (u64) ((bit_word * BITS_PER_LONG) + bit_pos);
231 }
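
/*
 * The two find_free_range() calls above implement a simple elevator: the
 * search resumes at free_curr_idx (where the previous allocation left off),
 * runs up to free_high_idx, and only wraps back to free_low_idx when that
 * first pass comes up empty.  A minimal caller sketch (hypothetical, not
 * part of the driver; error handling trimmed):
 *
 *	u64 aun;
 *
 *	aun = ba_alloc(&blka->ba_lun);
 *	if (aun == -1ULL)
 *		return -ENOSPC;
 *	...
 *	ba_free(&blka->ba_lun, aun);
 */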
232 
233 /**
234  * validate_alloc() - validates the specified block has been allocated
235  * @bali:		LUN info owning the block allocator.
236  * @aun:		Block to validate.
237  *
238  * Return: 0 on success, -1 on failure
239  */
240 static int validate_alloc(struct ba_lun_info *bali, u64 aun)
241 {
242 	int idx = 0, bit_pos = 0;
243 
244 	idx = aun / BITS_PER_LONG;
245 	bit_pos = aun % BITS_PER_LONG;
246 
247 	if (test_bit(bit_pos, (ulong *)&bali->lun_alloc_map[idx]))
248 		return -1;
249 
250 	return 0;
251 }
252 
253 /**
254  * ba_free() - frees a block from the block allocator
255  * @ba_lun:	Block allocator from which to free a block.
256  * @to_free:	Block to free.
257  *
258  * Return: 0 on success, -1 on failure
259  */
260 static int ba_free(struct ba_lun *ba_lun, u64 to_free)
261 {
262 	int idx = 0, bit_pos = 0;
263 	struct ba_lun_info *bali = NULL;
264 
265 	bali = ba_lun->ba_lun_handle;
266 
267 	if (validate_alloc(bali, to_free)) {
268 		pr_debug("%s: AUN %llx is not allocated on lun_id=%016llx\n",
269 			 __func__, to_free, ba_lun->lun_id);
270 		return -1;
271 	}
272 
273 	pr_debug("%s: Received a request to free AU=%llx lun_id=%016llx "
274 		 "free_aun_cnt=%llx\n", __func__, to_free, ba_lun->lun_id,
275 		 bali->free_aun_cnt);
276 
277 	if (bali->aun_clone_map[to_free] > 0) {
278 		pr_debug("%s: AUN %llx lun_id=%016llx cloned. Clone count=%x\n",
279 			 __func__, to_free, ba_lun->lun_id,
280 			 bali->aun_clone_map[to_free]);
281 		bali->aun_clone_map[to_free]--;
282 		return 0;
283 	}
284 
285 	idx = to_free / BITS_PER_LONG;
286 	bit_pos = to_free % BITS_PER_LONG;
287 
288 	set_bit(bit_pos, (ulong *)&bali->lun_alloc_map[idx]);
289 	bali->free_aun_cnt++;
290 
291 	if (idx < bali->free_low_idx)
292 		bali->free_low_idx = idx;
293 	else if (idx > bali->free_high_idx)
294 		bali->free_high_idx = idx;
295 
296 	pr_debug("%s: Successfully freed AU bit_pos=%x bit map index=%x "
297 		 "lun_id=%016llx free_aun_cnt=%llx\n", __func__, bit_pos, idx,
298 		 ba_lun->lun_id, bali->free_aun_cnt);
299 
300 	return 0;
301 }
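
/*
 * Illustrative free sequence with clones (hypothetical counts): assume AU 5
 * is allocated and then cloned twice, so aun_clone_map[5] == 2.
 *
 *	ba_free(&blka->ba_lun, 5);   clone count drops 2 -> 1, map untouched
 *	ba_free(&blka->ba_lun, 5);   clone count drops 1 -> 0, map untouched
 *	ba_free(&blka->ba_lun, 5);   bit 5 set again, free_aun_cnt incremented
 *
 * Only the final free actually returns the allocation unit to the bitmap.
 */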
302 
303 /**
304  * ba_clone() - clones a block within the block allocator
305  * @ba_lun:	Block allocator owning the block to clone.
306  * @to_clone:	Block to clone.
307  *
308  * Return: 0 on success, -1 on failure
309  */
310 static int ba_clone(struct ba_lun *ba_lun, u64 to_clone)
311 {
312 	struct ba_lun_info *bali = ba_lun->ba_lun_handle;
313 
314 	if (validate_alloc(bali, to_clone)) {
315 		pr_debug("%s: AUN=%llx not allocated on lun_id=%016llx\n",
316 			 __func__, to_clone, ba_lun->lun_id);
317 		return -1;
318 	}
319 
320 	pr_debug("%s: Received a request to clone AUN %llx on lun_id=%016llx\n",
321 		 __func__, to_clone, ba_lun->lun_id);
322 
323 	if (bali->aun_clone_map[to_clone] == MAX_AUN_CLONE_CNT) {
324 		pr_debug("%s: AUN %llx on lun_id=%016llx hit max clones already\n",
325 			 __func__, to_clone, ba_lun->lun_id);
326 		return -1;
327 	}
328 
329 	bali->aun_clone_map[to_clone]++;
330 
331 	return 0;
332 }
333 
334 /**
335  * ba_space() - returns the amount of free space left in the block allocator
336  * @ba_lun:	Block allocator.
337  *
338  * Return: Amount of free space in block allocator
339  */
340 static u64 ba_space(struct ba_lun *ba_lun)
341 {
342 	struct ba_lun_info *bali = ba_lun->ba_lun_handle;
343 
344 	return bali->free_aun_cnt;
345 }
346 
347 /**
348  * cxlflash_ba_terminate() - frees resources associated with the block allocator
349  * @ba_lun:	Block allocator.
350  *
351  * Safe to call in a partially allocated state.
352  */
353 void cxlflash_ba_terminate(struct ba_lun *ba_lun)
354 {
355 	struct ba_lun_info *bali = ba_lun->ba_lun_handle;
356 
357 	if (bali) {
358 		kfree(bali->aun_clone_map);
359 		kfree(bali->lun_alloc_map);
360 		kfree(bali);
361 		ba_lun->ba_lun_handle = NULL;
362 	}
363 }
364 
365 /**
366  * init_vlun() - initializes a LUN for virtual use
367  * @lli:	Per adapter LUN information structure that owns the block allocator.
368  *
369  * Return: 0 on success, -errno on failure
370  */
371 static int init_vlun(struct llun_info *lli)
372 {
373 	int rc = 0;
374 	struct glun_info *gli = lli->parent;
375 	struct blka *blka = &gli->blka;
376 
377 	memset(blka, 0, sizeof(*blka));
378 	mutex_init(&blka->mutex);
379 
380 	/* LUN IDs are unique per port, save the index instead */
381 	blka->ba_lun.lun_id = lli->lun_index;
382 	blka->ba_lun.lsize = gli->max_lba + 1;
383 	blka->ba_lun.lba_size = gli->blk_len;
384 
385 	blka->ba_lun.au_size = MC_CHUNK_SIZE;
386 	blka->nchunk = blka->ba_lun.lsize / MC_CHUNK_SIZE;
387 
388 	rc = ba_init(&blka->ba_lun);
389 	if (unlikely(rc))
390 		pr_debug("%s: cannot init block_alloc, rc=%d\n", __func__, rc);
391 
392 	pr_debug("%s: returning rc=%d lli=%p\n", __func__, rc, lli);
393 	return rc;
394 }
395 
396 /**
397  * write_same16() - sends a SCSI WRITE_SAME16 command of zeroes to the specified LUN
398  * @sdev:	SCSI device associated with LUN.
399  * @lba:	Logical block address to start write same.
400  * @nblks:	Number of logical blocks to write same.
401  *
402  * The SCSI WRITE_SAME16 can take quite a while to complete. Should an EEH occur
403  * while in scsi_execute(), the EEH handler will attempt to recover. As part of
404  * the recovery, the handler drains all currently running ioctls, waiting until
405  * they have completed before proceeding with a reset. As this routine is used
406  * on the ioctl path, this can create a condition where the EEH handler becomes
407  * stuck, infinitely waiting for this ioctl thread. To avoid this behavior,
408  * temporarily unmark this thread as an ioctl thread by releasing the ioctl read
409  * semaphore. This will allow the EEH handler to proceed with a recovery while
410  * this thread is still running. Once the scsi_execute() returns, reacquire the
411  * ioctl read semaphore and check the adapter state in case it changed while
412  * inside of scsi_execute(). The state check will wait if the adapter is still
413  * being recovered or return a failure if the recovery failed. In the event that
414  * the adapter reset failed, simply return the failure as the ioctl would be
415  * unable to continue.
416  *
417  * Note that the above puts a requirement on this routine to only be called on
418  * an ioctl thread.
419  *
420  * Return: 0 on success, -errno on failure
421  */
422 static int write_same16(struct scsi_device *sdev,
423 			u64 lba,
424 			u32 nblks)
425 {
426 	u8 *cmd_buf = NULL;
427 	u8 *scsi_cmd = NULL;
428 	u8 *sense_buf = NULL;
429 	int rc = 0;
430 	int result = 0;
431 	int ws_limit = SISLITE_MAX_WS_BLOCKS;
432 	u64 offset = lba;
433 	int left = nblks;
434 	u32 to = sdev->request_queue->rq_timeout;
435 	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
436 	struct device *dev = &cfg->dev->dev;
437 
438 	cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
439 	scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
440 	sense_buf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
441 	if (unlikely(!cmd_buf || !scsi_cmd || !sense_buf)) {
442 		rc = -ENOMEM;
443 		goto out;
444 	}
445 
446 	while (left > 0) {
447 
448 		scsi_cmd[0] = WRITE_SAME_16;
449 		scsi_cmd[1] = cfg->ws_unmap ? 0x8 : 0;
450 		put_unaligned_be64(offset, &scsi_cmd[2]);
451 		put_unaligned_be32(ws_limit < left ? ws_limit : left,
452 				   &scsi_cmd[10]);
453 
454 		/* Drop the ioctl read semaphore across the lengthy call */
455 		up_read(&cfg->ioctl_rwsem);
456 		result = scsi_execute(sdev, scsi_cmd, DMA_TO_DEVICE, cmd_buf,
457 				      CMD_BUFSIZE, sense_buf, NULL, to,
458 				      CMD_RETRIES, 0, 0, NULL);
459 		down_read(&cfg->ioctl_rwsem);
460 		rc = check_state(cfg);
461 		if (rc) {
462 			dev_err(dev, "%s: Failed state result=%08x\n",
463 				__func__, result);
464 			rc = -ENODEV;
465 			goto out;
466 		}
467 
468 		if (result) {
469 			dev_err_ratelimited(dev, "%s: command failed for "
470 					    "offset=%lld result=%08x\n",
471 					    __func__, offset, result);
472 			rc = -EIO;
473 			goto out;
474 		}
475 		left -= ws_limit;
476 		offset += ws_limit;
477 	}
478 
479 out:
480 	kfree(cmd_buf);
481 	kfree(scsi_cmd);
482 	kfree(sense_buf);
483 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
484 	return rc;
485 }
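
/*
 * CDB produced by each pass of the loop above, per the standard SCSI
 * WRITE SAME(16) format:
 *
 *	scsi_cmd[0]       0x93 (WRITE_SAME_16 opcode)
 *	scsi_cmd[1]       0x08 when unmap is supported, otherwise 0
 *	scsi_cmd[2..9]    starting LBA, big endian
 *	scsi_cmd[10..13]  number of blocks, big endian (capped at ws_limit)
 *	scsi_cmd[14..15]  group number and control, left zero
 *
 * The zero-filled cmd_buf supplies the single block of data that the device
 * replicates across the range.
 */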
486 
487 /**
488  * grow_lxt() - expands the translation table associated with the specified RHTE
489  * @afu:	AFU associated with the host.
490  * @sdev:	SCSI device associated with LUN.
491  * @ctxid:	Context ID of context owning the RHTE.
492  * @rhndl:	Resource handle associated with the RHTE.
493  * @rhte:	Resource handle entry (RHTE).
494  * @new_size:	Number of translation entries associated with RHTE.
495  *
496  * By design, this routine employs a 'best attempt' allocation and will
497  * truncate the requested size down if there is not sufficient space in
498  * the block allocator to satisfy the request but there does exist some
499  * amount of space. The user is made aware of this by returning the size
500  * allocated.
501  *
502  * Return: 0 on success, -errno on failure
503  */
504 static int grow_lxt(struct afu *afu,
505 		    struct scsi_device *sdev,
506 		    ctx_hndl_t ctxid,
507 		    res_hndl_t rhndl,
508 		    struct sisl_rht_entry *rhte,
509 		    u64 *new_size)
510 {
511 	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
512 	struct device *dev = &cfg->dev->dev;
513 	struct sisl_lxt_entry *lxt = NULL, *lxt_old = NULL;
514 	struct llun_info *lli = sdev->hostdata;
515 	struct glun_info *gli = lli->parent;
516 	struct blka *blka = &gli->blka;
517 	u32 av_size;
518 	u32 ngrps, ngrps_old;
519 	u64 aun;		/* chunk# allocated by block allocator */
520 	u64 delta = *new_size - rhte->lxt_cnt;
521 	u64 my_new_size;
522 	int i, rc = 0;
523 
524 	/*
525 	 * Check what is available in the block allocator before re-allocating
526 	 * LXT array. This is done up front under the mutex which must not be
527 	 * released until after allocation is complete.
528 	 */
529 	mutex_lock(&blka->mutex);
530 	av_size = ba_space(&blka->ba_lun);
531 	if (unlikely(av_size <= 0)) {
532 		dev_dbg(dev, "%s: ba_space error av_size=%d\n",
533 			__func__, av_size);
534 		mutex_unlock(&blka->mutex);
535 		rc = -ENOSPC;
536 		goto out;
537 	}
538 
539 	if (av_size < delta)
540 		delta = av_size;
541 
542 	lxt_old = rhte->lxt_start;
543 	ngrps_old = LXT_NUM_GROUPS(rhte->lxt_cnt);
544 	ngrps = LXT_NUM_GROUPS(rhte->lxt_cnt + delta);
545 
546 	if (ngrps != ngrps_old) {
547 		/* reallocate to fit new size */
548 		lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
549 			      GFP_KERNEL);
550 		if (unlikely(!lxt)) {
551 			mutex_unlock(&blka->mutex);
552 			rc = -ENOMEM;
553 			goto out;
554 		}
555 
556 		/* copy over all old entries */
557 		memcpy(lxt, lxt_old, (sizeof(*lxt) * rhte->lxt_cnt));
558 	} else
559 		lxt = lxt_old;
560 
561 	/* nothing can fail from now on */
562 	my_new_size = rhte->lxt_cnt + delta;
563 
564 	/* add new entries to the end */
565 	for (i = rhte->lxt_cnt; i < my_new_size; i++) {
566 		/*
567 		 * Due to the earlier check of available space, ba_alloc
568 		 * cannot fail here. If it did due to internal error,
569 		 * leave a rlba_base of -1u which will likely be an
570 		 * invalid LUN (too large).
571 		 */
572 		aun = ba_alloc(&blka->ba_lun);
573 		if ((aun == -1ULL) || (aun >= blka->nchunk))
574 			dev_dbg(dev, "%s: ba_alloc error allocated chunk=%llu "
575 				"max=%llu\n", __func__, aun, blka->nchunk - 1);
576 
577 		/* select both ports, use r/w perms from RHT */
578 		lxt[i].rlba_base = ((aun << MC_CHUNK_SHIFT) |
579 				    (lli->lun_index << LXT_LUNIDX_SHIFT) |
580 				    (RHT_PERM_RW << LXT_PERM_SHIFT |
581 				     lli->port_sel));
582 	}
583 
584 	mutex_unlock(&blka->mutex);
585 
586 	/*
587 	 * The following sequence is prescribed in the SISlite spec
588 	 * for syncing up with the AFU when adding LXT entries.
589 	 */
590 	dma_wmb(); /* Make LXT updates visible */
591 
592 	rhte->lxt_start = lxt;
593 	dma_wmb(); /* Make RHT entry's LXT table update visible */
594 
595 	rhte->lxt_cnt = my_new_size;
596 	dma_wmb(); /* Make RHT entry's LXT table size update visible */
597 
598 	rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
599 	if (unlikely(rc))
600 		rc = -EAGAIN;
601 
602 	/* free old lxt if reallocated */
603 	if (lxt != lxt_old)
604 		kfree(lxt_old);
605 	*new_size = my_new_size;
606 out:
607 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
608 	return rc;
609 }
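
/*
 * Schematic of the rlba_base encoding built in the loop above (field
 * positions come from the LXT_ and MC_ definitions in the driver headers
 * and are not spelled out here):
 *
 *	aun          << MC_CHUNK_SHIFT     chunk's first LBA on the real LUN
 *	lun_index    << LXT_LUNIDX_SHIFT   LUN table slot to route through
 *	RHT_PERM_RW  << LXT_PERM_SHIFT     read/write permission bits
 *	port_sel                           port selection mask in the low bits
 *
 * all OR'd together: the AFU treats the high-order bits as the real-LUN LBA
 * of the chunk and the low-order bits as routing and permission controls.
 */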
610 
611 /**
612  * shrink_lxt() - reduces translation table associated with the specified RHTE
613  * @afu:	AFU associated with the host.
614  * @sdev:	SCSI device associated with LUN.
615  * @rhndl:	Resource handle associated with the RHTE.
616  * @rhte:	Resource handle entry (RHTE).
617  * @ctxi:	Context owning resources.
618  * @new_size:	Number of translation entries associated with RHTE.
619  *
620  * Return: 0 on success, -errno on failure
621  */
622 static int shrink_lxt(struct afu *afu,
623 		      struct scsi_device *sdev,
624 		      res_hndl_t rhndl,
625 		      struct sisl_rht_entry *rhte,
626 		      struct ctx_info *ctxi,
627 		      u64 *new_size)
628 {
629 	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
630 	struct device *dev = &cfg->dev->dev;
631 	struct sisl_lxt_entry *lxt, *lxt_old;
632 	struct llun_info *lli = sdev->hostdata;
633 	struct glun_info *gli = lli->parent;
634 	struct blka *blka = &gli->blka;
635 	ctx_hndl_t ctxid = DECODE_CTXID(ctxi->ctxid);
636 	bool needs_ws = ctxi->rht_needs_ws[rhndl];
637 	bool needs_sync = !ctxi->err_recovery_active;
638 	u32 ngrps, ngrps_old;
639 	u64 aun;		/* chunk# allocated by block allocator */
640 	u64 delta = rhte->lxt_cnt - *new_size;
641 	u64 my_new_size;
642 	int i, rc = 0;
643 
644 	lxt_old = rhte->lxt_start;
645 	ngrps_old = LXT_NUM_GROUPS(rhte->lxt_cnt);
646 	ngrps = LXT_NUM_GROUPS(rhte->lxt_cnt - delta);
647 
648 	if (ngrps != ngrps_old) {
649 		/* Reallocate to fit new size unless new size is 0 */
650 		if (ngrps) {
651 			lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
652 				      GFP_KERNEL);
653 			if (unlikely(!lxt)) {
654 				rc = -ENOMEM;
655 				goto out;
656 			}
657 
658 			/* Copy over old entries that will remain */
659 			memcpy(lxt, lxt_old,
660 			       (sizeof(*lxt) * (rhte->lxt_cnt - delta)));
661 		} else
662 			lxt = NULL;
663 	} else
664 		lxt = lxt_old;
665 
666 	/* Nothing can fail from now on */
667 	my_new_size = rhte->lxt_cnt - delta;
668 
669 	/*
670 	 * The following sequence is prescribed in the SISlite spec
671 	 * for syncing up with the AFU when removing LXT entries.
672 	 */
673 	rhte->lxt_cnt = my_new_size;
674 	dma_wmb(); /* Make RHT entry's LXT table size update visible */
675 
676 	rhte->lxt_start = lxt;
677 	dma_wmb(); /* Make RHT entry's LXT table update visible */
678 
679 	if (needs_sync) {
680 		rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
681 		if (unlikely(rc))
682 			rc = -EAGAIN;
683 	}
684 
685 	if (needs_ws) {
686 		/*
687 		 * Mark the context as unavailable, so that we can release
688 		 * the mutex safely.
689 		 */
690 		ctxi->unavail = true;
691 		mutex_unlock(&ctxi->mutex);
692 	}
693 
694 	/* Free LBAs allocated to freed chunks */
695 	mutex_lock(&blka->mutex);
696 	for (i = delta - 1; i >= 0; i--) {
697 		/* Mask the higher 48 bits before shifting, even though
698 		 * it is a noop
699 		 */
700 		aun = (lxt_old[my_new_size + i].rlba_base & SISL_ASTATUS_MASK);
701 		aun = (aun >> MC_CHUNK_SHIFT);
702 		if (needs_ws)
703 			write_same16(sdev, aun, MC_CHUNK_SIZE);
704 		ba_free(&blka->ba_lun, aun);
705 	}
706 	mutex_unlock(&blka->mutex);
707 
708 	if (needs_ws) {
709 		/* Make the context visible again */
710 		mutex_lock(&ctxi->mutex);
711 		ctxi->unavail = false;
712 	}
713 
714 	/* Free old lxt if reallocated */
715 	if (lxt != lxt_old)
716 		kfree(lxt_old);
717 	*new_size = my_new_size;
718 out:
719 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
720 	return rc;
721 }
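
/*
 * Decode sketch, the mirror of the encoding performed in grow_lxt(): the
 * loop above recovers the chunk number of each retired entry by shifting
 * away the low-order LUN-index, permission and port bits,
 *
 *	aun = lxt_old[my_new_size + i].rlba_base >> MC_CHUNK_SHIFT;
 *
 * after first masking with SISL_ASTATUS_MASK, which the in-line comment
 * notes is a no-op for well-formed entries.
 */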
722 
723 /**
724  * _cxlflash_vlun_resize() - changes the size of a virtual LUN
725  * @sdev:	SCSI device associated with LUN owning virtual LUN.
726  * @ctxi:	Context owning resources.
727  * @resize:	Resize ioctl data structure.
728  *
729  * On successful return, the user is informed of the new size (in blocks)
730  * of the virtual LUN in last LBA format. When the size of the virtual
731  * LUN is zero, the last LBA is reflected as -1. See comment in the
732  * prologue for _cxlflash_disk_release() regarding AFU syncs and contexts
733  * on the error recovery list.
734  *
735  * Return: 0 on success, -errno on failure
736  */
737 int _cxlflash_vlun_resize(struct scsi_device *sdev,
738 			  struct ctx_info *ctxi,
739 			  struct dk_cxlflash_resize *resize)
740 {
741 	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
742 	struct device *dev = &cfg->dev->dev;
743 	struct llun_info *lli = sdev->hostdata;
744 	struct glun_info *gli = lli->parent;
745 	struct afu *afu = cfg->afu;
746 	bool put_ctx = false;
747 
748 	res_hndl_t rhndl = resize->rsrc_handle;
749 	u64 new_size;
750 	u64 nsectors;
751 	u64 ctxid = DECODE_CTXID(resize->context_id),
752 	    rctxid = resize->context_id;
753 
754 	struct sisl_rht_entry *rhte;
755 
756 	int rc = 0;
757 
758 	/*
759 	 * The requested size (req_size) is always assumed to be in 4k blocks,
760 	 * so we have to convert it here from 4k to chunk size.
761 	 */
762 	nsectors = (resize->req_size * CXLFLASH_BLOCK_SIZE) / gli->blk_len;
763 	new_size = DIV_ROUND_UP(nsectors, MC_CHUNK_SIZE);
764 
765 	dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu req_size=%llu new_size=%llu\n",
766 		__func__, ctxid, resize->rsrc_handle, resize->req_size,
767 		new_size);
768 
769 	if (unlikely(gli->mode != MODE_VIRTUAL)) {
770 		dev_dbg(dev, "%s: LUN mode does not support resize mode=%d\n",
771 			__func__, gli->mode);
772 		rc = -EINVAL;
773 		goto out;
774 
775 	}
776 
777 	if (!ctxi) {
778 		ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
779 		if (unlikely(!ctxi)) {
780 			dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
781 				__func__, ctxid);
782 			rc = -EINVAL;
783 			goto out;
784 		}
785 
786 		put_ctx = true;
787 	}
788 
789 	rhte = get_rhte(ctxi, rhndl, lli);
790 	if (unlikely(!rhte)) {
791 		dev_dbg(dev, "%s: Bad resource handle rhndl=%u\n",
792 			__func__, rhndl);
793 		rc = -EINVAL;
794 		goto out;
795 	}
796 
797 	if (new_size > rhte->lxt_cnt)
798 		rc = grow_lxt(afu, sdev, ctxid, rhndl, rhte, &new_size);
799 	else if (new_size < rhte->lxt_cnt)
800 		rc = shrink_lxt(afu, sdev, rhndl, rhte, ctxi, &new_size);
801 	else {
802 		/*
803 		 * Rare case where there is already sufficient space, just
804 		 * need to perform a translation sync with the AFU. This
805 		 * scenario likely follows a previous sync failure during
806 		 * a resize operation. Accordingly, perform the heavyweight
807 		 * form of translation sync as it is unknown which type of
808 		 * resize failed previously.
809 		 */
810 		rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
811 		if (unlikely(rc)) {
812 			rc = -EAGAIN;
813 			goto out;
814 		}
815 	}
816 
817 	resize->hdr.return_flags = 0;
818 	resize->last_lba = (new_size * MC_CHUNK_SIZE * gli->blk_len);
819 	resize->last_lba /= CXLFLASH_BLOCK_SIZE;
820 	resize->last_lba--;
821 
822 out:
823 	if (put_ctx)
824 		put_context(ctxi);
825 	dev_dbg(dev, "%s: resized to %llu returning rc=%d\n",
826 		__func__, resize->last_lba, rc);
827 	return rc;
828 }
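
/*
 * Worked size conversion (illustrative numbers only; assumes a 4K
 * CXLFLASH_BLOCK_SIZE, a 4K LUN block length and a chunk size of 256
 * blocks):
 *
 *	req_size = 1000 4K blocks
 *	nsectors = 1000 * 4096 / 4096            = 1000
 *	new_size = DIV_ROUND_UP(1000, 256)       = 4 chunks
 *	last_lba = 4 * 256 * 4096 / 4096 - 1     = 1023
 *
 * The virtual LUN is therefore rounded up to whole chunks and the user sees
 * the rounded-up size reflected back in last LBA format.
 */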
829 
830 int cxlflash_vlun_resize(struct scsi_device *sdev,
831 			 struct dk_cxlflash_resize *resize)
832 {
833 	return _cxlflash_vlun_resize(sdev, NULL, resize);
834 }
835 
836 /**
837  * cxlflash_restore_luntable() - Restore LUN table to prior state
838  * @cfg:	Internal structure associated with the host.
839  */
840 void cxlflash_restore_luntable(struct cxlflash_cfg *cfg)
841 {
842 	struct llun_info *lli, *temp;
843 	u32 lind;
844 	int k;
845 	struct device *dev = &cfg->dev->dev;
846 	__be64 __iomem *fc_port_luns;
847 
848 	mutex_lock(&global.mutex);
849 
850 	list_for_each_entry_safe(lli, temp, &cfg->lluns, list) {
851 		if (!lli->in_table)
852 			continue;
853 
854 		lind = lli->lun_index;
855 		dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n", __func__, lind);
856 
857 		for (k = 0; k < cfg->num_fc_ports; k++)
858 			if (lli->port_sel & (1 << k)) {
859 				fc_port_luns = get_fc_port_luns(cfg, k);
860 				writeq_be(lli->lun_id[k], &fc_port_luns[lind]);
861 				dev_dbg(dev, "\t%d=%llx\n", k, lli->lun_id[k]);
862 			}
863 	}
864 
865 	mutex_unlock(&global.mutex);
866 }
867 
868 /**
869  * get_num_ports() - compute number of ports from port selection mask
870  * @psm:	Port selection mask.
871  *
872  * Return: Population count of port selection mask
873  */
874 static inline u8 get_num_ports(u32 psm)
875 {
876 	static const u8 bits[16] = { 0, 1, 1, 2, 1, 2, 2, 3,
877 				     1, 2, 2, 3, 2, 3, 3, 4 };
878 
879 	return bits[psm & 0xf];
880 }
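
/*
 * Example: a port selection mask of 0x5 (ports 0 and 2) indexes bits[5],
 * which is 2.  Masking with 0xf restricts the lookup to the low four port
 * bits, matching the table size.
 */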
881 
882 /**
883  * init_luntable() - write an entry in the LUN table
884  * @cfg:	Internal structure associated with the host.
885  * @lli:	Per adapter LUN information structure.
886  *
887  * On successful return, a LUN table entry is created:
888  *	- at the top for LUNs visible on multiple ports.
889  *	- at the bottom for LUNs visible only on one port.
890  *
891  * Return: 0 on success, -errno on failure
892  */
893 static int init_luntable(struct cxlflash_cfg *cfg, struct llun_info *lli)
894 {
895 	u32 chan;
896 	u32 lind;
897 	u32 nports;
898 	int rc = 0;
899 	int k;
900 	struct device *dev = &cfg->dev->dev;
901 	__be64 __iomem *fc_port_luns;
902 
903 	mutex_lock(&global.mutex);
904 
905 	if (lli->in_table)
906 		goto out;
907 
908 	nports = get_num_ports(lli->port_sel);
909 	if (nports == 0 || nports > cfg->num_fc_ports) {
910 		WARN(1, "Unsupported port configuration nports=%u", nports);
911 		rc = -EIO;
912 		goto out;
913 	}
914 
915 	if (nports > 1) {
916 		/*
917 		 * When LUN is visible from multiple ports, we will put
918 		 * it in the top half of the LUN table.
919 		 */
920 		for (k = 0; k < cfg->num_fc_ports; k++) {
921 			if (!(lli->port_sel & (1 << k)))
922 				continue;
923 
924 			if (cfg->promote_lun_index == cfg->last_lun_index[k]) {
925 				rc = -ENOSPC;
926 				goto out;
927 			}
928 		}
929 
930 		lind = lli->lun_index = cfg->promote_lun_index;
931 		dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n", __func__, lind);
932 
933 		for (k = 0; k < cfg->num_fc_ports; k++) {
934 			if (!(lli->port_sel & (1 << k)))
935 				continue;
936 
937 			fc_port_luns = get_fc_port_luns(cfg, k);
938 			writeq_be(lli->lun_id[k], &fc_port_luns[lind]);
939 			dev_dbg(dev, "\t%d=%llx\n", k, lli->lun_id[k]);
940 		}
941 
942 		cfg->promote_lun_index++;
943 	} else {
944 		/*
945 		 * When LUN is visible only from one port, we will put
946 		 * it in the bottom half of the LUN table.
947 		 */
948 		chan = PORTMASK2CHAN(lli->port_sel);
949 		if (cfg->promote_lun_index == cfg->last_lun_index[chan]) {
950 			rc = -ENOSPC;
951 			goto out;
952 		}
953 
954 		lind = lli->lun_index = cfg->last_lun_index[chan];
955 		fc_port_luns = get_fc_port_luns(cfg, chan);
956 		writeq_be(lli->lun_id[chan], &fc_port_luns[lind]);
957 		cfg->last_lun_index[chan]--;
958 		dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n\t%d=%llx\n",
959 			__func__, lind, chan, lli->lun_id[chan]);
960 	}
961 
962 	lli->in_table = true;
963 out:
964 	mutex_unlock(&global.mutex);
965 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
966 	return rc;
967 }
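
/*
 * LUN table layout implied by the logic above (per FC port):
 *
 *	low indices, growing upward via promote_lun_index:
 *	    LUNs visible on multiple ports (same slot on every port)
 *	high indices, growing downward via last_lun_index[chan]:
 *	    LUNs visible on a single port
 *
 * When the upward counter meets a required port's downward counter, the
 * table is full and the routine fails with -ENOSPC.
 */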
968 
969 /**
970  * cxlflash_disk_virtual_open() - open a virtual disk of specified size
971  * @sdev:	SCSI device associated with LUN owning virtual LUN.
972  * @arg:	UVirtual ioctl data structure.
973  *
974  * On successful return, the user is informed of the resource handle
975  * to be used to identify the virtual LUN and the size (in blocks) of
976  * the virtual LUN in last LBA format. When the size of the virtual LUN
977  * is zero, the last LBA is reflected as -1.
978  *
979  * Return: 0 on success, -errno on failure
980  */
981 int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
982 {
983 	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
984 	struct device *dev = &cfg->dev->dev;
985 	struct llun_info *lli = sdev->hostdata;
986 	struct glun_info *gli = lli->parent;
987 
988 	struct dk_cxlflash_uvirtual *virt = (struct dk_cxlflash_uvirtual *)arg;
989 	struct dk_cxlflash_resize resize;
990 
991 	u64 ctxid = DECODE_CTXID(virt->context_id),
992 	    rctxid = virt->context_id;
993 	u64 lun_size = virt->lun_size;
994 	u64 last_lba = 0;
995 	u64 rsrc_handle = -1;
996 
997 	int rc = 0;
998 
999 	struct ctx_info *ctxi = NULL;
1000 	struct sisl_rht_entry *rhte = NULL;
1001 
1002 	dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size);
1003 
1004 	/* Setup the LUNs block allocator on first call */
1005 	mutex_lock(&gli->mutex);
1006 	if (gli->mode == MODE_NONE) {
1007 		rc = init_vlun(lli);
1008 		if (rc) {
1009 			dev_err(dev, "%s: init_vlun failed rc=%d\n",
1010 				__func__, rc);
1011 			rc = -ENOMEM;
1012 			goto err0;
1013 		}
1014 	}
1015 
1016 	rc = cxlflash_lun_attach(gli, MODE_VIRTUAL, true);
1017 	if (unlikely(rc)) {
1018 		dev_err(dev, "%s: Failed attach to LUN (VIRTUAL)\n", __func__);
1019 		goto err0;
1020 	}
1021 	mutex_unlock(&gli->mutex);
1022 
1023 	rc = init_luntable(cfg, lli);
1024 	if (rc) {
1025 		dev_err(dev, "%s: init_luntable failed rc=%d\n", __func__, rc);
1026 		goto err1;
1027 	}
1028 
1029 	ctxi = get_context(cfg, rctxid, lli, 0);
1030 	if (unlikely(!ctxi)) {
1031 		dev_err(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
1032 		rc = -EINVAL;
1033 		goto err1;
1034 	}
1035 
1036 	rhte = rhte_checkout(ctxi, lli);
1037 	if (unlikely(!rhte)) {
1038 		dev_err(dev, "%s: too many opens ctxid=%llu\n",
1039 			__func__, ctxid);
1040 		rc = -EMFILE;	/* too many opens  */
1041 		goto err1;
1042 	}
1043 
1044 	rsrc_handle = (rhte - ctxi->rht_start);
1045 
1046 	/* Populate RHT format 0 */
1047 	rhte->nmask = MC_RHT_NMASK;
1048 	rhte->fp = SISL_RHT_FP(0U, ctxi->rht_perms);
1049 
1050 	/* Resize even if requested size is 0 */
1051 	marshal_virt_to_resize(virt, &resize);
1052 	resize.rsrc_handle = rsrc_handle;
1053 	rc = _cxlflash_vlun_resize(sdev, ctxi, &resize);
1054 	if (rc) {
1055 		dev_err(dev, "%s: resize failed rc=%d\n", __func__, rc);
1056 		goto err2;
1057 	}
1058 	last_lba = resize.last_lba;
1059 
1060 	if (virt->hdr.flags & DK_CXLFLASH_UVIRTUAL_NEED_WRITE_SAME)
1061 		ctxi->rht_needs_ws[rsrc_handle] = true;
1062 
1063 	virt->hdr.return_flags = 0;
1064 	virt->last_lba = last_lba;
1065 	virt->rsrc_handle = rsrc_handle;
1066 
1067 	if (get_num_ports(lli->port_sel) > 1)
1068 		virt->hdr.return_flags |= DK_CXLFLASH_ALL_PORTS_ACTIVE;
1069 out:
1070 	if (likely(ctxi))
1071 		put_context(ctxi);
1072 	dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n",
1073 		__func__, rsrc_handle, rc, last_lba);
1074 	return rc;
1075 
1076 err2:
1077 	rhte_checkin(ctxi, rhte);
1078 err1:
1079 	cxlflash_lun_detach(gli);
1080 	goto out;
1081 err0:
1082 	/* Special common cleanup prior to successful LUN attach */
1083 	cxlflash_ba_terminate(&gli->blka.ba_lun);
1084 	mutex_unlock(&gli->mutex);
1085 	goto out;
1086 }
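
/*
 * Userspace sketch (illustrative only, not a complete program): once a
 * DK_CXLFLASH_ATTACH has returned a context id for the disk, a zero-sized
 * virtual LUN is typically created and then grown.  Field and ioctl names
 * follow uapi/scsi/cxlflash_ioctl.h; header setup and error handling are
 * omitted here.
 *
 *	struct dk_cxlflash_uvirtual virt = { 0 };
 *	struct dk_cxlflash_resize rsz = { 0 };
 *
 *	virt.context_id = context_id;
 *	virt.lun_size = 0;
 *	ioctl(disk_fd, DK_CXLFLASH_USER_VIRTUAL, &virt);
 *
 *	rsz.context_id = context_id;
 *	rsz.rsrc_handle = virt.rsrc_handle;
 *	rsz.req_size = nblocks_4k;
 *	ioctl(disk_fd, DK_CXLFLASH_VLUN_RESIZE, &rsz);
 */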
1087 
1088 /**
1089  * clone_lxt() - copies translation tables from source to destination RHTE
1090  * @afu:	AFU associated with the host.
1091  * @blka:	Block allocator associated with LUN.
1092  * @ctxid:	Context ID of context owning the RHTE.
1093  * @rhndl:	Resource handle associated with the RHTE.
1094  * @rhte:	Destination resource handle entry (RHTE).
1095  * @rhte_src:	Source resource handle entry (RHTE).
1096  *
1097  * Return: 0 on success, -errno on failure
1098  */
1099 static int clone_lxt(struct afu *afu,
1100 		     struct blka *blka,
1101 		     ctx_hndl_t ctxid,
1102 		     res_hndl_t rhndl,
1103 		     struct sisl_rht_entry *rhte,
1104 		     struct sisl_rht_entry *rhte_src)
1105 {
1106 	struct cxlflash_cfg *cfg = afu->parent;
1107 	struct device *dev = &cfg->dev->dev;
1108 	struct sisl_lxt_entry *lxt = NULL;
1109 	bool locked = false;
1110 	u32 ngrps;
1111 	u64 aun;		/* chunk# allocated by block allocator */
1112 	int j;
1113 	int i = 0;
1114 	int rc = 0;
1115 
1116 	ngrps = LXT_NUM_GROUPS(rhte_src->lxt_cnt);
1117 
1118 	if (ngrps) {
1119 		/* allocate new LXTs for clone */
1120 		lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
1121 				GFP_KERNEL);
1122 		if (unlikely(!lxt)) {
1123 			rc = -ENOMEM;
1124 			goto out;
1125 		}
1126 
1127 		/* copy over */
1128 		memcpy(lxt, rhte_src->lxt_start,
1129 		       (sizeof(*lxt) * rhte_src->lxt_cnt));
1130 
1131 		/* clone the LBAs in block allocator via ref_cnt, note that the
1132 		 * block allocator mutex must be held until it is established
1133 		 * that this routine will complete without the need for a
1134 		 * cleanup.
1135 		 */
1136 		mutex_lock(&blka->mutex);
1137 		locked = true;
1138 		for (i = 0; i < rhte_src->lxt_cnt; i++) {
1139 			aun = (lxt[i].rlba_base >> MC_CHUNK_SHIFT);
1140 			if (ba_clone(&blka->ba_lun, aun) == -1ULL) {
1141 				rc = -EIO;
1142 				goto err;
1143 			}
1144 		}
1145 	}
1146 
1147 	/*
1148 	 * The following sequence is prescribed in the SISlite spec
1149 	 * for syncing up with the AFU when adding LXT entries.
1150 	 */
1151 	dma_wmb(); /* Make LXT updates visible */
1152 
1153 	rhte->lxt_start = lxt;
1154 	dma_wmb(); /* Make RHT entry's LXT table update visible */
1155 
1156 	rhte->lxt_cnt = rhte_src->lxt_cnt;
1157 	dma_wmb(); /* Make RHT entry's LXT table size update visible */
1158 
1159 	rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
1160 	if (unlikely(rc)) {
1161 		rc = -EAGAIN;
1162 		goto err2;
1163 	}
1164 
1165 out:
1166 	if (locked)
1167 		mutex_unlock(&blka->mutex);
1168 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1169 	return rc;
1170 err2:
1171 	/* Reset the RHTE */
1172 	rhte->lxt_cnt = 0;
1173 	dma_wmb();
1174 	rhte->lxt_start = NULL;
1175 	dma_wmb();
1176 err:
1177 	/* free the clones already made */
1178 	for (j = 0; j < i; j++) {
1179 		aun = (lxt[j].rlba_base >> MC_CHUNK_SHIFT);
1180 		ba_free(&blka->ba_lun, aun);
1181 	}
1182 	kfree(lxt);
1183 	goto out;
1184 }
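
/*
 * Sharing model illustrated (hypothetical chunk numbers): if the source LXT
 * maps chunks {3, 7}, the destination receives identical rlba_base values
 * and ba_clone() bumps aun_clone_map[3] and aun_clone_map[7].  A later
 * release from either context goes through ba_free(), which only returns a
 * chunk to the bitmap once its clone count has dropped back to zero, so the
 * two contexts can share the underlying physical space.
 */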
1185 
1186 /**
1187  * cxlflash_disk_clone() - clone a context by making snapshot of another
1188  * @sdev:	SCSI device associated with LUN owning virtual LUN.
1189  * @clone:	Clone ioctl data structure.
1190  *
1191  * This routine effectively performs cxlflash_disk_open operation for each
1192  * in-use virtual resource in the source context. Note that the destination
1193  * context must be in pristine state and cannot have any resource handles
1194  * open at the time of the clone.
1195  *
1196  * Return: 0 on success, -errno on failure
1197  */
1198 int cxlflash_disk_clone(struct scsi_device *sdev,
1199 			struct dk_cxlflash_clone *clone)
1200 {
1201 	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
1202 	struct device *dev = &cfg->dev->dev;
1203 	struct llun_info *lli = sdev->hostdata;
1204 	struct glun_info *gli = lli->parent;
1205 	struct blka *blka = &gli->blka;
1206 	struct afu *afu = cfg->afu;
1207 	struct dk_cxlflash_release release = { { 0 }, 0 };
1208 
1209 	struct ctx_info *ctxi_src = NULL,
1210 			*ctxi_dst = NULL;
1211 	struct lun_access *lun_access_src, *lun_access_dst;
1212 	u32 perms;
1213 	u64 ctxid_src = DECODE_CTXID(clone->context_id_src),
1214 	    ctxid_dst = DECODE_CTXID(clone->context_id_dst),
1215 	    rctxid_src = clone->context_id_src,
1216 	    rctxid_dst = clone->context_id_dst;
1217 	int i, j;
1218 	int rc = 0;
1219 	bool found;
1220 	LIST_HEAD(sidecar);
1221 
1222 	dev_dbg(dev, "%s: ctxid_src=%llu ctxid_dst=%llu\n",
1223 		__func__, ctxid_src, ctxid_dst);
1224 
1225 	/* Do not clone yourself */
1226 	if (unlikely(rctxid_src == rctxid_dst)) {
1227 		rc = -EINVAL;
1228 		goto out;
1229 	}
1230 
1231 	if (unlikely(gli->mode != MODE_VIRTUAL)) {
1232 		rc = -EINVAL;
1233 		dev_dbg(dev, "%s: Only supported on virtual LUNs mode=%u\n",
1234 			__func__, gli->mode);
1235 		goto out;
1236 	}
1237 
1238 	ctxi_src = get_context(cfg, rctxid_src, lli, CTX_CTRL_CLONE);
1239 	ctxi_dst = get_context(cfg, rctxid_dst, lli, 0);
1240 	if (unlikely(!ctxi_src || !ctxi_dst)) {
1241 		dev_dbg(dev, "%s: Bad context ctxid_src=%llu ctxid_dst=%llu\n",
1242 			__func__, ctxid_src, ctxid_dst);
1243 		rc = -EINVAL;
1244 		goto out;
1245 	}
1246 
1247 	/* Verify there is no open resource handle in the destination context */
1248 	for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
1249 		if (ctxi_dst->rht_start[i].nmask != 0) {
1250 			rc = -EINVAL;
1251 			goto out;
1252 		}
1253 
1254 	/* Clone LUN access list */
1255 	list_for_each_entry(lun_access_src, &ctxi_src->luns, list) {
1256 		found = false;
1257 		list_for_each_entry(lun_access_dst, &ctxi_dst->luns, list)
1258 			if (lun_access_dst->sdev == lun_access_src->sdev) {
1259 				found = true;
1260 				break;
1261 			}
1262 
1263 		if (!found) {
1264 			lun_access_dst = kzalloc(sizeof(*lun_access_dst),
1265 						 GFP_KERNEL);
1266 			if (unlikely(!lun_access_dst)) {
1267 				dev_err(dev, "%s: lun_access allocation fail\n",
1268 					__func__);
1269 				rc = -ENOMEM;
1270 				goto out;
1271 			}
1272 
1273 			*lun_access_dst = *lun_access_src;
1274 			list_add(&lun_access_dst->list, &sidecar);
1275 		}
1276 	}
1277 
1278 	if (unlikely(!ctxi_src->rht_out)) {
1279 		dev_dbg(dev, "%s: Nothing to clone\n", __func__);
1280 		goto out_success;
1281 	}
1282 
1283 	/* User specified permission on attach */
1284 	perms = ctxi_dst->rht_perms;
1285 
1286 	/*
1287 	 * Copy over checked-out RHT (and their associated LXT) entries by
1288 	 * hand, stopping after we've copied all outstanding entries and
1289 	 * cleaning up if the clone fails.
1290 	 *
1291 	 * Note: This loop is equivalent to performing cxlflash_disk_open and
1292 	 * cxlflash_vlun_resize. As such, LUN accounting needs to be taken into
1293 	 * account by attaching after each successful RHT entry clone. In the
1294 	 * event that a clone failure is experienced, the LUN detach is handled
1295 	 * via the cleanup performed by _cxlflash_disk_release.
1296 	 */
1297 	for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
1298 		if (ctxi_src->rht_out == ctxi_dst->rht_out)
1299 			break;
1300 		if (ctxi_src->rht_start[i].nmask == 0)
1301 			continue;
1302 
1303 		/* Consume a destination RHT entry */
1304 		ctxi_dst->rht_out++;
1305 		ctxi_dst->rht_start[i].nmask = ctxi_src->rht_start[i].nmask;
1306 		ctxi_dst->rht_start[i].fp =
1307 		    SISL_RHT_FP_CLONE(ctxi_src->rht_start[i].fp, perms);
1308 		ctxi_dst->rht_lun[i] = ctxi_src->rht_lun[i];
1309 
1310 		rc = clone_lxt(afu, blka, ctxid_dst, i,
1311 			       &ctxi_dst->rht_start[i],
1312 			       &ctxi_src->rht_start[i]);
1313 		if (rc) {
1314 			marshal_clone_to_rele(clone, &release);
1315 			for (j = 0; j < i; j++) {
1316 				release.rsrc_handle = j;
1317 				_cxlflash_disk_release(sdev, ctxi_dst,
1318 						       &release);
1319 			}
1320 
1321 			/* Put back the one we failed on */
1322 			rhte_checkin(ctxi_dst, &ctxi_dst->rht_start[i]);
1323 			goto err;
1324 		}
1325 
1326 		cxlflash_lun_attach(gli, gli->mode, false);
1327 	}
1328 
1329 out_success:
1330 	list_splice(&sidecar, &ctxi_dst->luns);
1331 
1332 	/* fall through */
1333 out:
1334 	if (ctxi_src)
1335 		put_context(ctxi_src);
1336 	if (ctxi_dst)
1337 		put_context(ctxi_dst);
1338 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1339 	return rc;
1340 
1341 err:
1342 	list_for_each_entry_safe(lun_access_src, lun_access_dst, &sidecar, list)
1343 		kfree(lun_access_src);
1344 	goto out;
1345 }
1346