1 /*
2  * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
3  * Copyright (C) 2014 Red Hat, Inc.
4  * Copyright (C) 2015 Arrikto, Inc.
5  * Copyright (C) 2017 Chinamobile, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program; if not, write to the Free Software Foundation, Inc.,
18  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19  */
20 
21 #include <linux/spinlock.h>
22 #include <linux/module.h>
23 #include <linux/idr.h>
24 #include <linux/kernel.h>
25 #include <linux/timer.h>
26 #include <linux/parser.h>
27 #include <linux/vmalloc.h>
28 #include <linux/uio_driver.h>
29 #include <linux/radix-tree.h>
30 #include <linux/stringify.h>
31 #include <linux/bitops.h>
32 #include <linux/highmem.h>
33 #include <linux/configfs.h>
34 #include <linux/mutex.h>
35 #include <linux/kthread.h>
36 #include <net/genetlink.h>
37 #include <scsi/scsi_common.h>
38 #include <scsi/scsi_proto.h>
39 #include <target/target_core_base.h>
40 #include <target/target_core_fabric.h>
41 #include <target/target_core_backend.h>
42 
43 #include <linux/target_core_user.h>
44 
45 /*
 * Define a shared-memory interface for LIO to pass SCSI commands and
 * data to userspace for processing. This allows backends that are too
 * complex for in-kernel support to be implemented in userspace instead.
49  *
50  * It uses the UIO framework to do a lot of the device-creation and
51  * introspection work for us.
52  *
53  * See the .h file for how the ring is laid out. Note that while the
54  * command ring is defined, the particulars of the data area are
55  * not. Offset values in the command entry point to other locations
56  * internal to the mmap()ed area. There is separate space outside the
57  * command ring for data buffers. This leaves maximum flexibility for
58  * moving buffer allocations, or even page flipping or other
59  * allocation techniques, without altering the command ring layout.
60  *
61  * SECURITY:
62  * The user process must be assumed to be malicious. There's no way to
63  * prevent it breaking the command ring protocol if it wants, but in
64  * order to prevent other issues we must only ever read *data* from
65  * the shared memory area, not offsets or sizes. This applies to
66  * command ring entries as well as the mailbox. Extra code needed for
67  * this may have a 'UAM' comment.
68  */
69 
70 #define TCMU_TIME_OUT (30 * MSEC_PER_SEC)
71 
/* The command area size is fixed at 8MB */
73 #define CMDR_SIZE (8 * 1024 * 1024)
74 
/*
 * For the data area, the block size is PAGE_SIZE and
 * the maximum total size is 256K * PAGE_SIZE.
 */
79 #define DATA_BLOCK_SIZE PAGE_SIZE
80 #define DATA_BLOCK_BITS (256 * 1024)
81 #define DATA_SIZE (DATA_BLOCK_BITS * DATA_BLOCK_SIZE)
82 #define DATA_BLOCK_INIT_BITS 128
83 
84 /* The total size of the ring is 8M + 256K * PAGE_SIZE */
85 #define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE)
86 
/* Default maximum number of global data blocks (512K * PAGE_SIZE total) */
88 #define TCMU_GLOBAL_MAX_BLOCKS (512 * 1024)
89 
90 static struct device *tcmu_root_device;
91 
92 struct tcmu_hba {
93 	u32 host_id;
94 };
95 
96 #define TCMU_CONFIG_LEN 256
97 
98 struct tcmu_dev {
99 	struct list_head node;
100 
101 	struct se_device se_dev;
102 
103 	char *name;
104 	struct se_hba *hba;
105 
106 #define TCMU_DEV_BIT_OPEN 0
107 #define TCMU_DEV_BIT_BROKEN 1
108 	unsigned long flags;
109 
110 	struct uio_info uio_info;
111 
112 	struct inode *inode;
113 
114 	struct tcmu_mailbox *mb_addr;
115 	size_t dev_size;
116 	u32 cmdr_size;
117 	u32 cmdr_last_cleaned;
118 	/* Offset of data area from start of mb */
119 	/* Must add data_off and mb_addr to get the address */
120 	size_t data_off;
121 	size_t data_size;
122 
123 	wait_queue_head_t wait_cmdr;
124 	struct mutex cmdr_lock;
125 
126 	bool waiting_global;
127 	uint32_t dbi_max;
128 	uint32_t dbi_thresh;
129 	DECLARE_BITMAP(data_bitmap, DATA_BLOCK_BITS);
130 	struct radix_tree_root data_blocks;
131 
132 	struct idr commands;
133 	spinlock_t commands_lock;
134 
135 	struct timer_list timeout;
136 	unsigned int cmd_time_out;
137 
138 	char dev_config[TCMU_CONFIG_LEN];
139 };
140 
141 #define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)
142 
143 #define CMDR_OFF sizeof(struct tcmu_mailbox)
144 
145 struct tcmu_cmd {
146 	struct se_cmd *se_cmd;
147 	struct tcmu_dev *tcmu_dev;
148 
149 	uint16_t cmd_id;
150 
	/*
	 * Can't use se_cmd when cleaning up expired cmds, because if
	 * the cmd has been completed then accessing se_cmd is off limits.
	 */
153 	uint32_t dbi_cnt;
154 	uint32_t dbi_cur;
155 	uint32_t *dbi;
156 
157 	unsigned long deadline;
158 
159 #define TCMU_CMD_BIT_EXPIRED 0
160 	unsigned long flags;
161 };
162 
163 static struct task_struct *unmap_thread;
164 static wait_queue_head_t unmap_wait;
165 static DEFINE_MUTEX(root_udev_mutex);
166 static LIST_HEAD(root_udev);
167 
168 static atomic_t global_db_count = ATOMIC_INIT(0);
169 
170 static struct kmem_cache *tcmu_cmd_cache;
171 
172 /* multicast group */
173 enum tcmu_multicast_groups {
174 	TCMU_MCGRP_CONFIG,
175 };
176 
177 static const struct genl_multicast_group tcmu_mcgrps[] = {
178 	[TCMU_MCGRP_CONFIG] = { .name = "config", },
179 };
180 
181 /* Our generic netlink family */
182 static struct genl_family tcmu_genl_family __ro_after_init = {
183 	.module = THIS_MODULE,
184 	.hdrsize = 0,
185 	.name = "TCM-USER",
186 	.version = 1,
187 	.maxattr = TCMU_ATTR_MAX,
188 	.mcgrps = tcmu_mcgrps,
189 	.n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
190 	.netnsok = true,
191 };
192 
193 #define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
194 #define tcmu_cmd_reset_dbi_cur(cmd) tcmu_cmd_set_dbi_cur(cmd, 0)
195 #define tcmu_cmd_set_dbi(cmd, index) ((cmd)->dbi[(cmd)->dbi_cur++] = (index))
196 #define tcmu_cmd_get_dbi(cmd) ((cmd)->dbi[(cmd)->dbi_cur++])
197 
198 static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len)
199 {
200 	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
201 	uint32_t i;
202 
203 	for (i = 0; i < len; i++)
204 		clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap);
205 }
206 
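/*
 * Pick the first free data block index below dbi_thresh, allocate (or
 * reuse) the backing page in the radix tree, and record the index in the
 * command's dbi[] array. Returns false if no index is free or the global
 * block limit has been reached. Called with cmdr_lock held.
 */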
207 static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
208 					struct tcmu_cmd *tcmu_cmd)
209 {
210 	struct page *page;
211 	int ret, dbi;
212 
213 	dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh);
214 	if (dbi == udev->dbi_thresh)
215 		return false;
216 
217 	page = radix_tree_lookup(&udev->data_blocks, dbi);
218 	if (!page) {
219 
220 		if (atomic_add_return(1, &global_db_count) >
221 					TCMU_GLOBAL_MAX_BLOCKS) {
222 			atomic_dec(&global_db_count);
223 			return false;
224 		}
225 
226 		/* try to get new page from the mm */
227 		page = alloc_page(GFP_KERNEL);
228 		if (!page)
229 			return false;
230 
231 		ret = radix_tree_insert(&udev->data_blocks, dbi, page);
232 		if (ret) {
233 			__free_page(page);
234 			return false;
235 		}
236 
237 	}
238 
239 	if (dbi > udev->dbi_max)
240 		udev->dbi_max = dbi;
241 
242 	set_bit(dbi, udev->data_bitmap);
243 	tcmu_cmd_set_dbi(tcmu_cmd, dbi);
244 
245 	return true;
246 }
247 
248 static bool tcmu_get_empty_blocks(struct tcmu_dev *udev,
249 				  struct tcmu_cmd *tcmu_cmd)
250 {
251 	int i;
252 
253 	udev->waiting_global = false;
254 
255 	for (i = tcmu_cmd->dbi_cur; i < tcmu_cmd->dbi_cnt; i++) {
256 		if (!tcmu_get_empty_block(udev, tcmu_cmd))
257 			goto err;
258 	}
259 	return true;
260 
261 err:
262 	udev->waiting_global = true;
263 	/* Try to wake up the unmap thread */
264 	wake_up(&unmap_wait);
265 	return false;
266 }
267 
268 static inline struct page *
269 tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
270 {
271 	return radix_tree_lookup(&udev->data_blocks, dbi);
272 }
273 
274 static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
275 {
276 	kfree(tcmu_cmd->dbi);
277 	kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
278 }
279 
280 static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
281 {
282 	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
283 	size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);
284 
285 	if (se_cmd->se_cmd_flags & SCF_BIDI) {
286 		BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
287 		data_length += round_up(se_cmd->t_bidi_data_sg->length,
288 				DATA_BLOCK_SIZE);
289 	}
290 
291 	return data_length;
292 }
293 
294 static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
295 {
296 	size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
297 
298 	return data_length / DATA_BLOCK_SIZE;
299 }
300 
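/*
 * Allocate a tcmu_cmd for an incoming se_cmd: set its deadline (if a cmd
 * timeout is configured), size the dbi[] array for the command's possibly
 * bidi data length, and allocate a 16-bit cmd_id from the per-device idr.
 */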
301 static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
302 {
303 	struct se_device *se_dev = se_cmd->se_dev;
304 	struct tcmu_dev *udev = TCMU_DEV(se_dev);
305 	struct tcmu_cmd *tcmu_cmd;
306 	int cmd_id;
307 
308 	tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
309 	if (!tcmu_cmd)
310 		return NULL;
311 
312 	tcmu_cmd->se_cmd = se_cmd;
313 	tcmu_cmd->tcmu_dev = udev;
314 	if (udev->cmd_time_out)
315 		tcmu_cmd->deadline = jiffies +
316 					msecs_to_jiffies(udev->cmd_time_out);
317 
318 	tcmu_cmd_reset_dbi_cur(tcmu_cmd);
319 	tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
320 	tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t),
321 				GFP_KERNEL);
322 	if (!tcmu_cmd->dbi) {
323 		kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
324 		return NULL;
325 	}
326 
327 	idr_preload(GFP_KERNEL);
328 	spin_lock_irq(&udev->commands_lock);
329 	cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 0,
330 		USHRT_MAX, GFP_NOWAIT);
331 	spin_unlock_irq(&udev->commands_lock);
332 	idr_preload_end();
333 
334 	if (cmd_id < 0) {
335 		tcmu_free_cmd(tcmu_cmd);
336 		return NULL;
337 	}
338 	tcmu_cmd->cmd_id = cmd_id;
339 
340 	return tcmu_cmd;
341 }
342 
343 static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
344 {
345 	unsigned long offset = offset_in_page(vaddr);
346 
347 	size = round_up(size+offset, PAGE_SIZE);
348 	vaddr -= offset;
349 
	while (size) {
		flush_dcache_page(virt_to_page(vaddr));
		size -= PAGE_SIZE;
		vaddr += PAGE_SIZE;
	}
354 }
355 
356 /*
357  * Some ring helper functions. We don't assume size is a power of 2 so
358  * we can't use circ_buf.h.
359  */
360 static inline size_t spc_used(size_t head, size_t tail, size_t size)
361 {
362 	int diff = head - tail;
363 
364 	if (diff >= 0)
365 		return diff;
366 	else
367 		return size + diff;
368 }
369 
370 static inline size_t spc_free(size_t head, size_t tail, size_t size)
371 {
372 	/* Keep 1 byte unused or we can't tell full from empty */
373 	return (size - spc_used(head, tail, size) - 1);
374 }
375 
376 static inline size_t head_to_end(size_t head, size_t size)
377 {
378 	return size - head;
379 }
380 
381 static inline void new_iov(struct iovec **iov, int *iov_cnt,
382 			   struct tcmu_dev *udev)
383 {
384 	struct iovec *iovec;
385 
386 	if (*iov_cnt != 0)
387 		(*iov)++;
388 	(*iov_cnt)++;
389 
390 	iovec = *iov;
391 	memset(iovec, 0, sizeof(struct iovec));
392 }
393 
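/*
 * Advance a ring index by 'used' bytes, wrapping at 'size'. The release
 * store ensures the entry contents written beforehand are visible before
 * the new index value.
 */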
394 #define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
395 
396 /* offset is relative to mb_addr */
397 static inline size_t get_block_offset_user(struct tcmu_dev *dev,
398 		int dbi, int remaining)
399 {
400 	return dev->data_off + dbi * DATA_BLOCK_SIZE +
401 		DATA_BLOCK_SIZE - remaining;
402 }
403 
404 static inline size_t iov_tail(struct tcmu_dev *udev, struct iovec *iov)
405 {
406 	return (size_t)iov->iov_base + iov->iov_len;
407 }
408 
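/*
 * Walk the scatterlist one data block at a time, building the iovec array
 * that userspace will see. Data is copied into the data area blocks only
 * when copy_data is true; otherwise just the space and iovecs are set up.
 */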
409 static int scatter_data_area(struct tcmu_dev *udev,
410 	struct tcmu_cmd *tcmu_cmd, struct scatterlist *data_sg,
411 	unsigned int data_nents, struct iovec **iov,
412 	int *iov_cnt, bool copy_data)
413 {
414 	int i, dbi;
415 	int block_remaining = 0;
416 	void *from, *to = NULL;
417 	size_t copy_bytes, to_offset, offset;
418 	struct scatterlist *sg;
419 	struct page *page;
420 
421 	for_each_sg(data_sg, sg, data_nents, i) {
422 		int sg_remaining = sg->length;
423 		from = kmap_atomic(sg_page(sg)) + sg->offset;
424 		while (sg_remaining > 0) {
425 			if (block_remaining == 0) {
426 				if (to)
427 					kunmap_atomic(to);
428 
429 				block_remaining = DATA_BLOCK_SIZE;
430 				dbi = tcmu_cmd_get_dbi(tcmu_cmd);
431 				page = tcmu_get_block_page(udev, dbi);
432 				to = kmap_atomic(page);
433 			}
434 
435 			copy_bytes = min_t(size_t, sg_remaining,
436 					block_remaining);
437 			to_offset = get_block_offset_user(udev, dbi,
438 					block_remaining);
			offset = DATA_BLOCK_SIZE - block_remaining;
441 
442 			if (*iov_cnt != 0 &&
443 			    to_offset == iov_tail(udev, *iov)) {
444 				(*iov)->iov_len += copy_bytes;
445 			} else {
446 				new_iov(iov, iov_cnt, udev);
447 				(*iov)->iov_base = (void __user *)to_offset;
448 				(*iov)->iov_len = copy_bytes;
449 			}
			if (copy_data) {
				memcpy(to + offset,
				       from + sg->length - sg_remaining,
				       copy_bytes);
				tcmu_flush_dcache_range(to, copy_bytes);
			}
455 			sg_remaining -= copy_bytes;
456 			block_remaining -= copy_bytes;
457 		}
458 		kunmap_atomic(from - sg->offset);
459 	}
460 	if (to)
461 		kunmap_atomic(to);
462 
463 	return 0;
464 }
465 
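/*
 * Copy Data-In from the data area blocks back into the se_cmd's
 * scatterlist. For the bidi case the Data-In scatterlist is used and the
 * leading Data-Out blocks are skipped first.
 */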
466 static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
467 			     bool bidi)
468 {
469 	struct se_cmd *se_cmd = cmd->se_cmd;
470 	int i, dbi;
471 	int block_remaining = 0;
472 	void *from = NULL, *to;
473 	size_t copy_bytes, offset;
474 	struct scatterlist *sg, *data_sg;
475 	struct page *page;
476 	unsigned int data_nents;
477 	uint32_t count = 0;
478 
479 	if (!bidi) {
480 		data_sg = se_cmd->t_data_sg;
481 		data_nents = se_cmd->t_data_nents;
	} else {
		/*
		 * For the bidi case, the first count blocks hold the
		 * Data-Out buffer, so they must be skipped before
		 * gathering the Data-In buffer.
		 */
489 		count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
490 
491 		data_sg = se_cmd->t_bidi_data_sg;
492 		data_nents = se_cmd->t_bidi_data_nents;
493 	}
494 
495 	tcmu_cmd_set_dbi_cur(cmd, count);
496 
497 	for_each_sg(data_sg, sg, data_nents, i) {
498 		int sg_remaining = sg->length;
499 		to = kmap_atomic(sg_page(sg)) + sg->offset;
500 		while (sg_remaining > 0) {
501 			if (block_remaining == 0) {
502 				if (from)
503 					kunmap_atomic(from);
504 
505 				block_remaining = DATA_BLOCK_SIZE;
506 				dbi = tcmu_cmd_get_dbi(cmd);
507 				page = tcmu_get_block_page(udev, dbi);
508 				from = kmap_atomic(page);
509 			}
510 			copy_bytes = min_t(size_t, sg_remaining,
511 					block_remaining);
			offset = DATA_BLOCK_SIZE - block_remaining;
			tcmu_flush_dcache_range(from, copy_bytes);
			memcpy(to + sg->length - sg_remaining, from + offset,
					copy_bytes);
517 
518 			sg_remaining -= copy_bytes;
519 			block_remaining -= copy_bytes;
520 		}
521 		kunmap_atomic(to - sg->offset);
522 	}
523 	if (from)
524 		kunmap_atomic(from);
525 }
526 
527 static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh)
528 {
529 	return DATA_BLOCK_SIZE * (thresh - bitmap_weight(bitmap, thresh));
530 }
531 
532 /*
533  * We can't queue a command until we have space available on the cmd ring *and*
534  * space available on the data area.
535  *
536  * Called with ring lock held.
537  */
538 static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
539 		size_t cmd_size, size_t data_needed)
540 {
541 	struct tcmu_mailbox *mb = udev->mb_addr;
542 	uint32_t blocks_needed = (data_needed + DATA_BLOCK_SIZE - 1)
543 				/ DATA_BLOCK_SIZE;
544 	size_t space, cmd_needed;
545 	u32 cmd_head;
546 
547 	tcmu_flush_dcache_range(mb, sizeof(*mb));
548 
549 	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
550 
	/*
	 * If the cmd end-of-ring space is too small then we need space for
	 * a PAD plus the original cmd - cmds are internally contiguous.
	 */
555 	if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
556 		cmd_needed = cmd_size;
557 	else
558 		cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);
559 
560 	space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
561 	if (space < cmd_needed) {
562 		pr_debug("no cmd space: %u %u %u\n", cmd_head,
563 		       udev->cmdr_last_cleaned, udev->cmdr_size);
564 		return false;
565 	}
566 
567 	/* try to check and get the data blocks as needed */
568 	space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh);
569 	if (space < data_needed) {
570 		unsigned long blocks_left = DATA_BLOCK_BITS - udev->dbi_thresh;
571 		unsigned long grow;
572 
573 		if (blocks_left < blocks_needed) {
			pr_debug("no data space: only %lu available, but asked for %zu\n",
575 					blocks_left * DATA_BLOCK_SIZE,
576 					data_needed);
577 			return false;
578 		}
579 
580 		/* Try to expand the thresh */
581 		if (!udev->dbi_thresh) {
582 			/* From idle state */
583 			uint32_t init_thresh = DATA_BLOCK_INIT_BITS;
584 
585 			udev->dbi_thresh = max(blocks_needed, init_thresh);
586 		} else {
587 			/*
588 			 * Grow the data area by max(blocks needed,
589 			 * dbi_thresh / 2), but limited to the max
590 			 * DATA_BLOCK_BITS size.
591 			 */
592 			grow = max(blocks_needed, udev->dbi_thresh / 2);
593 			udev->dbi_thresh += grow;
594 			if (udev->dbi_thresh > DATA_BLOCK_BITS)
595 				udev->dbi_thresh = DATA_BLOCK_BITS;
596 		}
597 	}
598 
599 	if (!tcmu_get_empty_blocks(udev, cmd))
600 		return false;
601 
602 	return true;
603 }
604 
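/*
 * The base command size covers the entry header plus the iov array, but is
 * never smaller than a full struct tcmu_cmd_entry, which reserves room for
 * the response (including sense data).
 */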
605 static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
606 {
607 	return max(offsetof(struct tcmu_cmd_entry, req.iov[iov_cnt]),
608 			sizeof(struct tcmu_cmd_entry));
609 }
610 
611 static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
612 					   size_t base_command_size)
613 {
614 	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
615 	size_t command_size;
616 
617 	command_size = base_command_size +
618 		round_up(scsi_command_size(se_cmd->t_task_cdb),
619 				TCMU_OP_ALIGN_SIZE);
620 
621 	WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1));
622 
623 	return command_size;
624 }
625 
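/*
 * Queue a command on the ring: wait for command-ring and data-area space,
 * insert a PAD entry if the command would wrap, scatter the data and build
 * the iovs, copy in the CDB, then advance cmd_head and notify userspace
 * via UIO.
 */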
626 static sense_reason_t
627 tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
628 {
629 	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
630 	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
631 	size_t base_command_size, command_size;
632 	struct tcmu_mailbox *mb;
633 	struct tcmu_cmd_entry *entry;
634 	struct iovec *iov;
635 	int iov_cnt, ret;
636 	uint32_t cmd_head;
637 	uint64_t cdb_off;
638 	bool copy_to_data_area;
639 	size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
640 
641 	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
642 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
643 
	/*
	 * Must be a certain minimum size for response sense info, but
	 * also may be larger if the iov array is large.
	 *
	 * We prepare as many iovs as possible here, because it is
	 * expensive to tell exactly how many regions are free in the
	 * bitmap and the global data pool; the size calculated here is
	 * only used for the space checks.
	 *
	 * The size will be recalculated later, based on what is actually
	 * needed, to save cmd area memory.
	 */
656 	base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
657 	command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
658 
659 	mutex_lock(&udev->cmdr_lock);
660 
661 	mb = udev->mb_addr;
662 	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
663 	if ((command_size > (udev->cmdr_size / 2)) ||
664 	    data_length > udev->data_size) {
665 		pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
666 			"cmd ring/data area\n", command_size, data_length,
667 			udev->cmdr_size, udev->data_size);
668 		mutex_unlock(&udev->cmdr_lock);
669 		return TCM_INVALID_CDB_FIELD;
670 	}
671 
672 	while (!is_ring_space_avail(udev, tcmu_cmd, command_size, data_length)) {
673 		int ret;
674 		DEFINE_WAIT(__wait);
675 
676 		prepare_to_wait(&udev->wait_cmdr, &__wait, TASK_INTERRUPTIBLE);
677 
678 		pr_debug("sleeping for ring space\n");
679 		mutex_unlock(&udev->cmdr_lock);
680 		if (udev->cmd_time_out)
681 			ret = schedule_timeout(
682 					msecs_to_jiffies(udev->cmd_time_out));
683 		else
684 			ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
685 		finish_wait(&udev->wait_cmdr, &__wait);
686 		if (!ret) {
687 			pr_warn("tcmu: command timed out\n");
688 			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
689 		}
690 
691 		mutex_lock(&udev->cmdr_lock);
692 
693 		/* We dropped cmdr_lock, cmd_head is stale */
694 		cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
695 	}
696 
697 	/* Insert a PAD if end-of-ring space is too small */
698 	if (head_to_end(cmd_head, udev->cmdr_size) < command_size) {
699 		size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);
700 
701 		entry = (void *) mb + CMDR_OFF + cmd_head;
702 		tcmu_flush_dcache_range(entry, sizeof(*entry));
703 		tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD);
704 		tcmu_hdr_set_len(&entry->hdr.len_op, pad_size);
705 		entry->hdr.cmd_id = 0; /* not used for PAD */
706 		entry->hdr.kflags = 0;
707 		entry->hdr.uflags = 0;
708 
709 		UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
710 
711 		cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
712 		WARN_ON(cmd_head != 0);
713 	}
714 
715 	entry = (void *) mb + CMDR_OFF + cmd_head;
716 	tcmu_flush_dcache_range(entry, sizeof(*entry));
717 	tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
718 	entry->hdr.cmd_id = tcmu_cmd->cmd_id;
719 	entry->hdr.kflags = 0;
720 	entry->hdr.uflags = 0;
721 
722 	/* Handle allocating space from the data area */
723 	tcmu_cmd_reset_dbi_cur(tcmu_cmd);
724 	iov = &entry->req.iov[0];
725 	iov_cnt = 0;
726 	copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
727 		|| se_cmd->se_cmd_flags & SCF_BIDI);
728 	ret = scatter_data_area(udev, tcmu_cmd, se_cmd->t_data_sg,
729 				se_cmd->t_data_nents, &iov, &iov_cnt,
730 				copy_to_data_area);
731 	if (ret) {
732 		tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
733 		mutex_unlock(&udev->cmdr_lock);
734 
735 		pr_err("tcmu: alloc and scatter data failed\n");
736 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
737 	}
738 	entry->req.iov_cnt = iov_cnt;
739 	entry->req.iov_dif_cnt = 0;
740 
741 	/* Handle BIDI commands */
742 	if (se_cmd->se_cmd_flags & SCF_BIDI) {
743 		iov_cnt = 0;
744 		iov++;
745 		ret = scatter_data_area(udev, tcmu_cmd,
746 					se_cmd->t_bidi_data_sg,
747 					se_cmd->t_bidi_data_nents,
748 					&iov, &iov_cnt, false);
749 		if (ret) {
750 			tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
751 			mutex_unlock(&udev->cmdr_lock);
752 
753 			pr_err("tcmu: alloc and scatter bidi data failed\n");
754 			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
755 		}
756 		entry->req.iov_bidi_cnt = iov_cnt;
757 	}
758 
	/*
	 * Recalculate the command's base size and total size according
	 * to the number of iovs actually used.
	 */
763 	base_command_size = tcmu_cmd_get_base_cmd_size(entry->req.iov_cnt +
764 						       entry->req.iov_bidi_cnt);
765 	command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
766 
767 	tcmu_hdr_set_len(&entry->hdr.len_op, command_size);
768 
769 	/* All offsets relative to mb_addr, not start of entry! */
770 	cdb_off = CMDR_OFF + cmd_head + base_command_size;
771 	memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
772 	entry->req.cdb_off = cdb_off;
773 	tcmu_flush_dcache_range(entry, sizeof(*entry));
774 
775 	UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
776 	tcmu_flush_dcache_range(mb, sizeof(*mb));
777 	mutex_unlock(&udev->cmdr_lock);
778 
779 	/* TODO: only if FLUSH and FUA? */
780 	uio_event_notify(&udev->uio_info);
781 
782 	if (udev->cmd_time_out)
783 		mod_timer(&udev->timeout, round_jiffies_up(jiffies +
784 			  msecs_to_jiffies(udev->cmd_time_out)));
785 
786 	return TCM_NO_SENSE;
787 }
788 
789 static sense_reason_t
790 tcmu_queue_cmd(struct se_cmd *se_cmd)
791 {
792 	struct se_device *se_dev = se_cmd->se_dev;
793 	struct tcmu_dev *udev = TCMU_DEV(se_dev);
794 	struct tcmu_cmd *tcmu_cmd;
795 	sense_reason_t ret;
796 
797 	tcmu_cmd = tcmu_alloc_cmd(se_cmd);
798 	if (!tcmu_cmd)
799 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
800 
801 	ret = tcmu_queue_cmd_ring(tcmu_cmd);
802 	if (ret != TCM_NO_SENSE) {
803 		pr_err("TCMU: Could not queue command\n");
804 		spin_lock_irq(&udev->commands_lock);
805 		idr_remove(&udev->commands, tcmu_cmd->cmd_id);
806 		spin_unlock_irq(&udev->commands_lock);
807 
808 		tcmu_free_cmd(tcmu_cmd);
809 	}
810 
811 	return ret;
812 }
813 
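/*
 * Complete one ring entry: copy sense data on CHECK CONDITION, gather
 * Data-In for reads and bidi commands, complete the se_cmd, then release
 * the command's data blocks and free it. Expired commands were already
 * completed by the timeout handler and only need their resources freed.
 */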
814 static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
815 {
816 	struct se_cmd *se_cmd = cmd->se_cmd;
817 	struct tcmu_dev *udev = cmd->tcmu_dev;
818 
819 	/*
820 	 * cmd has been completed already from timeout, just reclaim
821 	 * data area space and free cmd
822 	 */
823 	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
824 		goto out;
825 
826 	tcmu_cmd_reset_dbi_cur(cmd);
827 
828 	if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
829 		pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
830 			cmd->se_cmd);
831 		entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
832 	} else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
833 		memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer,
834 			       se_cmd->scsi_sense_length);
835 	} else if (se_cmd->se_cmd_flags & SCF_BIDI) {
836 		/* Get Data-In buffer before clean up */
837 		gather_data_area(udev, cmd, true);
838 	} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
839 		gather_data_area(udev, cmd, false);
840 	} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
841 		/* TODO: */
842 	} else if (se_cmd->data_direction != DMA_NONE) {
843 		pr_warn("TCMU: data direction was %d!\n",
844 			se_cmd->data_direction);
845 	}
846 
847 	target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
848 
849 out:
850 	cmd->se_cmd = NULL;
851 	tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
852 	tcmu_free_cmd(cmd);
853 }
854 
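/*
 * Walk the ring from cmdr_last_cleaned up to the tail written by
 * userspace, skipping PAD entries and completing each finished command.
 * Returns the number of commands handled. Called with cmdr_lock held.
 */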
855 static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
856 {
857 	struct tcmu_mailbox *mb;
858 	int handled = 0;
859 
860 	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
861 		pr_err("ring broken, not handling completions\n");
862 		return 0;
863 	}
864 
865 	mb = udev->mb_addr;
866 	tcmu_flush_dcache_range(mb, sizeof(*mb));
867 
868 	while (udev->cmdr_last_cleaned != ACCESS_ONCE(mb->cmd_tail)) {
869 
870 		struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
871 		struct tcmu_cmd *cmd;
872 
873 		tcmu_flush_dcache_range(entry, sizeof(*entry));
874 
875 		if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) {
876 			UPDATE_HEAD(udev->cmdr_last_cleaned,
877 				    tcmu_hdr_get_len(entry->hdr.len_op),
878 				    udev->cmdr_size);
879 			continue;
880 		}
881 		WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);
882 
883 		spin_lock(&udev->commands_lock);
884 		cmd = idr_remove(&udev->commands, entry->hdr.cmd_id);
885 		spin_unlock(&udev->commands_lock);
886 
887 		if (!cmd) {
888 			pr_err("cmd_id not found, ring is broken\n");
889 			set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
890 			break;
891 		}
892 
893 		tcmu_handle_completion(cmd, entry);
894 
895 		UPDATE_HEAD(udev->cmdr_last_cleaned,
896 			    tcmu_hdr_get_len(entry->hdr.len_op),
897 			    udev->cmdr_size);
898 
899 		handled++;
900 	}
901 
902 	if (mb->cmd_tail == mb->cmd_head)
903 		del_timer(&udev->timeout); /* no more pending cmds */
904 
905 	wake_up(&udev->wait_cmdr);
906 
907 	return handled;
908 }
909 
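/*
 * idr_for_each() callback: fail a still-outstanding command whose deadline
 * has passed with CHECK CONDITION, and mark it expired so the eventual
 * ring completion only frees its resources.
 */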
910 static int tcmu_check_expired_cmd(int id, void *p, void *data)
911 {
912 	struct tcmu_cmd *cmd = p;
913 
914 	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
915 		return 0;
916 
917 	if (!time_after(jiffies, cmd->deadline))
918 		return 0;
919 
920 	set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
921 	target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION);
922 	cmd->se_cmd = NULL;
923 
924 	return 0;
925 }
926 
927 static void tcmu_device_timedout(unsigned long data)
928 {
929 	struct tcmu_dev *udev = (struct tcmu_dev *)data;
930 	unsigned long flags;
931 
932 	spin_lock_irqsave(&udev->commands_lock, flags);
933 	idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
934 	spin_unlock_irqrestore(&udev->commands_lock, flags);
935 
	/* Try to wake up the unmap thread */
937 	wake_up(&unmap_wait);
938 
939 	/*
940 	 * We don't need to wakeup threads on wait_cmdr since they have their
941 	 * own timeout.
942 	 */
943 }
944 
945 static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
946 {
947 	struct tcmu_hba *tcmu_hba;
948 
949 	tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL);
950 	if (!tcmu_hba)
951 		return -ENOMEM;
952 
953 	tcmu_hba->host_id = host_id;
954 	hba->hba_ptr = tcmu_hba;
955 
956 	return 0;
957 }
958 
959 static void tcmu_detach_hba(struct se_hba *hba)
960 {
961 	kfree(hba->hba_ptr);
962 	hba->hba_ptr = NULL;
963 }
964 
965 static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
966 {
967 	struct tcmu_dev *udev;
968 
969 	udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
970 	if (!udev)
971 		return NULL;
972 
973 	udev->name = kstrdup(name, GFP_KERNEL);
974 	if (!udev->name) {
975 		kfree(udev);
976 		return NULL;
977 	}
978 
979 	udev->hba = hba;
980 	udev->cmd_time_out = TCMU_TIME_OUT;
981 
982 	init_waitqueue_head(&udev->wait_cmdr);
983 	mutex_init(&udev->cmdr_lock);
984 
985 	idr_init(&udev->commands);
986 	spin_lock_init(&udev->commands_lock);
987 
988 	setup_timer(&udev->timeout, tcmu_device_timedout,
989 		(unsigned long)udev);
990 
991 	return &udev->se_dev;
992 }
993 
994 static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
995 {
996 	struct tcmu_dev *tcmu_dev = container_of(info, struct tcmu_dev, uio_info);
997 
998 	mutex_lock(&tcmu_dev->cmdr_lock);
999 	tcmu_handle_completions(tcmu_dev);
1000 	mutex_unlock(&tcmu_dev->cmdr_lock);
1001 
1002 	return 0;
1003 }
1004 
1005 /*
1006  * mmap code from uio.c. Copied here because we want to hook mmap()
1007  * and this stuff must come along.
1008  */
1009 static int tcmu_find_mem_index(struct vm_area_struct *vma)
1010 {
1011 	struct tcmu_dev *udev = vma->vm_private_data;
1012 	struct uio_info *info = &udev->uio_info;
1013 
1014 	if (vma->vm_pgoff < MAX_UIO_MAPS) {
1015 		if (info->mem[vma->vm_pgoff].size == 0)
1016 			return -1;
1017 		return (int)vma->vm_pgoff;
1018 	}
1019 	return -1;
1020 }
1021 
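/*
 * Look up the page backing a data block from the fault path, allocating a
 * zeroed page if userspace faulted on a block that no command currently
 * owns.
 */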
1022 static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
1023 {
1024 	struct page *page;
1025 	int ret;
1026 
1027 	mutex_lock(&udev->cmdr_lock);
1028 	page = tcmu_get_block_page(udev, dbi);
1029 	if (likely(page)) {
1030 		mutex_unlock(&udev->cmdr_lock);
1031 		return page;
1032 	}
1033 
	/*
	 * Normally we shouldn't get here: this only happens when
	 * userspace has touched blocks that are outside the tcmu_cmd's
	 * data iov[], in which case we return one zeroed page.
	 */
	pr_warn("Block(%u) outside cmd's iov[] has been touched!\n", dbi);
	pr_warn("This is most likely a userspace bug, please check it!\n");
1042 
1043 	if (dbi >= udev->dbi_thresh) {
		/* Extend udev->dbi_thresh to dbi + 1 */
1045 		udev->dbi_thresh = dbi + 1;
1046 		udev->dbi_max = dbi;
1047 	}
1048 
1049 	page = radix_tree_lookup(&udev->data_blocks, dbi);
1050 	if (!page) {
1051 		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
1052 		if (!page) {
1053 			mutex_unlock(&udev->cmdr_lock);
1054 			return NULL;
1055 		}
1056 
1057 		ret = radix_tree_insert(&udev->data_blocks, dbi, page);
1058 		if (ret) {
1059 			mutex_unlock(&udev->cmdr_lock);
1060 			__free_page(page);
1061 			return NULL;
1062 		}
1063 
		/*
		 * Since this case is rare in the page fault path, we
		 * allow global_db_count to exceed TCMU_GLOBAL_MAX_BLOCKS
		 * here rather than failing the fault.
		 */
1069 		atomic_inc(&global_db_count);
1070 	}
1071 	mutex_unlock(&udev->cmdr_lock);
1072 
1073 	return page;
1074 }
1075 
1076 static int tcmu_vma_fault(struct vm_fault *vmf)
1077 {
1078 	struct tcmu_dev *udev = vmf->vma->vm_private_data;
1079 	struct uio_info *info = &udev->uio_info;
1080 	struct page *page;
1081 	unsigned long offset;
1082 	void *addr;
1083 
1084 	int mi = tcmu_find_mem_index(vmf->vma);
1085 	if (mi < 0)
1086 		return VM_FAULT_SIGBUS;
1087 
1088 	/*
1089 	 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
1090 	 * to use mem[N].
1091 	 */
1092 	offset = (vmf->pgoff - mi) << PAGE_SHIFT;
1093 
1094 	if (offset < udev->data_off) {
1095 		/* For the vmalloc()ed cmd area pages */
1096 		addr = (void *)(unsigned long)info->mem[mi].addr + offset;
1097 		page = vmalloc_to_page(addr);
1098 	} else {
1099 		uint32_t dbi;
1100 
1101 		/* For the dynamically growing data area pages */
1102 		dbi = (offset - udev->data_off) / DATA_BLOCK_SIZE;
1103 		page = tcmu_try_get_block_page(udev, dbi);
1104 		if (!page)
1105 			return VM_FAULT_NOPAGE;
1106 	}
1107 
1108 	get_page(page);
1109 	vmf->page = page;
1110 	return 0;
1111 }
1112 
1113 static const struct vm_operations_struct tcmu_vm_ops = {
1114 	.fault = tcmu_vma_fault,
1115 };
1116 
1117 static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
1118 {
1119 	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1120 
1121 	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
1122 	vma->vm_ops = &tcmu_vm_ops;
1123 
1124 	vma->vm_private_data = udev;
1125 
1126 	/* Ensure the mmap is exactly the right size */
1127 	if (vma_pages(vma) != (TCMU_RING_SIZE >> PAGE_SHIFT))
1128 		return -EINVAL;
1129 
1130 	return 0;
1131 }
1132 
1133 static int tcmu_open(struct uio_info *info, struct inode *inode)
1134 {
1135 	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1136 
1137 	/* O_EXCL not supported for char devs, so fake it? */
1138 	if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags))
1139 		return -EBUSY;
1140 
1141 	udev->inode = inode;
1142 
1143 	pr_debug("open\n");
1144 
1145 	return 0;
1146 }
1147 
1148 static int tcmu_release(struct uio_info *info, struct inode *inode)
1149 {
1150 	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1151 
1152 	clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);
1153 
1154 	pr_debug("close\n");
1155 
1156 	return 0;
1157 }
1158 
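/*
 * Send a netlink multicast notification to the "config" group carrying
 * the device name and UIO minor; used when a device is added or removed.
 */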
1159 static int tcmu_netlink_event(enum tcmu_genl_cmd cmd, const char *name, int minor)
1160 {
1161 	struct sk_buff *skb;
1162 	void *msg_header;
1163 	int ret = -ENOMEM;
1164 
1165 	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
1166 	if (!skb)
1167 		return ret;
1168 
1169 	msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd);
1170 	if (!msg_header)
1171 		goto free_skb;
1172 
1173 	ret = nla_put_string(skb, TCMU_ATTR_DEVICE, name);
1174 	if (ret < 0)
1175 		goto free_skb;
1176 
1177 	ret = nla_put_u32(skb, TCMU_ATTR_MINOR, minor);
1178 	if (ret < 0)
1179 		goto free_skb;
1180 
1181 	genlmsg_end(skb, msg_header);
1182 
1183 	ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
1184 				TCMU_MCGRP_CONFIG, GFP_KERNEL);
1185 
1186 	/* We don't care if no one is listening */
1187 	if (ret == -ESRCH)
1188 		ret = 0;
1189 
1190 	return ret;
1191 free_skb:
1192 	nlmsg_free(skb);
1193 	return ret;
1194 }
1195 
1196 static int tcmu_configure_device(struct se_device *dev)
1197 {
1198 	struct tcmu_dev *udev = TCMU_DEV(dev);
1199 	struct tcmu_hba *hba = udev->hba->hba_ptr;
1200 	struct uio_info *info;
1201 	struct tcmu_mailbox *mb;
1202 	size_t size;
1203 	size_t used;
1204 	int ret = 0;
1205 	char *str;
1206 
1207 	info = &udev->uio_info;
1208 
1209 	size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name,
1210 			udev->dev_config);
1211 	size += 1; /* for \0 */
1212 	str = kmalloc(size, GFP_KERNEL);
1213 	if (!str)
1214 		return -ENOMEM;
1215 
1216 	used = snprintf(str, size, "tcm-user/%u/%s", hba->host_id, udev->name);
1217 
1218 	if (udev->dev_config[0])
1219 		snprintf(str + used, size - used, "/%s", udev->dev_config);
1220 
1221 	info->name = str;
1222 
1223 	udev->mb_addr = vzalloc(CMDR_SIZE);
1224 	if (!udev->mb_addr) {
1225 		ret = -ENOMEM;
1226 		goto err_vzalloc;
1227 	}
1228 
1229 	/* mailbox fits in first part of CMDR space */
1230 	udev->cmdr_size = CMDR_SIZE - CMDR_OFF;
1231 	udev->data_off = CMDR_SIZE;
1232 	udev->data_size = DATA_SIZE;
1233 	udev->dbi_thresh = 0; /* Default in Idle state */
1234 	udev->waiting_global = false;
1235 
1236 	/* Initialise the mailbox of the ring buffer */
1237 	mb = udev->mb_addr;
1238 	mb->version = TCMU_MAILBOX_VERSION;
1239 	mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC;
1240 	mb->cmdr_off = CMDR_OFF;
1241 	mb->cmdr_size = udev->cmdr_size;
1242 
1243 	WARN_ON(!PAGE_ALIGNED(udev->data_off));
1244 	WARN_ON(udev->data_size % PAGE_SIZE);
1245 	WARN_ON(udev->data_size % DATA_BLOCK_SIZE);
1246 
1247 	INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
1248 
1249 	info->version = __stringify(TCMU_MAILBOX_VERSION);
1250 
1251 	info->mem[0].name = "tcm-user command & data buffer";
1252 	info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
1253 	info->mem[0].size = TCMU_RING_SIZE;
1254 	info->mem[0].memtype = UIO_MEM_NONE;
1255 
1256 	info->irqcontrol = tcmu_irqcontrol;
1257 	info->irq = UIO_IRQ_CUSTOM;
1258 
1259 	info->mmap = tcmu_mmap;
1260 	info->open = tcmu_open;
1261 	info->release = tcmu_release;
1262 
1263 	ret = uio_register_device(tcmu_root_device, info);
1264 	if (ret)
1265 		goto err_register;
1266 
	/* User can set hw_block_size before enabling the device */
1268 	if (dev->dev_attrib.hw_block_size == 0)
1269 		dev->dev_attrib.hw_block_size = 512;
1270 	/* Other attributes can be configured in userspace */
1271 	if (!dev->dev_attrib.hw_max_sectors)
1272 		dev->dev_attrib.hw_max_sectors = 128;
1273 	dev->dev_attrib.hw_queue_depth = 128;
1274 
1275 	ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name,
1276 				 udev->uio_info.uio_dev->minor);
1277 	if (ret)
1278 		goto err_netlink;
1279 
1280 	mutex_lock(&root_udev_mutex);
1281 	list_add(&udev->node, &root_udev);
1282 	mutex_unlock(&root_udev_mutex);
1283 
1284 	return 0;
1285 
1286 err_netlink:
1287 	uio_unregister_device(&udev->uio_info);
1288 err_register:
1289 	vfree(udev->mb_addr);
1290 err_vzalloc:
1291 	kfree(info->name);
1292 
1293 	return ret;
1294 }
1295 
1296 static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
1297 {
1298 	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
1299 		kmem_cache_free(tcmu_cmd_cache, cmd);
1300 		return 0;
1301 	}
1302 	return -EINVAL;
1303 }
1304 
1305 static void tcmu_dev_call_rcu(struct rcu_head *p)
1306 {
1307 	struct se_device *dev = container_of(p, struct se_device, rcu_head);
1308 	struct tcmu_dev *udev = TCMU_DEV(dev);
1309 
1310 	kfree(udev);
1311 }
1312 
1313 static bool tcmu_dev_configured(struct tcmu_dev *udev)
1314 {
1315 	return udev->uio_info.uio_dev ? true : false;
1316 }
1317 
1318 static void tcmu_blocks_release(struct tcmu_dev *udev)
1319 {
1320 	int i;
1321 	struct page *page;
1322 
1323 	/* Try to release all block pages */
1324 	mutex_lock(&udev->cmdr_lock);
1325 	for (i = 0; i <= udev->dbi_max; i++) {
1326 		page = radix_tree_delete(&udev->data_blocks, i);
1327 		if (page) {
1328 			__free_page(page);
1329 			atomic_dec(&global_db_count);
1330 		}
1331 	}
1332 	mutex_unlock(&udev->cmdr_lock);
1333 }
1334 
1335 static void tcmu_free_device(struct se_device *dev)
1336 {
1337 	struct tcmu_dev *udev = TCMU_DEV(dev);
1338 	struct tcmu_cmd *cmd;
1339 	bool all_expired = true;
1340 	int i;
1341 
1342 	del_timer_sync(&udev->timeout);
1343 
1344 	mutex_lock(&root_udev_mutex);
1345 	list_del(&udev->node);
1346 	mutex_unlock(&root_udev_mutex);
1347 
1348 	vfree(udev->mb_addr);
1349 
1350 	/* Upper layer should drain all requests before calling this */
1351 	spin_lock_irq(&udev->commands_lock);
1352 	idr_for_each_entry(&udev->commands, cmd, i) {
1353 		if (tcmu_check_and_free_pending_cmd(cmd) != 0)
1354 			all_expired = false;
1355 	}
1356 	idr_destroy(&udev->commands);
1357 	spin_unlock_irq(&udev->commands_lock);
1358 	WARN_ON(!all_expired);
1359 
1360 	tcmu_blocks_release(udev);
1361 
1362 	if (tcmu_dev_configured(udev)) {
1363 		tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name,
1364 				   udev->uio_info.uio_dev->minor);
1365 
1366 		uio_unregister_device(&udev->uio_info);
1367 		kfree(udev->uio_info.name);
1368 		kfree(udev->name);
1369 	}
1370 	call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
1371 }
1372 
1373 enum {
1374 	Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
1375 	Opt_err,
1376 };
1377 
1378 static match_table_t tokens = {
1379 	{Opt_dev_config, "dev_config=%s"},
1380 	{Opt_dev_size, "dev_size=%u"},
1381 	{Opt_hw_block_size, "hw_block_size=%u"},
1382 	{Opt_hw_max_sectors, "hw_max_sectors=%u"},
1383 	{Opt_err, NULL}
1384 };
1385 
1386 static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
1387 {
1388 	unsigned long tmp_ul;
1389 	char *arg_p;
1390 	int ret;
1391 
1392 	arg_p = match_strdup(arg);
1393 	if (!arg_p)
1394 		return -ENOMEM;
1395 
1396 	ret = kstrtoul(arg_p, 0, &tmp_ul);
1397 	kfree(arg_p);
1398 	if (ret < 0) {
1399 		pr_err("kstrtoul() failed for dev attrib\n");
1400 		return ret;
1401 	}
1402 	if (!tmp_ul) {
1403 		pr_err("dev attrib must be nonzero\n");
1404 		return -EINVAL;
1405 	}
1406 	*dev_attrib = tmp_ul;
1407 	return 0;
1408 }
1409 
1410 static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
1411 		const char *page, ssize_t count)
1412 {
1413 	struct tcmu_dev *udev = TCMU_DEV(dev);
1414 	char *orig, *ptr, *opts, *arg_p;
1415 	substring_t args[MAX_OPT_ARGS];
1416 	int ret = 0, token;
1417 
1418 	opts = kstrdup(page, GFP_KERNEL);
1419 	if (!opts)
1420 		return -ENOMEM;
1421 
1422 	orig = opts;
1423 
1424 	while ((ptr = strsep(&opts, ",\n")) != NULL) {
1425 		if (!*ptr)
1426 			continue;
1427 
1428 		token = match_token(ptr, tokens, args);
1429 		switch (token) {
1430 		case Opt_dev_config:
1431 			if (match_strlcpy(udev->dev_config, &args[0],
1432 					  TCMU_CONFIG_LEN) == 0) {
1433 				ret = -EINVAL;
1434 				break;
1435 			}
1436 			pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
1437 			break;
1438 		case Opt_dev_size:
1439 			arg_p = match_strdup(&args[0]);
1440 			if (!arg_p) {
1441 				ret = -ENOMEM;
1442 				break;
1443 			}
1444 			ret = kstrtoul(arg_p, 0, (unsigned long *) &udev->dev_size);
1445 			kfree(arg_p);
1446 			if (ret < 0)
1447 				pr_err("kstrtoul() failed for dev_size=\n");
1448 			break;
1449 		case Opt_hw_block_size:
1450 			ret = tcmu_set_dev_attrib(&args[0],
1451 					&(dev->dev_attrib.hw_block_size));
1452 			break;
1453 		case Opt_hw_max_sectors:
1454 			ret = tcmu_set_dev_attrib(&args[0],
1455 					&(dev->dev_attrib.hw_max_sectors));
1456 			break;
1457 		default:
1458 			break;
1459 		}
1460 
1461 		if (ret)
1462 			break;
1463 	}
1464 
1465 	kfree(orig);
1466 	return (!ret) ? count : ret;
1467 }
1468 
1469 static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
1470 {
1471 	struct tcmu_dev *udev = TCMU_DEV(dev);
1472 	ssize_t bl = 0;
1473 
1474 	bl = sprintf(b + bl, "Config: %s ",
1475 		     udev->dev_config[0] ? udev->dev_config : "NULL");
1476 	bl += sprintf(b + bl, "Size: %zu\n", udev->dev_size);
1477 
1478 	return bl;
1479 }
1480 
1481 static sector_t tcmu_get_blocks(struct se_device *dev)
1482 {
1483 	struct tcmu_dev *udev = TCMU_DEV(dev);
1484 
1485 	return div_u64(udev->dev_size - dev->dev_attrib.block_size,
1486 		       dev->dev_attrib.block_size);
1487 }
1488 
1489 static sense_reason_t
1490 tcmu_parse_cdb(struct se_cmd *cmd)
1491 {
1492 	return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
1493 }
1494 
1495 static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
1496 {
1497 	struct se_dev_attrib *da = container_of(to_config_group(item),
1498 					struct se_dev_attrib, da_group);
1499 	struct tcmu_dev *udev = container_of(da->da_dev,
1500 					struct tcmu_dev, se_dev);
1501 
1502 	return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
1503 }
1504 
1505 static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page,
1506 				       size_t count)
1507 {
1508 	struct se_dev_attrib *da = container_of(to_config_group(item),
1509 					struct se_dev_attrib, da_group);
1510 	struct tcmu_dev *udev = container_of(da->da_dev,
1511 					struct tcmu_dev, se_dev);
1512 	u32 val;
1513 	int ret;
1514 
1515 	if (da->da_dev->export_count) {
1516 		pr_err("Unable to set tcmu cmd_time_out while exports exist\n");
1517 		return -EINVAL;
1518 	}
1519 
1520 	ret = kstrtou32(page, 0, &val);
1521 	if (ret < 0)
1522 		return ret;
1523 
1524 	udev->cmd_time_out = val * MSEC_PER_SEC;
1525 	return count;
1526 }
1527 CONFIGFS_ATTR(tcmu_, cmd_time_out);
1528 
1529 static struct configfs_attribute **tcmu_attrs;
1530 
1531 static struct target_backend_ops tcmu_ops = {
1532 	.name			= "user",
1533 	.owner			= THIS_MODULE,
1534 	.transport_flags	= TRANSPORT_FLAG_PASSTHROUGH,
1535 	.attach_hba		= tcmu_attach_hba,
1536 	.detach_hba		= tcmu_detach_hba,
1537 	.alloc_device		= tcmu_alloc_device,
1538 	.configure_device	= tcmu_configure_device,
1539 	.free_device		= tcmu_free_device,
1540 	.parse_cdb		= tcmu_parse_cdb,
1541 	.set_configfs_dev_params = tcmu_set_configfs_dev_params,
1542 	.show_configfs_dev_params = tcmu_show_configfs_dev_params,
1543 	.get_device_type	= sbc_get_device_type,
1544 	.get_blocks		= tcmu_get_blocks,
1545 	.tb_dev_attrib_attrs	= NULL,
1546 };
1547 
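/*
 * Background thread that reclaims data area blocks: when woken it first
 * handles any finished commands, then shrinks each device's dbi_thresh
 * down to just above the highest in-use block, unmaps and frees the pages
 * beyond it, and finally wakes up devices waiting for the global data
 * pool.
 */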
1548 static int unmap_thread_fn(void *data)
1549 {
1550 	struct tcmu_dev *udev;
1551 	loff_t off;
1552 	uint32_t start, end, block;
1553 	struct page *page;
1554 	int i;
1555 
1556 	while (1) {
1557 		DEFINE_WAIT(__wait);
1558 
1559 		prepare_to_wait(&unmap_wait, &__wait, TASK_INTERRUPTIBLE);
1560 		schedule();
1561 		finish_wait(&unmap_wait, &__wait);
1562 
1563 		mutex_lock(&root_udev_mutex);
1564 		list_for_each_entry(udev, &root_udev, node) {
1565 			mutex_lock(&udev->cmdr_lock);
1566 
1567 			/* Try to complete the finished commands first */
1568 			tcmu_handle_completions(udev);
1569 
			/* Skip udevs that are waiting for the global pool or are idle */
1571 			if (udev->waiting_global || !udev->dbi_thresh) {
1572 				mutex_unlock(&udev->cmdr_lock);
1573 				continue;
1574 			}
1575 
1576 			end = udev->dbi_max + 1;
1577 			block = find_last_bit(udev->data_bitmap, end);
1578 			if (block == udev->dbi_max) {
1579 				/*
1580 				 * The last bit is dbi_max, so there is
1581 				 * no need to shrink any blocks.
1582 				 */
1583 				mutex_unlock(&udev->cmdr_lock);
1584 				continue;
1585 			} else if (block == end) {
				/* The current udev will go to the idle state */
1587 				udev->dbi_thresh = start = 0;
1588 				udev->dbi_max = 0;
1589 			} else {
1590 				udev->dbi_thresh = start = block + 1;
1591 				udev->dbi_max = block;
1592 			}
1593 
			/* Unmap the data area pages from off to the end */
1595 			off = udev->data_off + start * DATA_BLOCK_SIZE;
1596 			unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);
1597 
1598 			/* Release the block pages */
1599 			for (i = start; i < end; i++) {
1600 				page = radix_tree_delete(&udev->data_blocks, i);
1601 				if (page) {
1602 					__free_page(page);
1603 					atomic_dec(&global_db_count);
1604 				}
1605 			}
1606 			mutex_unlock(&udev->cmdr_lock);
1607 		}
1608 
		/*
		 * Try to wake up the udevs that are waiting
		 * for the global data pool.
		 */
1613 		list_for_each_entry(udev, &root_udev, node) {
1614 			if (udev->waiting_global)
1615 				wake_up(&udev->wait_cmdr);
1616 		}
1617 		mutex_unlock(&root_udev_mutex);
1618 	}
1619 
1620 	return 0;
1621 }
1622 
1623 static int __init tcmu_module_init(void)
1624 {
1625 	int ret, i, len = 0;
1626 
1627 	BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
1628 
1629 	tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
1630 				sizeof(struct tcmu_cmd),
1631 				__alignof__(struct tcmu_cmd),
1632 				0, NULL);
1633 	if (!tcmu_cmd_cache)
1634 		return -ENOMEM;
1635 
1636 	tcmu_root_device = root_device_register("tcm_user");
1637 	if (IS_ERR(tcmu_root_device)) {
1638 		ret = PTR_ERR(tcmu_root_device);
1639 		goto out_free_cache;
1640 	}
1641 
1642 	ret = genl_register_family(&tcmu_genl_family);
1643 	if (ret < 0) {
1644 		goto out_unreg_device;
1645 	}
1646 
1647 	for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
1648 		len += sizeof(struct configfs_attribute *);
1649 	}
1650 	len += sizeof(struct configfs_attribute *) * 2;
1651 
1652 	tcmu_attrs = kzalloc(len, GFP_KERNEL);
1653 	if (!tcmu_attrs) {
1654 		ret = -ENOMEM;
1655 		goto out_unreg_genl;
1656 	}
1657 
1658 	for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
1659 		tcmu_attrs[i] = passthrough_attrib_attrs[i];
1660 	}
1661 	tcmu_attrs[i] = &tcmu_attr_cmd_time_out;
1662 	tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;
1663 
1664 	ret = transport_backend_register(&tcmu_ops);
1665 	if (ret)
1666 		goto out_attrs;
1667 
1668 	init_waitqueue_head(&unmap_wait);
1669 	unmap_thread = kthread_run(unmap_thread_fn, NULL, "tcmu_unmap");
1670 	if (IS_ERR(unmap_thread)) {
1671 		ret = PTR_ERR(unmap_thread);
1672 		goto out_unreg_transport;
1673 	}
1674 
1675 	return 0;
1676 
1677 out_unreg_transport:
1678 	target_backend_unregister(&tcmu_ops);
1679 out_attrs:
1680 	kfree(tcmu_attrs);
1681 out_unreg_genl:
1682 	genl_unregister_family(&tcmu_genl_family);
1683 out_unreg_device:
1684 	root_device_unregister(tcmu_root_device);
1685 out_free_cache:
1686 	kmem_cache_destroy(tcmu_cmd_cache);
1687 
1688 	return ret;
1689 }
1690 
1691 static void __exit tcmu_module_exit(void)
1692 {
1693 	kthread_stop(unmap_thread);
1694 	target_backend_unregister(&tcmu_ops);
1695 	kfree(tcmu_attrs);
1696 	genl_unregister_family(&tcmu_genl_family);
1697 	root_device_unregister(tcmu_root_device);
1698 	kmem_cache_destroy(tcmu_cmd_cache);
1699 }
1700 
1701 MODULE_DESCRIPTION("TCM USER subsystem plugin");
1702 MODULE_AUTHOR("Shaohua Li <shli@kernel.org>");
1703 MODULE_AUTHOR("Andy Grover <agrover@redhat.com>");
1704 MODULE_LICENSE("GPL");
1705 
1706 module_init(tcmu_module_init);
1707 module_exit(tcmu_module_exit);
1708