/*******************************************************************************
 * Filename:  target_core_rd.c
 *
 * This file contains the Storage Engine <-> Ramdisk transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_rd.h"

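/* Convert a generic se_device back to the rd_dev that embeds it. */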
static inline struct rd_dev *RD_DEV(struct se_device *dev)
{
	return container_of(dev, struct rd_dev, dev);
}

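/*
 * Attach point for the HBA: allocate the per-HBA rd_host that hands out
 * device IDs for ramdisk devices created under this HBA.
 */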
static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct rd_host *rd_host;

	rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
	if (!rd_host) {
		pr_err("Unable to allocate memory for struct rd_host\n");
		return -ENOMEM;
	}

	rd_host->rd_host_id = host_id;

	hba->hba_ptr = rd_host;

	pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		RD_HBA_VERSION, TARGET_CORE_VERSION);

	return 0;
}

static void rd_detach_hba(struct se_hba *hba)
{
	struct rd_host *rd_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);

	kfree(rd_host);
	hba->hba_ptr = NULL;
}

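/*
 * Walk each scatterlist table, free every backing page and the sg array
 * itself, then free the table array. Returns the number of pages freed
 * so callers can log how much memory was released.
 */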
static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
				 u32 sg_table_count)
{
	struct page *pg;
	struct scatterlist *sg;
	u32 i, j, page_count = 0, sg_per_table;

	for (i = 0; i < sg_table_count; i++) {
		sg = sg_table[i].sg_table;
		sg_per_table = sg_table[i].rd_sg_count;

		for (j = 0; j < sg_per_table; j++) {
			pg = sg_page(&sg[j]);
			if (pg) {
				__free_page(pg);
				page_count++;
			}
		}
		kfree(sg);
	}

	kfree(sg_table);
	return page_count;
}

static void rd_release_device_space(struct rd_dev *rd_dev)
{
	u32 page_count;

	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
		return;

	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,
					  rd_dev->sg_table_count);

	pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

	rd_dev->sg_table_array = NULL;
	rd_dev->sg_table_count = 0;
}

/*	rd_allocate_sgl_table():
 *
 *	Allocate scatterlist entries for @total_sg_needed pages into the
 *	tables of @sg_table, backing each entry with a freshly allocated
 *	page filled with @init_payload.
 */
static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
				 u32 total_sg_needed, unsigned char init_payload)
{
	u32 i = 0, j, page_offset = 0, sg_per_table;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	struct page *pg;
	struct scatterlist *sg;
	unsigned char *p;

	while (total_sg_needed) {
		unsigned int chain_entry = 0;

		sg_per_table = (total_sg_needed > max_sg_per_table) ?
			max_sg_per_table : total_sg_needed;

#ifdef CONFIG_ARCH_HAS_SG_CHAIN

		/*
		 * Reserve extra element for chain entry
		 */
		if (sg_per_table < total_sg_needed)
			chain_entry = 1;

#endif /* CONFIG_ARCH_HAS_SG_CHAIN */

		sg = kcalloc(sg_per_table + chain_entry, sizeof(*sg),
				GFP_KERNEL);
		if (!sg) {
			pr_err("Unable to allocate scatterlist array"
				" for struct rd_dev\n");
			return -ENOMEM;
		}

		sg_init_table(sg, sg_per_table + chain_entry);

#ifdef CONFIG_ARCH_HAS_SG_CHAIN

		if (i > 0) {
			sg_chain(sg_table[i - 1].sg_table,
				 max_sg_per_table + 1, sg);
		}

#endif /* CONFIG_ARCH_HAS_SG_CHAIN */

		sg_table[i].sg_table = sg;
		sg_table[i].rd_sg_count = sg_per_table;
		sg_table[i].page_start_offset = page_offset;
		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
						- 1;

		for (j = 0; j < sg_per_table; j++) {
			pg = alloc_pages(GFP_KERNEL, 0);
			if (!pg) {
				pr_err("Unable to allocate scatterlist"
					" pages for struct rd_dev_sg_table\n");
				return -ENOMEM;
			}
			sg_assign_page(&sg[j], pg);
			sg[j].length = PAGE_SIZE;

			p = kmap(pg);
			memset(p, init_payload, PAGE_SIZE);
			kunmap(pg);
		}

		page_offset += sg_per_table;
		total_sg_needed -= sg_per_table;
	}

	return 0;
}

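/*
 * Size the sg table array for rd_page_count pages and populate it with
 * zero-filled backing pages. NULLIO devices skip the allocation entirely
 * since their I/O is never copied anywhere.
 */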
static int rd_build_device_space(struct rd_dev *rd_dev)
{
	struct rd_dev_sg_table *sg_table;
	u32 sg_tables, total_sg_needed;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	int rc;

	if (rd_dev->rd_page_count <= 0) {
		pr_err("Illegal page count: %u for Ramdisk device\n",
		       rd_dev->rd_page_count);
		return -EINVAL;
	}

	/* Don't need backing pages for NULLIO */
	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;

	total_sg_needed = rd_dev->rd_page_count;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kcalloc(sg_tables, sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk"
		       " scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_table_array = sg_table;
	rd_dev->sg_table_count = sg_tables;

	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00);
	if (rc)
		return rc;

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		 rd_dev->rd_dev_id, rd_dev->rd_page_count,
		 rd_dev->sg_table_count);

	return 0;
}

static void rd_release_prot_space(struct rd_dev *rd_dev)
{
	u32 page_count;

	if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count)
		return;

	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,
					  rd_dev->sg_prot_count);

	pr_debug("CORE_RD[%u] - Released protection space for Ramdisk"
		 " Device ID: %u, pages %u in %u tables total bytes %lu\n",
		 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		 rd_dev->sg_prot_count, (unsigned long)page_count * PAGE_SIZE);

	rd_dev->sg_prot_array = NULL;
	rd_dev->sg_prot_count = 0;
}

static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size)
{
	struct rd_dev_sg_table *sg_table;
	u32 total_sg_needed, sg_tables;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	int rc;

	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;
	/*
	 * prot_length = bytes of DIF data per block (8 for DIF)
	 * blocks      = rd_page_count * (PAGE_SIZE / block_size)
	 * prot pages  = blocks * prot_length / PAGE_SIZE
	 *             = rd_page_count * prot_length / block_size
	 * (the PAGE_SIZE factors cancel), plus one page of padding.
	 */
	total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kcalloc(sg_tables, sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk protection"
		       " scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_prot_array = sg_table;
	rd_dev->sg_prot_count = sg_tables;

	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff);
	if (rc)
		return rc;

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of"
		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		 rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count);

	return 0;
}

static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
{
	struct rd_dev *rd_dev;
	struct rd_host *rd_host = hba->hba_ptr;

	rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
	if (!rd_dev) {
		pr_err("Unable to allocate memory for struct rd_dev\n");
		return NULL;
	}

	rd_dev->rd_host = rd_host;

	return &rd_dev->dev;
}

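/*
 * Called once configfs parameters have been supplied: build the backing
 * store and advertise the ramdisk's fixed attributes (block size, queue
 * depth, non-rotational) to the core.
 */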
static int rd_configure_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	struct rd_host *rd_host = dev->se_hba->hba_ptr;
	int ret;

	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
		pr_debug("Missing rd_pages= parameter\n");
		return -EINVAL;
	}

	ret = rd_build_device_space(rd_dev);
	if (ret < 0)
		goto fail;

	dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
	dev->dev_attrib.hw_max_sectors = UINT_MAX;
	dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
	dev->dev_attrib.is_nonrot = 1;

	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;

	pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
		" %u pages in %u tables, %lu total bytes\n",
		rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count,
		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));

	return 0;

fail:
	rd_release_device_space(rd_dev);
	return ret;
}

static void rd_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct rd_dev *rd_dev = RD_DEV(dev);

	kfree(rd_dev);
}

static void rd_free_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	rd_release_device_space(rd_dev);
	call_rcu(&dev->rcu_head, rd_dev_call_rcu);
}

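/*
 * Look up the sg table covering a given page index. Tables hold a fixed
 * number of entries, so the candidate is found by division and then
 * checked against the table's start/end page offsets.
 */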
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
	struct rd_dev_sg_table *sg_table;
	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));

	i = page / sg_per_table;
	if (i < rd_dev->sg_table_count) {
		sg_table = &rd_dev->sg_table_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
			page);

	return NULL;
}

static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page)
{
	struct rd_dev_sg_table *sg_table;
	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));

	i = page / sg_per_table;
	if (i < rd_dev->sg_prot_count) {
		sg_table = &rd_dev->sg_prot_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct prot rd_dev_sg_table for page: %u\n",
			page);

	return NULL;
}

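/*
 * Verify or copy DIF protection information for a read or write. The PI
 * for the starting LBA is located in the prot sg tables; when the PI
 * spans several tables on arches without sg chaining, a temporary flat
 * sg array is built so sbc_dif_verify() sees one contiguous list.
 */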
static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read)
{
	struct se_device *se_dev = cmd->se_dev;
	struct rd_dev *dev = RD_DEV(se_dev);
	struct rd_dev_sg_table *prot_table;
	bool need_to_release = false;
	struct scatterlist *prot_sg;
	u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
	u32 prot_offset, prot_page;
	u32 prot_npages __maybe_unused;
	u64 tmp;
	sense_reason_t rc = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	tmp = cmd->t_task_lba * se_dev->prot_length;
	prot_offset = do_div(tmp, PAGE_SIZE);
	prot_page = tmp;

	prot_table = rd_get_prot_table(dev, prot_page);
	if (!prot_table)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	prot_sg = &prot_table->sg_table[prot_page -
					prot_table->page_start_offset];

#ifndef CONFIG_ARCH_HAS_SG_CHAIN

	prot_npages = DIV_ROUND_UP(prot_offset + sectors * se_dev->prot_length,
				   PAGE_SIZE);

	/*
	 * Allocate temporary contiguous scatterlist entries if prot pages
	 * straddle multiple scatterlist tables.
	 */
	if (prot_table->page_end_offset < prot_page + prot_npages - 1) {
		int i;

		prot_sg = kcalloc(prot_npages, sizeof(*prot_sg), GFP_KERNEL);
		if (!prot_sg)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

		need_to_release = true;
		sg_init_table(prot_sg, prot_npages);

		for (i = 0; i < prot_npages; i++) {
			if (prot_page + i > prot_table->page_end_offset) {
				prot_table = rd_get_prot_table(dev,
								prot_page + i);
				if (!prot_table) {
					kfree(prot_sg);
					return rc;
				}
				sg_unmark_end(&prot_sg[i - 1]);
			}
			prot_sg[i] = prot_table->sg_table[prot_page + i -
						prot_table->page_start_offset];
		}
	}

#endif /* !CONFIG_ARCH_HAS_SG_CHAIN */

	if (is_read)
		rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
				    prot_sg, prot_offset);
	else
		rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
				    cmd->t_prot_sg, 0);

	if (!rc)
		sbc_dif_copy_prot(cmd, sectors, is_read, prot_sg, prot_offset);

	if (need_to_release)
		kfree(prot_sg);

	return rc;
}

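/*
 * Main I/O path: memcpy between the command's data scatterlist and the
 * ramdisk's backing pages, walking both a page at a time and crossing
 * into the next sg table when the current one is exhausted.
 */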
static sense_reason_t
rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	      enum dma_data_direction data_direction)
{
	struct se_device *se_dev = cmd->se_dev;
	struct rd_dev *dev = RD_DEV(se_dev);
	struct rd_dev_sg_table *table;
	struct scatterlist *rd_sg;
	struct sg_mapping_iter m;
	u32 rd_offset;
	u32 rd_size;
	u32 rd_page;
	u32 src_len;
	u64 tmp;
	sense_reason_t rc;

	if (dev->rd_flags & RDF_NULLIO) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
	rd_offset = do_div(tmp, PAGE_SIZE);
	rd_page = tmp;
	rd_size = cmd->data_length;

	table = rd_get_sg_table(dev, rd_page);
	if (!table)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	rd_sg = &table->sg_table[rd_page - table->page_start_offset];

	pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
			dev->rd_dev_id,
			data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
			cmd->t_task_lba, rd_size, rd_page, rd_offset);

	if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
	    data_direction == DMA_TO_DEVICE) {
		rc = rd_do_prot_rw(cmd, false);
		if (rc)
			return rc;
	}

	src_len = PAGE_SIZE - rd_offset;
	sg_miter_start(&m, sgl, sgl_nents,
			data_direction == DMA_FROM_DEVICE ?
				SG_MITER_TO_SG : SG_MITER_FROM_SG);
	while (rd_size) {
		u32 len;
		void *rd_addr;

		sg_miter_next(&m);
		if (!(u32)m.length) {
			pr_debug("RD[%u]: invalid sgl %p len %zu\n",
				 dev->rd_dev_id, m.addr, m.length);
			sg_miter_stop(&m);
			return TCM_INCORRECT_AMOUNT_OF_DATA;
		}
		len = min((u32)m.length, src_len);
		if (len > rd_size) {
			pr_debug("RD[%u]: size underrun page %d offset %d "
				 "size %d\n", dev->rd_dev_id,
				 rd_page, rd_offset, rd_size);
			len = rd_size;
		}
		m.consumed = len;

		rd_addr = sg_virt(rd_sg) + rd_offset;

		if (data_direction == DMA_FROM_DEVICE)
			memcpy(m.addr, rd_addr, len);
		else
			memcpy(rd_addr, m.addr, len);

		rd_size -= len;
		if (!rd_size)
			continue;

		src_len -= len;
		if (src_len) {
			rd_offset += len;
			continue;
		}

		/* rd page completed, next one please */
		rd_page++;
		rd_offset = 0;
		src_len = PAGE_SIZE;
		if (rd_page <= table->page_end_offset) {
			rd_sg++;
			continue;
		}

		table = rd_get_sg_table(dev, rd_page);
		if (!table) {
			sg_miter_stop(&m);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		/* since we increment, the first sg entry is correct */
		rd_sg = table->sg_table;
	}
	sg_miter_stop(&m);

	if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
	    data_direction == DMA_FROM_DEVICE) {
		rc = rd_do_prot_rw(cmd, true);
		if (rc)
			return rc;
	}

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

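/* configfs parameters understood by this backend: rd_pages=N, rd_nullio=1 */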
enum {
	Opt_rd_pages, Opt_rd_nullio, Opt_err
};

static match_table_t tokens = {
	{Opt_rd_pages, "rd_pages=%d"},
	{Opt_rd_nullio, "rd_nullio=%d"},
	{Opt_err, NULL}
};

static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_rd_pages:
			if (match_int(args, &arg))
				break;
			rd_dev->rd_page_count = arg;
			pr_debug("RAMDISK: Referencing Page"
				" Count: %u\n", rd_dev->rd_page_count);
			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
			break;
		case Opt_rd_nullio:
			if (match_int(args, &arg))
				break;
			if (arg != 1)
				break;

			pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
			rd_dev->rd_flags |= RDF_NULLIO;
			break;
		default:
			break;
		}
	}

	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: rd_mcp\n",
			rd_dev->rd_dev_id);
	bl += sprintf(b + bl, "        PAGES/PAGE_SIZE: %u*%lu"
			"  SG_table_count: %u  nullio: %d\n", rd_dev->rd_page_count,
			PAGE_SIZE, rd_dev->sg_table_count,
			!!(rd_dev->rd_flags & RDF_NULLIO));
	return bl;
}

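/*
 * Report capacity: total backing-store bytes divided by the block size,
 * minus one because the value returned is the last addressable LBA.
 */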
static sector_t rd_get_blocks(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
			dev->dev_attrib.block_size) - 1;

	return blocks_long;
}

static int rd_init_prot(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	if (!dev->dev_attrib.pi_prot_type)
		return 0;

	return rd_build_prot_space(rd_dev, dev->prot_length,
				   dev->dev_attrib.block_size);
}

static void rd_free_prot(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	rd_release_prot_space(rd_dev);
}

static struct sbc_ops rd_sbc_ops = {
	.execute_rw		= rd_execute_rw,
};

static sense_reason_t
rd_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &rd_sbc_ops);
}

static const struct target_backend_ops rd_mcp_ops = {
	.name			= "rd_mcp",
	.inquiry_prod		= "RAMDISK-MCP",
	.inquiry_rev		= RD_MCP_VERSION,
	.attach_hba		= rd_attach_hba,
	.detach_hba		= rd_detach_hba,
	.alloc_device		= rd_alloc_device,
	.configure_device	= rd_configure_device,
	.free_device		= rd_free_device,
	.parse_cdb		= rd_parse_cdb,
	.set_configfs_dev_params = rd_set_configfs_dev_params,
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= rd_get_blocks,
	.init_prot		= rd_init_prot,
	.free_prot		= rd_free_prot,
	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
};

int __init rd_module_init(void)
{
	return transport_backend_register(&rd_mcp_ops);
}

void rd_module_exit(void)
{
	target_backend_unregister(&rd_mcp_ops);
}