/*******************************************************************************
 * Filename:  target_core_rd.c
 *
 * This file contains the Storage Engine <-> Ramdisk transport
 * specific functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/version.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>

#include "target_core_rd.h"

static struct se_subsystem_api rd_dr_template;
static struct se_subsystem_api rd_mcp_template;
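
/*
 * Two subsystem plugins are registered from this file: rd_dr (DIRECT) maps
 * the ramdisk's backing pages straight into struct se_mem lists, while
 * rd_mcp (MEMCPY) copies data between the backing pages and each task's
 * scatterlist.
 */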

/* #define DEBUG_RAMDISK_MCP */
/* #define DEBUG_RAMDISK_DR */

/*	rd_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *	Allocate a struct rd_host for this HBA and set the HBA queue depths.
 */
static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct rd_host *rd_host;

	rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
	if (!rd_host) {
		printk(KERN_ERR "Unable to allocate memory for struct rd_host\n");
		return -ENOMEM;
	}

	rd_host->rd_host_id = host_id;

	atomic_set(&hba->left_queue_depth, RD_HBA_QUEUE_DEPTH);
	atomic_set(&hba->max_queue_depth, RD_HBA_QUEUE_DEPTH);
	hba->hba_ptr = (void *) rd_host;

	printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
	printk(KERN_INFO "CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
		" Target Core TCQ Depth: %d MaxSectors: %u\n", hba->hba_id,
		rd_host->rd_host_id, atomic_read(&hba->max_queue_depth),
		RD_MAX_SECTORS);

	return 0;
}

static void rd_detach_hba(struct se_hba *hba)
{
	struct rd_host *rd_host = hba->hba_ptr;

	printk(KERN_INFO "CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);

	kfree(rd_host);
	hba->hba_ptr = NULL;
}

/*	rd_release_device_space():
 *
 *	Free all backing pages and scatterlist tables of a ramdisk device.
 */
static void rd_release_device_space(struct rd_dev *rd_dev)
{
	u32 i, j, page_count = 0, sg_per_table;
	struct rd_dev_sg_table *sg_table;
	struct page *pg;
	struct scatterlist *sg;

	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
		return;

	sg_table = rd_dev->sg_table_array;

	for (i = 0; i < rd_dev->sg_table_count; i++) {
		sg = sg_table[i].sg_table;
		sg_per_table = sg_table[i].rd_sg_count;

		for (j = 0; j < sg_per_table; j++) {
			pg = sg_page(&sg[j]);
			if (pg) {
				__free_page(pg);
				page_count++;
			}
		}

		kfree(sg);
	}

	printk(KERN_INFO "CORE_RD[%u] - Released device space for Ramdisk"
		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

	kfree(sg_table);
	rd_dev->sg_table_array = NULL;
	rd_dev->sg_table_count = 0;
}


/*	rd_build_device_space():
 *
 *	Allocate the scatterlist tables and the rd_page_count backing pages
 *	for a ramdisk device.
 */
static int rd_build_device_space(struct rd_dev *rd_dev)
{
	u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	struct rd_dev_sg_table *sg_table;
	struct page *pg;
	struct scatterlist *sg;

	if (rd_dev->rd_page_count <= 0) {
		printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n",
			rd_dev->rd_page_count);
		return -1;
	}
	total_sg_needed = rd_dev->rd_page_count;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		printk(KERN_ERR "Unable to allocate memory for Ramdisk"
			" scatterlist tables\n");
		return -1;
	}

	rd_dev->sg_table_array = sg_table;
	rd_dev->sg_table_count = sg_tables;

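	/*
	 * Each struct rd_dev_sg_table covers at most max_sg_per_table pages;
	 * page_start_offset/page_end_offset record the 0-based page range so
	 * rd_get_sg_table() can later locate the table backing a given page.
	 */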
	while (total_sg_needed) {
		sg_per_table = (total_sg_needed > max_sg_per_table) ?
			max_sg_per_table : total_sg_needed;

		sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
				GFP_KERNEL);
		if (!sg) {
			printk(KERN_ERR "Unable to allocate scatterlist array"
				" for struct rd_dev\n");
			return -1;
		}

		sg_init_table(sg, sg_per_table);

		sg_table[i].sg_table = sg;
		sg_table[i].rd_sg_count = sg_per_table;
		sg_table[i].page_start_offset = page_offset;
		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
						- 1;

		for (j = 0; j < sg_per_table; j++) {
			pg = alloc_pages(GFP_KERNEL, 0);
			if (!pg) {
				printk(KERN_ERR "Unable to allocate scatterlist"
					" pages for struct rd_dev_sg_table\n");
				return -1;
			}
			sg_assign_page(&sg[j], pg);
			sg[j].length = PAGE_SIZE;
		}

		page_offset += sg_per_table;
		total_sg_needed -= sg_per_table;
	}

	printk(KERN_INFO "CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
		" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count);

	return 0;
}

static void *rd_allocate_virtdevice(
	struct se_hba *hba,
	const char *name,
	int rd_direct)
{
	struct rd_dev *rd_dev;
	struct rd_host *rd_host = hba->hba_ptr;

	rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
	if (!rd_dev) {
		printk(KERN_ERR "Unable to allocate memory for struct rd_dev\n");
		return NULL;
	}

	rd_dev->rd_host = rd_host;
	rd_dev->rd_direct = rd_direct;

	return rd_dev;
}

static void *rd_DIRECT_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	return rd_allocate_virtdevice(hba, name, 1);
}

static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	return rd_allocate_virtdevice(hba, name, 0);
}

/*	rd_create_virtdevice():
 *
 *	Build the device space and register the ramdisk with the target core.
 */
static struct se_device *rd_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p,
	int rd_direct)
{
	struct se_device *dev;
	struct se_dev_limits dev_limits;
	struct rd_dev *rd_dev = p;
	struct rd_host *rd_host = hba->hba_ptr;
	int dev_flags = 0;
	char prod[16], rev[4];

	memset(&dev_limits, 0, sizeof(struct se_dev_limits));

	if (rd_build_device_space(rd_dev) < 0)
		goto fail;

	snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP");
	snprintf(rev, 4, "%s", (rd_dev->rd_direct) ? RD_DR_VERSION :
						RD_MCP_VERSION);

	dev_limits.limits.logical_block_size = RD_BLOCKSIZE;
	dev_limits.limits.max_hw_sectors = RD_MAX_SECTORS;
	dev_limits.limits.max_sectors = RD_MAX_SECTORS;
	dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
	dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;

	dev = transport_add_device_to_core_hba(hba,
			(rd_dev->rd_direct) ? &rd_dr_template :
			&rd_mcp_template, se_dev, dev_flags, (void *)rd_dev,
			&dev_limits, prod, rev);
	if (!dev)
		goto fail;

	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
	rd_dev->rd_queue_depth = dev->queue_depth;

	printk(KERN_INFO "CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
		" %u pages in %u tables, %lu total bytes\n",
		rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" :
		"DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count,
		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));

	return dev;

fail:
	rd_release_device_space(rd_dev);
	return NULL;
}

static struct se_device *rd_DIRECT_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	return rd_create_virtdevice(hba, se_dev, p, 1);
}

static struct se_device *rd_MEMCPY_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	return rd_create_virtdevice(hba, se_dev, p, 0);
}

/*	rd_free_device(): (Part of se_subsystem_api_t template)
 *
 *	Release the device space and the struct rd_dev itself.
 */
static void rd_free_device(void *p)
{
	struct rd_dev *rd_dev = p;

	rd_release_device_space(rd_dev);
	kfree(rd_dev);
}

static inline struct rd_request *RD_REQ(struct se_task *task)
{
	return container_of(task, struct rd_request, rd_task);
}

static struct se_task *
rd_alloc_task(struct se_cmd *cmd)
{
	struct rd_request *rd_req;

	rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL);
	if (!rd_req) {
		printk(KERN_ERR "Unable to allocate struct rd_request\n");
		return NULL;
	}
	rd_req->rd_dev = SE_DEV(cmd)->dev_ptr;

	return &rd_req->rd_task;
}

/*	rd_get_sg_table():
 *
 *	Return the scatterlist table covering the given 0-based page offset.
 */
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
	u32 i;
	struct rd_dev_sg_table *sg_table;

	for (i = 0; i < rd_dev->sg_table_count; i++) {
		sg_table = &rd_dev->sg_table_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	printk(KERN_ERR "Unable to locate struct rd_dev_sg_table for page: %u\n",
			page);

	return NULL;
}

/*	rd_MEMCPY_read():
 *
 *	Copy data from the ramdisk backing pages into the task scatterlist.
 */
static int rd_MEMCPY_read(struct rd_request *req)
{
	struct se_task *task = &req->rd_task;
	struct rd_dev *dev = req->rd_dev;
	struct rd_dev_sg_table *table;
	struct scatterlist *sg_d, *sg_s;
	void *dst, *src;
	u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
	u32 length, page_end = 0, table_sg_end;
	u32 rd_offset = req->rd_offset;

	table = rd_get_sg_table(dev, req->rd_page);
	if (!table)
		return -1;

	table_sg_end = (table->page_end_offset - req->rd_page);
	sg_d = task->task_sg;
	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
#ifdef DEBUG_RAMDISK_MCP
	printk(KERN_INFO "RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
		" %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
		req->rd_page, req->rd_offset);
#endif
	src_offset = rd_offset;

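	/*
	 * Walk the destination (task) and source (ramdisk) scatterlists in
	 * parallel; the shorter remaining segment bounds each memcpy(), and
	 * page_end marks when a ramdisk page was consumed so the next page,
	 * possibly from a new sg table, can be looked up.
	 */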
	while (req->rd_size) {
		if ((sg_d[i].length - dst_offset) <
		    (sg_s[j].length - src_offset)) {
			length = (sg_d[i].length - dst_offset);
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "Step 1 - sg_d[%d]: %p length: %d"
				" offset: %u sg_s[%d].length: %u\n", i,
				&sg_d[i], sg_d[i].length, sg_d[i].offset, j,
				sg_s[j].length);
			printk(KERN_INFO "Step 1 - length: %u dst_offset: %u"
				" src_offset: %u\n", length, dst_offset,
				src_offset);
#endif
			if (length > req->rd_size)
				length = req->rd_size;

			dst = sg_virt(&sg_d[i++]) + dst_offset;
			if (!dst)
				BUG();

			src = sg_virt(&sg_s[j]) + src_offset;
			if (!src)
				BUG();

			dst_offset = 0;
			src_offset = length;
			page_end = 0;
		} else {
			length = (sg_s[j].length - src_offset);
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "Step 2 - sg_d[%d]: %p length: %d"
				" offset: %u sg_s[%d].length: %u\n", i,
				&sg_d[i], sg_d[i].length, sg_d[i].offset,
				j, sg_s[j].length);
			printk(KERN_INFO "Step 2 - length: %u dst_offset: %u"
				" src_offset: %u\n", length, dst_offset,
				src_offset);
#endif
			if (length > req->rd_size)
				length = req->rd_size;

			dst = sg_virt(&sg_d[i]) + dst_offset;
			if (!dst)
				BUG();

			if (sg_d[i].length == length) {
				i++;
				dst_offset = 0;
			} else
				dst_offset = length;

			src = sg_virt(&sg_s[j++]) + src_offset;
			if (!src)
				BUG();

			src_offset = 0;
			page_end = 1;
		}

		memcpy(dst, src, length);

#ifdef DEBUG_RAMDISK_MCP
		printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
			" i: %u, j: %u\n", req->rd_page,
			(req->rd_size - length), length, i, j);
#endif
		req->rd_size -= length;
		if (!req->rd_size)
			return 0;

		if (!page_end)
			continue;

		if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "page: %u in same page table\n",
				req->rd_page);
#endif
			continue;
		}
#ifdef DEBUG_RAMDISK_MCP
		printk(KERN_INFO "getting new page table for page: %u\n",
				req->rd_page);
#endif
		table = rd_get_sg_table(dev, req->rd_page);
		if (!table)
			return -1;

		sg_s = &table->sg_table[j = 0];
	}

	return 0;
}

/*	rd_MEMCPY_write():
 *
 *	Copy data from the task scatterlist into the ramdisk backing pages.
 */
static int rd_MEMCPY_write(struct rd_request *req)
{
	struct se_task *task = &req->rd_task;
	struct rd_dev *dev = req->rd_dev;
	struct rd_dev_sg_table *table;
	struct scatterlist *sg_d, *sg_s;
	void *dst, *src;
	u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
	u32 length, page_end = 0, table_sg_end;
	u32 rd_offset = req->rd_offset;

	table = rd_get_sg_table(dev, req->rd_page);
	if (!table)
		return -1;

	table_sg_end = (table->page_end_offset - req->rd_page);
	sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
	sg_s = task->task_sg;
#ifdef DEBUG_RAMDISK_MCP
	printk(KERN_INFO "RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
		" Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
		req->rd_page, req->rd_offset);
#endif
	dst_offset = rd_offset;

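	/*
	 * Mirror image of rd_MEMCPY_read(): here the ramdisk pages are the
	 * destination and the task scatterlist is the source.
	 */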
	while (req->rd_size) {
		if ((sg_s[i].length - src_offset) <
		    (sg_d[j].length - dst_offset)) {
			length = (sg_s[i].length - src_offset);
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "Step 1 - sg_s[%d]: %p length: %d"
				" offset: %d sg_d[%d].length: %u\n", i,
				&sg_s[i], sg_s[i].length, sg_s[i].offset,
				j, sg_d[j].length);
			printk(KERN_INFO "Step 1 - length: %u src_offset: %u"
				" dst_offset: %u\n", length, src_offset,
				dst_offset);
#endif
			if (length > req->rd_size)
				length = req->rd_size;

			src = sg_virt(&sg_s[i++]) + src_offset;
			if (!src)
				BUG();

			dst = sg_virt(&sg_d[j]) + dst_offset;
			if (!dst)
				BUG();

			src_offset = 0;
			dst_offset = length;
			page_end = 0;
		} else {
			length = (sg_d[j].length - dst_offset);
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "Step 2 - sg_s[%d]: %p length: %d"
				" offset: %d sg_d[%d].length: %u\n", i,
				&sg_s[i], sg_s[i].length, sg_s[i].offset,
				j, sg_d[j].length);
			printk(KERN_INFO "Step 2 - length: %u src_offset: %u"
				" dst_offset: %u\n", length, src_offset,
				dst_offset);
#endif
			if (length > req->rd_size)
				length = req->rd_size;

			src = sg_virt(&sg_s[i]) + src_offset;
			if (!src)
				BUG();

			if (sg_s[i].length == length) {
				i++;
				src_offset = 0;
			} else
				src_offset = length;

			dst = sg_virt(&sg_d[j++]) + dst_offset;
			if (!dst)
				BUG();

			dst_offset = 0;
			page_end = 1;
		}

		memcpy(dst, src, length);

#ifdef DEBUG_RAMDISK_MCP
		printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
			" i: %u, j: %u\n", req->rd_page,
			(req->rd_size - length), length, i, j);
#endif
		req->rd_size -= length;
		if (!req->rd_size)
			return 0;

		if (!page_end)
			continue;

		if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "page: %u in same page table\n",
				req->rd_page);
#endif
			continue;
		}
#ifdef DEBUG_RAMDISK_MCP
		printk(KERN_INFO "getting new page table for page: %u\n",
				req->rd_page);
#endif
		table = rd_get_sg_table(dev, req->rd_page);
		if (!table)
			return -1;

		sg_d = &table->sg_table[j = 0];
	}

	return 0;
}

/*	rd_MEMCPY_do_task(): (Part of se_subsystem_api_t template)
 *
 *	Convert the task LBA into a ramdisk page/offset and perform the
 *	MEMCPY read or write.
 */
static int rd_MEMCPY_do_task(struct se_task *task)
{
	struct se_device *dev = task->se_dev;
	struct rd_request *req = RD_REQ(task);
	unsigned long long lba;
	int ret;

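	/*
	 * task_lba is in units of the device block size; convert it into a
	 * 0-based ramdisk page index plus a byte offset within that page.
	 */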
	req->rd_page = (task->task_lba * DEV_ATTRIB(dev)->block_size) / PAGE_SIZE;
	lba = task->task_lba;
	req->rd_offset = (do_div(lba,
			  (PAGE_SIZE / DEV_ATTRIB(dev)->block_size))) *
			   DEV_ATTRIB(dev)->block_size;
	req->rd_size = task->task_size;

	if (task->task_data_direction == DMA_FROM_DEVICE)
		ret = rd_MEMCPY_read(req);
	else
		ret = rd_MEMCPY_write(req);

	if (ret != 0)
		return ret;

	task->task_scsi_status = GOOD;
	transport_complete_task(task, 1);

	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}

/*	rd_DIRECT_with_offset():
 *
 *	Map backing pages into struct se_mem entries for a request that does
 *	not start on a page boundary.
 */
static int rd_DIRECT_with_offset(
	struct se_task *task,
	struct list_head *se_mem_list,
	u32 *se_mem_cnt,
	u32 *task_offset)
{
	struct rd_request *req = RD_REQ(task);
	struct rd_dev *dev = req->rd_dev;
	struct rd_dev_sg_table *table;
	struct se_mem *se_mem;
	struct scatterlist *sg_s;
	u32 j = 0, set_offset = 1;
	u32 get_next_table = 0, offset_length, table_sg_end;

	table = rd_get_sg_table(dev, req->rd_page);
	if (!table)
		return -1;

	table_sg_end = (table->page_end_offset - req->rd_page);
	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
#ifdef DEBUG_RAMDISK_DR
	printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u Page: %u, Offset: %u\n",
		(task->task_data_direction == DMA_TO_DEVICE) ?
			"Write" : "Read",
		task->task_lba, req->rd_size, req->rd_page, req->rd_offset);
#endif
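	/*
	 * Build se_mem entries that reference the backing pages directly,
	 * alternating between the tail of the current page (starting at
	 * rd_offset) and the head of the following page.
	 */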
	while (req->rd_size) {
		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
		if (!se_mem) {
			printk(KERN_ERR "Unable to allocate struct se_mem\n");
			return -1;
		}
		INIT_LIST_HEAD(&se_mem->se_list);

		if (set_offset) {
			offset_length = sg_s[j].length - req->rd_offset;
			if (offset_length > req->rd_size)
				offset_length = req->rd_size;

			se_mem->se_page = sg_page(&sg_s[j++]);
			se_mem->se_off = req->rd_offset;
			se_mem->se_len = offset_length;

			set_offset = 0;
			get_next_table = (j > table_sg_end);
			goto check_eot;
		}

		offset_length = (req->rd_size < req->rd_offset) ?
			req->rd_size : req->rd_offset;

		se_mem->se_page = sg_page(&sg_s[j]);
		se_mem->se_len = offset_length;

		set_offset = 1;

check_eot:
#ifdef DEBUG_RAMDISK_DR
		printk(KERN_INFO "page: %u, size: %u, offset_length: %u, j: %u"
			" se_mem: %p, se_page: %p se_off: %u se_len: %u\n",
			req->rd_page, req->rd_size, offset_length, j, se_mem,
			se_mem->se_page, se_mem->se_off, se_mem->se_len);
#endif
		list_add_tail(&se_mem->se_list, se_mem_list);
		(*se_mem_cnt)++;

		req->rd_size -= offset_length;
		if (!req->rd_size)
			goto out;

		if (!set_offset && !get_next_table)
			continue;

		if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_DR
			printk(KERN_INFO "page: %u in same page table\n",
					req->rd_page);
#endif
			continue;
		}
#ifdef DEBUG_RAMDISK_DR
		printk(KERN_INFO "getting new page table for page: %u\n",
				req->rd_page);
#endif
		table = rd_get_sg_table(dev, req->rd_page);
		if (!table)
			return -1;

		sg_s = &table->sg_table[j = 0];
	}

out:
	T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
#ifdef DEBUG_RAMDISK_DR
	printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
			*se_mem_cnt);
#endif
	return 0;
}

/*	rd_DIRECT_without_offset():
 *
 *	Map whole backing pages into struct se_mem entries for a page-aligned
 *	request.
 */
static int rd_DIRECT_without_offset(
	struct se_task *task,
	struct list_head *se_mem_list,
	u32 *se_mem_cnt,
	u32 *task_offset)
{
	struct rd_request *req = RD_REQ(task);
	struct rd_dev *dev = req->rd_dev;
	struct rd_dev_sg_table *table;
	struct se_mem *se_mem;
	struct scatterlist *sg_s;
	u32 length, j = 0;

	table = rd_get_sg_table(dev, req->rd_page);
	if (!table)
		return -1;

	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
#ifdef DEBUG_RAMDISK_DR
	printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u, Page: %u\n",
		(task->task_data_direction == DMA_TO_DEVICE) ?
			"Write" : "Read",
		task->task_lba, req->rd_size, req->rd_page);
#endif
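	/*
	 * Page-aligned case: each iteration maps one whole backing page (or
	 * the remaining request size) into a new struct se_mem.
	 */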
	while (req->rd_size) {
		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
		if (!se_mem) {
			printk(KERN_ERR "Unable to allocate struct se_mem\n");
			return -1;
		}
		INIT_LIST_HEAD(&se_mem->se_list);

		length = (req->rd_size < sg_s[j].length) ?
			req->rd_size : sg_s[j].length;

		se_mem->se_page = sg_page(&sg_s[j++]);
		se_mem->se_len = length;

#ifdef DEBUG_RAMDISK_DR
		printk(KERN_INFO "page: %u, size: %u, j: %u se_mem: %p,"
			" se_page: %p se_off: %u se_len: %u\n", req->rd_page,
			req->rd_size, j, se_mem, se_mem->se_page,
			se_mem->se_off, se_mem->se_len);
#endif
		list_add_tail(&se_mem->se_list, se_mem_list);
		(*se_mem_cnt)++;

		req->rd_size -= length;
		if (!req->rd_size)
			goto out;

		if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_DR
			printk(KERN_INFO "page: %u in same page table\n",
				req->rd_page);
#endif
			continue;
		}
#ifdef DEBUG_RAMDISK_DR
		printk(KERN_INFO "getting new page table for page: %u\n",
				req->rd_page);
#endif
		table = rd_get_sg_table(dev, req->rd_page);
		if (!table)
			return -1;

		sg_s = &table->sg_table[j = 0];
	}

out:
	T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
#ifdef DEBUG_RAMDISK_DR
	printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
			*se_mem_cnt);
#endif
	return 0;
}

/*	rd_DIRECT_do_se_mem_map():
 *
 *	Compute the ramdisk page/offset for the task, build its se_mem list
 *	via the with/without offset variants, and handle the optional
 *	task_sg_chaining setup.
 */
static int rd_DIRECT_do_se_mem_map(
	struct se_task *task,
	struct list_head *se_mem_list,
	void *in_mem,
	struct se_mem *in_se_mem,
	struct se_mem **out_se_mem,
	u32 *se_mem_cnt,
	u32 *task_offset_in)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct rd_request *req = RD_REQ(task);
	u32 task_offset = *task_offset_in;
	unsigned long long lba;
	int ret;

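	/*
	 * As in rd_MEMCPY_do_task(), convert task_lba from block-size units
	 * into a ramdisk page index and a byte offset within that page.
	 */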
	req->rd_page = ((task->task_lba * DEV_ATTRIB(task->se_dev)->block_size) /
			PAGE_SIZE);
	lba = task->task_lba;
	req->rd_offset = (do_div(lba,
			  (PAGE_SIZE / DEV_ATTRIB(task->se_dev)->block_size))) *
			   DEV_ATTRIB(task->se_dev)->block_size;
	req->rd_size = task->task_size;

	if (req->rd_offset)
		ret = rd_DIRECT_with_offset(task, se_mem_list, se_mem_cnt,
				task_offset_in);
	else
		ret = rd_DIRECT_without_offset(task, se_mem_list, se_mem_cnt,
				task_offset_in);

	if (ret < 0)
		return ret;

	if (CMD_TFO(cmd)->task_sg_chaining == 0)
		return 0;
	/*
	 * Currently prevent writers from multiple HW fabrics doing
	 * pci_map_sg() to RD_DR's internal scatterlist memory.
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		printk(KERN_ERR "DMA_TO_DEVICE not supported for"
				" RAMDISK_DR with task_sg_chaining=1\n");
		return -1;
	}
	/*
	 * Special case: if task_sg_chaining is enabled, set up
	 * struct se_task->task_sg[], as it will be used by
	 * transport_do_task_sg_chain() for creating chained SGLs
	 * across multiple struct se_task->task_sg[].
	 */
	if (!transport_calc_sg_num(task,
			list_entry(T_TASK(cmd)->t_mem_list->next,
				   struct se_mem, se_list),
			task_offset))
		return -1;

	return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
			list_entry(T_TASK(cmd)->t_mem_list->next,
				   struct se_mem, se_list),
			out_se_mem, se_mem_cnt, task_offset_in);
}

/*	rd_DIRECT_do_task(): (Part of se_subsystem_api_t template)
 *
 *	The data was already mapped in rd_DIRECT_do_se_mem_map(), so just
 *	complete the task.
 */
static int rd_DIRECT_do_task(struct se_task *task)
{
	/*
	 * At this point the locally allocated RD tables have been mapped
	 * to struct se_mem elements in rd_DIRECT_do_se_mem_map().
	 */
	task->task_scsi_status = GOOD;
	transport_complete_task(task, 1);

	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}

/*	rd_free_task(): (Part of se_subsystem_api_t template)
 *
 *	Free the struct rd_request allocated in rd_alloc_task().
 */
static void rd_free_task(struct se_task *task)
{
	kfree(RD_REQ(task));
}

enum {
	Opt_rd_pages, Opt_err
};

static match_table_t tokens = {
	{Opt_rd_pages, "rd_pages=%d"},
	{Opt_err, NULL}
};
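
/*
 * rd_pages= is the only option recognized here; for example, with 4 KiB
 * pages, "rd_pages=65536" backs a 256 MiB ramdisk.
 */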

static ssize_t rd_set_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	const char *page,
	ssize_t count)
{
	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_rd_pages:
			match_int(args, &arg);
			rd_dev->rd_page_count = arg;
			printk(KERN_INFO "RAMDISK: Referencing Page"
				" Count: %u\n", rd_dev->rd_page_count);
			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
			break;
		default:
			break;
		}
	}

	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
{
	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;

	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
		printk(KERN_INFO "Missing rd_pages= parameter\n");
		return -1;
	}

	return 0;
}

static ssize_t rd_show_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	char *b)
{
	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: %s\n",
			rd_dev->rd_dev_id, (rd_dev->rd_direct) ?
			"rd_direct" : "rd_mcp");
	bl += sprintf(b + bl, "        PAGES/PAGE_SIZE: %u*%lu"
			"  SG_table_count: %u\n", rd_dev->rd_page_count,
			PAGE_SIZE, rd_dev->sg_table_count);
	return bl;
}

/*	rd_get_cdb(): (Part of se_subsystem_api_t template)
 *
 *	Return the CDB buffer embedded in the struct rd_request.
 */
static unsigned char *rd_get_cdb(struct se_task *task)
{
	struct rd_request *req = RD_REQ(task);

	return req->rd_scsi_cdb;
}

static u32 rd_get_device_rev(struct se_device *dev)
{
	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}

static u32 rd_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}

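/* Report the last addressable LBA: total ramdisk bytes / block size, minus one. */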
static sector_t rd_get_blocks(struct se_device *dev)
{
	struct rd_dev *rd_dev = dev->dev_ptr;
	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
			DEV_ATTRIB(dev)->block_size) - 1;

	return blocks_long;
}

static struct se_subsystem_api rd_dr_template = {
	.name			= "rd_dr",
	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
	.attach_hba		= rd_attach_hba,
	.detach_hba		= rd_detach_hba,
	.allocate_virtdevice	= rd_DIRECT_allocate_virtdevice,
	.create_virtdevice	= rd_DIRECT_create_virtdevice,
	.free_device		= rd_free_device,
	.alloc_task		= rd_alloc_task,
	.do_task		= rd_DIRECT_do_task,
	.free_task		= rd_free_task,
	.check_configfs_dev_params = rd_check_configfs_dev_params,
	.set_configfs_dev_params = rd_set_configfs_dev_params,
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_cdb		= rd_get_cdb,
	.get_device_rev		= rd_get_device_rev,
	.get_device_type	= rd_get_device_type,
	.get_blocks		= rd_get_blocks,
	.do_se_mem_map		= rd_DIRECT_do_se_mem_map,
};

static struct se_subsystem_api rd_mcp_template = {
	.name			= "rd_mcp",
	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
	.attach_hba		= rd_attach_hba,
	.detach_hba		= rd_detach_hba,
	.allocate_virtdevice	= rd_MEMCPY_allocate_virtdevice,
	.create_virtdevice	= rd_MEMCPY_create_virtdevice,
	.free_device		= rd_free_device,
	.alloc_task		= rd_alloc_task,
	.do_task		= rd_MEMCPY_do_task,
	.free_task		= rd_free_task,
	.check_configfs_dev_params = rd_check_configfs_dev_params,
	.set_configfs_dev_params = rd_set_configfs_dev_params,
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_cdb		= rd_get_cdb,
	.get_device_rev		= rd_get_device_rev,
	.get_device_type	= rd_get_device_type,
	.get_blocks		= rd_get_blocks,
};

int __init rd_module_init(void)
{
	int ret;

	ret = transport_subsystem_register(&rd_dr_template);
	if (ret < 0)
		return ret;

	ret = transport_subsystem_register(&rd_mcp_template);
	if (ret < 0) {
		transport_subsystem_release(&rd_dr_template);
		return ret;
	}

	return 0;
}

void rd_module_exit(void)
{
	transport_subsystem_release(&rd_dr_template);
	transport_subsystem_release(&rd_mcp_template);
}