1 /*******************************************************************************
2  * Filename:  target_core_rd.c
3  *
4  * This file contains the Storage Engine <-> Ramdisk transport
5  * specific functions.
6  *
7  * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
8  * Copyright (c) 2005, 2006, 2007 SBE, Inc.
9  * Copyright (c) 2007-2010 Rising Tide Systems
10  * Copyright (c) 2008-2010 Linux-iSCSI.org
11  *
12  * Nicholas A. Bellinger <nab@kernel.org>
13  *
14  * This program is free software; you can redistribute it and/or modify
15  * it under the terms of the GNU General Public License as published by
16  * the Free Software Foundation; either version 2 of the License, or
17  * (at your option) any later version.
18  *
19  * This program is distributed in the hope that it will be useful,
20  * but WITHOUT ANY WARRANTY; without even the implied warranty of
21  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
22  * GNU General Public License for more details.
23  *
24  * You should have received a copy of the GNU General Public License
25  * along with this program; if not, write to the Free Software
26  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
27  *
28  ******************************************************************************/
29 
30 #include <linux/string.h>
31 #include <linux/parser.h>
32 #include <linux/timer.h>
33 #include <linux/blkdev.h>
34 #include <linux/slab.h>
35 #include <linux/spinlock.h>
36 #include <scsi/scsi.h>
37 #include <scsi/scsi_host.h>
38 
39 #include <target/target_core_base.h>
40 #include <target/target_core_backend.h>
41 
42 #include "target_core_rd.h"
43 
44 static struct se_subsystem_api rd_mcp_template;
45 
/*	rd_attach_hba(): (Part of se_subsystem_api template)
 *
 *	Allocate a struct rd_host for this HBA and hang it off
 *	hba->hba_ptr for the generic target core.
 */
50 static int rd_attach_hba(struct se_hba *hba, u32 host_id)
51 {
52 	struct rd_host *rd_host;
53 
54 	rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
55 	if (!rd_host) {
56 		pr_err("Unable to allocate memory for struct rd_host\n");
57 		return -ENOMEM;
58 	}
59 
60 	rd_host->rd_host_id = host_id;
61 
62 	hba->hba_ptr = rd_host;
63 
64 	pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
65 		" Generic Target Core Stack %s\n", hba->hba_id,
66 		RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
67 	pr_debug("CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
68 		" MaxSectors: %u\n", hba->hba_id,
69 		rd_host->rd_host_id, RD_MAX_SECTORS);
70 
71 	return 0;
72 }
73 
74 static void rd_detach_hba(struct se_hba *hba)
75 {
76 	struct rd_host *rd_host = hba->hba_ptr;
77 
78 	pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
79 		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);
80 
81 	kfree(rd_host);
82 	hba->hba_ptr = NULL;
83 }
84 
/*	rd_release_device_space():
 *
 *	Free every backing page referenced by the device's scatterlist
 *	tables, then free the tables themselves.
 */
89 static void rd_release_device_space(struct rd_dev *rd_dev)
90 {
91 	u32 i, j, page_count = 0, sg_per_table;
92 	struct rd_dev_sg_table *sg_table;
93 	struct page *pg;
94 	struct scatterlist *sg;
95 
96 	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
97 		return;
98 
99 	sg_table = rd_dev->sg_table_array;
100 
101 	for (i = 0; i < rd_dev->sg_table_count; i++) {
102 		sg = sg_table[i].sg_table;
103 		sg_per_table = sg_table[i].rd_sg_count;
104 
105 		for (j = 0; j < sg_per_table; j++) {
106 			pg = sg_page(&sg[j]);
107 			if (pg) {
108 				__free_page(pg);
109 				page_count++;
110 			}
111 		}
112 
113 		kfree(sg);
114 	}
115 
116 	pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
117 		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
118 		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
119 		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
120 
121 	kfree(sg_table);
122 	rd_dev->sg_table_array = NULL;
123 	rd_dev->sg_table_count = 0;
124 }
125 
126 
/*	rd_build_device_space():
 *
 *	Allocate the scatterlist tables and backing pages for a ramdisk
 *	device.  On failure the callers unwind any partial allocation
 *	through rd_release_device_space().
 */
131 static int rd_build_device_space(struct rd_dev *rd_dev)
132 {
133 	u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
134 	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
135 				sizeof(struct scatterlist));
136 	struct rd_dev_sg_table *sg_table;
137 	struct page *pg;
138 	struct scatterlist *sg;
139 
140 	if (rd_dev->rd_page_count <= 0) {
141 		pr_err("Illegal page count: %u for Ramdisk device\n",
142 			rd_dev->rd_page_count);
143 		return -EINVAL;
144 	}
145 	total_sg_needed = rd_dev->rd_page_count;
146 
	sg_tables = DIV_ROUND_UP(total_sg_needed, max_sg_per_table);

	sg_table = kcalloc(sg_tables, sizeof(struct rd_dev_sg_table),
				GFP_KERNEL);
150 	if (!sg_table) {
151 		pr_err("Unable to allocate memory for Ramdisk"
152 			" scatterlist tables\n");
153 		return -ENOMEM;
154 	}
155 
156 	rd_dev->sg_table_array = sg_table;
157 	rd_dev->sg_table_count = sg_tables;
158 
159 	while (total_sg_needed) {
160 		sg_per_table = (total_sg_needed > max_sg_per_table) ?
161 			max_sg_per_table : total_sg_needed;
162 
		sg = kcalloc(sg_per_table, sizeof(struct scatterlist),
				GFP_KERNEL);
165 		if (!sg) {
166 			pr_err("Unable to allocate scatterlist array"
167 				" for struct rd_dev\n");
168 			return -ENOMEM;
169 		}
170 
171 		sg_init_table(sg, sg_per_table);
172 
173 		sg_table[i].sg_table = sg;
174 		sg_table[i].rd_sg_count = sg_per_table;
175 		sg_table[i].page_start_offset = page_offset;
176 		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
177 						- 1;
178 
179 		for (j = 0; j < sg_per_table; j++) {
			/*
			 * Zero the page so a READ issued before any WRITE
			 * cannot leak stale kernel memory to the initiator.
			 */
			pg = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
181 			if (!pg) {
182 				pr_err("Unable to allocate scatterlist"
183 					" pages for struct rd_dev_sg_table\n");
184 				return -ENOMEM;
185 			}
186 			sg_assign_page(&sg[j], pg);
187 			sg[j].length = PAGE_SIZE;
188 		}
189 
190 		page_offset += sg_per_table;
191 		total_sg_needed -= sg_per_table;
192 	}
193 
194 	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
195 		" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
196 		rd_dev->rd_dev_id, rd_dev->rd_page_count,
197 		rd_dev->sg_table_count);
198 
199 	return 0;
200 }
201 
202 static void *rd_allocate_virtdevice(
203 	struct se_hba *hba,
204 	const char *name,
205 	int rd_direct)
206 {
207 	struct rd_dev *rd_dev;
208 	struct rd_host *rd_host = hba->hba_ptr;
209 
210 	rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
211 	if (!rd_dev) {
212 		pr_err("Unable to allocate memory for struct rd_dev\n");
213 		return NULL;
214 	}
215 
216 	rd_dev->rd_host = rd_host;
217 	rd_dev->rd_direct = rd_direct;
218 
219 	return rd_dev;
220 }
221 
222 static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name)
223 {
224 	return rd_allocate_virtdevice(hba, name, 0);
225 }
226 
/*	rd_create_virtdevice():
 *
 *	Build the ramdisk backing store and register the new device
 *	with the generic target core.
 */
231 static struct se_device *rd_create_virtdevice(
232 	struct se_hba *hba,
233 	struct se_subsystem_dev *se_dev,
234 	void *p,
235 	int rd_direct)
236 {
237 	struct se_device *dev;
238 	struct se_dev_limits dev_limits;
239 	struct rd_dev *rd_dev = p;
240 	struct rd_host *rd_host = hba->hba_ptr;
241 	int dev_flags = 0, ret;
242 	char prod[16], rev[4];
243 
244 	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
245 
246 	ret = rd_build_device_space(rd_dev);
247 	if (ret < 0)
248 		goto fail;
249 
250 	snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP");
251 	snprintf(rev, 4, "%s", (rd_dev->rd_direct) ? RD_DR_VERSION :
252 						RD_MCP_VERSION);
253 
254 	dev_limits.limits.logical_block_size = RD_BLOCKSIZE;
255 	dev_limits.limits.max_hw_sectors = RD_MAX_SECTORS;
256 	dev_limits.limits.max_sectors = RD_MAX_SECTORS;
257 	dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
258 	dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;
259 
260 	dev = transport_add_device_to_core_hba(hba,
261 			&rd_mcp_template, se_dev, dev_flags, rd_dev,
262 			&dev_limits, prod, rev);
263 	if (!dev)
264 		goto fail;
265 
266 	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
267 	rd_dev->rd_queue_depth = dev->queue_depth;
268 
269 	pr_debug("CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
270 		" %u pages in %u tables, %lu total bytes\n",
271 		rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" :
272 		"DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count,
273 		rd_dev->sg_table_count,
		(unsigned long)rd_dev->rd_page_count * PAGE_SIZE);
275 
276 	return dev;
277 
278 fail:
279 	rd_release_device_space(rd_dev);
280 	return ERR_PTR(ret);
281 }
282 
283 static struct se_device *rd_MEMCPY_create_virtdevice(
284 	struct se_hba *hba,
285 	struct se_subsystem_dev *se_dev,
286 	void *p)
287 {
288 	return rd_create_virtdevice(hba, se_dev, p, 0);
289 }
290 
/*	rd_free_device(): (Part of se_subsystem_api template)
 *
 *	Release the backing store and free the struct rd_dev allocated
 *	by rd_allocate_virtdevice().
 */
295 static void rd_free_device(void *p)
296 {
297 	struct rd_dev *rd_dev = p;
298 
299 	rd_release_device_space(rd_dev);
300 	kfree(rd_dev);
301 }
302 
303 static inline struct rd_request *RD_REQ(struct se_task *task)
304 {
305 	return container_of(task, struct rd_request, rd_task);
306 }
307 
308 static struct se_task *
309 rd_alloc_task(unsigned char *cdb)
310 {
311 	struct rd_request *rd_req;
312 
313 	rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL);
314 	if (!rd_req) {
315 		pr_err("Unable to allocate struct rd_request\n");
316 		return NULL;
317 	}
318 
319 	return &rd_req->rd_task;
320 }
321 
/*	rd_get_sg_table():
 *
 *	Return the scatterlist table whose [page_start_offset,
 *	page_end_offset] range contains @page, or NULL if none does.
 */
326 static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
327 {
328 	u32 i;
329 	struct rd_dev_sg_table *sg_table;
330 
331 	for (i = 0; i < rd_dev->sg_table_count; i++) {
332 		sg_table = &rd_dev->sg_table_array[i];
333 		if ((sg_table->page_start_offset <= page) &&
334 		    (sg_table->page_end_offset >= page))
335 			return sg_table;
336 	}
337 
338 	pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
339 			page);
340 
341 	return NULL;
342 }
343 
344 static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
345 {
346 	struct se_task *task = &req->rd_task;
347 	struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
348 	struct rd_dev_sg_table *table;
349 	struct scatterlist *rd_sg;
350 	struct sg_mapping_iter m;
351 	u32 rd_offset = req->rd_offset;
352 	u32 src_len;
353 
354 	table = rd_get_sg_table(dev, req->rd_page);
355 	if (!table)
356 		return -EINVAL;
357 
358 	rd_sg = &table->sg_table[req->rd_page - table->page_start_offset];
359 
360 	pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
361 			dev->rd_dev_id, read_rd ? "Read" : "Write",
362 			task->task_lba, req->rd_size, req->rd_page,
363 			rd_offset);
364 
365 	src_len = PAGE_SIZE - rd_offset;
366 	sg_miter_start(&m, task->task_sg, task->task_sg_nents,
367 			read_rd ? SG_MITER_TO_SG : SG_MITER_FROM_SG);
368 	while (req->rd_size) {
369 		u32 len;
370 		void *rd_addr;
371 
372 		sg_miter_next(&m);
373 		len = min((u32)m.length, src_len);
374 		m.consumed = len;
375 
376 		rd_addr = sg_virt(rd_sg) + rd_offset;
377 
378 		if (read_rd)
379 			memcpy(m.addr, rd_addr, len);
380 		else
381 			memcpy(rd_addr, m.addr, len);
382 
383 		req->rd_size -= len;
384 		if (!req->rd_size)
385 			continue;
386 
387 		src_len -= len;
388 		if (src_len) {
389 			rd_offset += len;
390 			continue;
391 		}
392 
393 		/* rd page completed, next one please */
394 		req->rd_page++;
395 		rd_offset = 0;
396 		src_len = PAGE_SIZE;
397 		if (req->rd_page <= table->page_end_offset) {
398 			rd_sg++;
399 			continue;
400 		}
401 
402 		table = rd_get_sg_table(dev, req->rd_page);
403 		if (!table) {
404 			sg_miter_stop(&m);
405 			return -EINVAL;
406 		}
407 
408 		/* since we increment, the first sg entry is correct */
409 		rd_sg = table->sg_table;
410 	}
411 	sg_miter_stop(&m);
412 	return 0;
413 }
414 
/*	rd_MEMCPY_do_task(): (Part of se_subsystem_api template)
 *
 *	Translate the task LBA into a page/offset pair, then copy
 *	between the task scatterlist and the ramdisk pages.
 */
419 static int rd_MEMCPY_do_task(struct se_task *task)
420 {
421 	struct se_device *dev = task->task_se_cmd->se_dev;
422 	struct rd_request *req = RD_REQ(task);
423 	u64 tmp;
424 	int ret;
425 
426 	tmp = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
427 	req->rd_offset = do_div(tmp, PAGE_SIZE);
428 	req->rd_page = tmp;
429 	req->rd_size = task->task_size;
430 
431 	ret = rd_MEMCPY(req, task->task_data_direction == DMA_FROM_DEVICE);
432 	if (ret != 0)
433 		return ret;
434 
435 	task->task_scsi_status = GOOD;
436 	transport_complete_task(task, 1);
437 	return 0;
438 }
439 
/*	rd_free_task(): (Part of se_subsystem_api template)
 *
 *	Free the struct rd_request that embeds this se_task.
 */
444 static void rd_free_task(struct se_task *task)
445 {
446 	kfree(RD_REQ(task));
447 }
448 
449 enum {
450 	Opt_rd_pages, Opt_err
451 };
452 
453 static match_table_t tokens = {
454 	{Opt_rd_pages, "rd_pages=%d"},
455 	{Opt_err, NULL}
456 };
457 
458 static ssize_t rd_set_configfs_dev_params(
459 	struct se_hba *hba,
460 	struct se_subsystem_dev *se_dev,
461 	const char *page,
462 	ssize_t count)
463 {
464 	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
465 	char *orig, *ptr, *opts;
466 	substring_t args[MAX_OPT_ARGS];
467 	int ret = 0, arg, token;
468 
469 	opts = kstrdup(page, GFP_KERNEL);
470 	if (!opts)
471 		return -ENOMEM;
472 
473 	orig = opts;
474 
475 	while ((ptr = strsep(&opts, ",\n")) != NULL) {
476 		if (!*ptr)
477 			continue;
478 
479 		token = match_token(ptr, tokens, args);
480 		switch (token) {
		case Opt_rd_pages:
			ret = match_int(args, &arg);
			if (ret)
				break;
			rd_dev->rd_page_count = arg;
			pr_debug("RAMDISK: Referencing Page"
				" Count: %u\n", rd_dev->rd_page_count);
			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
			break;
488 		default:
489 			break;
490 		}
491 	}
492 
493 	kfree(orig);
494 	return (!ret) ? count : ret;
495 }
496 
static ssize_t rd_check_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev)
498 {
499 	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
500 
501 	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
502 		pr_debug("Missing rd_pages= parameter\n");
503 		return -EINVAL;
504 	}
505 
506 	return 0;
507 }
508 
509 static ssize_t rd_show_configfs_dev_params(
510 	struct se_hba *hba,
511 	struct se_subsystem_dev *se_dev,
512 	char *b)
513 {
514 	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
515 	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: %s\n",
516 			rd_dev->rd_dev_id, (rd_dev->rd_direct) ?
517 			"rd_direct" : "rd_mcp");
518 	bl += sprintf(b + bl, "        PAGES/PAGE_SIZE: %u*%lu"
519 			"  SG_table_count: %u\n", rd_dev->rd_page_count,
520 			PAGE_SIZE, rd_dev->sg_table_count);
521 	return bl;
522 }
523 
524 static u32 rd_get_device_rev(struct se_device *dev)
525 {
526 	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
527 }
528 
529 static u32 rd_get_device_type(struct se_device *dev)
530 {
531 	return TYPE_DISK;
532 }
533 
534 static sector_t rd_get_blocks(struct se_device *dev)
535 {
536 	struct rd_dev *rd_dev = dev->dev_ptr;
	unsigned long long blocks_long =
		((unsigned long long)rd_dev->rd_page_count * PAGE_SIZE /
		 dev->se_sub_dev->se_dev_attrib.block_size) - 1;
539 
540 	return blocks_long;
541 }
542 
543 static struct se_subsystem_api rd_mcp_template = {
544 	.name			= "rd_mcp",
545 	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
546 	.attach_hba		= rd_attach_hba,
547 	.detach_hba		= rd_detach_hba,
548 	.allocate_virtdevice	= rd_MEMCPY_allocate_virtdevice,
549 	.create_virtdevice	= rd_MEMCPY_create_virtdevice,
550 	.free_device		= rd_free_device,
551 	.alloc_task		= rd_alloc_task,
552 	.do_task		= rd_MEMCPY_do_task,
553 	.free_task		= rd_free_task,
554 	.check_configfs_dev_params = rd_check_configfs_dev_params,
555 	.set_configfs_dev_params = rd_set_configfs_dev_params,
556 	.show_configfs_dev_params = rd_show_configfs_dev_params,
557 	.get_device_rev		= rd_get_device_rev,
558 	.get_device_type	= rd_get_device_type,
559 	.get_blocks		= rd_get_blocks,
560 };
561 
int __init rd_module_init(void)
{
	return transport_subsystem_register(&rd_mcp_template);
}
573 
574 void rd_module_exit(void)
575 {
576 	transport_subsystem_release(&rd_mcp_template);
577 }
578