/*******************************************************************************
 * Filename:  target_core_rd.c
 *
 * This file contains the Storage Engine <-> Ramdisk transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_rd.h"

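/*
 * Recover the struct rd_dev that embeds the generic se_device handed
 * to us by the target core.
 */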
static inline struct rd_dev *RD_DEV(struct se_device *dev)
{
	return container_of(dev, struct rd_dev, dev);
}

/*	rd_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *	Allocate an rd_host to back the HBA being attached.
 */
static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct rd_host *rd_host;

	rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
	if (!rd_host) {
		pr_err("Unable to allocate memory for struct rd_host\n");
		return -ENOMEM;
	}

	rd_host->rd_host_id = host_id;

	hba->hba_ptr = rd_host;

	pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);

	return 0;
}

static void rd_detach_hba(struct se_hba *hba)
{
	struct rd_host *rd_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);

	kfree(rd_host);
	hba->hba_ptr = NULL;
}

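/*
 * Free every backing page referenced by the given scatterlist tables,
 * then free the scatterlists and the table array itself.  Returns the
 * number of pages released so callers can report totals.
 */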
static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
				 u32 sg_table_count)
{
	struct page *pg;
	struct scatterlist *sg;
	u32 i, j, page_count = 0, sg_per_table;

	for (i = 0; i < sg_table_count; i++) {
		sg = sg_table[i].sg_table;
		sg_per_table = sg_table[i].rd_sg_count;

		for (j = 0; j < sg_per_table; j++) {
			pg = sg_page(&sg[j]);
			if (pg) {
				__free_page(pg);
				page_count++;
			}
		}
		kfree(sg);
	}

	kfree(sg_table);
	return page_count;
}

static void rd_release_device_space(struct rd_dev *rd_dev)
{
	u32 page_count;

	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
		return;

	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,
					  rd_dev->sg_table_count);

	pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

	rd_dev->sg_table_array = NULL;
	rd_dev->sg_table_count = 0;
}

/*	rd_allocate_sgl_table(): (Called by rd_build_device_space/rd_build_prot_space)
 *
 *	Allocate scatterlist tables and their backing pages, filling
 *	each page with init_payload.
 */
static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
				 u32 total_sg_needed, unsigned char init_payload)
{
	u32 i = 0, j, page_offset = 0, sg_per_table;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	struct page *pg;
	struct scatterlist *sg;
	unsigned char *p;

	while (total_sg_needed) {
		sg_per_table = (total_sg_needed > max_sg_per_table) ?
			max_sg_per_table : total_sg_needed;

		sg = kcalloc(sg_per_table, sizeof(struct scatterlist),
				GFP_KERNEL);
		if (!sg) {
			pr_err("Unable to allocate scatterlist array"
				" for struct rd_dev\n");
			return -ENOMEM;
		}

		sg_init_table(sg, sg_per_table);

		sg_table[i].sg_table = sg;
		sg_table[i].rd_sg_count = sg_per_table;
		sg_table[i].page_start_offset = page_offset;
		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
						- 1;

		for (j = 0; j < sg_per_table; j++) {
			pg = alloc_pages(GFP_KERNEL, 0);
			if (!pg) {
				pr_err("Unable to allocate scatterlist"
					" pages for struct rd_dev_sg_table\n");
				return -ENOMEM;
			}
			sg_assign_page(&sg[j], pg);
			sg[j].length = PAGE_SIZE;

			p = kmap(pg);
			memset(p, init_payload, PAGE_SIZE);
			kunmap(pg);
		}

		page_offset += sg_per_table;
		total_sg_needed -= sg_per_table;
	}

	return 0;
}

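/*
 * Size and allocate the data area: one page per rd_page_count, split
 * across scatterlist tables of at most max_sg_per_table entries each.
 */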
static int rd_build_device_space(struct rd_dev *rd_dev)
{
	struct rd_dev_sg_table *sg_table;
	u32 sg_tables, total_sg_needed;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	int rc;

	if (!rd_dev->rd_page_count) {
		pr_err("Illegal page count: %u for Ramdisk device\n",
		       rd_dev->rd_page_count);
		return -EINVAL;
	}

	/* Don't need backing pages for NULLIO */
	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;

	total_sg_needed = rd_dev->rd_page_count;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kcalloc(sg_tables, sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk"
		       " scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_table_array = sg_table;
	rd_dev->sg_table_count = sg_tables;

	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00);
	if (rc)
		return rc;

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		 rd_dev->rd_dev_id, rd_dev->rd_page_count,
		 rd_dev->sg_table_count);

	return 0;
}

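/*
 * Protection space handling mirrors the data space above, but is
 * sized for DIF metadata and initialized to 0xff, so unwritten
 * protection data reads back as all-ones, which DIF verification
 * treats as an escape.
 */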
static void rd_release_prot_space(struct rd_dev *rd_dev)
{
	u32 page_count;

	if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count)
		return;

	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,
					  rd_dev->sg_prot_count);

	pr_debug("CORE_RD[%u] - Released protection space for Ramdisk"
		 " Device ID: %u, pages %u in %u tables total bytes %lu\n",
		 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		 rd_dev->sg_prot_count, (unsigned long)page_count * PAGE_SIZE);

	rd_dev->sg_prot_array = NULL;
	rd_dev->sg_prot_count = 0;
}

static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length)
{
	struct rd_dev_sg_table *sg_table;
	u32 total_sg_needed, sg_tables;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	int rc;

	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;

	total_sg_needed = rd_dev->rd_page_count / prot_length;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kcalloc(sg_tables, sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk protection"
		       " scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_prot_array = sg_table;
	rd_dev->sg_prot_count = sg_tables;

	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff);
	if (rc)
		return rc;

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of"
		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		 rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count);

	return 0;
}

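/*
 * se_device allocation/configuration hooks: only the rd_dev shell is
 * allocated here; the backing space is built once rd_pages= has been
 * set and rd_configure_device() runs.
 */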
static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
{
	struct rd_dev *rd_dev;
	struct rd_host *rd_host = hba->hba_ptr;

	rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
	if (!rd_dev) {
		pr_err("Unable to allocate memory for struct rd_dev\n");
		return NULL;
	}

	rd_dev->rd_host = rd_host;

	return &rd_dev->dev;
}

static int rd_configure_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	struct rd_host *rd_host = dev->se_hba->hba_ptr;
	int ret;

	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
		pr_debug("Missing rd_pages= parameter\n");
		return -EINVAL;
	}

	ret = rd_build_device_space(rd_dev);
	if (ret < 0)
		goto fail;

	dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
	dev->dev_attrib.hw_max_sectors = UINT_MAX;
	dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;

	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;

	pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
		" %u pages in %u tables, %lu total bytes\n",
		rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count,
		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));

	return 0;

fail:
	rd_release_device_space(rd_dev);
	return ret;
}

static void rd_free_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	rd_release_device_space(rd_dev);
	kfree(rd_dev);
}

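/*
 * Tables are filled to max_sg_per_table pages each (only the last one
 * may be short), so the table covering a given page index can be
 * computed directly and then checked against its start/end offsets.
 */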
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
	struct rd_dev_sg_table *sg_table;
	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));

	i = page / sg_per_table;
	if (i < rd_dev->sg_table_count) {
		sg_table = &rd_dev->sg_table_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
			page);

	return NULL;
}

static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page)
{
	struct rd_dev_sg_table *sg_table;
	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));

	i = page / sg_per_table;
	if (i < rd_dev->sg_prot_count) {
		sg_table = &rd_dev->sg_prot_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct rd_dev_sg_table for prot page: %u\n",
			page);

	return NULL;
}

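/*
 * Service READ/WRITE by memcpy()ing between the command's scatterlist
 * and the ramdisk's backing pages.  The command's SGL is walked with a
 * scatterlist iterator while the ramdisk side is advanced page by
 * page, moving to the next rd_dev_sg_table when a table boundary is
 * crossed.  DIF protection data is verified against the separate prot
 * scatterlists when a protection type is enabled.
 */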
static sense_reason_t
rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	      enum dma_data_direction data_direction)
{
	struct se_device *se_dev = cmd->se_dev;
	struct rd_dev *dev = RD_DEV(se_dev);
	struct rd_dev_sg_table *table;
	struct scatterlist *rd_sg;
	struct sg_mapping_iter m;
	u32 rd_offset;
	u32 rd_size;
	u32 rd_page;
	u32 src_len;
	u64 tmp;
	sense_reason_t rc;

	if (dev->rd_flags & RDF_NULLIO) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	/*
	 * Convert the starting LBA into a backing page index plus a
	 * byte offset within that page; do_div() returns the remainder
	 * and leaves the quotient in tmp.
	 */
	tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
	rd_offset = do_div(tmp, PAGE_SIZE);
	rd_page = tmp;
	rd_size = cmd->data_length;

	table = rd_get_sg_table(dev, rd_page);
	if (!table)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	rd_sg = &table->sg_table[rd_page - table->page_start_offset];

	pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
			dev->rd_dev_id,
			data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
			cmd->t_task_lba, rd_size, rd_page, rd_offset);

	if (cmd->prot_type && data_direction == DMA_TO_DEVICE) {
		struct rd_dev_sg_table *prot_table;
		struct scatterlist *prot_sg;
		u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
		u32 prot_offset, prot_page;

		tmp = cmd->t_task_lba * se_dev->prot_length;
		prot_offset = do_div(tmp, PAGE_SIZE);
		prot_page = tmp;

		prot_table = rd_get_prot_table(dev, prot_page);
		if (!prot_table)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

		prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset];

		rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors, 0,
					  prot_sg, prot_offset);
		if (rc)
			return rc;
	}

	src_len = PAGE_SIZE - rd_offset;
	sg_miter_start(&m, sgl, sgl_nents,
			data_direction == DMA_FROM_DEVICE ?
				SG_MITER_TO_SG : SG_MITER_FROM_SG);
	while (rd_size) {
		u32 len;
		void *rd_addr;

		sg_miter_next(&m);
		if (!(u32)m.length) {
			pr_debug("RD[%u]: invalid sgl %p len %zu\n",
				 dev->rd_dev_id, m.addr, m.length);
			sg_miter_stop(&m);
			return TCM_INCORRECT_AMOUNT_OF_DATA;
		}
		len = min((u32)m.length, src_len);
		if (len > rd_size) {
			pr_debug("RD[%u]: size underrun page %d offset %d "
				 "size %d\n", dev->rd_dev_id,
				 rd_page, rd_offset, rd_size);
			len = rd_size;
		}
		m.consumed = len;

		rd_addr = sg_virt(rd_sg) + rd_offset;

		if (data_direction == DMA_FROM_DEVICE)
			memcpy(m.addr, rd_addr, len);
		else
			memcpy(rd_addr, m.addr, len);

		rd_size -= len;
		if (!rd_size)
			continue;

		src_len -= len;
		if (src_len) {
			rd_offset += len;
			continue;
		}

		/* rd page completed, next one please */
		rd_page++;
		rd_offset = 0;
		src_len = PAGE_SIZE;
		if (rd_page <= table->page_end_offset) {
			rd_sg++;
			continue;
		}

		table = rd_get_sg_table(dev, rd_page);
		if (!table) {
			sg_miter_stop(&m);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		/* since we increment, the first sg entry is correct */
		rd_sg = table->sg_table;
	}
	sg_miter_stop(&m);

	if (cmd->prot_type && data_direction == DMA_FROM_DEVICE) {
		struct rd_dev_sg_table *prot_table;
		struct scatterlist *prot_sg;
		u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
		u32 prot_offset, prot_page;

		tmp = cmd->t_task_lba * se_dev->prot_length;
		prot_offset = do_div(tmp, PAGE_SIZE);
		prot_page = tmp;

		prot_table = rd_get_prot_table(dev, prot_page);
		if (!prot_table)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

		prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset];

		rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
					 prot_sg, prot_offset);
		if (rc)
			return rc;
	}

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

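/*
 * configfs device parameters.  Options arrive as a comma/newline
 * separated string, e.g. "rd_pages=65536,rd_nullio=1", typically
 * written through the device's configfs control attribute.
 */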
enum {
	Opt_rd_pages, Opt_rd_nullio, Opt_err
};

static match_table_t tokens = {
	{Opt_rd_pages, "rd_pages=%d"},
	{Opt_rd_nullio, "rd_nullio=%d"},
	{Opt_err, NULL}
};

static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_rd_pages:
			match_int(args, &arg);
			rd_dev->rd_page_count = arg;
			pr_debug("RAMDISK: Referencing Page"
				" Count: %u\n", rd_dev->rd_page_count);
			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
			break;
		case Opt_rd_nullio:
			match_int(args, &arg);
			if (arg != 1)
				break;

			pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
			rd_dev->rd_flags |= RDF_NULLIO;
			break;
		default:
			break;
		}
	}

	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: rd_mcp\n",
			rd_dev->rd_dev_id);
	bl += sprintf(b + bl, "        PAGES/PAGE_SIZE: %u*%lu"
			"  SG_table_count: %u  nullio: %d\n", rd_dev->rd_page_count,
			PAGE_SIZE, rd_dev->sg_table_count,
			!!(rd_dev->rd_flags & RDF_NULLIO));
	return bl;
}

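/*
 * Total capacity in blocks, minus one: the target core expects the
 * last addressable LBA here, per the READ CAPACITY convention (e.g.
 * 65536 4K pages with 512-byte blocks is 524288 blocks, so 524287 is
 * returned).
 */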
static sector_t rd_get_blocks(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
			dev->dev_attrib.block_size) - 1;

	return blocks_long;
}

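/*
 * Protection space is only allocated once a DIF protection type has
 * actually been configured on the device.
 */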
static int rd_init_prot(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	if (!dev->dev_attrib.pi_prot_type)
		return 0;

	return rd_build_prot_space(rd_dev, dev->prot_length);
}

static void rd_free_prot(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	rd_release_prot_space(rd_dev);
}

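/*
 * CDB parsing is delegated to the generic SBC library, which calls
 * back into rd_execute_rw() for the actual data movement.
 */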
static struct sbc_ops rd_sbc_ops = {
	.execute_rw		= rd_execute_rw,
};

static sense_reason_t
rd_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &rd_sbc_ops);
}

static struct se_subsystem_api rd_mcp_template = {
	.name			= "rd_mcp",
	.inquiry_prod		= "RAMDISK-MCP",
	.inquiry_rev		= RD_MCP_VERSION,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
	.attach_hba		= rd_attach_hba,
	.detach_hba		= rd_detach_hba,
	.alloc_device		= rd_alloc_device,
	.configure_device	= rd_configure_device,
	.free_device		= rd_free_device,
	.parse_cdb		= rd_parse_cdb,
	.set_configfs_dev_params = rd_set_configfs_dev_params,
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= rd_get_blocks,
	.init_prot		= rd_init_prot,
	.free_prot		= rd_free_prot,
};

int __init rd_module_init(void)
{
	return transport_subsystem_register(&rd_mcp_template);
}

void rd_module_exit(void)
{
	transport_subsystem_release(&rd_mcp_template);
}