/*******************************************************************************
 * Filename:  target_core_rd.c
 *
 * This file contains the Storage Engine <-> Ramdisk transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/highmem.h>
#include <linux/timer.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_rd.h"

static inline struct rd_dev *RD_DEV(struct se_device *dev)
{
	return container_of(dev, struct rd_dev, dev);
}

static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct rd_host *rd_host;

	rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
	if (!rd_host) {
		pr_err("Unable to allocate memory for struct rd_host\n");
		return -ENOMEM;
	}

	rd_host->rd_host_id = host_id;

	hba->hba_ptr = rd_host;

	pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		RD_HBA_VERSION, TARGET_CORE_VERSION);

	return 0;
}

static void rd_detach_hba(struct se_hba *hba)
{
	struct rd_host *rd_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);

	kfree(rd_host);
	hba->hba_ptr = NULL;
}

static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
				u32 sg_table_count)
{
	struct page *pg;
	struct scatterlist *sg;
	u32 i, j, page_count = 0, sg_per_table;

	for (i = 0; i < sg_table_count; i++) {
		sg = sg_table[i].sg_table;
		sg_per_table = sg_table[i].rd_sg_count;

		for (j = 0; j < sg_per_table; j++) {
			pg = sg_page(&sg[j]);
			if (pg) {
				__free_page(pg);
				page_count++;
			}
		}
		kfree(sg);
	}

	kfree(sg_table);
	return page_count;
}

static void rd_release_device_space(struct rd_dev *rd_dev)
{
	u32 page_count;

	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
		return;

	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,
					  rd_dev->sg_table_count);

	pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

	rd_dev->sg_table_array = NULL;
	rd_dev->sg_table_count = 0;
}
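
/*
 * Backing store layout: device space is carved into an array of
 * struct rd_dev_sg_table slices, each holding at most
 * RD_MAX_ALLOCATION_SIZE / sizeof(struct scatterlist) entries with one
 * backing page per entry.  Consecutive slices are linked with
 * sg_chain() so the whole area can also be walked as a single chained
 * scatterlist.  As a rough worked example (exact values depend on the
 * architecture and config): with 4 KiB pages and a 40-byte struct
 * scatterlist, RD_MAX_ALLOCATION_SIZE of 65536 gives 1638 entries per
 * table, i.e. roughly 6.4 MiB of ramdisk space per table.
 */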

/*	rd_allocate_sgl_table():
 *
 *	Allocate and initialize total_sg_needed backing pages for one array
 *	of scatterlist tables, filling every page with init_payload.  Used
 *	for both device space (zero-filled) and protection space
 *	(0xff-filled).
 */
static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
				 u32 total_sg_needed, unsigned char init_payload)
{
	u32 i = 0, j, page_offset = 0, sg_per_table;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	struct page *pg;
	struct scatterlist *sg;
	unsigned char *p;

	while (total_sg_needed) {
		unsigned int chain_entry = 0;

		sg_per_table = (total_sg_needed > max_sg_per_table) ?
			max_sg_per_table : total_sg_needed;

		/*
		 * Reserve extra element for chain entry
		 */
		if (sg_per_table < total_sg_needed)
			chain_entry = 1;

		sg = kcalloc(sg_per_table + chain_entry, sizeof(*sg),
				GFP_KERNEL);
		if (!sg) {
			pr_err("Unable to allocate scatterlist array"
				" for struct rd_dev\n");
			return -ENOMEM;
		}

		sg_init_table(sg, sg_per_table + chain_entry);

		if (i > 0) {
			sg_chain(sg_table[i - 1].sg_table,
				 max_sg_per_table + 1, sg);
		}

		sg_table[i].sg_table = sg;
		sg_table[i].rd_sg_count = sg_per_table;
		sg_table[i].page_start_offset = page_offset;
		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
						- 1;

		for (j = 0; j < sg_per_table; j++) {
			pg = alloc_pages(GFP_KERNEL, 0);
			if (!pg) {
				pr_err("Unable to allocate scatterlist"
					" pages for struct rd_dev_sg_table\n");
				return -ENOMEM;
			}
			sg_assign_page(&sg[j], pg);
			sg[j].length = PAGE_SIZE;

			p = kmap(pg);
			memset(p, init_payload, PAGE_SIZE);
			kunmap(pg);
		}

		page_offset += sg_per_table;
		total_sg_needed -= sg_per_table;
	}

	return 0;
}

static int rd_build_device_space(struct rd_dev *rd_dev)
{
	struct rd_dev_sg_table *sg_table;
	u32 sg_tables, total_sg_needed;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	int rc;

	if (rd_dev->rd_page_count <= 0) {
		pr_err("Illegal page count: %u for Ramdisk device\n",
		       rd_dev->rd_page_count);
		return -EINVAL;
	}

	/* Don't need backing pages for NULLIO */
	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;

	total_sg_needed = rd_dev->rd_page_count;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;
	sg_table = kcalloc(sg_tables, sizeof(*sg_table), GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk"
		       " scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_table_array = sg_table;
	rd_dev->sg_table_count = sg_tables;

	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00);
	if (rc)
		return rc;

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		 rd_dev->rd_dev_id, rd_dev->rd_page_count,
		 rd_dev->sg_table_count);

	return 0;
}

static void rd_release_prot_space(struct rd_dev *rd_dev)
{
	u32 page_count;

	if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count)
		return;

	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,
					  rd_dev->sg_prot_count);

	pr_debug("CORE_RD[%u] - Released protection space for Ramdisk"
		 " Device ID: %u, pages %u in %u tables total bytes %lu\n",
		 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		 rd_dev->sg_prot_count, (unsigned long)page_count * PAGE_SIZE);

	rd_dev->sg_prot_array = NULL;
	rd_dev->sg_prot_count = 0;
}
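
/*
 * Protection space sizing used by rd_build_prot_space() below: each
 * block carries prot_length bytes of T10 PI, so rd_page_count data
 * pages need rd_page_count * (PAGE_SIZE / block_size) * prot_length
 * bytes of PI; dividing by PAGE_SIZE, the PAGE_SIZE factors cancel and
 * rd_page_count * prot_length / block_size pages remain.  Worked
 * example: with 4096-byte pages, 512-byte blocks and 8 bytes of PI per
 * block, each data page needs (4096 / 512) * 8 = 64 bytes of PI, so a
 * single protection page covers 64 data pages.
 */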
static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size)
{
	struct rd_dev_sg_table *sg_table;
	u32 total_sg_needed, sg_tables;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	int rc;

	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;
	/*
	 * prot_length is the number of PI bytes per block (8 for DIF).
	 * Total pages needed:
	 *   rd_page_count * (PAGE_SIZE / block_size) * prot_length / PAGE_SIZE
	 * The PAGE_SIZE factors cancel each other out, leaving
	 *   rd_page_count * prot_length / block_size, plus one page of pad.
	 */
	total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;
	sg_table = kcalloc(sg_tables, sizeof(*sg_table), GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk protection"
		       " scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_prot_array = sg_table;
	rd_dev->sg_prot_count = sg_tables;

	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff);
	if (rc)
		return rc;

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of"
		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		 rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count);

	return 0;
}

static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
{
	struct rd_dev *rd_dev;
	struct rd_host *rd_host = hba->hba_ptr;

	rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
	if (!rd_dev) {
		pr_err("Unable to allocate memory for struct rd_dev\n");
		return NULL;
	}

	rd_dev->rd_host = rd_host;

	return &rd_dev->dev;
}

static int rd_configure_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	struct rd_host *rd_host = dev->se_hba->hba_ptr;
	int ret;

	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
		pr_debug("Missing rd_pages= parameter\n");
		return -EINVAL;
	}

	ret = rd_build_device_space(rd_dev);
	if (ret < 0)
		goto fail;

	dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
	dev->dev_attrib.hw_max_sectors = UINT_MAX;
	dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
	dev->dev_attrib.is_nonrot = 1;

	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;

	pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
		 " %u pages in %u tables, %lu total bytes\n",
		 rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
		 rd_dev->sg_table_count,
		 (unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));

	return 0;

fail:
	rd_release_device_space(rd_dev);
	return ret;
}

static void rd_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct rd_dev *rd_dev = RD_DEV(dev);

	kfree(rd_dev);
}

static void rd_free_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	rd_release_device_space(rd_dev);
	call_rcu(&dev->rcu_head, rd_dev_call_rcu);
}
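
/*
 * Table lookup: every table except the last maps exactly
 * max_sg_per_table pages, so the table covering a given page index is
 * found in O(1) by integer division; the page_start_offset /
 * page_end_offset comparison below is a sanity check on that
 * arithmetic.  Using the example table size above (1638 entries per
 * table), page 5000 would live in table 5000 / 1638 = 3, at entry
 * 5000 - 3 * 1638 = 86.
 */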

static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
	struct rd_dev_sg_table *sg_table;
	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));

	i = page / sg_per_table;
	if (i < rd_dev->sg_table_count) {
		sg_table = &rd_dev->sg_table_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
	       page);

	return NULL;
}

static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page)
{
	struct rd_dev_sg_table *sg_table;
	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));

	i = page / sg_per_table;
	if (i < rd_dev->sg_prot_count) {
		sg_table = &rd_dev->sg_prot_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct rd_dev_sg_table for prot page: %u\n",
	       page);

	return NULL;
}

static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read)
{
	struct se_device *se_dev = cmd->se_dev;
	struct rd_dev *dev = RD_DEV(se_dev);
	struct rd_dev_sg_table *prot_table;
	struct scatterlist *prot_sg;
	u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
	u32 prot_offset, prot_page;
	u32 prot_npages __maybe_unused;
	u64 tmp;
	sense_reason_t rc = 0;

	tmp = cmd->t_task_lba * se_dev->prot_length;
	prot_offset = do_div(tmp, PAGE_SIZE);
	prot_page = tmp;

	prot_table = rd_get_prot_table(dev, prot_page);
	if (!prot_table)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	prot_sg = &prot_table->sg_table[prot_page -
					prot_table->page_start_offset];

	if (se_dev->dev_attrib.pi_prot_verify) {
		if (is_read)
			rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
					    prot_sg, prot_offset);
		else
			rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
					    cmd->t_prot_sg, 0);
	}
	if (!rc)
		sbc_dif_copy_prot(cmd, sectors, is_read, prot_sg, prot_offset);

	return rc;
}
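
/*
 * rd_execute_rw() copies between the command's scatterlist and the
 * backing pages with a struct sg_mapping_iter walking the command side
 * while rd_sg/rd_offset track the backing side.  Each pass copies
 * min(current iterator chunk, bytes left in the backing page), capped
 * to the remaining transfer size, then advances whichever side was
 * exhausted, fetching the next rd_dev_sg_table once the page index
 * passes table->page_end_offset.
 */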

static sense_reason_t
rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	      enum dma_data_direction data_direction)
{
	struct se_device *se_dev = cmd->se_dev;
	struct rd_dev *dev = RD_DEV(se_dev);
	struct rd_dev_sg_table *table;
	struct scatterlist *rd_sg;
	struct sg_mapping_iter m;
	u32 rd_offset;
	u32 rd_size;
	u32 rd_page;
	u32 src_len;
	u64 tmp;
	sense_reason_t rc;

	if (dev->rd_flags & RDF_NULLIO) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
	rd_offset = do_div(tmp, PAGE_SIZE);
	rd_page = tmp;
	rd_size = cmd->data_length;

	table = rd_get_sg_table(dev, rd_page);
	if (!table)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	rd_sg = &table->sg_table[rd_page - table->page_start_offset];

	pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
		 dev->rd_dev_id,
		 data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
		 cmd->t_task_lba, rd_size, rd_page, rd_offset);

	if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
	    data_direction == DMA_TO_DEVICE) {
		rc = rd_do_prot_rw(cmd, false);
		if (rc)
			return rc;
	}

	src_len = PAGE_SIZE - rd_offset;
	sg_miter_start(&m, sgl, sgl_nents,
		       data_direction == DMA_FROM_DEVICE ?
				SG_MITER_TO_SG : SG_MITER_FROM_SG);
	while (rd_size) {
		u32 len;
		void *rd_addr;

		sg_miter_next(&m);
		if (!(u32)m.length) {
			pr_debug("RD[%u]: invalid sgl %p len %zu\n",
				 dev->rd_dev_id, m.addr, m.length);
			sg_miter_stop(&m);
			return TCM_INCORRECT_AMOUNT_OF_DATA;
		}
		len = min((u32)m.length, src_len);
		if (len > rd_size) {
			pr_debug("RD[%u]: size underrun page %d offset %d "
				 "size %d\n", dev->rd_dev_id,
				 rd_page, rd_offset, rd_size);
			len = rd_size;
		}
		m.consumed = len;

		rd_addr = sg_virt(rd_sg) + rd_offset;

		if (data_direction == DMA_FROM_DEVICE)
			memcpy(m.addr, rd_addr, len);
		else
			memcpy(rd_addr, m.addr, len);

		rd_size -= len;
		if (!rd_size)
			continue;

		src_len -= len;
		if (src_len) {
			rd_offset += len;
			continue;
		}

		/* rd page completed, next one please */
		rd_page++;
		rd_offset = 0;
		src_len = PAGE_SIZE;
		if (rd_page <= table->page_end_offset) {
			rd_sg++;
			continue;
		}

		table = rd_get_sg_table(dev, rd_page);
		if (!table) {
			sg_miter_stop(&m);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		/* since we increment, the first sg entry is correct */
		rd_sg = table->sg_table;
	}
	sg_miter_stop(&m);

	if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
	    data_direction == DMA_FROM_DEVICE) {
		rc = rd_do_prot_rw(cmd, true);
		if (rc)
			return rc;
	}

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

enum {
	Opt_rd_pages, Opt_rd_nullio, Opt_err
};

static match_table_t tokens = {
	{Opt_rd_pages, "rd_pages=%d"},
	{Opt_rd_nullio, "rd_nullio=%d"},
	{Opt_err, NULL}
};

static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
					  const char *page, ssize_t count)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_rd_pages:
			match_int(args, &arg);
			rd_dev->rd_page_count = arg;
			pr_debug("RAMDISK: Referencing Page"
				 " Count: %u\n", rd_dev->rd_page_count);
			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
			break;
		case Opt_rd_nullio:
			match_int(args, &arg);
			if (arg != 1)
				break;

			pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
			rd_dev->rd_flags |= RDF_NULLIO;
			break;
		default:
			break;
		}
	}

	kfree(orig);
	return (!ret) ? count : ret;
}
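
/*
 * Example configfs usage (hypothetical backstore name; assumes the
 * usual target layout under /sys/kernel/config/target/core):
 *
 *   mkdir -p /sys/kernel/config/target/core/rd_mcp_0/rd0
 *   echo rd_pages=65536 > /sys/kernel/config/target/core/rd_mcp_0/rd0/control
 *   echo 1 > /sys/kernel/config/target/core/rd_mcp_0/rd0/enable
 *
 * With 4 KiB pages this yields a 256 MiB rd_mcp backstore.
 */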

static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n",
			rd_dev->rd_dev_id);
	bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
			" SG_table_count: %u nullio: %d\n",
			rd_dev->rd_page_count,
			PAGE_SIZE, rd_dev->sg_table_count,
			!!(rd_dev->rd_flags & RDF_NULLIO));
	return bl;
}

static sector_t rd_get_blocks(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
			dev->dev_attrib.block_size) - 1;

	return blocks_long;
}

static int rd_init_prot(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	if (!dev->dev_attrib.pi_prot_type)
		return 0;

	return rd_build_prot_space(rd_dev, dev->prot_length,
				   dev->dev_attrib.block_size);
}

static void rd_free_prot(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	rd_release_prot_space(rd_dev);
}

static struct sbc_ops rd_sbc_ops = {
	.execute_rw		= rd_execute_rw,
};

static sense_reason_t
rd_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &rd_sbc_ops);
}

static const struct target_backend_ops rd_mcp_ops = {
	.name			= "rd_mcp",
	.inquiry_prod		= "RAMDISK-MCP",
	.inquiry_rev		= RD_MCP_VERSION,
	.attach_hba		= rd_attach_hba,
	.detach_hba		= rd_detach_hba,
	.alloc_device		= rd_alloc_device,
	.configure_device	= rd_configure_device,
	.free_device		= rd_free_device,
	.parse_cdb		= rd_parse_cdb,
	.set_configfs_dev_params = rd_set_configfs_dev_params,
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= rd_get_blocks,
	.init_prot		= rd_init_prot,
	.free_prot		= rd_free_prot,
	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
};

int __init rd_module_init(void)
{
	return transport_backend_register(&rd_mcp_ops);
}

void rd_module_exit(void)
{
	target_backend_unregister(&rd_mcp_ops);
}