1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * dcssblk.c -- the S/390 block driver for dcss memory 4 * 5 * Authors: Carsten Otte, Stefan Weinhuber, Gerald Schaefer 6 */ 7 8 #define KMSG_COMPONENT "dcssblk" 9 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 11 #include <linux/module.h> 12 #include <linux/moduleparam.h> 13 #include <linux/ctype.h> 14 #include <linux/errno.h> 15 #include <linux/init.h> 16 #include <linux/slab.h> 17 #include <linux/blkdev.h> 18 #include <linux/completion.h> 19 #include <linux/interrupt.h> 20 #include <linux/pfn_t.h> 21 #include <linux/uio.h> 22 #include <linux/dax.h> 23 #include <asm/extmem.h> 24 #include <asm/io.h> 25 26 #define DCSSBLK_NAME "dcssblk" 27 #define DCSSBLK_MINORS_PER_DISK 1 28 #define DCSSBLK_PARM_LEN 400 29 #define DCSS_BUS_ID_SIZE 20 30 31 static int dcssblk_open(struct block_device *bdev, fmode_t mode); 32 static void dcssblk_release(struct gendisk *disk, fmode_t mode); 33 static void dcssblk_submit_bio(struct bio *bio); 34 static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, 35 long nr_pages, void **kaddr, pfn_t *pfn); 36 37 static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0"; 38 39 static int dcssblk_major; 40 static const struct block_device_operations dcssblk_devops = { 41 .owner = THIS_MODULE, 42 .submit_bio = dcssblk_submit_bio, 43 .open = dcssblk_open, 44 .release = dcssblk_release, 45 }; 46 47 static int dcssblk_dax_zero_page_range(struct dax_device *dax_dev, 48 pgoff_t pgoff, size_t nr_pages) 49 { 50 long rc; 51 void *kaddr; 52 53 rc = dax_direct_access(dax_dev, pgoff, nr_pages, &kaddr, NULL); 54 if (rc < 0) 55 return rc; 56 memset(kaddr, 0, nr_pages << PAGE_SHIFT); 57 dax_flush(dax_dev, kaddr, nr_pages << PAGE_SHIFT); 58 return 0; 59 } 60 61 static const struct dax_operations dcssblk_dax_ops = { 62 .direct_access = dcssblk_dax_direct_access, 63 .zero_page_range = dcssblk_dax_zero_page_range, 64 }; 65 66 struct dcssblk_dev_info { 67 struct list_head lh; 68 struct device dev; 69 char segment_name[DCSS_BUS_ID_SIZE]; 70 atomic_t use_count; 71 struct gendisk *gd; 72 unsigned long start; 73 unsigned long end; 74 int segment_type; 75 unsigned char save_pending; 76 unsigned char is_shared; 77 int num_of_segments; 78 struct list_head seg_list; 79 struct dax_device *dax_dev; 80 }; 81 82 struct segment_info { 83 struct list_head lh; 84 char segment_name[DCSS_BUS_ID_SIZE]; 85 unsigned long start; 86 unsigned long end; 87 int segment_type; 88 }; 89 90 static ssize_t dcssblk_add_store(struct device * dev, struct device_attribute *attr, const char * buf, 91 size_t count); 92 static ssize_t dcssblk_remove_store(struct device * dev, struct device_attribute *attr, const char * buf, 93 size_t count); 94 95 static DEVICE_ATTR(add, S_IWUSR, NULL, dcssblk_add_store); 96 static DEVICE_ATTR(remove, S_IWUSR, NULL, dcssblk_remove_store); 97 98 static struct device *dcssblk_root_dev; 99 100 static LIST_HEAD(dcssblk_devices); 101 static struct rw_semaphore dcssblk_devices_sem; 102 103 /* 104 * release function for segment device. 105 */ 106 static void 107 dcssblk_release_segment(struct device *dev) 108 { 109 struct dcssblk_dev_info *dev_info; 110 struct segment_info *entry, *temp; 111 112 dev_info = container_of(dev, struct dcssblk_dev_info, dev); 113 list_for_each_entry_safe(entry, temp, &dev_info->seg_list, lh) { 114 list_del(&entry->lh); 115 kfree(entry); 116 } 117 kfree(dev_info); 118 module_put(THIS_MODULE); 119 } 120 121 /* 122 * get a minor number. needs to be called with 123 * down_write(&dcssblk_devices_sem) and the 124 * device needs to be enqueued before the semaphore is 125 * freed. 126 */ 127 static int 128 dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info) 129 { 130 int minor, found; 131 struct dcssblk_dev_info *entry; 132 133 if (dev_info == NULL) 134 return -EINVAL; 135 for (minor = 0; minor < (1<<MINORBITS); minor++) { 136 found = 0; 137 // test if minor available 138 list_for_each_entry(entry, &dcssblk_devices, lh) 139 if (minor == entry->gd->first_minor) 140 found++; 141 if (!found) break; // got unused minor 142 } 143 if (found) 144 return -EBUSY; 145 dev_info->gd->first_minor = minor; 146 return 0; 147 } 148 149 /* 150 * get the struct dcssblk_dev_info from dcssblk_devices 151 * for the given name. 152 * down_read(&dcssblk_devices_sem) must be held. 153 */ 154 static struct dcssblk_dev_info * 155 dcssblk_get_device_by_name(char *name) 156 { 157 struct dcssblk_dev_info *entry; 158 159 list_for_each_entry(entry, &dcssblk_devices, lh) { 160 if (!strcmp(name, entry->segment_name)) { 161 return entry; 162 } 163 } 164 return NULL; 165 } 166 167 /* 168 * get the struct segment_info from seg_list 169 * for the given name. 170 * down_read(&dcssblk_devices_sem) must be held. 171 */ 172 static struct segment_info * 173 dcssblk_get_segment_by_name(char *name) 174 { 175 struct dcssblk_dev_info *dev_info; 176 struct segment_info *entry; 177 178 list_for_each_entry(dev_info, &dcssblk_devices, lh) { 179 list_for_each_entry(entry, &dev_info->seg_list, lh) { 180 if (!strcmp(name, entry->segment_name)) 181 return entry; 182 } 183 } 184 return NULL; 185 } 186 187 /* 188 * get the highest address of the multi-segment block. 189 */ 190 static unsigned long 191 dcssblk_find_highest_addr(struct dcssblk_dev_info *dev_info) 192 { 193 unsigned long highest_addr; 194 struct segment_info *entry; 195 196 highest_addr = 0; 197 list_for_each_entry(entry, &dev_info->seg_list, lh) { 198 if (highest_addr < entry->end) 199 highest_addr = entry->end; 200 } 201 return highest_addr; 202 } 203 204 /* 205 * get the lowest address of the multi-segment block. 206 */ 207 static unsigned long 208 dcssblk_find_lowest_addr(struct dcssblk_dev_info *dev_info) 209 { 210 int set_first; 211 unsigned long lowest_addr; 212 struct segment_info *entry; 213 214 set_first = 0; 215 lowest_addr = 0; 216 list_for_each_entry(entry, &dev_info->seg_list, lh) { 217 if (set_first == 0) { 218 lowest_addr = entry->start; 219 set_first = 1; 220 } else { 221 if (lowest_addr > entry->start) 222 lowest_addr = entry->start; 223 } 224 } 225 return lowest_addr; 226 } 227 228 /* 229 * Check continuity of segments. 230 */ 231 static int 232 dcssblk_is_continuous(struct dcssblk_dev_info *dev_info) 233 { 234 int i, j, rc; 235 struct segment_info *sort_list, *entry, temp; 236 237 if (dev_info->num_of_segments <= 1) 238 return 0; 239 240 sort_list = kcalloc(dev_info->num_of_segments, 241 sizeof(struct segment_info), 242 GFP_KERNEL); 243 if (sort_list == NULL) 244 return -ENOMEM; 245 i = 0; 246 list_for_each_entry(entry, &dev_info->seg_list, lh) { 247 memcpy(&sort_list[i], entry, sizeof(struct segment_info)); 248 i++; 249 } 250 251 /* sort segments */ 252 for (i = 0; i < dev_info->num_of_segments; i++) 253 for (j = 0; j < dev_info->num_of_segments; j++) 254 if (sort_list[j].start > sort_list[i].start) { 255 memcpy(&temp, &sort_list[i], 256 sizeof(struct segment_info)); 257 memcpy(&sort_list[i], &sort_list[j], 258 sizeof(struct segment_info)); 259 memcpy(&sort_list[j], &temp, 260 sizeof(struct segment_info)); 261 } 262 263 /* check continuity */ 264 for (i = 0; i < dev_info->num_of_segments - 1; i++) { 265 if ((sort_list[i].end + 1) != sort_list[i+1].start) { 266 pr_err("Adjacent DCSSs %s and %s are not " 267 "contiguous\n", sort_list[i].segment_name, 268 sort_list[i+1].segment_name); 269 rc = -EINVAL; 270 goto out; 271 } 272 /* EN and EW are allowed in a block device */ 273 if (sort_list[i].segment_type != sort_list[i+1].segment_type) { 274 if (!(sort_list[i].segment_type & SEGMENT_EXCLUSIVE) || 275 (sort_list[i].segment_type == SEG_TYPE_ER) || 276 !(sort_list[i+1].segment_type & 277 SEGMENT_EXCLUSIVE) || 278 (sort_list[i+1].segment_type == SEG_TYPE_ER)) { 279 pr_err("DCSS %s and DCSS %s have " 280 "incompatible types\n", 281 sort_list[i].segment_name, 282 sort_list[i+1].segment_name); 283 rc = -EINVAL; 284 goto out; 285 } 286 } 287 } 288 rc = 0; 289 out: 290 kfree(sort_list); 291 return rc; 292 } 293 294 /* 295 * Load a segment 296 */ 297 static int 298 dcssblk_load_segment(char *name, struct segment_info **seg_info) 299 { 300 int rc; 301 302 /* already loaded? */ 303 down_read(&dcssblk_devices_sem); 304 *seg_info = dcssblk_get_segment_by_name(name); 305 up_read(&dcssblk_devices_sem); 306 if (*seg_info != NULL) 307 return -EEXIST; 308 309 /* get a struct segment_info */ 310 *seg_info = kzalloc(sizeof(struct segment_info), GFP_KERNEL); 311 if (*seg_info == NULL) 312 return -ENOMEM; 313 314 strcpy((*seg_info)->segment_name, name); 315 316 /* load the segment */ 317 rc = segment_load(name, SEGMENT_SHARED, 318 &(*seg_info)->start, &(*seg_info)->end); 319 if (rc < 0) { 320 segment_warning(rc, (*seg_info)->segment_name); 321 kfree(*seg_info); 322 } else { 323 INIT_LIST_HEAD(&(*seg_info)->lh); 324 (*seg_info)->segment_type = rc; 325 } 326 return rc; 327 } 328 329 /* 330 * device attribute for switching shared/nonshared (exclusive) 331 * operation (show + store) 332 */ 333 static ssize_t 334 dcssblk_shared_show(struct device *dev, struct device_attribute *attr, char *buf) 335 { 336 struct dcssblk_dev_info *dev_info; 337 338 dev_info = container_of(dev, struct dcssblk_dev_info, dev); 339 return sprintf(buf, dev_info->is_shared ? "1\n" : "0\n"); 340 } 341 342 static ssize_t 343 dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const char *inbuf, size_t count) 344 { 345 struct dcssblk_dev_info *dev_info; 346 struct segment_info *entry, *temp; 347 int rc; 348 349 if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) 350 return -EINVAL; 351 down_write(&dcssblk_devices_sem); 352 dev_info = container_of(dev, struct dcssblk_dev_info, dev); 353 if (atomic_read(&dev_info->use_count)) { 354 rc = -EBUSY; 355 goto out; 356 } 357 if (inbuf[0] == '1') { 358 /* reload segments in shared mode */ 359 list_for_each_entry(entry, &dev_info->seg_list, lh) { 360 rc = segment_modify_shared(entry->segment_name, 361 SEGMENT_SHARED); 362 if (rc < 0) { 363 BUG_ON(rc == -EINVAL); 364 if (rc != -EAGAIN) 365 goto removeseg; 366 } 367 } 368 dev_info->is_shared = 1; 369 switch (dev_info->segment_type) { 370 case SEG_TYPE_SR: 371 case SEG_TYPE_ER: 372 case SEG_TYPE_SC: 373 set_disk_ro(dev_info->gd, 1); 374 } 375 } else if (inbuf[0] == '0') { 376 /* reload segments in exclusive mode */ 377 if (dev_info->segment_type == SEG_TYPE_SC) { 378 pr_err("DCSS %s is of type SC and cannot be " 379 "loaded as exclusive-writable\n", 380 dev_info->segment_name); 381 rc = -EINVAL; 382 goto out; 383 } 384 list_for_each_entry(entry, &dev_info->seg_list, lh) { 385 rc = segment_modify_shared(entry->segment_name, 386 SEGMENT_EXCLUSIVE); 387 if (rc < 0) { 388 BUG_ON(rc == -EINVAL); 389 if (rc != -EAGAIN) 390 goto removeseg; 391 } 392 } 393 dev_info->is_shared = 0; 394 set_disk_ro(dev_info->gd, 0); 395 } else { 396 rc = -EINVAL; 397 goto out; 398 } 399 rc = count; 400 goto out; 401 402 removeseg: 403 pr_err("DCSS device %s is removed after a failed access mode " 404 "change\n", dev_info->segment_name); 405 temp = entry; 406 list_for_each_entry(entry, &dev_info->seg_list, lh) { 407 if (entry != temp) 408 segment_unload(entry->segment_name); 409 } 410 list_del(&dev_info->lh); 411 412 kill_dax(dev_info->dax_dev); 413 put_dax(dev_info->dax_dev); 414 del_gendisk(dev_info->gd); 415 blk_cleanup_disk(dev_info->gd); 416 up_write(&dcssblk_devices_sem); 417 418 if (device_remove_file_self(dev, attr)) { 419 device_unregister(dev); 420 put_device(dev); 421 } 422 return rc; 423 out: 424 up_write(&dcssblk_devices_sem); 425 return rc; 426 } 427 static DEVICE_ATTR(shared, S_IWUSR | S_IRUSR, dcssblk_shared_show, 428 dcssblk_shared_store); 429 430 /* 431 * device attribute for save operation on current copy 432 * of the segment. If the segment is busy, saving will 433 * become pending until it gets released, which can be 434 * undone by storing a non-true value to this entry. 435 * (show + store) 436 */ 437 static ssize_t 438 dcssblk_save_show(struct device *dev, struct device_attribute *attr, char *buf) 439 { 440 struct dcssblk_dev_info *dev_info; 441 442 dev_info = container_of(dev, struct dcssblk_dev_info, dev); 443 return sprintf(buf, dev_info->save_pending ? "1\n" : "0\n"); 444 } 445 446 static ssize_t 447 dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char *inbuf, size_t count) 448 { 449 struct dcssblk_dev_info *dev_info; 450 struct segment_info *entry; 451 452 if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) 453 return -EINVAL; 454 dev_info = container_of(dev, struct dcssblk_dev_info, dev); 455 456 down_write(&dcssblk_devices_sem); 457 if (inbuf[0] == '1') { 458 if (atomic_read(&dev_info->use_count) == 0) { 459 // device is idle => we save immediately 460 pr_info("All DCSSs that map to device %s are " 461 "saved\n", dev_info->segment_name); 462 list_for_each_entry(entry, &dev_info->seg_list, lh) { 463 if (entry->segment_type == SEG_TYPE_EN || 464 entry->segment_type == SEG_TYPE_SN) 465 pr_warn("DCSS %s is of type SN or EN" 466 " and cannot be saved\n", 467 entry->segment_name); 468 else 469 segment_save(entry->segment_name); 470 } 471 } else { 472 // device is busy => we save it when it becomes 473 // idle in dcssblk_release 474 pr_info("Device %s is in use, its DCSSs will be " 475 "saved when it becomes idle\n", 476 dev_info->segment_name); 477 dev_info->save_pending = 1; 478 } 479 } else if (inbuf[0] == '0') { 480 if (dev_info->save_pending) { 481 // device is busy & the user wants to undo his save 482 // request 483 dev_info->save_pending = 0; 484 pr_info("A pending save request for device %s " 485 "has been canceled\n", 486 dev_info->segment_name); 487 } 488 } else { 489 up_write(&dcssblk_devices_sem); 490 return -EINVAL; 491 } 492 up_write(&dcssblk_devices_sem); 493 return count; 494 } 495 static DEVICE_ATTR(save, S_IWUSR | S_IRUSR, dcssblk_save_show, 496 dcssblk_save_store); 497 498 /* 499 * device attribute for showing all segments in a device 500 */ 501 static ssize_t 502 dcssblk_seglist_show(struct device *dev, struct device_attribute *attr, 503 char *buf) 504 { 505 int i; 506 507 struct dcssblk_dev_info *dev_info; 508 struct segment_info *entry; 509 510 down_read(&dcssblk_devices_sem); 511 dev_info = container_of(dev, struct dcssblk_dev_info, dev); 512 i = 0; 513 buf[0] = '\0'; 514 list_for_each_entry(entry, &dev_info->seg_list, lh) { 515 strcpy(&buf[i], entry->segment_name); 516 i += strlen(entry->segment_name); 517 buf[i] = '\n'; 518 i++; 519 } 520 up_read(&dcssblk_devices_sem); 521 return i; 522 } 523 static DEVICE_ATTR(seglist, S_IRUSR, dcssblk_seglist_show, NULL); 524 525 static struct attribute *dcssblk_dev_attrs[] = { 526 &dev_attr_shared.attr, 527 &dev_attr_save.attr, 528 &dev_attr_seglist.attr, 529 NULL, 530 }; 531 static struct attribute_group dcssblk_dev_attr_group = { 532 .attrs = dcssblk_dev_attrs, 533 }; 534 static const struct attribute_group *dcssblk_dev_attr_groups[] = { 535 &dcssblk_dev_attr_group, 536 NULL, 537 }; 538 539 /* 540 * device attribute for adding devices 541 */ 542 static ssize_t 543 dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 544 { 545 int rc, i, j, num_of_segments; 546 struct dcssblk_dev_info *dev_info; 547 struct segment_info *seg_info, *temp; 548 char *local_buf; 549 unsigned long seg_byte_size; 550 551 dev_info = NULL; 552 seg_info = NULL; 553 if (dev != dcssblk_root_dev) { 554 rc = -EINVAL; 555 goto out_nobuf; 556 } 557 if ((count < 1) || (buf[0] == '\0') || (buf[0] == '\n')) { 558 rc = -ENAMETOOLONG; 559 goto out_nobuf; 560 } 561 562 local_buf = kmalloc(count + 1, GFP_KERNEL); 563 if (local_buf == NULL) { 564 rc = -ENOMEM; 565 goto out_nobuf; 566 } 567 568 /* 569 * parse input 570 */ 571 num_of_segments = 0; 572 for (i = 0; (i < count && (buf[i] != '\0') && (buf[i] != '\n')); i++) { 573 for (j = i; j < count && 574 (buf[j] != ':') && 575 (buf[j] != '\0') && 576 (buf[j] != '\n'); j++) { 577 local_buf[j-i] = toupper(buf[j]); 578 } 579 local_buf[j-i] = '\0'; 580 if (((j - i) == 0) || ((j - i) > 8)) { 581 rc = -ENAMETOOLONG; 582 goto seg_list_del; 583 } 584 585 rc = dcssblk_load_segment(local_buf, &seg_info); 586 if (rc < 0) 587 goto seg_list_del; 588 /* 589 * get a struct dcssblk_dev_info 590 */ 591 if (num_of_segments == 0) { 592 dev_info = kzalloc(sizeof(struct dcssblk_dev_info), 593 GFP_KERNEL); 594 if (dev_info == NULL) { 595 rc = -ENOMEM; 596 goto out; 597 } 598 strcpy(dev_info->segment_name, local_buf); 599 dev_info->segment_type = seg_info->segment_type; 600 INIT_LIST_HEAD(&dev_info->seg_list); 601 } 602 list_add_tail(&seg_info->lh, &dev_info->seg_list); 603 num_of_segments++; 604 i = j; 605 606 if ((buf[j] == '\0') || (buf[j] == '\n')) 607 break; 608 } 609 610 /* no trailing colon at the end of the input */ 611 if ((i > 0) && (buf[i-1] == ':')) { 612 rc = -ENAMETOOLONG; 613 goto seg_list_del; 614 } 615 strlcpy(local_buf, buf, i + 1); 616 dev_info->num_of_segments = num_of_segments; 617 rc = dcssblk_is_continuous(dev_info); 618 if (rc < 0) 619 goto seg_list_del; 620 621 dev_info->start = dcssblk_find_lowest_addr(dev_info); 622 dev_info->end = dcssblk_find_highest_addr(dev_info); 623 624 dev_set_name(&dev_info->dev, "%s", dev_info->segment_name); 625 dev_info->dev.release = dcssblk_release_segment; 626 dev_info->dev.groups = dcssblk_dev_attr_groups; 627 INIT_LIST_HEAD(&dev_info->lh); 628 dev_info->gd = blk_alloc_disk(NUMA_NO_NODE); 629 if (dev_info->gd == NULL) { 630 rc = -ENOMEM; 631 goto seg_list_del; 632 } 633 dev_info->gd->major = dcssblk_major; 634 dev_info->gd->minors = DCSSBLK_MINORS_PER_DISK; 635 dev_info->gd->fops = &dcssblk_devops; 636 dev_info->gd->private_data = dev_info; 637 blk_queue_logical_block_size(dev_info->gd->queue, 4096); 638 blk_queue_flag_set(QUEUE_FLAG_DAX, dev_info->gd->queue); 639 640 seg_byte_size = (dev_info->end - dev_info->start + 1); 641 set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors 642 pr_info("Loaded %s with total size %lu bytes and capacity %lu " 643 "sectors\n", local_buf, seg_byte_size, seg_byte_size >> 9); 644 645 dev_info->save_pending = 0; 646 dev_info->is_shared = 1; 647 dev_info->dev.parent = dcssblk_root_dev; 648 649 /* 650 *get minor, add to list 651 */ 652 down_write(&dcssblk_devices_sem); 653 if (dcssblk_get_segment_by_name(local_buf)) { 654 rc = -EEXIST; 655 goto release_gd; 656 } 657 rc = dcssblk_assign_free_minor(dev_info); 658 if (rc) 659 goto release_gd; 660 sprintf(dev_info->gd->disk_name, "dcssblk%d", 661 dev_info->gd->first_minor); 662 list_add_tail(&dev_info->lh, &dcssblk_devices); 663 664 if (!try_module_get(THIS_MODULE)) { 665 rc = -ENODEV; 666 goto dev_list_del; 667 } 668 /* 669 * register the device 670 */ 671 rc = device_register(&dev_info->dev); 672 if (rc) 673 goto put_dev; 674 675 dev_info->dax_dev = alloc_dax(dev_info, &dcssblk_dax_ops); 676 if (IS_ERR(dev_info->dax_dev)) { 677 rc = PTR_ERR(dev_info->dax_dev); 678 dev_info->dax_dev = NULL; 679 goto put_dev; 680 } 681 set_dax_synchronous(dev_info->dax_dev); 682 rc = dax_add_host(dev_info->dax_dev, dev_info->gd); 683 if (rc) 684 goto out_dax; 685 686 get_device(&dev_info->dev); 687 rc = device_add_disk(&dev_info->dev, dev_info->gd, NULL); 688 if (rc) 689 goto out_dax_host; 690 691 switch (dev_info->segment_type) { 692 case SEG_TYPE_SR: 693 case SEG_TYPE_ER: 694 case SEG_TYPE_SC: 695 set_disk_ro(dev_info->gd,1); 696 break; 697 default: 698 set_disk_ro(dev_info->gd,0); 699 break; 700 } 701 up_write(&dcssblk_devices_sem); 702 rc = count; 703 goto out; 704 705 out_dax_host: 706 dax_remove_host(dev_info->gd); 707 out_dax: 708 put_device(&dev_info->dev); 709 kill_dax(dev_info->dax_dev); 710 put_dax(dev_info->dax_dev); 711 put_dev: 712 list_del(&dev_info->lh); 713 blk_cleanup_disk(dev_info->gd); 714 list_for_each_entry(seg_info, &dev_info->seg_list, lh) { 715 segment_unload(seg_info->segment_name); 716 } 717 put_device(&dev_info->dev); 718 up_write(&dcssblk_devices_sem); 719 goto out; 720 dev_list_del: 721 list_del(&dev_info->lh); 722 release_gd: 723 blk_cleanup_disk(dev_info->gd); 724 up_write(&dcssblk_devices_sem); 725 seg_list_del: 726 if (dev_info == NULL) 727 goto out; 728 list_for_each_entry_safe(seg_info, temp, &dev_info->seg_list, lh) { 729 list_del(&seg_info->lh); 730 segment_unload(seg_info->segment_name); 731 kfree(seg_info); 732 } 733 kfree(dev_info); 734 out: 735 kfree(local_buf); 736 out_nobuf: 737 return rc; 738 } 739 740 /* 741 * device attribute for removing devices 742 */ 743 static ssize_t 744 dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 745 { 746 struct dcssblk_dev_info *dev_info; 747 struct segment_info *entry; 748 int rc, i; 749 char *local_buf; 750 751 if (dev != dcssblk_root_dev) { 752 return -EINVAL; 753 } 754 local_buf = kmalloc(count + 1, GFP_KERNEL); 755 if (local_buf == NULL) { 756 return -ENOMEM; 757 } 758 /* 759 * parse input 760 */ 761 for (i = 0; (i < count && (*(buf+i)!='\0') && (*(buf+i)!='\n')); i++) { 762 local_buf[i] = toupper(buf[i]); 763 } 764 local_buf[i] = '\0'; 765 if ((i == 0) || (i > 8)) { 766 rc = -ENAMETOOLONG; 767 goto out_buf; 768 } 769 770 down_write(&dcssblk_devices_sem); 771 dev_info = dcssblk_get_device_by_name(local_buf); 772 if (dev_info == NULL) { 773 up_write(&dcssblk_devices_sem); 774 pr_warn("Device %s cannot be removed because it is not a known device\n", 775 local_buf); 776 rc = -ENODEV; 777 goto out_buf; 778 } 779 if (atomic_read(&dev_info->use_count) != 0) { 780 up_write(&dcssblk_devices_sem); 781 pr_warn("Device %s cannot be removed while it is in use\n", 782 local_buf); 783 rc = -EBUSY; 784 goto out_buf; 785 } 786 787 list_del(&dev_info->lh); 788 kill_dax(dev_info->dax_dev); 789 put_dax(dev_info->dax_dev); 790 del_gendisk(dev_info->gd); 791 blk_cleanup_disk(dev_info->gd); 792 793 /* unload all related segments */ 794 list_for_each_entry(entry, &dev_info->seg_list, lh) 795 segment_unload(entry->segment_name); 796 797 up_write(&dcssblk_devices_sem); 798 799 device_unregister(&dev_info->dev); 800 put_device(&dev_info->dev); 801 802 rc = count; 803 out_buf: 804 kfree(local_buf); 805 return rc; 806 } 807 808 static int 809 dcssblk_open(struct block_device *bdev, fmode_t mode) 810 { 811 struct dcssblk_dev_info *dev_info; 812 int rc; 813 814 dev_info = bdev->bd_disk->private_data; 815 if (NULL == dev_info) { 816 rc = -ENODEV; 817 goto out; 818 } 819 atomic_inc(&dev_info->use_count); 820 rc = 0; 821 out: 822 return rc; 823 } 824 825 static void 826 dcssblk_release(struct gendisk *disk, fmode_t mode) 827 { 828 struct dcssblk_dev_info *dev_info = disk->private_data; 829 struct segment_info *entry; 830 831 if (!dev_info) { 832 WARN_ON(1); 833 return; 834 } 835 down_write(&dcssblk_devices_sem); 836 if (atomic_dec_and_test(&dev_info->use_count) 837 && (dev_info->save_pending)) { 838 pr_info("Device %s has become idle and is being saved " 839 "now\n", dev_info->segment_name); 840 list_for_each_entry(entry, &dev_info->seg_list, lh) { 841 if (entry->segment_type == SEG_TYPE_EN || 842 entry->segment_type == SEG_TYPE_SN) 843 pr_warn("DCSS %s is of type SN or EN and cannot" 844 " be saved\n", entry->segment_name); 845 else 846 segment_save(entry->segment_name); 847 } 848 dev_info->save_pending = 0; 849 } 850 up_write(&dcssblk_devices_sem); 851 } 852 853 static void 854 dcssblk_submit_bio(struct bio *bio) 855 { 856 struct dcssblk_dev_info *dev_info; 857 struct bio_vec bvec; 858 struct bvec_iter iter; 859 unsigned long index; 860 unsigned long page_addr; 861 unsigned long source_addr; 862 unsigned long bytes_done; 863 864 blk_queue_split(&bio); 865 866 bytes_done = 0; 867 dev_info = bio->bi_bdev->bd_disk->private_data; 868 if (dev_info == NULL) 869 goto fail; 870 if ((bio->bi_iter.bi_sector & 7) != 0 || 871 (bio->bi_iter.bi_size & 4095) != 0) 872 /* Request is not page-aligned. */ 873 goto fail; 874 /* verify data transfer direction */ 875 if (dev_info->is_shared) { 876 switch (dev_info->segment_type) { 877 case SEG_TYPE_SR: 878 case SEG_TYPE_ER: 879 case SEG_TYPE_SC: 880 /* cannot write to these segments */ 881 if (bio_data_dir(bio) == WRITE) { 882 pr_warn("Writing to %s failed because it is a read-only device\n", 883 dev_name(&dev_info->dev)); 884 goto fail; 885 } 886 } 887 } 888 889 index = (bio->bi_iter.bi_sector >> 3); 890 bio_for_each_segment(bvec, bio, iter) { 891 page_addr = (unsigned long)bvec_virt(&bvec); 892 source_addr = dev_info->start + (index<<12) + bytes_done; 893 if (unlikely((page_addr & 4095) != 0) || (bvec.bv_len & 4095) != 0) 894 // More paranoia. 895 goto fail; 896 if (bio_data_dir(bio) == READ) { 897 memcpy((void*)page_addr, (void*)source_addr, 898 bvec.bv_len); 899 } else { 900 memcpy((void*)source_addr, (void*)page_addr, 901 bvec.bv_len); 902 } 903 bytes_done += bvec.bv_len; 904 } 905 bio_endio(bio); 906 return; 907 fail: 908 bio_io_error(bio); 909 } 910 911 static long 912 __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff, 913 long nr_pages, void **kaddr, pfn_t *pfn) 914 { 915 resource_size_t offset = pgoff * PAGE_SIZE; 916 unsigned long dev_sz; 917 918 dev_sz = dev_info->end - dev_info->start + 1; 919 if (kaddr) 920 *kaddr = (void *) dev_info->start + offset; 921 if (pfn) 922 *pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset), 923 PFN_DEV|PFN_SPECIAL); 924 925 return (dev_sz - offset) / PAGE_SIZE; 926 } 927 928 static long 929 dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, 930 long nr_pages, void **kaddr, pfn_t *pfn) 931 { 932 struct dcssblk_dev_info *dev_info = dax_get_private(dax_dev); 933 934 return __dcssblk_direct_access(dev_info, pgoff, nr_pages, kaddr, pfn); 935 } 936 937 static void 938 dcssblk_check_params(void) 939 { 940 int rc, i, j, k; 941 char buf[DCSSBLK_PARM_LEN + 1]; 942 struct dcssblk_dev_info *dev_info; 943 944 for (i = 0; (i < DCSSBLK_PARM_LEN) && (dcssblk_segments[i] != '\0'); 945 i++) { 946 for (j = i; (j < DCSSBLK_PARM_LEN) && 947 (dcssblk_segments[j] != ',') && 948 (dcssblk_segments[j] != '\0') && 949 (dcssblk_segments[j] != '('); j++) 950 { 951 buf[j-i] = dcssblk_segments[j]; 952 } 953 buf[j-i] = '\0'; 954 rc = dcssblk_add_store(dcssblk_root_dev, NULL, buf, j-i); 955 if ((rc >= 0) && (dcssblk_segments[j] == '(')) { 956 for (k = 0; (buf[k] != ':') && (buf[k] != '\0'); k++) 957 buf[k] = toupper(buf[k]); 958 buf[k] = '\0'; 959 if (!strncmp(&dcssblk_segments[j], "(local)", 7)) { 960 down_read(&dcssblk_devices_sem); 961 dev_info = dcssblk_get_device_by_name(buf); 962 up_read(&dcssblk_devices_sem); 963 if (dev_info) 964 dcssblk_shared_store(&dev_info->dev, 965 NULL, "0\n", 2); 966 } 967 } 968 while ((dcssblk_segments[j] != ',') && 969 (dcssblk_segments[j] != '\0')) 970 { 971 j++; 972 } 973 if (dcssblk_segments[j] == '\0') 974 break; 975 i = j; 976 } 977 } 978 979 /* 980 * The init/exit functions. 981 */ 982 static void __exit 983 dcssblk_exit(void) 984 { 985 root_device_unregister(dcssblk_root_dev); 986 unregister_blkdev(dcssblk_major, DCSSBLK_NAME); 987 } 988 989 static int __init 990 dcssblk_init(void) 991 { 992 int rc; 993 994 dcssblk_root_dev = root_device_register("dcssblk"); 995 if (IS_ERR(dcssblk_root_dev)) 996 return PTR_ERR(dcssblk_root_dev); 997 rc = device_create_file(dcssblk_root_dev, &dev_attr_add); 998 if (rc) 999 goto out_root; 1000 rc = device_create_file(dcssblk_root_dev, &dev_attr_remove); 1001 if (rc) 1002 goto out_root; 1003 rc = register_blkdev(0, DCSSBLK_NAME); 1004 if (rc < 0) 1005 goto out_root; 1006 dcssblk_major = rc; 1007 init_rwsem(&dcssblk_devices_sem); 1008 1009 dcssblk_check_params(); 1010 return 0; 1011 1012 out_root: 1013 root_device_unregister(dcssblk_root_dev); 1014 1015 return rc; 1016 } 1017 1018 module_init(dcssblk_init); 1019 module_exit(dcssblk_exit); 1020 1021 module_param_string(segments, dcssblk_segments, DCSSBLK_PARM_LEN, 0444); 1022 MODULE_PARM_DESC(segments, "Name of DCSS segment(s) to be loaded, " 1023 "comma-separated list, names in each set separated " 1024 "by commas are separated by colons, each set contains " 1025 "names of contiguous segments and each name max. 8 chars.\n" 1026 "Adding \"(local)\" to the end of each set equals echoing 0 " 1027 "to /sys/devices/dcssblk/<device name>/shared after loading " 1028 "the contiguous segments - \n" 1029 "e.g. segments=\"mydcss1,mydcss2:mydcss3,mydcss4(local)\""); 1030 1031 MODULE_LICENSE("GPL"); 1032