/*
 * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
 * Copyright (C) 2006 Thomas Maier <balagi@justmail.de>
 *
 * May be copied or modified under the terms of the GNU General Public
 * License. See linux/COPYING for more information.
 *
 * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and
 * DVD-RAM devices.
 *
 * Theory of operation:
 *
 * At the lowest level, there is the standard driver for the CD/DVD device,
 * typically ide-cd.c or sr.c. This driver can handle read and write requests,
 * but it doesn't know anything about the special restrictions that apply to
 * packet writing. One restriction is that write requests must be aligned to
 * packet boundaries on the physical media, and the size of a write request
 * must be equal to the packet size. Another restriction is that a
 * GPCMD_FLUSH_CACHE command has to be issued to the drive before a read
 * command, if the previous command was a write.
 *
 * The purpose of the packet writing driver is to hide these restrictions from
 * higher layers, such as file systems, and present a block device that can be
 * randomly read and written using 2kB-sized blocks.
 *
 * The lowest layer in the packet writing driver is the packet I/O scheduler.
 * Its data is defined by the struct packet_iosched and includes two bio
 * queues with pending read and write requests. These queues are processed
 * by the pkt_iosched_process_queue() function. The write requests in this
 * queue are already properly aligned and sized. This layer is responsible for
 * issuing the flush cache commands and scheduling the I/O in a good order.
 *
 * The next layer transforms unaligned write requests to aligned writes. This
 * transformation requires reading missing pieces of data from the underlying
 * block device, assembling the pieces to full packets and queuing them to the
 * packet I/O scheduler.
 *
 * At the top layer there is a custom ->submit_bio function that forwards
 * read requests directly to the iosched queue and puts write requests in the
 * unaligned write queue. A kernel thread performs the necessary read
 * gathering to convert the unaligned writes to aligned writes and then feeds
 * them to the packet I/O scheduler.
 *
 *************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/pktcdvd.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/freezer.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/nospec.h>
#include <linux/uaccess.h>

#define DRIVER_NAME	"pktcdvd"

#define pkt_err(pd, fmt, ...)						\
	pr_err("%s: " fmt, pd->name, ##__VA_ARGS__)
#define pkt_notice(pd, fmt, ...)					\
	pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__)
#define pkt_info(pd, fmt, ...)						\
	pr_info("%s: " fmt, pd->name, ##__VA_ARGS__)
#define pkt_dbg(level, pd, fmt, ...)					\
do {									\
	if (level == 2 && PACKET_DEBUG >= 2)				\
		pr_notice("%s: %s():" fmt,				\
			  pd->name, __func__, ##__VA_ARGS__);		\
	else if (level == 1 && PACKET_DEBUG >= 1)			\
		pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__);		\
} while (0)

#define MAX_SPEED 0xffff

static DEFINE_MUTEX(pktcdvd_mutex);
static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
static struct proc_dir_entry *pkt_proc;
static int pktdev_major;
static int write_congestion_on  = PKT_WRITE_CONGESTION_ON;
static int write_congestion_off = PKT_WRITE_CONGESTION_OFF;
static struct mutex ctl_mutex;	/* Serialize open/close/setup/teardown */
static mempool_t psd_pool;
static struct bio_set pkt_bio_set;

static struct class	*class_pktcdvd = NULL;    /* /sys/class/pktcdvd */
static struct dentry	*pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */

/* forward declaration */
static int pkt_setup_dev(dev_t dev, dev_t *pkt_dev);
static int pkt_remove_dev(dev_t pkt_dev);
static int pkt_seq_show(struct seq_file *m, void *p);

static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
{
	return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
}
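/*
 * Example of the zone arithmetic above (illustrative values): with a
 * 32 KiB fixed packet, pd->settings.size is 64 (512-byte sectors) and,
 * assuming pd->offset == 0, a bio starting at sector 1000 maps to zone
 * 1000 & ~63 = 960, i.e. the packet covering sectors 960..1023.
 */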
/*
 * create and register a pktcdvd kernel object.
 */
static struct pktcdvd_kobj *pkt_kobj_create(struct pktcdvd_device *pd,
					const char *name,
					struct kobject *parent,
					struct kobj_type *ktype)
{
	struct pktcdvd_kobj *p;
	int error;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;
	p->pd = pd;
	error = kobject_init_and_add(&p->kobj, ktype, parent, "%s", name);
	if (error) {
		kobject_put(&p->kobj);
		return NULL;
	}
	kobject_uevent(&p->kobj, KOBJ_ADD);
	return p;
}
/*
 * remove a pktcdvd kernel object.
 */
static void pkt_kobj_remove(struct pktcdvd_kobj *p)
{
	if (p)
		kobject_put(&p->kobj);
}
/*
 * default release function for pktcdvd kernel objects.
 */
static void pkt_kobj_release(struct kobject *kobj)
{
	kfree(to_pktcdvdkobj(kobj));
}


/**********************************************************
 *
 * sysfs interface for pktcdvd
 * by (C) 2006 Thomas Maier <balagi@justmail.de>
 *
 **********************************************************/

#define DEF_ATTR(_obj,_name,_mode) \
	static struct attribute _obj = { .name = _name, .mode = _mode }

/**********************************************************
  /sys/class/pktcdvd/pktcdvd[0-7]/
                     stat/reset
                     stat/packets_started
                     stat/packets_finished
                     stat/kb_written
                     stat/kb_read
                     stat/kb_read_gather
                     write_queue/size
                     write_queue/congestion_off
                     write_queue/congestion_on
 **********************************************************/

DEF_ATTR(kobj_pkt_attr_st1, "reset", 0200);
DEF_ATTR(kobj_pkt_attr_st2, "packets_started", 0444);
DEF_ATTR(kobj_pkt_attr_st3, "packets_finished", 0444);
DEF_ATTR(kobj_pkt_attr_st4, "kb_written", 0444);
DEF_ATTR(kobj_pkt_attr_st5, "kb_read", 0444);
DEF_ATTR(kobj_pkt_attr_st6, "kb_read_gather", 0444);

static struct attribute *kobj_pkt_attrs_stat[] = {
	&kobj_pkt_attr_st1,
	&kobj_pkt_attr_st2,
	&kobj_pkt_attr_st3,
	&kobj_pkt_attr_st4,
	&kobj_pkt_attr_st5,
	&kobj_pkt_attr_st6,
	NULL
};

DEF_ATTR(kobj_pkt_attr_wq1, "size", 0444);
DEF_ATTR(kobj_pkt_attr_wq2, "congestion_off", 0644);
DEF_ATTR(kobj_pkt_attr_wq3, "congestion_on", 0644);

static struct attribute *kobj_pkt_attrs_wqueue[] = {
	&kobj_pkt_attr_wq1,
	&kobj_pkt_attr_wq2,
	&kobj_pkt_attr_wq3,
	NULL
};

static ssize_t kobj_pkt_show(struct kobject *kobj,
			     struct attribute *attr, char *data)
{
	struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
	int n = 0;
	int v;

	if (strcmp(attr->name, "packets_started") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.pkt_started);

	} else if (strcmp(attr->name, "packets_finished") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.pkt_ended);

	} else if (strcmp(attr->name, "kb_written") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_w >> 1);

	} else if (strcmp(attr->name, "kb_read") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_r >> 1);

	} else if (strcmp(attr->name, "kb_read_gather") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_rg >> 1);

	} else if (strcmp(attr->name, "size") == 0) {
		spin_lock(&pd->lock);
		v = pd->bio_queue_size;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);

	} else if (strcmp(attr->name, "congestion_off") == 0) {
		spin_lock(&pd->lock);
		v = pd->write_congestion_off;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);

	} else if (strcmp(attr->name, "congestion_on") == 0) {
		spin_lock(&pd->lock);
		v = pd->write_congestion_on;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);
	}
	return n;
}

static void init_write_congestion_marks(int *lo, int *hi)
{
	if (*hi > 0) {
		*hi = max(*hi, 500);
		*hi = min(*hi, 1000000);
		if (*lo <= 0)
			*lo = *hi - 100;
		else {
			*lo = min(*lo, *hi - 100);
			*lo = max(*lo, 100);
		}
	} else {
		*hi = -1;
		*lo = -1;
	}
}
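/*
 * Illustrative examples of the clamping above: a requested high-water
 * mark of 400 is raised to the minimum of 500 and, with no low-water
 * mark given, the low mark becomes 500 - 100 = 400; a pair (lo=6000,
 * hi=5000) is reduced to lo = 5000 - 100 = 4900.  A non-positive high
 * mark disables congestion handling entirely (both marks become -1).
 */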
static ssize_t kobj_pkt_store(struct kobject *kobj,
			      struct attribute *attr,
			      const char *data, size_t len)
{
	struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
	int val;

	if (strcmp(attr->name, "reset") == 0 && len > 0) {
		pd->stats.pkt_started = 0;
		pd->stats.pkt_ended = 0;
		pd->stats.secs_w = 0;
		pd->stats.secs_rg = 0;
		pd->stats.secs_r = 0;

	} else if (strcmp(attr->name, "congestion_off") == 0
		   && sscanf(data, "%d", &val) == 1) {
		spin_lock(&pd->lock);
		pd->write_congestion_off = val;
		init_write_congestion_marks(&pd->write_congestion_off,
					&pd->write_congestion_on);
		spin_unlock(&pd->lock);

	} else if (strcmp(attr->name, "congestion_on") == 0
		   && sscanf(data, "%d", &val) == 1) {
		spin_lock(&pd->lock);
		pd->write_congestion_on = val;
		init_write_congestion_marks(&pd->write_congestion_off,
					&pd->write_congestion_on);
		spin_unlock(&pd->lock);
	}
	return len;
}

static const struct sysfs_ops kobj_pkt_ops = {
	.show = kobj_pkt_show,
	.store = kobj_pkt_store
};
static struct kobj_type kobj_pkt_type_stat = {
	.release = pkt_kobj_release,
	.sysfs_ops = &kobj_pkt_ops,
	.default_attrs = kobj_pkt_attrs_stat
};
static struct kobj_type kobj_pkt_type_wqueue = {
	.release = pkt_kobj_release,
	.sysfs_ops = &kobj_pkt_ops,
	.default_attrs = kobj_pkt_attrs_wqueue
};

static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
{
	if (class_pktcdvd) {
		pd->dev = device_create(class_pktcdvd, NULL, MKDEV(0, 0), NULL,
					"%s", pd->name);
		if (IS_ERR(pd->dev))
			pd->dev = NULL;
	}
	if (pd->dev) {
		pd->kobj_stat = pkt_kobj_create(pd, "stat",
					&pd->dev->kobj,
					&kobj_pkt_type_stat);
		pd->kobj_wqueue = pkt_kobj_create(pd, "write_queue",
					&pd->dev->kobj,
					&kobj_pkt_type_wqueue);
	}
}

static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
{
	pkt_kobj_remove(pd->kobj_stat);
	pkt_kobj_remove(pd->kobj_wqueue);
	if (class_pktcdvd)
		device_unregister(pd->dev);
}


/********************************************************************
  /sys/class/pktcdvd/
                     add            map block device
                     remove         unmap packet dev
                     device_map     show mappings
 *******************************************************************/

static void class_pktcdvd_release(struct class *cls)
{
	kfree(cls);
}

static ssize_t device_map_show(struct class *c, struct class_attribute *attr,
			       char *data)
{
	int n = 0;
	int idx;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	for (idx = 0; idx < MAX_WRITERS; idx++) {
		struct pktcdvd_device *pd = pkt_devs[idx];

		if (!pd)
			continue;
		n += sprintf(data+n, "%s %u:%u %u:%u\n",
			pd->name,
			MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
			MAJOR(pd->bdev->bd_dev),
			MINOR(pd->bdev->bd_dev));
	}
	mutex_unlock(&ctl_mutex);
	return n;
}
static CLASS_ATTR_RO(device_map);

static ssize_t add_store(struct class *c, struct class_attribute *attr,
			 const char *buf, size_t count)
{
	unsigned int major, minor;

	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
		/* pkt_setup_dev() expects caller to hold reference to self */
		if (!try_module_get(THIS_MODULE))
			return -ENODEV;

		pkt_setup_dev(MKDEV(major, minor), NULL);

		module_put(THIS_MODULE);

		return count;
	}

	return -EINVAL;
}
static CLASS_ATTR_WO(add);
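/*
 * Example use of the class interface from user space (illustrative
 * device numbers; 11:0 is typically the first sr device):
 *
 *	echo "11:0" > /sys/class/pktcdvd/add
 *	cat /sys/class/pktcdvd/device_map
 *
 * "add" wraps the given block device in a new pktcdvd device, and
 * "device_map" lists each mapping as "<name> <pkt major:minor>
 * <wrapped major:minor>".
 */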
static ssize_t remove_store(struct class *c, struct class_attribute *attr,
			    const char *buf, size_t count)
{
	unsigned int major, minor;

	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
		pkt_remove_dev(MKDEV(major, minor));
		return count;
	}
	return -EINVAL;
}
static CLASS_ATTR_WO(remove);

static struct attribute *class_pktcdvd_attrs[] = {
	&class_attr_add.attr,
	&class_attr_remove.attr,
	&class_attr_device_map.attr,
	NULL,
};
ATTRIBUTE_GROUPS(class_pktcdvd);

static int pkt_sysfs_init(void)
{
	int ret = 0;

	/*
	 * create control files in sysfs
	 * /sys/class/pktcdvd/...
	 */
	class_pktcdvd = kzalloc(sizeof(*class_pktcdvd), GFP_KERNEL);
	if (!class_pktcdvd)
		return -ENOMEM;
	class_pktcdvd->name = DRIVER_NAME;
	class_pktcdvd->owner = THIS_MODULE;
	class_pktcdvd->class_release = class_pktcdvd_release;
	class_pktcdvd->class_groups = class_pktcdvd_groups;
	ret = class_register(class_pktcdvd);
	if (ret) {
		kfree(class_pktcdvd);
		class_pktcdvd = NULL;
		pr_err("failed to create class pktcdvd\n");
		return ret;
	}
	return 0;
}

static void pkt_sysfs_cleanup(void)
{
	if (class_pktcdvd)
		class_destroy(class_pktcdvd);
	class_pktcdvd = NULL;
}

/********************************************************************
  entries in debugfs

  /sys/kernel/debug/pktcdvd[0-7]/
			info

 *******************************************************************/

static int pkt_debugfs_seq_show(struct seq_file *m, void *p)
{
	return pkt_seq_show(m, p);
}

static int pkt_debugfs_fops_open(struct inode *inode, struct file *file)
{
	return single_open(file, pkt_debugfs_seq_show, inode->i_private);
}

static const struct file_operations debug_fops = {
	.open		= pkt_debugfs_fops_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.owner		= THIS_MODULE,
};

static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
{
	if (!pkt_debugfs_root)
		return;
	pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root);
	if (!pd->dfs_d_root)
		return;

	pd->dfs_f_info = debugfs_create_file("info", 0444,
					     pd->dfs_d_root, pd, &debug_fops);
}

static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
{
	if (!pkt_debugfs_root)
		return;
	debugfs_remove(pd->dfs_f_info);
	debugfs_remove(pd->dfs_d_root);
	pd->dfs_f_info = NULL;
	pd->dfs_d_root = NULL;
}

static void pkt_debugfs_init(void)
{
	pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL);
}

static void pkt_debugfs_cleanup(void)
{
	debugfs_remove(pkt_debugfs_root);
	pkt_debugfs_root = NULL;
}

/* ----------------------------------------------------------*/


static void pkt_bio_finished(struct pktcdvd_device *pd)
{
	BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
	if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
		pkt_dbg(2, pd, "queue empty\n");
		atomic_set(&pd->iosched.attention, 1);
		wake_up(&pd->wqueue);
	}
}
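/*
 * Sizing note (derived from the constants used below): frames are
 * CD_FRAMESIZE (2 KiB) and FRAMES_PER_PAGE is PAGE_SIZE / CD_FRAMESIZE,
 * i.e. 2 on 4 KiB-page systems.  A 32 KiB fixed packet therefore needs
 * frames = 64 sectors >> 2 = 16 frames backed by 8 pages.
 */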
/*
 * Allocate a packet_data struct
 */
static struct packet_data *pkt_alloc_packet_data(int frames)
{
	int i;
	struct packet_data *pkt;

	pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL);
	if (!pkt)
		goto no_pkt;

	pkt->frames = frames;
	pkt->w_bio = bio_kmalloc(GFP_KERNEL, frames);
	if (!pkt->w_bio)
		goto no_bio;

	for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
		pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (!pkt->pages[i])
			goto no_page;
	}

	spin_lock_init(&pkt->lock);
	bio_list_init(&pkt->orig_bios);

	for (i = 0; i < frames; i++) {
		struct bio *bio = bio_kmalloc(GFP_KERNEL, 1);

		if (!bio)
			goto no_rd_bio;

		pkt->r_bios[i] = bio;
	}

	return pkt;

no_rd_bio:
	for (i = 0; i < frames; i++) {
		struct bio *bio = pkt->r_bios[i];

		if (bio)
			bio_put(bio);
	}

no_page:
	for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
		if (pkt->pages[i])
			__free_page(pkt->pages[i]);
	bio_put(pkt->w_bio);
no_bio:
	kfree(pkt);
no_pkt:
	return NULL;
}

/*
 * Free a packet_data struct
 */
static void pkt_free_packet_data(struct packet_data *pkt)
{
	int i;

	for (i = 0; i < pkt->frames; i++) {
		struct bio *bio = pkt->r_bios[i];

		if (bio)
			bio_put(bio);
	}
	for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
		__free_page(pkt->pages[i]);
	bio_put(pkt->w_bio);
	kfree(pkt);
}

static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));

	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
		pkt_free_packet_data(pkt);
	}
	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
}

static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
{
	struct packet_data *pkt;

	BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));

	while (nr_packets > 0) {
		pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
		if (!pkt) {
			pkt_shrink_pktlist(pd);
			return 0;
		}
		pkt->id = nr_packets;
		pkt->pd = pd;
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
		nr_packets--;
	}
	return 1;
}

static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
{
	struct rb_node *n = rb_next(&node->rb_node);

	if (!n)
		return NULL;
	return rb_entry(n, struct pkt_rb_node, rb_node);
}

static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	rb_erase(&node->rb_node, &pd->bio_queue);
	mempool_free(node, &pd->rb_pool);
	pd->bio_queue_size--;
	BUG_ON(pd->bio_queue_size < 0);
}

/*
 * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
 */
static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
{
	struct rb_node *n = pd->bio_queue.rb_node;
	struct rb_node *next;
	struct pkt_rb_node *tmp;

	if (!n) {
		BUG_ON(pd->bio_queue_size > 0);
		return NULL;
	}

	for (;;) {
		tmp = rb_entry(n, struct pkt_rb_node, rb_node);
		if (s <= tmp->bio->bi_iter.bi_sector)
			next = n->rb_left;
		else
			next = n->rb_right;
		if (!next)
			break;
		n = next;
	}

	if (s > tmp->bio->bi_iter.bi_sector) {
		tmp = pkt_rbtree_next(tmp);
		if (!tmp)
			return NULL;
	}
	BUG_ON(s > tmp->bio->bi_iter.bi_sector);
	return tmp;
}
/*
 * Insert a node into the pd->bio_queue rb tree.
 */
static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	struct rb_node **p = &pd->bio_queue.rb_node;
	struct rb_node *parent = NULL;
	sector_t s = node->bio->bi_iter.bi_sector;
	struct pkt_rb_node *tmp;

	while (*p) {
		parent = *p;
		tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
		if (s < tmp->bio->bi_iter.bi_sector)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &pd->bio_queue);
	pd->bio_queue_size++;
}

/*
 * Send a packet_command to the underlying block device and
 * wait for completion.
 */
static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
{
	struct request_queue *q = bdev_get_queue(pd->bdev);
	struct request *rq;
	int ret = 0;

	rq = scsi_alloc_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
				REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	if (cgc->buflen) {
		ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
				      GFP_NOIO);
		if (ret)
			goto out;
	}

	scsi_req(rq)->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
	memcpy(scsi_req(rq)->cmd, cgc->cmd, CDROM_PACKET_SIZE);

	rq->timeout = 60*HZ;
	if (cgc->quiet)
		rq->rq_flags |= RQF_QUIET;

	blk_execute_rq(pd->bdev->bd_disk, rq, 0);
	if (scsi_req(rq)->result)
		ret = -EIO;
out:
	blk_mq_free_request(rq);
	return ret;
}

static const char *sense_key_string(__u8 index)
{
	static const char * const info[] = {
		"No sense", "Recovered error", "Not ready",
		"Medium error", "Hardware error", "Illegal request",
		"Unit attention", "Data protect", "Blank check",
	};

	return index < ARRAY_SIZE(info) ? info[index] : "INVALID";
}

/*
 * A generic sense dump / resolve mechanism should be implemented across
 * all ATAPI + SCSI devices.
 */
static void pkt_dump_sense(struct pktcdvd_device *pd,
			   struct packet_command *cgc)
{
	struct scsi_sense_hdr *sshdr = cgc->sshdr;

	if (sshdr)
		pkt_err(pd, "%*ph - sense %02x.%02x.%02x (%s)\n",
			CDROM_PACKET_SIZE, cgc->cmd,
			sshdr->sense_key, sshdr->asc, sshdr->ascq,
			sense_key_string(sshdr->sense_key));
	else
		pkt_err(pd, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
}

/*
 * flush the drive cache to media
 */
static int pkt_flush_cache(struct pktcdvd_device *pd)
{
	struct packet_command cgc;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_FLUSH_CACHE;
	cgc.quiet = 1;

	/*
	 * the IMMED bit -- we default to not setting it, although that
	 * would allow a much faster close, this is safer
	 */
#if 0
	cgc.cmd[1] = 1 << 1;
#endif
	return pkt_generic_packet(pd, &cgc);
}
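/*
 * Unit note for the speed handling below: GPCMD_SET_SPEED takes read and
 * write speeds in kB/s, where 1x CD speed is about 176 kB/s (hence the
 * "* 177" and "/ 176" factors used when opening for write), and the
 * MAX_SPEED value 0xffff asks the drive for its maximum speed.
 */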
/*
 * speed is given in kB/s, e.g. ~706 for 4x CD speed
 */
static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
				unsigned write_speed, unsigned read_speed)
{
	struct packet_command cgc;
	struct scsi_sense_hdr sshdr;
	int ret;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.sshdr = &sshdr;
	cgc.cmd[0] = GPCMD_SET_SPEED;
	cgc.cmd[2] = (read_speed >> 8) & 0xff;
	cgc.cmd[3] = read_speed & 0xff;
	cgc.cmd[4] = (write_speed >> 8) & 0xff;
	cgc.cmd[5] = write_speed & 0xff;

	ret = pkt_generic_packet(pd, &cgc);
	if (ret)
		pkt_dump_sense(pd, &cgc);

	return ret;
}

/*
 * Queue a bio for processing by the low-level CD device. Must be called
 * from process context.
 */
static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
{
	spin_lock(&pd->iosched.lock);
	if (bio_data_dir(bio) == READ)
		bio_list_add(&pd->iosched.read_queue, bio);
	else
		bio_list_add(&pd->iosched.write_queue, bio);
	spin_unlock(&pd->iosched.lock);

	atomic_set(&pd->iosched.attention, 1);
	wake_up(&pd->wqueue);
}
/*
 * Process the queued read/write requests. This function handles special
 * requirements for CDRW drives:
 * - A cache flush command must be inserted before a read request if the
 *   previous request was a write.
 * - Switching between reading and writing is slow, so don't do it more often
 *   than necessary.
 * - Optimize for throughput at the expense of latency. This means that
 *   streaming writes will never be interrupted by a read, but if the drive
 *   has to seek before the next write, switch to reading instead if there
 *   are any pending read requests.
 * - Set the read speed according to current usage pattern. When only reading
 *   from the device, it's best to use the highest possible read speed, but
 *   when switching often between reading and writing, it's better to have the
 *   same read and write speeds.
 */
static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
{
	if (atomic_read(&pd->iosched.attention) == 0)
		return;
	atomic_set(&pd->iosched.attention, 0);

	for (;;) {
		struct bio *bio;
		int reads_queued, writes_queued;

		spin_lock(&pd->iosched.lock);
		reads_queued = !bio_list_empty(&pd->iosched.read_queue);
		writes_queued = !bio_list_empty(&pd->iosched.write_queue);
		spin_unlock(&pd->iosched.lock);

		if (!reads_queued && !writes_queued)
			break;

		if (pd->iosched.writing) {
			int need_write_seek = 1;

			spin_lock(&pd->iosched.lock);
			bio = bio_list_peek(&pd->iosched.write_queue);
			spin_unlock(&pd->iosched.lock);
			if (bio && (bio->bi_iter.bi_sector ==
				    pd->iosched.last_write))
				need_write_seek = 0;
			if (need_write_seek && reads_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					pkt_dbg(2, pd, "write, waiting\n");
					break;
				}
				pkt_flush_cache(pd);
				pd->iosched.writing = 0;
			}
		} else {
			if (!reads_queued && writes_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					pkt_dbg(2, pd, "read, waiting\n");
					break;
				}
				pd->iosched.writing = 1;
			}
		}

		spin_lock(&pd->iosched.lock);
		if (pd->iosched.writing)
			bio = bio_list_pop(&pd->iosched.write_queue);
		else
			bio = bio_list_pop(&pd->iosched.read_queue);
		spin_unlock(&pd->iosched.lock);

		if (!bio)
			continue;

		if (bio_data_dir(bio) == READ)
			pd->iosched.successive_reads +=
				bio->bi_iter.bi_size >> 10;
		else {
			pd->iosched.successive_reads = 0;
			pd->iosched.last_write = bio_end_sector(bio);
		}
		if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
			if (pd->read_speed == pd->write_speed) {
				pd->read_speed = MAX_SPEED;
				pkt_set_speed(pd, pd->write_speed,
					      pd->read_speed);
			}
		} else {
			if (pd->read_speed != pd->write_speed) {
				pd->read_speed = pd->write_speed;
				pkt_set_speed(pd, pd->write_speed,
					      pd->read_speed);
			}
		}

		atomic_inc(&pd->cdrw.pending_bios);
		submit_bio_noacct(bio);
	}
}
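/*
 * Worked example for the check below (illustrative numbers): a 32 KiB
 * packet is pd->settings.size == 64 sectors, i.e. 64 << 9 = 32768 bytes.
 * Writing it as one bio takes 32768 / CD_FRAMESIZE = 16 segments if the
 * drive accepts a segment per 2 KiB frame, or 32768 / PAGE_SIZE = 8
 * segments on a 4 KiB-page machine when frames are merged per page.
 */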
/*
 * Special care is needed if the underlying block device has a small
 * max_phys_segments value.
 */
static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
{
	if ((pd->settings.size << 9) / CD_FRAMESIZE
	    <= queue_max_segments(q)) {
		/*
		 * The cdrom device can handle one segment/frame
		 */
		clear_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else if ((pd->settings.size << 9) / PAGE_SIZE
		   <= queue_max_segments(q)) {
		/*
		 * We can handle this case at the expense of some extra memory
		 * copies during write operations
		 */
		set_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else {
		pkt_err(pd, "cdrom max_phys_segments too small\n");
		return -EIO;
	}
}

static void pkt_end_io_read(struct bio *bio)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;

	BUG_ON(!pd);

	pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
		bio, (unsigned long long)pkt->sector,
		(unsigned long long)bio->bi_iter.bi_sector, bio->bi_status);

	if (bio->bi_status)
		atomic_inc(&pkt->io_errors);
	if (atomic_dec_and_test(&pkt->io_wait)) {
		atomic_inc(&pkt->run_sm);
		wake_up(&pd->wqueue);
	}
	pkt_bio_finished(pd);
}

static void pkt_end_io_packet_write(struct bio *bio)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;

	BUG_ON(!pd);

	pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_status);

	pd->stats.pkt_ended++;

	pkt_bio_finished(pd);
	atomic_dec(&pkt->io_wait);
	atomic_inc(&pkt->run_sm);
	wake_up(&pd->wqueue);
}
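/*
 * Example of the read gathering performed below (illustrative numbers):
 * with a 16-frame packet, a 4 KiB write covering frames 2..3 of a zone
 * marks those two frames as written, so the remaining 14 frames must be
 * read from the media before the full packet can be written back.
 */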
/*
 * Schedule reads for the holes in a packet
 */
static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int frames_read = 0;
	struct bio *bio;
	int f;
	char written[PACKET_MAX_SIZE];

	BUG_ON(bio_list_empty(&pkt->orig_bios));

	atomic_set(&pkt->io_wait, 0);
	atomic_set(&pkt->io_errors, 0);

	/*
	 * Figure out which frames we need to read before we can write.
	 */
	memset(written, 0, sizeof(written));
	spin_lock(&pkt->lock);
	bio_list_for_each(bio, &pkt->orig_bios) {
		int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
			(CD_FRAMESIZE >> 9);
		int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;

		pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
		BUG_ON(first_frame < 0);
		BUG_ON(first_frame + num_frames > pkt->frames);
		for (f = first_frame; f < first_frame + num_frames; f++)
			written[f] = 1;
	}
	spin_unlock(&pkt->lock);

	if (pkt->cache_valid) {
		pkt_dbg(2, pd, "zone %llx cached\n",
			(unsigned long long)pkt->sector);
		goto out_account;
	}

	/*
	 * Schedule reads for missing parts of the packet.
	 */
	for (f = 0; f < pkt->frames; f++) {
		int p, offset;

		if (written[f])
			continue;

		bio = pkt->r_bios[f];
		bio_reset(bio);
		bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
		bio_set_dev(bio, pd->bdev);
		bio->bi_end_io = pkt_end_io_read;
		bio->bi_private = pkt;

		p = (f * CD_FRAMESIZE) / PAGE_SIZE;
		offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
		pkt_dbg(2, pd, "Adding frame %d, page:%p offs:%d\n",
			f, pkt->pages[p], offset);
		if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
			BUG();

		atomic_inc(&pkt->io_wait);
		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		pkt_queue_bio(pd, bio);
		frames_read++;
	}

out_account:
	pkt_dbg(2, pd, "need %d frames for zone %llx\n",
		frames_read, (unsigned long long)pkt->sector);
	pd->stats.pkt_started++;
	pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
}

/*
 * Find a packet matching zone, or the least recently used packet if
 * there is no match.
 */
static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
{
	struct packet_data *pkt;

	list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
		if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
			list_del_init(&pkt->list);
			if (pkt->sector != zone)
				pkt->cache_valid = 0;
			return pkt;
		}
	}
	BUG();
	return NULL;
}

static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	if (pkt->cache_valid) {
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
	} else {
		list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
	}
}
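/*
 * Note on the free list ordering used above: packets whose cached data
 * is still valid are returned to the head of the list, packets without
 * valid cache go to the tail.  pkt_get_packet_data() scans from the
 * head, so a packet for the same zone (a cache hit) is found first and
 * the tail entry serves as the least-recently-used fallback.
 */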
static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state)
{
#if PACKET_DEBUG > 1
	static const char *state_name[] = {
		"IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
	};
	enum packet_data_state old_state = pkt->state;

	/* use pkt->pd here; no pd variable is in scope in this helper */
	pkt_dbg(2, pkt->pd, "pkt %2d : s=%6llx %s -> %s\n",
		pkt->id, (unsigned long long)pkt->sector,
		state_name[old_state], state_name[state]);
#endif
	pkt->state = state;
}

/*
 * Scan the work queue to see if we can start a new packet.
 * returns non-zero if any work was done.
 */
static int pkt_handle_queue(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *p;
	struct bio *bio = NULL;
	sector_t zone = 0; /* Suppress gcc warning */
	struct pkt_rb_node *node, *first_node;
	struct rb_node *n;
	int wakeup;

	atomic_set(&pd->scan_queue, 0);

	if (list_empty(&pd->cdrw.pkt_free_list)) {
		pkt_dbg(2, pd, "no pkt\n");
		return 0;
	}

	/*
	 * Try to find a zone we are not already working on.
	 */
	spin_lock(&pd->lock);
	first_node = pkt_rbtree_find(pd, pd->current_sector);
	if (!first_node) {
		n = rb_first(&pd->bio_queue);
		if (n)
			first_node = rb_entry(n, struct pkt_rb_node, rb_node);
	}
	node = first_node;
	while (node) {
		bio = node->bio;
		zone = get_zone(bio->bi_iter.bi_sector, pd);
		list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
			if (p->sector == zone) {
				bio = NULL;
				goto try_next_bio;
			}
		}
		break;
try_next_bio:
		node = pkt_rbtree_next(node);
		if (!node) {
			n = rb_first(&pd->bio_queue);
			if (n)
				node = rb_entry(n, struct pkt_rb_node, rb_node);
		}
		if (node == first_node)
			node = NULL;
	}
	spin_unlock(&pd->lock);
	if (!bio) {
		pkt_dbg(2, pd, "no bio\n");
		return 0;
	}

	pkt = pkt_get_packet_data(pd, zone);

	pd->current_sector = zone + pd->settings.size;
	pkt->sector = zone;
	BUG_ON(pkt->frames != pd->settings.size >> 2);
	pkt->write_size = 0;

	/*
	 * Scan work queue for bios in the same zone and link them
	 * to this packet.
	 */
	spin_lock(&pd->lock);
	pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
	while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
		bio = node->bio;
		pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long)
			get_zone(bio->bi_iter.bi_sector, pd));
		if (get_zone(bio->bi_iter.bi_sector, pd) != zone)
			break;
		pkt_rbtree_erase(pd, node);
		spin_lock(&pkt->lock);
		bio_list_add(&pkt->orig_bios, bio);
		pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
		spin_unlock(&pkt->lock);
	}
	/* check write congestion marks, and if bio_queue_size is
	   below, wake up any waiters */
	wakeup = (pd->write_congestion_on > 0
			&& pd->bio_queue_size <= pd->write_congestion_off);
	spin_unlock(&pd->lock);
	if (wakeup)
		clear_bdi_congested(pd->disk->bdi, BLK_RW_ASYNC);

	pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
	pkt_set_state(pkt, PACKET_WAITING_STATE);
	atomic_set(&pkt->run_sm, 1);

	spin_lock(&pd->cdrw.active_list_lock);
	list_add(&pkt->list, &pd->cdrw.pkt_active_list);
	spin_unlock(&pd->cdrw.active_list_lock);

	return 1;
}

/**
 * bio_list_copy_data - copy contents of data buffers from one chain of bios to
 * another
 * @dst: destination bio list
 * @src: source bio list
 *
 * Stops when it reaches the end of either the @src list or @dst list - that is,
 * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of
 * bios).
 */
static void bio_list_copy_data(struct bio *dst, struct bio *src)
{
	struct bvec_iter src_iter = src->bi_iter;
	struct bvec_iter dst_iter = dst->bi_iter;

	while (1) {
		if (!src_iter.bi_size) {
			src = src->bi_next;
			if (!src)
				break;

			src_iter = src->bi_iter;
		}

		if (!dst_iter.bi_size) {
			dst = dst->bi_next;
			if (!dst)
				break;

			dst_iter = dst->bi_iter;
		}

		bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
	}
}
/*
 * Assemble a bio to write one packet and queue the bio for processing
 * by the underlying block device.
 */
static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int f;

	bio_reset(pkt->w_bio);
	pkt->w_bio->bi_iter.bi_sector = pkt->sector;
	bio_set_dev(pkt->w_bio, pd->bdev);
	pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
	pkt->w_bio->bi_private = pkt;

	/* XXX: locking? */
	for (f = 0; f < pkt->frames; f++) {
		struct page *page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
		unsigned offset = (f * CD_FRAMESIZE) % PAGE_SIZE;

		if (!bio_add_page(pkt->w_bio, page, CD_FRAMESIZE, offset))
			BUG();
	}
	pkt_dbg(2, pd, "vcnt=%d\n", pkt->w_bio->bi_vcnt);

	/*
	 * Fill-in bvec with data from orig_bios.
	 */
	spin_lock(&pkt->lock);
	bio_list_copy_data(pkt->w_bio, pkt->orig_bios.head);

	pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
	spin_unlock(&pkt->lock);

	pkt_dbg(2, pd, "Writing %d frames for zone %llx\n",
		pkt->write_size, (unsigned long long)pkt->sector);

	if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames))
		pkt->cache_valid = 1;
	else
		pkt->cache_valid = 0;

	/* Start the write request */
	atomic_set(&pkt->io_wait, 1);
	bio_set_op_attrs(pkt->w_bio, REQ_OP_WRITE, 0);
	pkt_queue_bio(pd, pkt->w_bio);
}

static void pkt_finish_packet(struct packet_data *pkt, blk_status_t status)
{
	struct bio *bio;

	if (status)
		pkt->cache_valid = 0;

	/* Finish all bios corresponding to this packet */
	while ((bio = bio_list_pop(&pkt->orig_bios))) {
		bio->bi_status = status;
		bio_endio(bio);
	}
}

static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	pkt_dbg(2, pd, "pkt %d\n", pkt->id);

	for (;;) {
		switch (pkt->state) {
		case PACKET_WAITING_STATE:
			if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
				return;

			pkt->sleep_time = 0;
			pkt_gather_data(pd, pkt);
			pkt_set_state(pkt, PACKET_READ_WAIT_STATE);
			break;

		case PACKET_READ_WAIT_STATE:
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (atomic_read(&pkt->io_errors) > 0) {
				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
			} else {
				pkt_start_write(pd, pkt);
			}
			break;

		case PACKET_WRITE_WAIT_STATE:
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (!pkt->w_bio->bi_status) {
				pkt_set_state(pkt, PACKET_FINISHED_STATE);
			} else {
				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
			}
			break;

		case PACKET_RECOVERY_STATE:
			pkt_dbg(2, pd, "No recovery possible\n");
			pkt_set_state(pkt, PACKET_FINISHED_STATE);
			break;

		case PACKET_FINISHED_STATE:
			pkt_finish_packet(pkt, pkt->w_bio->bi_status);
			return;

		default:
			BUG();
			break;
		}
	}
}
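/*
 * Summary of the state machine above: a packet starts in WAITING (either
 * filling up with more bios or letting sleep_time expire), moves to
 * READ_WAIT while missing frames are gathered, then to WRITE_WAIT once
 * the assembled packet has been queued.  A successful write leads to
 * FINISHED; any read or write error detours through RECOVERY, which
 * currently cannot recover and also ends in FINISHED.
 */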
static void pkt_handle_packets(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	/*
	 * Run state machine for active packets
	 */
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		if (atomic_read(&pkt->run_sm) > 0) {
			atomic_set(&pkt->run_sm, 0);
			pkt_run_state_machine(pd, pkt);
		}
	}

	/*
	 * Move no longer active packets to the free list
	 */
	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
		if (pkt->state == PACKET_FINISHED_STATE) {
			list_del(&pkt->list);
			pkt_put_packet_data(pd, pkt);
			pkt_set_state(pkt, PACKET_IDLE_STATE);
			atomic_set(&pd->scan_queue, 1);
		}
	}
	spin_unlock(&pd->cdrw.active_list_lock);
}

static void pkt_count_states(struct pktcdvd_device *pd, int *states)
{
	struct packet_data *pkt;
	int i;

	for (i = 0; i < PACKET_NUM_STATES; i++)
		states[i] = 0;

	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		states[pkt->state]++;
	}
	spin_unlock(&pd->cdrw.active_list_lock);
}

/*
 * kcdrwd is woken up when writes have been queued for one of our
 * registered devices
 */
static int kcdrwd(void *foobar)
{
	struct pktcdvd_device *pd = foobar;
	struct packet_data *pkt;
	long min_sleep_time, residue;

	set_user_nice(current, MIN_NICE);
	set_freezable();

	for (;;) {
		DECLARE_WAITQUEUE(wait, current);

		/*
		 * Wait until there is something to do
		 */
		add_wait_queue(&pd->wqueue, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);

			/* Check if we need to run pkt_handle_queue */
			if (atomic_read(&pd->scan_queue) > 0)
				goto work_to_do;

			/* Check if we need to run the state machine for some packet */
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (atomic_read(&pkt->run_sm) > 0)
					goto work_to_do;
			}

			/* Check if we need to process the iosched queues */
			if (atomic_read(&pd->iosched.attention) != 0)
				goto work_to_do;

			/* Otherwise, go to sleep */
			if (PACKET_DEBUG > 1) {
				int states[PACKET_NUM_STATES];

				pkt_count_states(pd, states);
				pkt_dbg(2, pd, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
					states[0], states[1], states[2],
					states[3], states[4], states[5]);
			}

			min_sleep_time = MAX_SCHEDULE_TIMEOUT;
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
					min_sleep_time = pkt->sleep_time;
			}

			pkt_dbg(2, pd, "sleeping\n");
			residue = schedule_timeout(min_sleep_time);
			pkt_dbg(2, pd, "wake up\n");

			/* make swsusp happy with our thread */
			try_to_freeze();

			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (!pkt->sleep_time)
					continue;
				pkt->sleep_time -= min_sleep_time - residue;
				if (pkt->sleep_time <= 0) {
					pkt->sleep_time = 0;
					atomic_inc(&pkt->run_sm);
				}
			}

			if (kthread_should_stop())
				break;
		}
work_to_do:
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&pd->wqueue, &wait);

		if (kthread_should_stop())
			break;

		/*
		 * if pkt_handle_queue returns true, we can queue
		 * another request.
		 */
		while (pkt_handle_queue(pd))
			;

		/*
		 * Handle packet state machine
		 */
		pkt_handle_packets(pd);

		/*
		 * Handle iosched queues
		 */
		pkt_iosched_process_queue(pd);
	}

	return 0;
}
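/*
 * The worker thread above is driven by three wakeup sources, checked in
 * order on every iteration: pd->scan_queue (new bios queued, so try to
 * start a packet), pkt->run_sm on any active packet (its state machine
 * needs to advance), and pd->iosched.attention (the I/O scheduler queues
 * need processing).  When none is set, it sleeps until the shortest
 * pkt->sleep_time among the waiting packets expires.
 */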
static void pkt_print_settings(struct pktcdvd_device *pd)
{
	pkt_info(pd, "%s packets, %u blocks, Mode-%c disc\n",
		 pd->settings.fp ? "Fixed" : "Variable",
		 pd->settings.size >> 2,
		 pd->settings.block_mode == 8 ? '1' : '2');
}

static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
{
	memset(cgc->cmd, 0, sizeof(cgc->cmd));

	cgc->cmd[0] = GPCMD_MODE_SENSE_10;
	cgc->cmd[2] = page_code | (page_control << 6);
	cgc->cmd[7] = cgc->buflen >> 8;
	cgc->cmd[8] = cgc->buflen & 0xff;
	cgc->data_direction = CGC_DATA_READ;
	return pkt_generic_packet(pd, cgc);
}

static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
{
	memset(cgc->cmd, 0, sizeof(cgc->cmd));
	memset(cgc->buffer, 0, 2);
	cgc->cmd[0] = GPCMD_MODE_SELECT_10;
	cgc->cmd[1] = 0x10;		/* PF */
	cgc->cmd[7] = cgc->buflen >> 8;
	cgc->cmd[8] = cgc->buflen & 0xff;
	cgc->data_direction = CGC_DATA_WRITE;
	return pkt_generic_packet(pd, cgc);
}

static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
{
	struct packet_command cgc;
	int ret;

	/* set up command and get the disc info */
	init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_DISC_INFO;
	cgc.cmd[8] = cgc.buflen = 2;
	cgc.quiet = 1;

	ret = pkt_generic_packet(pd, &cgc);
	if (ret)
		return ret;

	/* not all drives have the same disc_info length, so requeue
	 * packet with the length the drive tells us it can supply
	 */
	cgc.buflen = be16_to_cpu(di->disc_information_length) +
		     sizeof(di->disc_information_length);

	if (cgc.buflen > sizeof(disc_information))
		cgc.buflen = sizeof(disc_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);
}

static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
{
	struct packet_command cgc;
	int ret;

	init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
	cgc.cmd[1] = type & 3;
	cgc.cmd[4] = (track & 0xff00) >> 8;
	cgc.cmd[5] = track & 0xff;
	cgc.cmd[8] = 8;
	cgc.quiet = 1;

	ret = pkt_generic_packet(pd, &cgc);
	if (ret)
		return ret;

	cgc.buflen = be16_to_cpu(ti->track_information_length) +
		     sizeof(ti->track_information_length);

	if (cgc.buflen > sizeof(track_information))
		cgc.buflen = sizeof(track_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);
}

static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
						long *last_written)
{
	disc_information di;
	track_information ti;
	__u32 last_track;
	int ret;

	ret = pkt_get_disc_info(pd, &di);
	if (ret)
		return ret;

	last_track = (di.last_track_msb << 8) | di.last_track_lsb;
	ret = pkt_get_track_info(pd, last_track, 1, &ti);
	if (ret)
		return ret;

	/* if this track is blank, try the previous. */
	if (ti.blank) {
		last_track--;
		ret = pkt_get_track_info(pd, last_track, 1, &ti);
		if (ret)
			return ret;
	}

	/* if last recorded field is valid, return it. */
	if (ti.lra_v) {
		*last_written = be32_to_cpu(ti.last_rec_address);
	} else {
		/* make it up instead */
		*last_written = be32_to_cpu(ti.track_start) +
				be32_to_cpu(ti.track_size);
		if (ti.free_blocks)
			*last_written -= (be32_to_cpu(ti.free_blocks) + 7);
	}
	return 0;
}

/*
 * write mode select package based on pd->settings
 */
static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	struct scsi_sense_hdr sshdr;
	write_param_page *wp;
	char buffer[128];
	int ret, size;

	/* doesn't apply to DVD+RW or DVD-RAM */
	if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
		return 0;

	memset(buffer, 0, sizeof(buffer));
	init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
	cgc.sshdr = &sshdr;
	ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
	if (ret) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
	pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
	if (size > sizeof(buffer))
		size = sizeof(buffer);

	/*
	 * now get it all
	 */
	init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
	cgc.sshdr = &sshdr;
	ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
	if (ret) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	/*
	 * write page is offset header + block descriptor length
	 */
	wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];

	wp->fp = pd->settings.fp;
	wp->track_mode = pd->settings.track_mode;
	wp->write_type = pd->settings.write_type;
	wp->data_block_type = pd->settings.block_mode;

	wp->multi_session = 0;

#ifdef PACKET_USE_LS
	wp->link_size = 7;
	wp->ls_v = 1;
#endif

	if (wp->data_block_type == PACKET_BLOCK_MODE1) {
		wp->session_format = 0;
		wp->subhdr2 = 0x20;
	} else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
		wp->session_format = 0x20;
		wp->subhdr2 = 8;
#if 0
		wp->mcn[0] = 0x80;
		memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
#endif
	} else {
		/*
		 * paranoia
		 */
		pkt_err(pd, "write mode wrong %d\n", wp->data_block_type);
		return 1;
	}
	wp->packet_size = cpu_to_be32(pd->settings.size >> 2);

	cgc.buflen = cgc.cmd[8] = size;
	ret = pkt_mode_select(pd, &cgc);
	if (ret) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	pkt_print_settings(pd);
	return 0;
}

/*
 * 1 -- we can write to this track, 0 -- we can't
 */
static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
{
	switch (pd->mmc3_profile) {
	case 0x1a: /* DVD+RW */
	case 0x12: /* DVD-RAM */
		/* The track is always writable on DVD+RW/DVD-RAM */
		return 1;
	default:
		break;
	}

	if (!ti->packet || !ti->fp)
		return 0;

	/*
	 * "good" settings as per Mt Fuji.
	 */
	if (ti->rt == 0 && ti->blank == 0)
		return 1;

	if (ti->rt == 0 && ti->blank == 1)
		return 1;

	if (ti->rt == 1 && ti->blank == 0)
		return 1;

	pkt_err(pd, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
	return 0;
}

/*
 * 1 -- we can write to this disc, 0 -- we can't
 */
static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
{
	switch (pd->mmc3_profile) {
	case 0x0a: /* CD-RW */
	case 0xffff: /* MMC3 not supported */
		break;
	case 0x1a: /* DVD+RW */
	case 0x13: /* DVD-RW */
	case 0x12: /* DVD-RAM */
		return 1;
	default:
		pkt_dbg(2, pd, "Wrong disc profile (%x)\n",
			pd->mmc3_profile);
		return 0;
	}

	/*
	 * for disc type 0xff we should probably reserve a new track.
	 * but i'm not sure, should we leave this to user apps? probably.
	 */
	if (di->disc_type == 0xff) {
		pkt_notice(pd, "unknown disc - no track?\n");
		return 0;
	}

	if (di->disc_type != 0x20 && di->disc_type != 0) {
		pkt_err(pd, "wrong disc type (%x)\n", di->disc_type);
		return 0;
	}

	if (di->erasable == 0) {
		pkt_notice(pd, "disc not erasable\n");
		return 0;
	}

	if (di->border_status == PACKET_SESSION_RESERVED) {
		pkt_err(pd, "can't write to last track (reserved)\n");
		return 0;
	}

	return 1;
}

static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	unsigned char buf[12];
	disc_information di;
	track_information ti;
	int ret, track;

	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
	cgc.cmd[8] = 8;
	ret = pkt_generic_packet(pd, &cgc);
	pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7];

	memset(&di, 0, sizeof(disc_information));
	memset(&ti, 0, sizeof(track_information));

	ret = pkt_get_disc_info(pd, &di);
	if (ret) {
		pkt_err(pd, "failed get_disc\n");
		return ret;
	}

	if (!pkt_writable_disc(pd, &di))
		return -EROFS;

	pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;

	track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
	ret = pkt_get_track_info(pd, track, 1, &ti);
	if (ret) {
		pkt_err(pd, "failed get_track\n");
		return ret;
	}

	if (!pkt_writable_track(pd, &ti)) {
		pkt_err(pd, "can't write to this track\n");
		return -EROFS;
	}

	/*
	 * we keep packet size in 512 byte units, makes it easier to
	 * deal with request calculations.
	 */
	pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
	if (pd->settings.size == 0) {
		pkt_notice(pd, "detected zero packet size!\n");
		return -ENXIO;
	}
	if (pd->settings.size > PACKET_MAX_SECTORS) {
		pkt_err(pd, "packet size is too big\n");
		return -EROFS;
	}
	pd->settings.fp = ti.fp;
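	/*
	 * Worked example for the offset below (illustrative numbers): the
	 * track start is reported in 2 KiB frames, so << 2 converts it to
	 * 512-byte sectors.  With a track starting at frame 150 (sector
	 * 600) and 64-sector packets, the zone grid is misaligned by
	 * 600 & 63 = 24 sectors, which get_zone() compensates for.
	 */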
	pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);

	if (ti.nwa_v) {
		pd->nwa = be32_to_cpu(ti.next_writable);
		set_bit(PACKET_NWA_VALID, &pd->flags);
	}

	/*
	 * in theory we could use lra on -RW media as well and just zero
	 * blocks that haven't been written yet, but in practice that
	 * is just a no-go. we'll use that for -R, naturally.
	 */
	if (ti.lra_v) {
		pd->lra = be32_to_cpu(ti.last_rec_address);
		set_bit(PACKET_LRA_VALID, &pd->flags);
	} else {
		pd->lra = 0xffffffff;
		set_bit(PACKET_LRA_VALID, &pd->flags);
	}

	/*
	 * fine for now
	 */
	pd->settings.link_loss = 7;
	pd->settings.write_type = 0;	/* packet */
	pd->settings.track_mode = ti.track_mode;

	/*
	 * mode1 or mode2 disc
	 */
	switch (ti.data_mode) {
	case PACKET_MODE1:
		pd->settings.block_mode = PACKET_BLOCK_MODE1;
		break;
	case PACKET_MODE2:
		pd->settings.block_mode = PACKET_BLOCK_MODE2;
		break;
	default:
		pkt_err(pd, "unknown data mode\n");
		return -EROFS;
	}
	return 0;
}

/*
 * enable/disable write caching on drive
 */
static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
						int set)
{
	struct packet_command cgc;
	struct scsi_sense_hdr sshdr;
	unsigned char buf[64];
	int ret;

	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	cgc.sshdr = &sshdr;
	cgc.buflen = pd->mode_offset + 12;

	/*
	 * caching mode page might not be there, so quiet this command
	 */
	cgc.quiet = 1;

	ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0);
	if (ret)
		return ret;

	buf[pd->mode_offset + 10] |= (!!set << 2);

	cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
	ret = pkt_mode_select(pd, &cgc);
	if (ret) {
		pkt_err(pd, "write caching control failed\n");
		pkt_dump_sense(pd, &cgc);
	} else if (!ret && set)
		pkt_notice(pd, "enabled write caching\n");
	return ret;
}

static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
{
	struct packet_command cgc;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
	cgc.cmd[4] = lockflag ? 1 : 0;
	return pkt_generic_packet(pd, &cgc);
}

/*
 * Returns drive maximum write speed
 */
static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
						unsigned *write_speed)
{
	struct packet_command cgc;
	struct scsi_sense_hdr sshdr;
	unsigned char buf[256+18];
	unsigned char *cap_buf;
	int ret, offset;

	cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
	cgc.sshdr = &sshdr;

	ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
	if (ret) {
		cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
			     sizeof(struct mode_page_header);
		ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
		if (ret) {
			pkt_dump_sense(pd, &cgc);
			return ret;
		}
	}

	offset = 20;			    /* Obsoleted field, used by older drives */
	if (cap_buf[1] >= 28)
		offset = 28;		    /* Current write speed selected */
	if (cap_buf[1] >= 30) {
		/* If the drive reports at least one "Logical Unit Write
		 * Speed Performance Descriptor Block", use the information
		 * in the first block. (contains the highest speed)
		 */
		int num_spdb = (cap_buf[30] << 8) + cap_buf[31];

		if (num_spdb > 0)
			offset = 34;
	}

	*write_speed = (cap_buf[offset] << 8) | cap_buf[offset + 1];
	return 0;
}

/* These tables from cdrecord - I don't have orange book */
/* standard speed CD-RW (1-4x) */
static char clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/* high speed CD-RW (-10x) */
static char hs_clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/* ultra high speed CD-RW */
static char us_clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 8, 0, 0, 16, 0, 24, 32, 40, 48, 0, 0, 0, 0
};

/*
 * reads the maximum media speed from ATIP
 */
static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
						unsigned *speed)
{
	struct packet_command cgc;
	struct scsi_sense_hdr sshdr;
	unsigned char buf[64];
	unsigned int size, st, sp;
	int ret;

	init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
	cgc.sshdr = &sshdr;
	cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
	cgc.cmd[1] = 2;
	cgc.cmd[2] = 4; /* READ ATIP */
	cgc.cmd[8] = 2;
	ret = pkt_generic_packet(pd, &cgc);
	if (ret) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}
	size = ((unsigned int) buf[0]<<8) + buf[1] + 2;
	if (size > sizeof(buf))
		size = sizeof(buf);

	init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
	cgc.sshdr = &sshdr;
	cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
	cgc.cmd[1] = 2;
	cgc.cmd[2] = 4;
	cgc.cmd[8] = size;
	ret = pkt_generic_packet(pd, &cgc);
	if (ret) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	if (!(buf[6] & 0x40)) {
		pkt_notice(pd, "disc type is not CD-RW\n");
		return 1;
	}
	if (!(buf[6] & 0x4)) {
		pkt_notice(pd, "A1 values on media are not valid, maybe not CDRW?\n");
		return 1;
	}

	st = (buf[6] >> 3) & 0x7; /* disc sub-type */

	sp = buf[16] & 0xf; /* max speed from ATIP A1 field */

	/* Info from cdrecord */
	switch (st) {
	case 0: /* standard speed */
		*speed = clv_to_speed[sp];
		break;
	case 1: /* high speed */
		*speed = hs_clv_to_speed[sp];
		break;
	case 2: /* ultra high speed */
		*speed = us_clv_to_speed[sp];
		break;
	default:
		pkt_notice(pd, "unknown disc sub-type %d\n", st);
		return 1;
	}
	if (*speed) {
		pkt_info(pd, "maximum media speed: %d\n", *speed);
		return 0;
	} else {
		pkt_notice(pd, "unknown speed %d for sub-type %d\n", sp, st);
		return 1;
	}
}

static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	struct scsi_sense_hdr sshdr;
	int ret;

	pkt_dbg(2, pd, "Performing OPC\n");

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.sshdr = &sshdr;
	cgc.timeout = 60*HZ;
	cgc.cmd[0] = GPCMD_SEND_OPC;
	cgc.cmd[1] = 1;
	ret = pkt_generic_packet(pd, &cgc);
	if (ret)
		pkt_dump_sense(pd, &cgc);
	return ret;
}
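/*
 * OPC (Optimum Power Calibration) asks the drive to calibrate its laser
 * power against the inserted media before writing.  As the caller below
 * shows, a calibration failure is logged but does not prevent the device
 * from being opened for writing.
 */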
"failed probe\n"); 2093 return ret; 2094 } 2095 2096 ret = pkt_set_write_settings(pd); 2097 if (ret) { 2098 pkt_dbg(1, pd, "failed saving write settings\n"); 2099 return -EIO; 2100 } 2101 2102 pkt_write_caching(pd, USE_WCACHING); 2103 2104 ret = pkt_get_max_speed(pd, &write_speed); 2105 if (ret) 2106 write_speed = 16 * 177; 2107 switch (pd->mmc3_profile) { 2108 case 0x13: /* DVD-RW */ 2109 case 0x1a: /* DVD+RW */ 2110 case 0x12: /* DVD-RAM */ 2111 pkt_dbg(1, pd, "write speed %ukB/s\n", write_speed); 2112 break; 2113 default: 2114 ret = pkt_media_speed(pd, &media_write_speed); 2115 if (ret) 2116 media_write_speed = 16; 2117 write_speed = min(write_speed, media_write_speed * 177); 2118 pkt_dbg(1, pd, "write speed %ux\n", write_speed / 176); 2119 break; 2120 } 2121 read_speed = write_speed; 2122 2123 ret = pkt_set_speed(pd, write_speed, read_speed); 2124 if (ret) { 2125 pkt_dbg(1, pd, "couldn't set write speed\n"); 2126 return -EIO; 2127 } 2128 pd->write_speed = write_speed; 2129 pd->read_speed = read_speed; 2130 2131 ret = pkt_perform_opc(pd); 2132 if (ret) { 2133 pkt_dbg(1, pd, "Optimum Power Calibration failed\n"); 2134 } 2135 2136 return 0; 2137 } 2138 2139 /* 2140 * called at open time. 2141 */ 2142 static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write) 2143 { 2144 int ret; 2145 long lba; 2146 struct request_queue *q; 2147 struct block_device *bdev; 2148 2149 /* 2150 * We need to re-open the cdrom device without O_NONBLOCK to be able 2151 * to read/write from/to it. It is already opened in O_NONBLOCK mode 2152 * so open should not fail. 2153 */ 2154 bdev = blkdev_get_by_dev(pd->bdev->bd_dev, FMODE_READ | FMODE_EXCL, pd); 2155 if (IS_ERR(bdev)) { 2156 ret = PTR_ERR(bdev); 2157 goto out; 2158 } 2159 2160 ret = pkt_get_last_written(pd, &lba); 2161 if (ret) { 2162 pkt_err(pd, "pkt_get_last_written failed\n"); 2163 goto out_putdev; 2164 } 2165 2166 set_capacity(pd->disk, lba << 2); 2167 set_capacity_and_notify(pd->bdev->bd_disk, lba << 2); 2168 2169 q = bdev_get_queue(pd->bdev); 2170 if (write) { 2171 ret = pkt_open_write(pd); 2172 if (ret) 2173 goto out_putdev; 2174 /* 2175 * Some CDRW drives can not handle writes larger than one packet, 2176 * even if the size is a multiple of the packet size. 2177 */ 2178 blk_queue_max_hw_sectors(q, pd->settings.size); 2179 set_bit(PACKET_WRITABLE, &pd->flags); 2180 } else { 2181 pkt_set_speed(pd, MAX_SPEED, MAX_SPEED); 2182 clear_bit(PACKET_WRITABLE, &pd->flags); 2183 } 2184 2185 ret = pkt_set_segment_merging(pd, q); 2186 if (ret) 2187 goto out_putdev; 2188 2189 if (write) { 2190 if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) { 2191 pkt_err(pd, "not enough memory for buffers\n"); 2192 ret = -ENOMEM; 2193 goto out_putdev; 2194 } 2195 pkt_info(pd, "%lukB available on disc\n", lba << 1); 2196 } 2197 2198 return 0; 2199 2200 out_putdev: 2201 blkdev_put(bdev, FMODE_READ | FMODE_EXCL); 2202 out: 2203 return ret; 2204 } 2205 2206 /* 2207 * called when the device is closed. makes sure that the device flushes 2208 * the internal cache before we close. 
/*
 * Called when the device is closed. Makes sure that the device flushes
 * its internal cache before we close.
 */
static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
{
	if (flush && pkt_flush_cache(pd))
		pkt_dbg(1, pd, "not flushing cache\n");

	pkt_lock_door(pd, 0);

	pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
	blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);

	pkt_shrink_pktlist(pd);
}

static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
{
	if (dev_minor >= MAX_WRITERS)
		return NULL;

	dev_minor = array_index_nospec(dev_minor, MAX_WRITERS);
	return pkt_devs[dev_minor];
}

static int pkt_open(struct block_device *bdev, fmode_t mode)
{
	struct pktcdvd_device *pd = NULL;
	int ret;

	mutex_lock(&pktcdvd_mutex);
	mutex_lock(&ctl_mutex);
	pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
	if (!pd) {
		ret = -ENODEV;
		goto out;
	}
	BUG_ON(pd->refcnt < 0);

	pd->refcnt++;
	if (pd->refcnt > 1) {
		if ((mode & FMODE_WRITE) &&
		    !test_bit(PACKET_WRITABLE, &pd->flags)) {
			ret = -EBUSY;
			goto out_dec;
		}
	} else {
		ret = pkt_open_dev(pd, mode & FMODE_WRITE);
		if (ret)
			goto out_dec;
		/*
		 * needed here as well, since ext2 (among others) may change
		 * the blocksize at mount time
		 */
		set_blocksize(bdev, CD_FRAMESIZE);
	}

	mutex_unlock(&ctl_mutex);
	mutex_unlock(&pktcdvd_mutex);
	return 0;

out_dec:
	pd->refcnt--;
out:
	mutex_unlock(&ctl_mutex);
	mutex_unlock(&pktcdvd_mutex);
	return ret;
}

static void pkt_close(struct gendisk *disk, fmode_t mode)
{
	struct pktcdvd_device *pd = disk->private_data;

	mutex_lock(&pktcdvd_mutex);
	mutex_lock(&ctl_mutex);
	pd->refcnt--;
	BUG_ON(pd->refcnt < 0);
	if (pd->refcnt == 0) {
		int flush = test_bit(PACKET_WRITABLE, &pd->flags);
		pkt_release_dev(pd, flush);
	}
	mutex_unlock(&ctl_mutex);
	mutex_unlock(&pktcdvd_mutex);
}


static void pkt_end_io_read_cloned(struct bio *bio)
{
	struct packet_stacked_data *psd = bio->bi_private;
	struct pktcdvd_device *pd = psd->pd;

	psd->bio->bi_status = bio->bi_status;
	bio_put(bio);
	bio_endio(psd->bio);
	mempool_free(psd, &psd_pool);
	pkt_bio_finished(pd);
}

static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
{
	struct bio *cloned_bio = bio_clone_fast(bio, GFP_NOIO, &pkt_bio_set);
	struct packet_stacked_data *psd = mempool_alloc(&psd_pool, GFP_NOIO);

	psd->pd = pd;
	psd->bio = bio;
	bio_set_dev(cloned_bio, pd->bdev);
	cloned_bio->bi_private = psd;
	cloned_bio->bi_end_io = pkt_end_io_read_cloned;
	pd->stats.secs_r += bio_sectors(bio);
	pkt_queue_bio(pd, cloned_bio);
}
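
/*
 * Worked example for the zone matching below (numbers are illustrative,
 * assuming a 32-frame fixed packet, i.e. pd->settings.size == 128 sectors,
 * and pd->offset == 0): a write to sector 300 maps to
 * get_zone(300, pd) == 256, so it can be merged into any active packet
 * that is collecting bios for sectors 256..383.
 */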
2332 */ 2333 spin_lock(&pd->cdrw.active_list_lock); 2334 blocked_bio = 0; 2335 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { 2336 if (pkt->sector == zone) { 2337 spin_lock(&pkt->lock); 2338 if ((pkt->state == PACKET_WAITING_STATE) || 2339 (pkt->state == PACKET_READ_WAIT_STATE)) { 2340 bio_list_add(&pkt->orig_bios, bio); 2341 pkt->write_size += 2342 bio->bi_iter.bi_size / CD_FRAMESIZE; 2343 if ((pkt->write_size >= pkt->frames) && 2344 (pkt->state == PACKET_WAITING_STATE)) { 2345 atomic_inc(&pkt->run_sm); 2346 wake_up(&pd->wqueue); 2347 } 2348 spin_unlock(&pkt->lock); 2349 spin_unlock(&pd->cdrw.active_list_lock); 2350 return; 2351 } else { 2352 blocked_bio = 1; 2353 } 2354 spin_unlock(&pkt->lock); 2355 } 2356 } 2357 spin_unlock(&pd->cdrw.active_list_lock); 2358 2359 /* 2360 * Test if there is enough room left in the bio work queue 2361 * (queue size >= congestion on mark). 2362 * If not, wait till the work queue size is below the congestion off mark. 2363 */ 2364 spin_lock(&pd->lock); 2365 if (pd->write_congestion_on > 0 2366 && pd->bio_queue_size >= pd->write_congestion_on) { 2367 set_bdi_congested(bio->bi_bdev->bd_disk->bdi, BLK_RW_ASYNC); 2368 do { 2369 spin_unlock(&pd->lock); 2370 congestion_wait(BLK_RW_ASYNC, HZ); 2371 spin_lock(&pd->lock); 2372 } while(pd->bio_queue_size > pd->write_congestion_off); 2373 } 2374 spin_unlock(&pd->lock); 2375 2376 /* 2377 * No matching packet found. Store the bio in the work queue. 2378 */ 2379 node = mempool_alloc(&pd->rb_pool, GFP_NOIO); 2380 node->bio = bio; 2381 spin_lock(&pd->lock); 2382 BUG_ON(pd->bio_queue_size < 0); 2383 was_empty = (pd->bio_queue_size == 0); 2384 pkt_rbtree_insert(pd, node); 2385 spin_unlock(&pd->lock); 2386 2387 /* 2388 * Wake up the worker thread. 2389 */ 2390 atomic_set(&pd->scan_queue, 1); 2391 if (was_empty) { 2392 /* This wake_up is required for correct operation */ 2393 wake_up(&pd->wqueue); 2394 } else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) { 2395 /* 2396 * This wake up is not required for correct operation, 2397 * but improves performance in some cases. 2398 */ 2399 wake_up(&pd->wqueue); 2400 } 2401 } 2402 2403 static void pkt_submit_bio(struct bio *bio) 2404 { 2405 struct pktcdvd_device *pd; 2406 char b[BDEVNAME_SIZE]; 2407 struct bio *split; 2408 2409 blk_queue_split(&bio); 2410 2411 pd = bio->bi_bdev->bd_disk->queue->queuedata; 2412 if (!pd) { 2413 pr_err("%s incorrect request queue\n", bio_devname(bio, b)); 2414 goto end_io; 2415 } 2416 2417 pkt_dbg(2, pd, "start = %6llx stop = %6llx\n", 2418 (unsigned long long)bio->bi_iter.bi_sector, 2419 (unsigned long long)bio_end_sector(bio)); 2420 2421 /* 2422 * Clone READ bios so we can have our own bi_end_io callback. 
2423 */ 2424 if (bio_data_dir(bio) == READ) { 2425 pkt_make_request_read(pd, bio); 2426 return; 2427 } 2428 2429 if (!test_bit(PACKET_WRITABLE, &pd->flags)) { 2430 pkt_notice(pd, "WRITE for ro device (%llu)\n", 2431 (unsigned long long)bio->bi_iter.bi_sector); 2432 goto end_io; 2433 } 2434 2435 if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) { 2436 pkt_err(pd, "wrong bio size\n"); 2437 goto end_io; 2438 } 2439 2440 do { 2441 sector_t zone = get_zone(bio->bi_iter.bi_sector, pd); 2442 sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd); 2443 2444 if (last_zone != zone) { 2445 BUG_ON(last_zone != zone + pd->settings.size); 2446 2447 split = bio_split(bio, last_zone - 2448 bio->bi_iter.bi_sector, 2449 GFP_NOIO, &pkt_bio_set); 2450 bio_chain(split, bio); 2451 } else { 2452 split = bio; 2453 } 2454 2455 pkt_make_request_write(bio->bi_bdev->bd_disk->queue, split); 2456 } while (split != bio); 2457 2458 return; 2459 end_io: 2460 bio_io_error(bio); 2461 } 2462 2463 static void pkt_init_queue(struct pktcdvd_device *pd) 2464 { 2465 struct request_queue *q = pd->disk->queue; 2466 2467 blk_queue_logical_block_size(q, CD_FRAMESIZE); 2468 blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS); 2469 q->queuedata = pd; 2470 } 2471 2472 static int pkt_seq_show(struct seq_file *m, void *p) 2473 { 2474 struct pktcdvd_device *pd = m->private; 2475 char *msg; 2476 char bdev_buf[BDEVNAME_SIZE]; 2477 int states[PACKET_NUM_STATES]; 2478 2479 seq_printf(m, "Writer %s mapped to %s:\n", pd->name, 2480 bdevname(pd->bdev, bdev_buf)); 2481 2482 seq_printf(m, "\nSettings:\n"); 2483 seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2); 2484 2485 if (pd->settings.write_type == 0) 2486 msg = "Packet"; 2487 else 2488 msg = "Unknown"; 2489 seq_printf(m, "\twrite type:\t\t%s\n", msg); 2490 2491 seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? 
"Fixed" : "Variable"); 2492 seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss); 2493 2494 seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode); 2495 2496 if (pd->settings.block_mode == PACKET_BLOCK_MODE1) 2497 msg = "Mode 1"; 2498 else if (pd->settings.block_mode == PACKET_BLOCK_MODE2) 2499 msg = "Mode 2"; 2500 else 2501 msg = "Unknown"; 2502 seq_printf(m, "\tblock mode:\t\t%s\n", msg); 2503 2504 seq_printf(m, "\nStatistics:\n"); 2505 seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started); 2506 seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended); 2507 seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1); 2508 seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1); 2509 seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1); 2510 2511 seq_printf(m, "\nMisc:\n"); 2512 seq_printf(m, "\treference count:\t%d\n", pd->refcnt); 2513 seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags); 2514 seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed); 2515 seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed); 2516 seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset); 2517 seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset); 2518 2519 seq_printf(m, "\nQueue state:\n"); 2520 seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size); 2521 seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios)); 2522 seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector); 2523 2524 pkt_count_states(pd, states); 2525 seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n", 2526 states[0], states[1], states[2], states[3], states[4], states[5]); 2527 2528 seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n", 2529 pd->write_congestion_off, 2530 pd->write_congestion_on); 2531 return 0; 2532 } 2533 2534 static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) 2535 { 2536 int i; 2537 char b[BDEVNAME_SIZE]; 2538 struct block_device *bdev; 2539 struct scsi_device *sdev; 2540 2541 if (pd->pkt_dev == dev) { 2542 pkt_err(pd, "recursive setup not allowed\n"); 2543 return -EBUSY; 2544 } 2545 for (i = 0; i < MAX_WRITERS; i++) { 2546 struct pktcdvd_device *pd2 = pkt_devs[i]; 2547 if (!pd2) 2548 continue; 2549 if (pd2->bdev->bd_dev == dev) { 2550 pkt_err(pd, "%s already setup\n", 2551 bdevname(pd2->bdev, b)); 2552 return -EBUSY; 2553 } 2554 if (pd2->pkt_dev == dev) { 2555 pkt_err(pd, "can't chain pktcdvd devices\n"); 2556 return -EBUSY; 2557 } 2558 } 2559 2560 bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_NDELAY, NULL); 2561 if (IS_ERR(bdev)) 2562 return PTR_ERR(bdev); 2563 sdev = scsi_device_from_queue(bdev->bd_disk->queue); 2564 if (!sdev) { 2565 blkdev_put(bdev, FMODE_READ | FMODE_NDELAY); 2566 return -EINVAL; 2567 } 2568 put_device(&sdev->sdev_gendev); 2569 2570 /* This is safe, since we have a reference from open(). */ 2571 __module_get(THIS_MODULE); 2572 2573 pd->bdev = bdev; 2574 set_blocksize(bdev, CD_FRAMESIZE); 2575 2576 pkt_init_queue(pd); 2577 2578 atomic_set(&pd->cdrw.pending_bios, 0); 2579 pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name); 2580 if (IS_ERR(pd->cdrw.thread)) { 2581 pkt_err(pd, "can't start kernel thread\n"); 2582 goto out_mem; 2583 } 2584 2585 proc_create_single_data(pd->name, 0, pkt_proc, pkt_seq_show, pd); 2586 pkt_dbg(1, pd, "writer mapped to %s\n", bdevname(bdev, b)); 2587 return 0; 2588 2589 out_mem: 2590 blkdev_put(bdev, FMODE_READ | FMODE_NDELAY); 2591 /* This is safe: open() is still holding a reference. 
static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
{
	struct pktcdvd_device *pd = bdev->bd_disk->private_data;
	int ret;

	pkt_dbg(2, pd, "cmd %x, dev %d:%d\n",
		cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));

	mutex_lock(&pktcdvd_mutex);
	switch (cmd) {
	case CDROMEJECT:
		/*
		 * The door gets locked when the device is opened, so we
		 * have to unlock it or else the eject command fails.
		 */
		if (pd->refcnt == 1)
			pkt_lock_door(pd, 0);
		fallthrough;
	/*
	 * Forward selected CDROM ioctls to the CD-ROM driver, for UDF.
	 */
	case CDROMMULTISESSION:
	case CDROMREADTOCENTRY:
	case CDROM_LAST_WRITTEN:
	case CDROM_SEND_PACKET:
	case SCSI_IOCTL_SEND_COMMAND:
		if (!bdev->bd_disk->fops->ioctl)
			ret = -ENOTTY;
		else
			ret = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
		break;
	default:
		pkt_dbg(2, pd, "Unknown ioctl (%x)\n", cmd);
		ret = -ENOTTY;
	}
	mutex_unlock(&pktcdvd_mutex);

	return ret;
}

static unsigned int pkt_check_events(struct gendisk *disk,
				     unsigned int clearing)
{
	struct pktcdvd_device *pd = disk->private_data;
	struct gendisk *attached_disk;

	if (!pd)
		return 0;
	if (!pd->bdev)
		return 0;
	attached_disk = pd->bdev->bd_disk;
	if (!attached_disk || !attached_disk->fops->check_events)
		return 0;
	return attached_disk->fops->check_events(attached_disk, clearing);
}

static char *pkt_devnode(struct gendisk *disk, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "pktcdvd/%s", disk->disk_name);
}

static const struct block_device_operations pktcdvd_ops = {
	.owner = THIS_MODULE,
	.submit_bio = pkt_submit_bio,
	.open = pkt_open,
	.release = pkt_close,
	.ioctl = pkt_ioctl,
	.compat_ioctl = blkdev_compat_ptr_ioctl,
	.check_events = pkt_check_events,
	.devnode = pkt_devnode,
};
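
/*
 * Note: the devnode callback above places the block nodes in a
 * subdirectory, so a writer named pktcdvd0 typically shows up as
 * /dev/pktcdvd/pktcdvd0 (the exact path depends on the udev setup).
 */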
2670 */ 2671 static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev) 2672 { 2673 int idx; 2674 int ret = -ENOMEM; 2675 struct pktcdvd_device *pd; 2676 struct gendisk *disk; 2677 2678 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); 2679 2680 for (idx = 0; idx < MAX_WRITERS; idx++) 2681 if (!pkt_devs[idx]) 2682 break; 2683 if (idx == MAX_WRITERS) { 2684 pr_err("max %d writers supported\n", MAX_WRITERS); 2685 ret = -EBUSY; 2686 goto out_mutex; 2687 } 2688 2689 pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL); 2690 if (!pd) 2691 goto out_mutex; 2692 2693 ret = mempool_init_kmalloc_pool(&pd->rb_pool, PKT_RB_POOL_SIZE, 2694 sizeof(struct pkt_rb_node)); 2695 if (ret) 2696 goto out_mem; 2697 2698 INIT_LIST_HEAD(&pd->cdrw.pkt_free_list); 2699 INIT_LIST_HEAD(&pd->cdrw.pkt_active_list); 2700 spin_lock_init(&pd->cdrw.active_list_lock); 2701 2702 spin_lock_init(&pd->lock); 2703 spin_lock_init(&pd->iosched.lock); 2704 bio_list_init(&pd->iosched.read_queue); 2705 bio_list_init(&pd->iosched.write_queue); 2706 sprintf(pd->name, DRIVER_NAME"%d", idx); 2707 init_waitqueue_head(&pd->wqueue); 2708 pd->bio_queue = RB_ROOT; 2709 2710 pd->write_congestion_on = write_congestion_on; 2711 pd->write_congestion_off = write_congestion_off; 2712 2713 ret = -ENOMEM; 2714 disk = blk_alloc_disk(NUMA_NO_NODE); 2715 if (!disk) 2716 goto out_mem; 2717 pd->disk = disk; 2718 disk->major = pktdev_major; 2719 disk->first_minor = idx; 2720 disk->minors = 1; 2721 disk->fops = &pktcdvd_ops; 2722 disk->flags = GENHD_FL_REMOVABLE; 2723 strcpy(disk->disk_name, pd->name); 2724 disk->private_data = pd; 2725 2726 pd->pkt_dev = MKDEV(pktdev_major, idx); 2727 ret = pkt_new_dev(pd, dev); 2728 if (ret) 2729 goto out_mem2; 2730 2731 /* inherit events of the host device */ 2732 disk->events = pd->bdev->bd_disk->events; 2733 2734 ret = add_disk(disk); 2735 if (ret) 2736 goto out_mem2; 2737 2738 pkt_sysfs_dev_new(pd); 2739 pkt_debugfs_dev_new(pd); 2740 2741 pkt_devs[idx] = pd; 2742 if (pkt_dev) 2743 *pkt_dev = pd->pkt_dev; 2744 2745 mutex_unlock(&ctl_mutex); 2746 return 0; 2747 2748 out_mem2: 2749 blk_cleanup_disk(disk); 2750 out_mem: 2751 mempool_exit(&pd->rb_pool); 2752 kfree(pd); 2753 out_mutex: 2754 mutex_unlock(&ctl_mutex); 2755 pr_err("setup of pktcdvd device failed\n"); 2756 return ret; 2757 } 2758 2759 /* 2760 * Tear down mapping from pktcdvd device to CD-ROM device. 2761 */ 2762 static int pkt_remove_dev(dev_t pkt_dev) 2763 { 2764 struct pktcdvd_device *pd; 2765 int idx; 2766 int ret = 0; 2767 2768 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); 2769 2770 for (idx = 0; idx < MAX_WRITERS; idx++) { 2771 pd = pkt_devs[idx]; 2772 if (pd && (pd->pkt_dev == pkt_dev)) 2773 break; 2774 } 2775 if (idx == MAX_WRITERS) { 2776 pr_debug("dev not setup\n"); 2777 ret = -ENXIO; 2778 goto out; 2779 } 2780 2781 if (pd->refcnt > 0) { 2782 ret = -EBUSY; 2783 goto out; 2784 } 2785 if (!IS_ERR(pd->cdrw.thread)) 2786 kthread_stop(pd->cdrw.thread); 2787 2788 pkt_devs[idx] = NULL; 2789 2790 pkt_debugfs_dev_remove(pd); 2791 pkt_sysfs_dev_remove(pd); 2792 2793 blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY); 2794 2795 remove_proc_entry(pd->name, pkt_proc); 2796 pkt_dbg(1, pd, "writer unmapped\n"); 2797 2798 del_gendisk(pd->disk); 2799 blk_cleanup_disk(pd->disk); 2800 2801 mempool_exit(&pd->rb_pool); 2802 kfree(pd); 2803 2804 /* This is safe: open() is still holding a reference. 
static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
{
	struct pktcdvd_device *pd;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
	if (pd) {
		ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
		ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
	} else {
		ctrl_cmd->dev = 0;
		ctrl_cmd->pkt_dev = 0;
	}
	ctrl_cmd->num_devices = MAX_WRITERS;

	mutex_unlock(&ctl_mutex);
}

static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct pkt_ctrl_command ctrl_cmd;
	int ret = 0;
	dev_t pkt_dev = 0;

	if (cmd != PACKET_CTRL_CMD)
		return -ENOTTY;

	if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command)))
		return -EFAULT;

	switch (ctrl_cmd.command) {
	case PKT_CTRL_CMD_SETUP:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
		ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
		break;
	case PKT_CTRL_CMD_TEARDOWN:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
		break;
	case PKT_CTRL_CMD_STATUS:
		pkt_get_status(&ctrl_cmd);
		break;
	default:
		return -ENOTTY;
	}

	if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command)))
		return -EFAULT;
	return ret;
}

#ifdef CONFIG_COMPAT
static long pkt_ctl_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return pkt_ctl_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations pkt_ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = pkt_ctl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = pkt_ctl_compat_ioctl,
#endif
	.owner = THIS_MODULE,
	.llseek = no_llseek,
};

static struct miscdevice pkt_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = DRIVER_NAME,
	.nodename = "pktcdvd/control",
	.fops = &pkt_ctl_fops
};

static int __init pkt_init(void)
{
	int ret;

	mutex_init(&ctl_mutex);

	ret = mempool_init_kmalloc_pool(&psd_pool, PSD_POOL_SIZE,
					sizeof(struct packet_stacked_data));
	if (ret)
		return ret;
	ret = bioset_init(&pkt_bio_set, BIO_POOL_SIZE, 0, 0);
	if (ret) {
		mempool_exit(&psd_pool);
		return ret;
	}

	ret = register_blkdev(pktdev_major, DRIVER_NAME);
	if (ret < 0) {
		pr_err("unable to register block device\n");
		goto out2;
	}
	if (!pktdev_major)
		pktdev_major = ret;

	ret = pkt_sysfs_init();
	if (ret)
		goto out;

	pkt_debugfs_init();

	ret = misc_register(&pkt_misc);
	if (ret) {
		pr_err("unable to register misc device\n");
		goto out_misc;
	}

	pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL);

	return 0;

out_misc:
	pkt_debugfs_cleanup();
	pkt_sysfs_cleanup();
out:
	unregister_blkdev(pktdev_major, DRIVER_NAME);
out2:
	mempool_exit(&psd_pool);
	bioset_exit(&pkt_bio_set);
	return ret;
}
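
/*
 * Module teardown below releases resources in the reverse order of
 * pkt_init(): proc entry, misc device, debugfs, sysfs, the block major,
 * and finally the shared mempool and bio set.
 */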
static void __exit pkt_exit(void)
{
	remove_proc_entry("driver/"DRIVER_NAME, NULL);
	misc_deregister(&pkt_misc);

	pkt_debugfs_cleanup();
	pkt_sysfs_cleanup();

	unregister_blkdev(pktdev_major, DRIVER_NAME);
	mempool_exit(&psd_pool);
	bioset_exit(&pkt_bio_set);
}

MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
MODULE_LICENSE("GPL");

module_init(pkt_init);
module_exit(pkt_exit);