/*
 * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
 * Copyright (C) 2006 Thomas Maier <balagi@justmail.de>
 *
 * May be copied or modified under the terms of the GNU General Public
 * License.  See linux/COPYING for more information.
 *
 * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and
 * DVD-RAM devices.
 *
 * Theory of operation:
 *
 * At the lowest level, there is the standard driver for the CD/DVD device,
 * typically ide-cd.c or sr.c. This driver can handle read and write requests,
 * but it doesn't know anything about the special restrictions that apply to
 * packet writing. One restriction is that write requests must be aligned to
 * packet boundaries on the physical media, and the size of a write request
 * must be equal to the packet size. Another restriction is that a
 * GPCMD_FLUSH_CACHE command has to be issued to the drive before a read
 * command, if the previous command was a write.
 *
 * The purpose of the packet writing driver is to hide these restrictions from
 * higher layers, such as file systems, and present a block device that can be
 * randomly read and written using 2kB-sized blocks.
 *
 * The lowest layer in the packet writing driver is the packet I/O scheduler.
 * Its data is defined by the struct packet_iosched and includes two bio
 * queues with pending read and write requests. These queues are processed
 * by the pkt_iosched_process_queue() function. The write requests in this
 * queue are already properly aligned and sized. This layer is responsible for
 * issuing the flush cache commands and scheduling the I/O in a good order.
 *
 * The next layer transforms unaligned write requests to aligned writes. This
 * transformation requires reading missing pieces of data from the underlying
 * block device, assembling the pieces to full packets and queuing them to the
 * packet I/O scheduler.
 *
 * At the top layer there is a custom ->submit_bio function that forwards
 * read requests directly to the iosched queue and puts write requests in the
 * unaligned write queue. A kernel thread performs the necessary read
 * gathering to convert the unaligned writes to aligned writes and then feeds
 * them to the packet I/O scheduler.
 *
 *************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/pktcdvd.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/freezer.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/nospec.h>
#include <linux/uaccess.h>

#define DRIVER_NAME	"pktcdvd"

#define pkt_err(pd, fmt, ...)						\
	pr_err("%s: " fmt, pd->name, ##__VA_ARGS__)
#define pkt_notice(pd, fmt, ...)					\
	pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__)
#define pkt_info(pd, fmt, ...)						\
	pr_info("%s: " fmt, pd->name, ##__VA_ARGS__)

#define pkt_dbg(level, pd, fmt, ...)					\
do {									\
	if (level == 2 && PACKET_DEBUG >= 2)				\
		pr_notice("%s: %s(): " fmt,				\
			  pd->name, __func__, ##__VA_ARGS__);		\
	else if (level == 1 && PACKET_DEBUG >= 1)			\
		pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__);		\
} while (0)
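
/*
 * Usage sketch (illustrative, assuming a device named pktcdvd0 and
 * PACKET_DEBUG >= 2):
 *
 *	pkt_dbg(2, pd, "queue empty\n");
 *
 * logs "pktcdvd0: <caller>(): queue empty"; level 1 messages drop the
 * function name, and each level is compiled out entirely when
 * PACKET_DEBUG is below it.
 */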

#define MAX_SPEED 0xffff

static DEFINE_MUTEX(pktcdvd_mutex);
static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
static struct proc_dir_entry *pkt_proc;
static int pktdev_major;
static int write_congestion_on  = PKT_WRITE_CONGESTION_ON;
static int write_congestion_off = PKT_WRITE_CONGESTION_OFF;
static struct mutex ctl_mutex;	/* Serialize open/close/setup/teardown */
static mempool_t psd_pool;
static struct bio_set pkt_bio_set;

static struct class	*class_pktcdvd = NULL;    /* /sys/class/pktcdvd */
static struct dentry	*pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */

/* forward declaration */
static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev);
static int pkt_remove_dev(dev_t pkt_dev);
static int pkt_seq_show(struct seq_file *m, void *p);

static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
{
	return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
}

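/*
 * Worked example (hypothetical numbers): with a fixed packet size of
 * 128 sectors (64kB) and pd->offset == 0, the mask is ~127, so sectors
 * 0..127 map to zone 0 and sector 1000 maps to zone 896. A non-zero
 * pd->offset shifts the grid so that zones coincide with the packet
 * boundaries on the media.
 */
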
/*
 * create and register a pktcdvd kernel object.
 */
static struct pktcdvd_kobj* pkt_kobj_create(struct pktcdvd_device *pd,
					const char* name,
					struct kobject* parent,
					struct kobj_type* ktype)
{
	struct pktcdvd_kobj *p;
	int error;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;
	p->pd = pd;
	error = kobject_init_and_add(&p->kobj, ktype, parent, "%s", name);
	if (error) {
		kobject_put(&p->kobj);
		return NULL;
	}
	kobject_uevent(&p->kobj, KOBJ_ADD);
	return p;
}
/*
 * remove a pktcdvd kernel object.
 */
static void pkt_kobj_remove(struct pktcdvd_kobj *p)
{
	if (p)
		kobject_put(&p->kobj);
}
/*
 * default release function for pktcdvd kernel objects.
 */
static void pkt_kobj_release(struct kobject *kobj)
{
	kfree(to_pktcdvdkobj(kobj));
}


/**********************************************************
 *
 * sysfs interface for pktcdvd
 * by (C) 2006  Thomas Maier <balagi@justmail.de>
 *
 **********************************************************/

#define DEF_ATTR(_obj,_name,_mode) \
	static struct attribute _obj = { .name = _name, .mode = _mode }

/**********************************************************
  /sys/class/pktcdvd/pktcdvd[0-7]/
                     stat/reset
                     stat/packets_started
                     stat/packets_finished
                     stat/kb_written
                     stat/kb_read
                     stat/kb_read_gather
                     write_queue/size
                     write_queue/congestion_off
                     write_queue/congestion_on
 **********************************************************/

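/*
 * Example usage from userspace (the device name pktcdvd0 is
 * hypothetical):
 *
 *	# cat /sys/class/pktcdvd/pktcdvd0/stat/kb_written
 *	# echo 1 > /sys/class/pktcdvd/pktcdvd0/stat/reset
 *	# echo 1000 > /sys/class/pktcdvd/pktcdvd0/write_queue/congestion_on
 *
 * As set up below, reset is write-only, the counters are read-only,
 * and the congestion marks are read-write.
 */
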
DEF_ATTR(kobj_pkt_attr_st1, "reset", 0200);
DEF_ATTR(kobj_pkt_attr_st2, "packets_started", 0444);
DEF_ATTR(kobj_pkt_attr_st3, "packets_finished", 0444);
DEF_ATTR(kobj_pkt_attr_st4, "kb_written", 0444);
DEF_ATTR(kobj_pkt_attr_st5, "kb_read", 0444);
DEF_ATTR(kobj_pkt_attr_st6, "kb_read_gather", 0444);

static struct attribute *kobj_pkt_attrs_stat[] = {
	&kobj_pkt_attr_st1,
	&kobj_pkt_attr_st2,
	&kobj_pkt_attr_st3,
	&kobj_pkt_attr_st4,
	&kobj_pkt_attr_st5,
	&kobj_pkt_attr_st6,
	NULL
};

DEF_ATTR(kobj_pkt_attr_wq1, "size", 0444);
DEF_ATTR(kobj_pkt_attr_wq2, "congestion_off", 0644);
DEF_ATTR(kobj_pkt_attr_wq3, "congestion_on",  0644);

static struct attribute *kobj_pkt_attrs_wqueue[] = {
	&kobj_pkt_attr_wq1,
	&kobj_pkt_attr_wq2,
	&kobj_pkt_attr_wq3,
	NULL
};

static ssize_t kobj_pkt_show(struct kobject *kobj,
			struct attribute *attr, char *data)
{
	struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
	int n = 0;
	int v;
	if (strcmp(attr->name, "packets_started") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.pkt_started);

	} else if (strcmp(attr->name, "packets_finished") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.pkt_ended);

	} else if (strcmp(attr->name, "kb_written") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_w >> 1);

	} else if (strcmp(attr->name, "kb_read") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_r >> 1);

	} else if (strcmp(attr->name, "kb_read_gather") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_rg >> 1);

	} else if (strcmp(attr->name, "size") == 0) {
		spin_lock(&pd->lock);
		v = pd->bio_queue_size;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);

	} else if (strcmp(attr->name, "congestion_off") == 0) {
		spin_lock(&pd->lock);
		v = pd->write_congestion_off;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);

	} else if (strcmp(attr->name, "congestion_on") == 0) {
		spin_lock(&pd->lock);
		v = pd->write_congestion_on;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);
	}
	return n;
}

static void init_write_congestion_marks(int* lo, int* hi)
{
	if (*hi > 0) {
		*hi = max(*hi, 500);
		*hi = min(*hi, 1000000);
		if (*lo <= 0)
			*lo = *hi - 100;
		else {
			*lo = min(*lo, *hi - 100);
			*lo = max(*lo, 100);
		}
	} else {
		*hi = -1;
		*lo = -1;
	}
}

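/*
 * Worked example (hypothetical values): storing 200 as the high mark
 * clamps it into [500, 1000000], giving *hi == 500; an unset low mark
 * (<= 0) then becomes *hi - 100 == 400. Storing a non-positive high
 * mark disables congestion handling by setting both marks to -1.
 */
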
static ssize_t kobj_pkt_store(struct kobject *kobj,
			struct attribute *attr,
			const char *data, size_t len)
{
	struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
	int val;

	if (strcmp(attr->name, "reset") == 0 && len > 0) {
		pd->stats.pkt_started = 0;
		pd->stats.pkt_ended = 0;
		pd->stats.secs_w = 0;
		pd->stats.secs_rg = 0;
		pd->stats.secs_r = 0;

	} else if (strcmp(attr->name, "congestion_off") == 0
		   && sscanf(data, "%d", &val) == 1) {
		spin_lock(&pd->lock);
		pd->write_congestion_off = val;
		init_write_congestion_marks(&pd->write_congestion_off,
					&pd->write_congestion_on);
		spin_unlock(&pd->lock);

	} else if (strcmp(attr->name, "congestion_on") == 0
		   && sscanf(data, "%d", &val) == 1) {
		spin_lock(&pd->lock);
		pd->write_congestion_on = val;
		init_write_congestion_marks(&pd->write_congestion_off,
					&pd->write_congestion_on);
		spin_unlock(&pd->lock);
	}
	return len;
}

static const struct sysfs_ops kobj_pkt_ops = {
	.show = kobj_pkt_show,
	.store = kobj_pkt_store
};
static struct kobj_type kobj_pkt_type_stat = {
	.release = pkt_kobj_release,
	.sysfs_ops = &kobj_pkt_ops,
	.default_attrs = kobj_pkt_attrs_stat
};
static struct kobj_type kobj_pkt_type_wqueue = {
	.release = pkt_kobj_release,
	.sysfs_ops = &kobj_pkt_ops,
	.default_attrs = kobj_pkt_attrs_wqueue
};

static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
{
	if (class_pktcdvd) {
		pd->dev = device_create(class_pktcdvd, NULL, MKDEV(0, 0), NULL,
					"%s", pd->name);
		if (IS_ERR(pd->dev))
			pd->dev = NULL;
	}
	if (pd->dev) {
		pd->kobj_stat = pkt_kobj_create(pd, "stat",
					&pd->dev->kobj,
					&kobj_pkt_type_stat);
		pd->kobj_wqueue = pkt_kobj_create(pd, "write_queue",
					&pd->dev->kobj,
					&kobj_pkt_type_wqueue);
	}
}

static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
{
	pkt_kobj_remove(pd->kobj_stat);
	pkt_kobj_remove(pd->kobj_wqueue);
	if (class_pktcdvd)
		device_unregister(pd->dev);
}


/********************************************************************
  /sys/class/pktcdvd/
                     add            map block device
                     remove         unmap packet dev
                     device_map     show mappings
 *******************************************************************/

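/*
 * Example usage (all device numbers hypothetical):
 *
 *	# echo "11:0" > /sys/class/pktcdvd/add
 *	# cat /sys/class/pktcdvd/device_map
 *	pktcdvd0 254:0 11:0
 *	# echo "254:0" > /sys/class/pktcdvd/remove
 *
 * add takes the major:minor of the block device to be wrapped, while
 * remove takes the major:minor of the pktcdvd device itself -- the
 * first pair printed by device_map.
 */
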
static void class_pktcdvd_release(struct class *cls)
{
	kfree(cls);
}

static ssize_t device_map_show(struct class *c, struct class_attribute *attr,
			       char *data)
{
	int n = 0;
	int idx;
	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	for (idx = 0; idx < MAX_WRITERS; idx++) {
		struct pktcdvd_device *pd = pkt_devs[idx];
		if (!pd)
			continue;
		n += sprintf(data+n, "%s %u:%u %u:%u\n",
			pd->name,
			MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
			MAJOR(pd->bdev->bd_dev),
			MINOR(pd->bdev->bd_dev));
	}
	mutex_unlock(&ctl_mutex);
	return n;
}
static CLASS_ATTR_RO(device_map);

static ssize_t add_store(struct class *c, struct class_attribute *attr,
			 const char *buf, size_t count)
{
	unsigned int major, minor;

	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
		/* pkt_setup_dev() expects caller to hold reference to self */
		if (!try_module_get(THIS_MODULE))
			return -ENODEV;

		pkt_setup_dev(MKDEV(major, minor), NULL);

		module_put(THIS_MODULE);

		return count;
	}

	return -EINVAL;
}
static CLASS_ATTR_WO(add);

static ssize_t remove_store(struct class *c, struct class_attribute *attr,
			    const char *buf, size_t count)
{
	unsigned int major, minor;
	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
		pkt_remove_dev(MKDEV(major, minor));
		return count;
	}
	return -EINVAL;
}
static CLASS_ATTR_WO(remove);

static struct attribute *class_pktcdvd_attrs[] = {
	&class_attr_add.attr,
	&class_attr_remove.attr,
	&class_attr_device_map.attr,
	NULL,
};
ATTRIBUTE_GROUPS(class_pktcdvd);

static int pkt_sysfs_init(void)
{
	int ret = 0;

	/*
	 * create control files in sysfs
	 * /sys/class/pktcdvd/...
	 */
	class_pktcdvd = kzalloc(sizeof(*class_pktcdvd), GFP_KERNEL);
	if (!class_pktcdvd)
		return -ENOMEM;
	class_pktcdvd->name = DRIVER_NAME;
	class_pktcdvd->owner = THIS_MODULE;
	class_pktcdvd->class_release = class_pktcdvd_release;
	class_pktcdvd->class_groups = class_pktcdvd_groups;
	ret = class_register(class_pktcdvd);
	if (ret) {
		kfree(class_pktcdvd);
		class_pktcdvd = NULL;
		pr_err("failed to create class pktcdvd\n");
		return ret;
	}
	return 0;
}

static void pkt_sysfs_cleanup(void)
{
	if (class_pktcdvd)
		class_destroy(class_pktcdvd);
	class_pktcdvd = NULL;
}

/********************************************************************
  entries in debugfs

  /sys/kernel/debug/pktcdvd/pktcdvd[0-7]/
			info

 *******************************************************************/

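/*
 * Example (assuming debugfs is mounted at /sys/kernel/debug and a
 * device named pktcdvd0 is mapped):
 *
 *	# cat /sys/kernel/debug/pktcdvd/pktcdvd0/info
 *
 * dumps the same per-device state that pkt_seq_show() exposes through
 * procfs.
 */
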
static int pkt_debugfs_seq_show(struct seq_file *m, void *p)
{
	return pkt_seq_show(m, p);
}

static int pkt_debugfs_fops_open(struct inode *inode, struct file *file)
{
	return single_open(file, pkt_debugfs_seq_show, inode->i_private);
}

static const struct file_operations debug_fops = {
	.open		= pkt_debugfs_fops_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.owner		= THIS_MODULE,
};

static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
{
	if (!pkt_debugfs_root)
		return;
	pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root);
	if (!pd->dfs_d_root)
		return;

	pd->dfs_f_info = debugfs_create_file("info", 0444,
					     pd->dfs_d_root, pd, &debug_fops);
}

static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
{
	if (!pkt_debugfs_root)
		return;
	debugfs_remove(pd->dfs_f_info);
	debugfs_remove(pd->dfs_d_root);
	pd->dfs_f_info = NULL;
	pd->dfs_d_root = NULL;
}

static void pkt_debugfs_init(void)
{
	pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL);
}

static void pkt_debugfs_cleanup(void)
{
	debugfs_remove(pkt_debugfs_root);
	pkt_debugfs_root = NULL;
}

/* ----------------------------------------------------------*/


static void pkt_bio_finished(struct pktcdvd_device *pd)
{
	BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
	if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
		pkt_dbg(2, pd, "queue empty\n");
		atomic_set(&pd->iosched.attention, 1);
		wake_up(&pd->wqueue);
	}
}

/*
 * Allocate a packet_data struct
 */
static struct packet_data *pkt_alloc_packet_data(int frames)
{
	int i;
	struct packet_data *pkt;

	pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL);
	if (!pkt)
		goto no_pkt;

	pkt->frames = frames;
	pkt->w_bio = bio_kmalloc(GFP_KERNEL, frames);
	if (!pkt->w_bio)
		goto no_bio;

	for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
		pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (!pkt->pages[i])
			goto no_page;
	}

	spin_lock_init(&pkt->lock);
	bio_list_init(&pkt->orig_bios);

	for (i = 0; i < frames; i++) {
		struct bio *bio = bio_kmalloc(GFP_KERNEL, 1);
		if (!bio)
			goto no_rd_bio;

		pkt->r_bios[i] = bio;
	}

	return pkt;

no_rd_bio:
	for (i = 0; i < frames; i++) {
		struct bio *bio = pkt->r_bios[i];
		if (bio)
			bio_put(bio);
	}

no_page:
	for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
		if (pkt->pages[i])
			__free_page(pkt->pages[i]);
	bio_put(pkt->w_bio);
no_bio:
	kfree(pkt);
no_pkt:
	return NULL;
}

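/*
 * Sizing example (assuming 4kB pages, i.e. FRAMES_PER_PAGE == 2): a
 * 64kB packet is 128 sectors, so frames == 128 >> 2 == 32 CD frames of
 * 2kB each, backed by 32 / 2 == 16 pages, one single-vec read bio per
 * frame, and one write bio covering the whole packet.
 */
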
/*
 * Free a packet_data struct
 */
static void pkt_free_packet_data(struct packet_data *pkt)
{
	int i;

	for (i = 0; i < pkt->frames; i++) {
		struct bio *bio = pkt->r_bios[i];
		if (bio)
			bio_put(bio);
	}
	for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
		__free_page(pkt->pages[i]);
	bio_put(pkt->w_bio);
	kfree(pkt);
}

static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));

	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
		pkt_free_packet_data(pkt);
	}
	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
}

static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
{
	struct packet_data *pkt;

	BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));

	while (nr_packets > 0) {
		pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
		if (!pkt) {
			pkt_shrink_pktlist(pd);
			return 0;
		}
		pkt->id = nr_packets;
		pkt->pd = pd;
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
		nr_packets--;
	}
	return 1;
}

static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
{
	struct rb_node *n = rb_next(&node->rb_node);
	if (!n)
		return NULL;
	return rb_entry(n, struct pkt_rb_node, rb_node);
}

static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	rb_erase(&node->rb_node, &pd->bio_queue);
	mempool_free(node, &pd->rb_pool);
	pd->bio_queue_size--;
	BUG_ON(pd->bio_queue_size < 0);
}

/*
 * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
 */
static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
{
	struct rb_node *n = pd->bio_queue.rb_node;
	struct rb_node *next;
	struct pkt_rb_node *tmp;

	if (!n) {
		BUG_ON(pd->bio_queue_size > 0);
		return NULL;
	}

	for (;;) {
		tmp = rb_entry(n, struct pkt_rb_node, rb_node);
		if (s <= tmp->bio->bi_iter.bi_sector)
			next = n->rb_left;
		else
			next = n->rb_right;
		if (!next)
			break;
		n = next;
	}

	if (s > tmp->bio->bi_iter.bi_sector) {
		tmp = pkt_rbtree_next(tmp);
		if (!tmp)
			return NULL;
	}
	BUG_ON(s > tmp->bio->bi_iter.bi_sector);
	return tmp;
}

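/*
 * Example: if the tree holds bios starting at sectors 100 and 300,
 * pkt_rbtree_find(pd, 200) returns the node for sector 300 (the first
 * node at or above 200) and pkt_rbtree_find(pd, 400) returns NULL;
 * callers such as pkt_handle_queue() then wrap around via rb_first().
 */
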
/*
 * Insert a node into the pd->bio_queue rb tree.
 */
static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	struct rb_node **p = &pd->bio_queue.rb_node;
	struct rb_node *parent = NULL;
	sector_t s = node->bio->bi_iter.bi_sector;
	struct pkt_rb_node *tmp;

	while (*p) {
		parent = *p;
		tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
		if (s < tmp->bio->bi_iter.bi_sector)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &pd->bio_queue);
	pd->bio_queue_size++;
}

/*
 * Send a packet_command to the underlying block device and
 * wait for completion.
 */
static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
{
	struct request_queue *q = bdev_get_queue(pd->bdev);
	struct request *rq;
	int ret = 0;

	rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
			     REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	if (cgc->buflen) {
		ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
				      GFP_NOIO);
		if (ret)
			goto out;
	}

	scsi_req(rq)->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
	memcpy(scsi_req(rq)->cmd, cgc->cmd, CDROM_PACKET_SIZE);

	rq->timeout = 60*HZ;
	if (cgc->quiet)
		rq->rq_flags |= RQF_QUIET;

	blk_execute_rq(pd->bdev->bd_disk, rq, 0);
	if (scsi_req(rq)->result)
		ret = -EIO;
out:
	blk_put_request(rq);
	return ret;
}

static const char *sense_key_string(__u8 index)
{
	static const char * const info[] = {
		"No sense", "Recovered error", "Not ready",
		"Medium error", "Hardware error", "Illegal request",
		"Unit attention", "Data protect", "Blank check",
	};

	return index < ARRAY_SIZE(info) ? info[index] : "INVALID";
}

/*
 * A generic sense dump / resolve mechanism should be implemented across
 * all ATAPI + SCSI devices.
 */
static void pkt_dump_sense(struct pktcdvd_device *pd,
			   struct packet_command *cgc)
{
	struct scsi_sense_hdr *sshdr = cgc->sshdr;

	if (sshdr)
		pkt_err(pd, "%*ph - sense %02x.%02x.%02x (%s)\n",
			CDROM_PACKET_SIZE, cgc->cmd,
			sshdr->sense_key, sshdr->asc, sshdr->ascq,
			sense_key_string(sshdr->sense_key));
	else
		pkt_err(pd, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
}

/*
 * flush the drive cache to media
 */
static int pkt_flush_cache(struct pktcdvd_device *pd)
{
	struct packet_command cgc;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_FLUSH_CACHE;
	cgc.quiet = 1;

	/*
	 * The IMMED bit -- we default to not setting it; although setting
	 * it would allow a much faster close, leaving it clear is safer.
	 */
#if 0
	cgc.cmd[1] = 1 << 1;
#endif
	return pkt_generic_packet(pd, &cgc);
}

/*
 * speed is given as the normal factor, e.g. 4 for 4x
 */
static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
				unsigned write_speed, unsigned read_speed)
{
	struct packet_command cgc;
	struct scsi_sense_hdr sshdr;
	int ret;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.sshdr = &sshdr;
	cgc.cmd[0] = GPCMD_SET_SPEED;
	cgc.cmd[2] = (read_speed >> 8) & 0xff;
	cgc.cmd[3] = read_speed & 0xff;
	cgc.cmd[4] = (write_speed >> 8) & 0xff;
	cgc.cmd[5] = write_speed & 0xff;

	ret = pkt_generic_packet(pd, &cgc);
	if (ret)
		pkt_dump_sense(pd, &cgc);

	return ret;
}

/*
 * Queue a bio for processing by the low-level CD device. Must be called
 * from process context.
 */
static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
{
	spin_lock(&pd->iosched.lock);
	if (bio_data_dir(bio) == READ)
		bio_list_add(&pd->iosched.read_queue, bio);
	else
		bio_list_add(&pd->iosched.write_queue, bio);
	spin_unlock(&pd->iosched.lock);

	atomic_set(&pd->iosched.attention, 1);
	wake_up(&pd->wqueue);
}

/*
 * Process the queued read/write requests. This function handles special
 * requirements for CDRW drives:
 * - A cache flush command must be inserted before a read request if the
 *   previous request was a write.
 * - Switching between reading and writing is slow, so don't do it more often
 *   than necessary.
 * - Optimize for throughput at the expense of latency. This means that streaming
 *   writes will never be interrupted by a read, but if the drive has to seek
 *   before the next write, switch to reading instead if there are any pending
 *   read requests.
 * - Set the read speed according to current usage pattern. When only reading
 *   from the device, it's best to use the highest possible read speed, but
 *   when switching often between reading and writing, it's better to have the
 *   same read and write speeds.
 */
static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
{

	if (atomic_read(&pd->iosched.attention) == 0)
		return;
	atomic_set(&pd->iosched.attention, 0);

	for (;;) {
		struct bio *bio;
		int reads_queued, writes_queued;

		spin_lock(&pd->iosched.lock);
		reads_queued = !bio_list_empty(&pd->iosched.read_queue);
		writes_queued = !bio_list_empty(&pd->iosched.write_queue);
		spin_unlock(&pd->iosched.lock);

		if (!reads_queued && !writes_queued)
			break;

		if (pd->iosched.writing) {
			int need_write_seek = 1;
			spin_lock(&pd->iosched.lock);
			bio = bio_list_peek(&pd->iosched.write_queue);
			spin_unlock(&pd->iosched.lock);
			if (bio && (bio->bi_iter.bi_sector ==
				    pd->iosched.last_write))
				need_write_seek = 0;
			if (need_write_seek && reads_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					pkt_dbg(2, pd, "write, waiting\n");
					break;
				}
				pkt_flush_cache(pd);
				pd->iosched.writing = 0;
			}
		} else {
			if (!reads_queued && writes_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					pkt_dbg(2, pd, "read, waiting\n");
					break;
				}
				pd->iosched.writing = 1;
			}
		}

		spin_lock(&pd->iosched.lock);
		if (pd->iosched.writing)
			bio = bio_list_pop(&pd->iosched.write_queue);
		else
			bio = bio_list_pop(&pd->iosched.read_queue);
		spin_unlock(&pd->iosched.lock);

		if (!bio)
			continue;

		if (bio_data_dir(bio) == READ)
			pd->iosched.successive_reads +=
				bio->bi_iter.bi_size >> 10;
		else {
			pd->iosched.successive_reads = 0;
			pd->iosched.last_write = bio_end_sector(bio);
		}
		if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
			if (pd->read_speed == pd->write_speed) {
				pd->read_speed = MAX_SPEED;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		} else {
			if (pd->read_speed != pd->write_speed) {
				pd->read_speed = pd->write_speed;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		}

		atomic_inc(&pd->cdrw.pending_bios);
		submit_bio_noacct(bio);
	}
}

/*
 * Special care is needed if the underlying block device has a small
 * max_phys_segments value.
 */
static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
{
	if ((pd->settings.size << 9) / CD_FRAMESIZE
	    <= queue_max_segments(q)) {
		/*
		 * The cdrom device can handle one segment/frame
		 */
		clear_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else if ((pd->settings.size << 9) / PAGE_SIZE
		   <= queue_max_segments(q)) {
		/*
		 * We can handle this case at the expense of some extra memory
		 * copies during write operations
		 */
		set_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else {
		pkt_err(pd, "cdrom max_phys_segments too small\n");
		return -EIO;
	}
}

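/*
 * Worked example (hypothetical limits): a 64kB packet (settings.size
 * == 128 sectors) spans 32 CD frames, or 16 pages with 4kB pages. A
 * queue allowing 32 or more segments needs no merging; 16..31 segments
 * work with PACKET_MERGE_SEGS set, at the cost of extra copying; fewer
 * than 16 cannot be supported at all.
 */
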
static void pkt_end_io_read(struct bio *bio)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
		bio, (unsigned long long)pkt->sector,
		(unsigned long long)bio->bi_iter.bi_sector, bio->bi_status);

	if (bio->bi_status)
		atomic_inc(&pkt->io_errors);
	if (atomic_dec_and_test(&pkt->io_wait)) {
		atomic_inc(&pkt->run_sm);
		wake_up(&pd->wqueue);
	}
	pkt_bio_finished(pd);
}

static void pkt_end_io_packet_write(struct bio *bio)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_status);

	pd->stats.pkt_ended++;

	pkt_bio_finished(pd);
	atomic_dec(&pkt->io_wait);
	atomic_inc(&pkt->run_sm);
	wake_up(&pd->wqueue);
}

/*
 * Schedule reads for the holes in a packet
 */
static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int frames_read = 0;
	struct bio *bio;
	int f;
	char written[PACKET_MAX_SIZE];

	BUG_ON(bio_list_empty(&pkt->orig_bios));

	atomic_set(&pkt->io_wait, 0);
	atomic_set(&pkt->io_errors, 0);

	/*
	 * Figure out which frames we need to read before we can write.
	 */
	memset(written, 0, sizeof(written));
	spin_lock(&pkt->lock);
	bio_list_for_each(bio, &pkt->orig_bios) {
		int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
			(CD_FRAMESIZE >> 9);
		int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
		pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
		BUG_ON(first_frame < 0);
		BUG_ON(first_frame + num_frames > pkt->frames);
		for (f = first_frame; f < first_frame + num_frames; f++)
			written[f] = 1;
	}
	spin_unlock(&pkt->lock);

	if (pkt->cache_valid) {
		pkt_dbg(2, pd, "zone %llx cached\n",
			(unsigned long long)pkt->sector);
		goto out_account;
	}

	/*
	 * Schedule reads for missing parts of the packet.
	 */
	for (f = 0; f < pkt->frames; f++) {
		int p, offset;

		if (written[f])
			continue;

		bio = pkt->r_bios[f];
		bio_reset(bio);
		bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
		bio_set_dev(bio, pd->bdev);
		bio->bi_end_io = pkt_end_io_read;
		bio->bi_private = pkt;

		p = (f * CD_FRAMESIZE) / PAGE_SIZE;
		offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
		pkt_dbg(2, pd, "Adding frame %d, page:%p offs:%d\n",
			f, pkt->pages[p], offset);
		if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
			BUG();

		atomic_inc(&pkt->io_wait);
		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		pkt_queue_bio(pd, bio);
		frames_read++;
	}

out_account:
	pkt_dbg(2, pd, "need %d frames for zone %llx\n",
		frames_read, (unsigned long long)pkt->sector);
	pd->stats.pkt_started++;
	pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
}

/*
 * Find a packet matching zone, or the least recently used packet if
 * there is no match.
 */
static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
{
	struct packet_data *pkt;

	list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
		if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
			list_del_init(&pkt->list);
			if (pkt->sector != zone)
				pkt->cache_valid = 0;
			return pkt;
		}
	}
	BUG();
	return NULL;
}

static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	if (pkt->cache_valid) {
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
	} else {
		list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
	}
}

static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state)
{
#if PACKET_DEBUG > 1
	static const char *state_name[] = {
		"IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
	};
	enum packet_data_state old_state = pkt->state;
	pkt_dbg(2, pd, "pkt %2d : s=%6llx %s -> %s\n",
		pkt->id, (unsigned long long)pkt->sector,
		state_name[old_state], state_name[state]);
#endif
	pkt->state = state;
}

/*
 * Scan the work queue to see if we can start a new packet.
 * returns non-zero if any work was done.
 */
static int pkt_handle_queue(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *p;
	struct bio *bio = NULL;
	sector_t zone = 0; /* Suppress gcc warning */
	struct pkt_rb_node *node, *first_node;
	struct rb_node *n;
	int wakeup;

	atomic_set(&pd->scan_queue, 0);

	if (list_empty(&pd->cdrw.pkt_free_list)) {
		pkt_dbg(2, pd, "no pkt\n");
		return 0;
	}

	/*
	 * Try to find a zone we are not already working on.
	 */
	spin_lock(&pd->lock);
	first_node = pkt_rbtree_find(pd, pd->current_sector);
	if (!first_node) {
		n = rb_first(&pd->bio_queue);
		if (n)
			first_node = rb_entry(n, struct pkt_rb_node, rb_node);
	}
	node = first_node;
	while (node) {
		bio = node->bio;
		zone = get_zone(bio->bi_iter.bi_sector, pd);
		list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
			if (p->sector == zone) {
				bio = NULL;
				goto try_next_bio;
			}
		}
		break;
try_next_bio:
		node = pkt_rbtree_next(node);
		if (!node) {
			n = rb_first(&pd->bio_queue);
			if (n)
				node = rb_entry(n, struct pkt_rb_node, rb_node);
		}
		if (node == first_node)
			node = NULL;
	}
	spin_unlock(&pd->lock);
	if (!bio) {
		pkt_dbg(2, pd, "no bio\n");
		return 0;
	}

	pkt = pkt_get_packet_data(pd, zone);

	pd->current_sector = zone + pd->settings.size;
	pkt->sector = zone;
	BUG_ON(pkt->frames != pd->settings.size >> 2);
	pkt->write_size = 0;

	/*
	 * Scan work queue for bios in the same zone and link them
	 * to this packet.
	 */
	spin_lock(&pd->lock);
	pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
	while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
		bio = node->bio;
		pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long)
			get_zone(bio->bi_iter.bi_sector, pd));
		if (get_zone(bio->bi_iter.bi_sector, pd) != zone)
			break;
		pkt_rbtree_erase(pd, node);
		spin_lock(&pkt->lock);
		bio_list_add(&pkt->orig_bios, bio);
		pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
		spin_unlock(&pkt->lock);
	}
	/*
	 * Check the write congestion marks: if bio_queue_size has dropped
	 * to the low watermark or below, wake up any waiters.
	 */
	wakeup = (pd->write_congestion_on > 0
			&& pd->bio_queue_size <= pd->write_congestion_off);
	spin_unlock(&pd->lock);
	if (wakeup) {
		clear_bdi_congested(pd->disk->queue->backing_dev_info,
					BLK_RW_ASYNC);
	}

	pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
	pkt_set_state(pkt, PACKET_WAITING_STATE);
	atomic_set(&pkt->run_sm, 1);

	spin_lock(&pd->cdrw.active_list_lock);
	list_add(&pkt->list, &pd->cdrw.pkt_active_list);
	spin_unlock(&pd->cdrw.active_list_lock);

	return 1;
}

/*
 * Assemble a bio to write one packet and queue the bio for processing
 * by the underlying block device.
 */
static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int f;

	bio_reset(pkt->w_bio);
	pkt->w_bio->bi_iter.bi_sector = pkt->sector;
	bio_set_dev(pkt->w_bio, pd->bdev);
	pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
	pkt->w_bio->bi_private = pkt;

	/* XXX: locking? */
	for (f = 0; f < pkt->frames; f++) {
		struct page *page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
		unsigned offset = (f * CD_FRAMESIZE) % PAGE_SIZE;

		if (!bio_add_page(pkt->w_bio, page, CD_FRAMESIZE, offset))
			BUG();
	}
	pkt_dbg(2, pd, "vcnt=%d\n", pkt->w_bio->bi_vcnt);

	/*
	 * Fill-in bvec with data from orig_bios.
	 */
	spin_lock(&pkt->lock);
	bio_list_copy_data(pkt->w_bio, pkt->orig_bios.head);

	pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
	spin_unlock(&pkt->lock);

	pkt_dbg(2, pd, "Writing %d frames for zone %llx\n",
		pkt->write_size, (unsigned long long)pkt->sector);

	if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames))
		pkt->cache_valid = 1;
	else
		pkt->cache_valid = 0;

	/* Start the write request */
	atomic_set(&pkt->io_wait, 1);
	bio_set_op_attrs(pkt->w_bio, REQ_OP_WRITE, 0);
	pkt_queue_bio(pd, pkt->w_bio);
}

static void pkt_finish_packet(struct packet_data *pkt, blk_status_t status)
{
	struct bio *bio;

	if (status)
		pkt->cache_valid = 0;

	/* Finish all bios corresponding to this packet */
	while ((bio = bio_list_pop(&pkt->orig_bios))) {
		bio->bi_status = status;
		bio_endio(bio);
	}
}

static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	pkt_dbg(2, pd, "pkt %d\n", pkt->id);

	for (;;) {
		switch (pkt->state) {
		case PACKET_WAITING_STATE:
			if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
				return;

			pkt->sleep_time = 0;
			pkt_gather_data(pd, pkt);
			pkt_set_state(pkt, PACKET_READ_WAIT_STATE);
			break;

		case PACKET_READ_WAIT_STATE:
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (atomic_read(&pkt->io_errors) > 0) {
				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
			} else {
				pkt_start_write(pd, pkt);
			}
			break;

		case PACKET_WRITE_WAIT_STATE:
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (!pkt->w_bio->bi_status) {
				pkt_set_state(pkt, PACKET_FINISHED_STATE);
			} else {
				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
			}
			break;

		case PACKET_RECOVERY_STATE:
			pkt_dbg(2, pd, "No recovery possible\n");
			pkt_set_state(pkt, PACKET_FINISHED_STATE);
			break;

		case PACKET_FINISHED_STATE:
			pkt_finish_packet(pkt, pkt->w_bio->bi_status);
			return;

		default:
			BUG();
			break;
		}
	}
}

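/*
 * Normal life cycle of a packet, as driven by the state machine above:
 *
 *	WAITING -> READ_WAIT -> WRITE_WAIT -> FINISHED -> IDLE
 *
 * with pkt_gather_data() issued on leaving WAITING and
 * pkt_start_write() on leaving READ_WAIT. Read or write errors divert
 * to RECOVERY, which cannot actually recover and simply marks the
 * packet FINISHED.
 */
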
static void pkt_handle_packets(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	/*
	 * Run state machine for active packets
	 */
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		if (atomic_read(&pkt->run_sm) > 0) {
			atomic_set(&pkt->run_sm, 0);
			pkt_run_state_machine(pd, pkt);
		}
	}

	/*
	 * Move no longer active packets to the free list
	 */
	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
		if (pkt->state == PACKET_FINISHED_STATE) {
			list_del(&pkt->list);
			pkt_put_packet_data(pd, pkt);
			pkt_set_state(pkt, PACKET_IDLE_STATE);
			atomic_set(&pd->scan_queue, 1);
		}
	}
	spin_unlock(&pd->cdrw.active_list_lock);
}

static void pkt_count_states(struct pktcdvd_device *pd, int *states)
{
	struct packet_data *pkt;
	int i;

	for (i = 0; i < PACKET_NUM_STATES; i++)
		states[i] = 0;

	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		states[pkt->state]++;
	}
	spin_unlock(&pd->cdrw.active_list_lock);
}

/*
 * kcdrwd is woken up when writes have been queued for one of our
 * registered devices
 */
static int kcdrwd(void *foobar)
{
	struct pktcdvd_device *pd = foobar;
	struct packet_data *pkt;
	long min_sleep_time, residue;

	set_user_nice(current, MIN_NICE);
	set_freezable();

	for (;;) {
		DECLARE_WAITQUEUE(wait, current);

		/*
		 * Wait until there is something to do
		 */
		add_wait_queue(&pd->wqueue, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);

			/* Check if we need to run pkt_handle_queue */
			if (atomic_read(&pd->scan_queue) > 0)
				goto work_to_do;

			/* Check if we need to run the state machine for some packet */
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (atomic_read(&pkt->run_sm) > 0)
					goto work_to_do;
			}

			/* Check if we need to process the iosched queues */
			if (atomic_read(&pd->iosched.attention) != 0)
				goto work_to_do;

			/* Otherwise, go to sleep */
			if (PACKET_DEBUG > 1) {
				int states[PACKET_NUM_STATES];
				pkt_count_states(pd, states);
				pkt_dbg(2, pd, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
					states[0], states[1], states[2],
					states[3], states[4], states[5]);
			}

			min_sleep_time = MAX_SCHEDULE_TIMEOUT;
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
					min_sleep_time = pkt->sleep_time;
			}

			pkt_dbg(2, pd, "sleeping\n");
			residue = schedule_timeout(min_sleep_time);
			pkt_dbg(2, pd, "wake up\n");

			/* make swsusp happy with our thread */
			try_to_freeze();

			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (!pkt->sleep_time)
					continue;
				pkt->sleep_time -= min_sleep_time - residue;
				if (pkt->sleep_time <= 0) {
					pkt->sleep_time = 0;
					atomic_inc(&pkt->run_sm);
				}
			}

			if (kthread_should_stop())
				break;
		}
work_to_do:
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&pd->wqueue, &wait);

		if (kthread_should_stop())
			break;

		/*
		 * if pkt_handle_queue returns true, we can queue
		 * another request.
		 */
		while (pkt_handle_queue(pd))
			;

		/*
		 * Handle packet state machine
		 */
		pkt_handle_packets(pd);

		/*
		 * Handle iosched queues
		 */
		pkt_iosched_process_queue(pd);
	}

	return 0;
}

static void pkt_print_settings(struct pktcdvd_device *pd)
{
	pkt_info(pd, "%s packets, %u blocks, Mode-%c disc\n",
		 pd->settings.fp ? "Fixed" : "Variable",
		 pd->settings.size >> 2,
		 pd->settings.block_mode == 8 ? '1' : '2');
}

static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
{
	memset(cgc->cmd, 0, sizeof(cgc->cmd));

	cgc->cmd[0] = GPCMD_MODE_SENSE_10;
	cgc->cmd[2] = page_code | (page_control << 6);
	cgc->cmd[7] = cgc->buflen >> 8;
	cgc->cmd[8] = cgc->buflen & 0xff;
	cgc->data_direction = CGC_DATA_READ;
	return pkt_generic_packet(pd, cgc);
}

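/*
 * Illustrative CDB: a MODE SENSE(10) for the write parameters page
 * (page_code 0x05, page_control 0) with a 128-byte buffer comes out as
 *
 *	5A 00 05 00 00 00 00 00 80 00
 *
 * i.e. cmd[0] == GPCMD_MODE_SENSE_10, cmd[2] == page_code | (pc << 6),
 * and cmd[7..8] hold the big-endian allocation length.
 */
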
static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
{
	memset(cgc->cmd, 0, sizeof(cgc->cmd));
	memset(cgc->buffer, 0, 2);
	cgc->cmd[0] = GPCMD_MODE_SELECT_10;
	cgc->cmd[1] = 0x10;		/* PF */
	cgc->cmd[7] = cgc->buflen >> 8;
	cgc->cmd[8] = cgc->buflen & 0xff;
	cgc->data_direction = CGC_DATA_WRITE;
	return pkt_generic_packet(pd, cgc);
}

static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
{
	struct packet_command cgc;
	int ret;

	/* set up command and get the disc info */
	init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_DISC_INFO;
	cgc.cmd[8] = cgc.buflen = 2;
	cgc.quiet = 1;

	ret = pkt_generic_packet(pd, &cgc);
	if (ret)
		return ret;

	/* not all drives have the same disc_info length, so re-issue the
	 * command with the length the drive tells us it can supply
	 */
	cgc.buflen = be16_to_cpu(di->disc_information_length) +
		     sizeof(di->disc_information_length);

	if (cgc.buflen > sizeof(disc_information))
		cgc.buflen = sizeof(disc_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);
}

static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
{
	struct packet_command cgc;
	int ret;

	init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
	cgc.cmd[1] = type & 3;
	cgc.cmd[4] = (track & 0xff00) >> 8;
	cgc.cmd[5] = track & 0xff;
	cgc.cmd[8] = 8;
	cgc.quiet = 1;

	ret = pkt_generic_packet(pd, &cgc);
	if (ret)
		return ret;

	cgc.buflen = be16_to_cpu(ti->track_information_length) +
		     sizeof(ti->track_information_length);

	if (cgc.buflen > sizeof(track_information))
		cgc.buflen = sizeof(track_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);
}

static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
						long *last_written)
{
	disc_information di;
	track_information ti;
	__u32 last_track;
	int ret;

	ret = pkt_get_disc_info(pd, &di);
	if (ret)
		return ret;

	last_track = (di.last_track_msb << 8) | di.last_track_lsb;
	ret = pkt_get_track_info(pd, last_track, 1, &ti);
	if (ret)
		return ret;

	/* if this track is blank, try the previous. */
	if (ti.blank) {
		last_track--;
		ret = pkt_get_track_info(pd, last_track, 1, &ti);
		if (ret)
			return ret;
	}

	/* if last recorded field is valid, return it. */
	if (ti.lra_v) {
		*last_written = be32_to_cpu(ti.last_rec_address);
	} else {
		/* make it up instead */
		*last_written = be32_to_cpu(ti.track_start) +
				be32_to_cpu(ti.track_size);
		if (ti.free_blocks)
			*last_written -= (be32_to_cpu(ti.free_blocks) + 7);
	}
	return 0;
}

/*
 * write mode select package based on pd->settings
 */
static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	struct scsi_sense_hdr sshdr;
	write_param_page *wp;
	char buffer[128];
	int ret, size;

	/* doesn't apply to DVD+RW or DVD-RAM */
	if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
		return 0;

	memset(buffer, 0, sizeof(buffer));
	init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
	cgc.sshdr = &sshdr;
	ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
	if (ret) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
	pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
	if (size > sizeof(buffer))
		size = sizeof(buffer);

	/*
	 * now get it all
	 */
	init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
	cgc.sshdr = &sshdr;
	ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
	if (ret) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	/*
	 * write page is offset header + block descriptor length
	 */
	wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];

	wp->fp = pd->settings.fp;
	wp->track_mode = pd->settings.track_mode;
	wp->write_type = pd->settings.write_type;
	wp->data_block_type = pd->settings.block_mode;

	wp->multi_session = 0;

#ifdef PACKET_USE_LS
	wp->link_size = 7;
	wp->ls_v = 1;
#endif

	if (wp->data_block_type == PACKET_BLOCK_MODE1) {
		wp->session_format = 0;
		wp->subhdr2 = 0x20;
	} else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
		wp->session_format = 0x20;
		wp->subhdr2 = 8;
#if 0
		wp->mcn[0] = 0x80;
		memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
#endif
	} else {
		/*
		 * paranoia
		 */
		pkt_err(pd, "write mode wrong %d\n", wp->data_block_type);
		return 1;
	}
	wp->packet_size = cpu_to_be32(pd->settings.size >> 2);

	cgc.buflen = cgc.cmd[8] = size;
	ret = pkt_mode_select(pd, &cgc);
	if (ret) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	pkt_print_settings(pd);
	return 0;
}

/*
 * 1 -- we can write to this track, 0 -- we can't
 */
static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
{
	switch (pd->mmc3_profile) {
		case 0x1a: /* DVD+RW */
		case 0x12: /* DVD-RAM */
			/* The track is always writable on DVD+RW/DVD-RAM */
			return 1;
		default:
			break;
	}

	if (!ti->packet || !ti->fp)
		return 0;

	/*
	 * "good" settings as per Mt Fuji.
	 */
	if (ti->rt == 0 && ti->blank == 0)
		return 1;

	if (ti->rt == 0 && ti->blank == 1)
		return 1;

	if (ti->rt == 1 && ti->blank == 0)
		return 1;

	pkt_err(pd, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
	return 0;
}

/*
 * 1 -- we can write to this disc, 0 -- we can't
 */
static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
{
	switch (pd->mmc3_profile) {
		case 0x0a: /* CD-RW */
		case 0xffff: /* MMC3 not supported */
			break;
		case 0x1a: /* DVD+RW */
		case 0x13: /* DVD-RW */
		case 0x12: /* DVD-RAM */
			return 1;
		default:
			pkt_dbg(2, pd, "Wrong disc profile (%x)\n",
				pd->mmc3_profile);
			return 0;
	}

	/*
	 * For disc type 0xff we should probably reserve a new track,
	 * but I'm not sure; should we leave this to user applications?
	 * Probably.
	 */
	if (di->disc_type == 0xff) {
		pkt_notice(pd, "unknown disc - no track?\n");
		return 0;
	}

	if (di->disc_type != 0x20 && di->disc_type != 0) {
		pkt_err(pd, "wrong disc type (%x)\n", di->disc_type);
		return 0;
	}

	if (di->erasable == 0) {
		pkt_notice(pd, "disc not erasable\n");
		return 0;
	}

	if (di->border_status == PACKET_SESSION_RESERVED) {
		pkt_err(pd, "can't write to last track (reserved)\n");
		return 0;
	}

	return 1;
}

static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	unsigned char buf[12];
	disc_information di;
	track_information ti;
	int ret, track;

	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
	cgc.cmd[8] = 8;
	ret = pkt_generic_packet(pd, &cgc);
	pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7];

	memset(&di, 0, sizeof(disc_information));
	memset(&ti, 0, sizeof(track_information));

	ret = pkt_get_disc_info(pd, &di);
	if (ret) {
		pkt_err(pd, "failed get_disc\n");
		return ret;
	}

	if (!pkt_writable_disc(pd, &di))
		return -EROFS;

	pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;

	track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
	ret = pkt_get_track_info(pd, track, 1, &ti);
	if (ret) {
		pkt_err(pd, "failed get_track\n");
		return ret;
	}

	if (!pkt_writable_track(pd, &ti)) {
		pkt_err(pd, "can't write to this track\n");
		return -EROFS;
	}

	/*
	 * We keep the packet size in 512-byte units; that makes the
	 * request calculations easier to deal with.
	 */
	pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
	if (pd->settings.size == 0) {
		pkt_notice(pd, "detected zero packet size!\n");
		return -ENXIO;
	}
	if (pd->settings.size > PACKET_MAX_SECTORS) {
		pkt_err(pd, "packet size is too big\n");
		return -EROFS;
	}
	pd->settings.fp = ti.fp;
	pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);

	if (ti.nwa_v) {
		pd->nwa = be32_to_cpu(ti.next_writable);
		set_bit(PACKET_NWA_VALID, &pd->flags);
	}

	/*
	 * in theory we could use lra on -RW media as well and just zero
	 * blocks that haven't been written yet, but in practice that
	 * is just a no-go. we'll use that for -R, naturally.
	 */
	if (ti.lra_v) {
		pd->lra = be32_to_cpu(ti.last_rec_address);
		set_bit(PACKET_LRA_VALID, &pd->flags);
	} else {
		pd->lra = 0xffffffff;
		set_bit(PACKET_LRA_VALID, &pd->flags);
	}

	/*
	 * fine for now
	 */
	pd->settings.link_loss = 7;
	pd->settings.write_type = 0;	/* packet */
	pd->settings.track_mode = ti.track_mode;

	/*
	 * mode1 or mode2 disc
	 */
	switch (ti.data_mode) {
		case PACKET_MODE1:
			pd->settings.block_mode = PACKET_BLOCK_MODE1;
			break;
		case PACKET_MODE2:
			pd->settings.block_mode = PACKET_BLOCK_MODE2;
			break;
		default:
			pkt_err(pd, "unknown data mode\n");
			return -EROFS;
	}
	return 0;
}

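/*
 * Worked example (typical CD-RW values): a fixed packet size of 32
 * frames reported in the track info yields settings.size == 32 << 2 ==
 * 128 sectors (64kB), which also satisfies the pkt->frames ==
 * settings.size >> 2 invariant assumed by the packet allocation code
 * above.
 */
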
1849 /*
1850  * enable/disable write caching on drive
1851  */
1852 static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
1853 						int set)
1854 {
1855 	struct packet_command cgc;
1856 	struct scsi_sense_hdr sshdr;
1857 	unsigned char buf[64];
1858 	int ret;
1859 
1860 	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
1861 	cgc.sshdr = &sshdr;
1862 	cgc.buflen = pd->mode_offset + 12;
1863 
1864 	/*
1865 	 * caching mode page might not be there, so quiet this command
1866 	 */
1867 	cgc.quiet = 1;
1868 
1869 	ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0);
1870 	if (ret)
1871 		return ret;
1872 
1873 	buf[pd->mode_offset + 10] |= (!!set << 2);
1874 
1875 	cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
1876 	ret = pkt_mode_select(pd, &cgc);
1877 	if (ret) {
1878 		pkt_err(pd, "write caching control failed\n");
1879 		pkt_dump_sense(pd, &cgc);
1880 	} else if (!ret && set)
1881 		pkt_notice(pd, "enabled write caching\n");
1882 	return ret;
1883 }
1884 
1885 static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
1886 {
1887 	struct packet_command cgc;
1888 
1889 	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
1890 	cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
1891 	cgc.cmd[4] = lockflag ? 1 : 0;
1892 	return pkt_generic_packet(pd, &cgc);
1893 }
1894 
1895 /*
1896  * Returns drive maximum write speed
1897  */
1898 static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
1899 						unsigned *write_speed)
1900 {
1901 	struct packet_command cgc;
1902 	struct scsi_sense_hdr sshdr;
1903 	unsigned char buf[256+18];
1904 	unsigned char *cap_buf;
1905 	int ret, offset;
1906 
1907 	cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
1908 	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
1909 	cgc.sshdr = &sshdr;
1910 
1911 	ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
1912 	if (ret) {
1913 		cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
1914 			     sizeof(struct mode_page_header);
1915 		ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
1916 		if (ret) {
1917 			pkt_dump_sense(pd, &cgc);
1918 			return ret;
1919 		}
1920 	}
1921 
1922 	offset = 20;			    /* Obsoleted field, used by older drives */
1923 	if (cap_buf[1] >= 28)
1924 		offset = 28;		    /* Current write speed selected */
1925 	if (cap_buf[1] >= 30) {
		/*
		 * If the drive reports at least one "Logical Unit Write
		 * Speed Performance Descriptor Block", use the first block;
		 * it contains the highest speed.
		 */
1930 		int num_spdb = (cap_buf[30] << 8) + cap_buf[31];
1931 		if (num_spdb > 0)
1932 			offset = 34;
1933 	}
1934 
1935 	*write_speed = (cap_buf[offset] << 8) | cap_buf[offset + 1];
1936 	return 0;
1937 }
1938 
/* These tables are from cdrecord - I don't have the Orange Book */
1940 /* standard speed CD-RW (1-4x) */
1941 static char clv_to_speed[16] = {
1942 	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
1943 	   0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1944 };
/* high speed CD-RW (4-10x) */
1946 static char hs_clv_to_speed[16] = {
1947 	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
1948 	   0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1949 };
1950 /* ultra high speed CD-RW */
1951 static char us_clv_to_speed[16] = {
1952 	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
1953 	   0, 2, 4, 8, 0, 0,16, 0,24,32,40,48, 0, 0, 0, 0
1954 };
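/*
 * Example: an Ultra Speed disc (sub-type 2) advertising ATIP speed code 9
 * maps via us_clv_to_speed[9] to 32, i.e. 32x.
 */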
1955 
1956 /*
1957  * reads the maximum media speed from ATIP
1958  */
1959 static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
1960 						unsigned *speed)
1961 {
1962 	struct packet_command cgc;
1963 	struct scsi_sense_hdr sshdr;
1964 	unsigned char buf[64];
1965 	unsigned int size, st, sp;
1966 	int ret;
1967 
1968 	init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
1969 	cgc.sshdr = &sshdr;
1970 	cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
1971 	cgc.cmd[1] = 2;
1972 	cgc.cmd[2] = 4; /* READ ATIP */
1973 	cgc.cmd[8] = 2;
1974 	ret = pkt_generic_packet(pd, &cgc);
1975 	if (ret) {
1976 		pkt_dump_sense(pd, &cgc);
1977 		return ret;
1978 	}
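	/*
	 * The first 2-byte transfer returned only the ATIP header; compute
	 * the full response length and re-issue the command, clamped to the
	 * local buffer.
	 */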
	size = ((unsigned int) buf[0] << 8) + buf[1] + 2;
1980 	if (size > sizeof(buf))
1981 		size = sizeof(buf);
1982 
1983 	init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
1984 	cgc.sshdr = &sshdr;
1985 	cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
1986 	cgc.cmd[1] = 2;
1987 	cgc.cmd[2] = 4;
1988 	cgc.cmd[8] = size;
1989 	ret = pkt_generic_packet(pd, &cgc);
1990 	if (ret) {
1991 		pkt_dump_sense(pd, &cgc);
1992 		return ret;
1993 	}
1994 
1995 	if (!(buf[6] & 0x40)) {
1996 		pkt_notice(pd, "disc type is not CD-RW\n");
1997 		return 1;
1998 	}
1999 	if (!(buf[6] & 0x4)) {
		pkt_notice(pd, "A1 values on media are not valid, maybe not CD-RW?\n");
2001 		return 1;
2002 	}
2003 
2004 	st = (buf[6] >> 3) & 0x7; /* disc sub-type */
2005 
2006 	sp = buf[16] & 0xf; /* max speed from ATIP A1 field */
2007 
2008 	/* Info from cdrecord */
2009 	switch (st) {
2010 		case 0: /* standard speed */
2011 			*speed = clv_to_speed[sp];
2012 			break;
2013 		case 1: /* high speed */
2014 			*speed = hs_clv_to_speed[sp];
2015 			break;
2016 		case 2: /* ultra high speed */
2017 			*speed = us_clv_to_speed[sp];
2018 			break;
2019 		default:
2020 			pkt_notice(pd, "unknown disc sub-type %d\n", st);
2021 			return 1;
2022 	}
2023 	if (*speed) {
2024 		pkt_info(pd, "maximum media speed: %d\n", *speed);
2025 		return 0;
2026 	} else {
2027 		pkt_notice(pd, "unknown speed %d for sub-type %d\n", sp, st);
2028 		return 1;
2029 	}
2030 }
2031 
2032 static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
2033 {
2034 	struct packet_command cgc;
2035 	struct scsi_sense_hdr sshdr;
2036 	int ret;
2037 
2038 	pkt_dbg(2, pd, "Performing OPC\n");
2039 
2040 	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
2041 	cgc.sshdr = &sshdr;
	cgc.timeout = 60 * HZ;
2043 	cgc.cmd[0] = GPCMD_SEND_OPC;
2044 	cgc.cmd[1] = 1;
2045 	ret = pkt_generic_packet(pd, &cgc);
2046 	if (ret)
2047 		pkt_dump_sense(pd, &cgc);
2048 	return ret;
2049 }
2050 
2051 static int pkt_open_write(struct pktcdvd_device *pd)
2052 {
2053 	int ret;
2054 	unsigned int write_speed, media_write_speed, read_speed;
2055 
2056 	ret = pkt_probe_settings(pd);
2057 	if (ret) {
2058 		pkt_dbg(2, pd, "failed probe\n");
2059 		return ret;
2060 	}
2061 
2062 	ret = pkt_set_write_settings(pd);
2063 	if (ret) {
2064 		pkt_dbg(1, pd, "failed saving write settings\n");
2065 		return -EIO;
2066 	}
2067 
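	/* best effort: a failure to toggle write caching is not fatal */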
2068 	pkt_write_caching(pd, USE_WCACHING);
2069 
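	/*
	 * Speeds are in kB/s; one CD speed unit is ~176.4 kB/s, rounded up
	 * to 177 here (and down to 176 when converting back for display).
	 * Fall back to 16x if the drive does not report a maximum.
	 */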
2070 	ret = pkt_get_max_speed(pd, &write_speed);
2071 	if (ret)
2072 		write_speed = 16 * 177;
2073 	switch (pd->mmc3_profile) {
2074 		case 0x13: /* DVD-RW */
2075 		case 0x1a: /* DVD+RW */
2076 		case 0x12: /* DVD-RAM */
2077 			pkt_dbg(1, pd, "write speed %ukB/s\n", write_speed);
2078 			break;
2079 		default:
2080 			ret = pkt_media_speed(pd, &media_write_speed);
2081 			if (ret)
2082 				media_write_speed = 16;
2083 			write_speed = min(write_speed, media_write_speed * 177);
2084 			pkt_dbg(1, pd, "write speed %ux\n", write_speed / 176);
2085 			break;
2086 	}
2087 	read_speed = write_speed;
2088 
2089 	ret = pkt_set_speed(pd, write_speed, read_speed);
2090 	if (ret) {
2091 		pkt_dbg(1, pd, "couldn't set write speed\n");
2092 		return -EIO;
2093 	}
2094 	pd->write_speed = write_speed;
2095 	pd->read_speed = read_speed;
2096 
2097 	ret = pkt_perform_opc(pd);
	if (ret)
		pkt_dbg(1, pd, "Optimum Power Calibration failed\n");
2101 
2102 	return 0;
2103 }
2104 
2105 /*
2106  * called at open time.
2107  */
2108 static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
2109 {
2110 	int ret;
2111 	long lba;
2112 	struct request_queue *q;
2113 	struct block_device *bdev;
2114 
2115 	/*
2116 	 * We need to re-open the cdrom device without O_NONBLOCK to be able
2117 	 * to read/write from/to it. It is already opened in O_NONBLOCK mode
2118 	 * so open should not fail.
2119 	 */
2120 	bdev = blkdev_get_by_dev(pd->bdev->bd_dev, FMODE_READ | FMODE_EXCL, pd);
2121 	if (IS_ERR(bdev)) {
2122 		ret = PTR_ERR(bdev);
2123 		goto out;
2124 	}
2125 
2126 	ret = pkt_get_last_written(pd, &lba);
2127 	if (ret) {
2128 		pkt_err(pd, "pkt_get_last_written failed\n");
2129 		goto out_putdev;
2130 	}
2131 
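	/*
	 * lba is the last written address in 2kB frames; the block layer
	 * counts 512-byte sectors, hence the << 2.
	 */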
2132 	set_capacity(pd->disk, lba << 2);
2133 	set_capacity_and_notify(pd->bdev->bd_disk, lba << 2);
2134 
2135 	q = bdev_get_queue(pd->bdev);
2136 	if (write) {
2137 		ret = pkt_open_write(pd);
2138 		if (ret)
2139 			goto out_putdev;
		/*
		 * Some CD-RW drives cannot handle writes larger than one
		 * packet, even if the size is a multiple of the packet size.
		 */
2144 		blk_queue_max_hw_sectors(q, pd->settings.size);
2145 		set_bit(PACKET_WRITABLE, &pd->flags);
2146 	} else {
2147 		pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
2148 		clear_bit(PACKET_WRITABLE, &pd->flags);
2149 	}
2150 
2151 	ret = pkt_set_segment_merging(pd, q);
2152 	if (ret)
2153 		goto out_putdev;
2154 
2155 	if (write) {
2156 		if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
2157 			pkt_err(pd, "not enough memory for buffers\n");
2158 			ret = -ENOMEM;
2159 			goto out_putdev;
2160 		}
2161 		pkt_info(pd, "%lukB available on disc\n", lba << 1);
2162 	}
2163 
2164 	return 0;
2165 
2166 out_putdev:
2167 	blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
2168 out:
2169 	return ret;
2170 }
2171 
2172 /*
2173  * called when the device is closed. makes sure that the device flushes
2174  * the internal cache before we close.
2175  */
2176 static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
2177 {
2178 	if (flush && pkt_flush_cache(pd))
2179 		pkt_dbg(1, pd, "not flushing cache\n");
2180 
2181 	pkt_lock_door(pd, 0);
2182 
2183 	pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
2184 	blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);
2185 
2186 	pkt_shrink_pktlist(pd);
2187 }
2188 
2189 static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
2190 {
2191 	if (dev_minor >= MAX_WRITERS)
2192 		return NULL;
2193 
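	/* clamp the index under speculation (Spectre v1 hardening) */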
2194 	dev_minor = array_index_nospec(dev_minor, MAX_WRITERS);
2195 	return pkt_devs[dev_minor];
2196 }
2197 
2198 static int pkt_open(struct block_device *bdev, fmode_t mode)
2199 {
2200 	struct pktcdvd_device *pd = NULL;
2201 	int ret;
2202 
2203 	mutex_lock(&pktcdvd_mutex);
2204 	mutex_lock(&ctl_mutex);
2205 	pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
2206 	if (!pd) {
2207 		ret = -ENODEV;
2208 		goto out;
2209 	}
2210 	BUG_ON(pd->refcnt < 0);
2211 
2212 	pd->refcnt++;
2213 	if (pd->refcnt > 1) {
2214 		if ((mode & FMODE_WRITE) &&
2215 		    !test_bit(PACKET_WRITABLE, &pd->flags)) {
2216 			ret = -EBUSY;
2217 			goto out_dec;
2218 		}
2219 	} else {
2220 		ret = pkt_open_dev(pd, mode & FMODE_WRITE);
2221 		if (ret)
2222 			goto out_dec;
2223 		/*
2224 		 * needed here as well, since ext2 (among others) may change
2225 		 * the blocksize at mount time
2226 		 */
2227 		set_blocksize(bdev, CD_FRAMESIZE);
2228 	}
2229 
2230 	mutex_unlock(&ctl_mutex);
2231 	mutex_unlock(&pktcdvd_mutex);
2232 	return 0;
2233 
2234 out_dec:
2235 	pd->refcnt--;
2236 out:
2237 	mutex_unlock(&ctl_mutex);
2238 	mutex_unlock(&pktcdvd_mutex);
2239 	return ret;
2240 }
2241 
2242 static void pkt_close(struct gendisk *disk, fmode_t mode)
2243 {
2244 	struct pktcdvd_device *pd = disk->private_data;
2245 
2246 	mutex_lock(&pktcdvd_mutex);
2247 	mutex_lock(&ctl_mutex);
2248 	pd->refcnt--;
2249 	BUG_ON(pd->refcnt < 0);
2250 	if (pd->refcnt == 0) {
2251 		int flush = test_bit(PACKET_WRITABLE, &pd->flags);
2252 		pkt_release_dev(pd, flush);
2253 	}
2254 	mutex_unlock(&ctl_mutex);
2255 	mutex_unlock(&pktcdvd_mutex);
2256 }
2257 
2258 
2259 static void pkt_end_io_read_cloned(struct bio *bio)
2260 {
2261 	struct packet_stacked_data *psd = bio->bi_private;
2262 	struct pktcdvd_device *pd = psd->pd;
2263 
2264 	psd->bio->bi_status = bio->bi_status;
2265 	bio_put(bio);
2266 	bio_endio(psd->bio);
2267 	mempool_free(psd, &psd_pool);
2268 	pkt_bio_finished(pd);
2269 }
2270 
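/*
 * Reads bypass packet assembly: clone the bio, point the clone at the
 * backing device and let pkt_end_io_read_cloned() complete the original
 * once the clone finishes.
 */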
2271 static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
2272 {
2273 	struct bio *cloned_bio = bio_clone_fast(bio, GFP_NOIO, &pkt_bio_set);
2274 	struct packet_stacked_data *psd = mempool_alloc(&psd_pool, GFP_NOIO);
2275 
2276 	psd->pd = pd;
2277 	psd->bio = bio;
2278 	bio_set_dev(cloned_bio, pd->bdev);
2279 	cloned_bio->bi_private = psd;
2280 	cloned_bio->bi_end_io = pkt_end_io_read_cloned;
2281 	pd->stats.secs_r += bio_sectors(bio);
2282 	pkt_queue_bio(pd, cloned_bio);
2283 }
2284 
2285 static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
2286 {
2287 	struct pktcdvd_device *pd = q->queuedata;
2288 	sector_t zone;
2289 	struct packet_data *pkt;
2290 	int was_empty, blocked_bio;
2291 	struct pkt_rb_node *node;
2292 
2293 	zone = get_zone(bio->bi_iter.bi_sector, pd);
2294 
2295 	/*
2296 	 * If we find a matching packet in state WAITING or READ_WAIT, we can
2297 	 * just append this bio to that packet.
2298 	 */
2299 	spin_lock(&pd->cdrw.active_list_lock);
2300 	blocked_bio = 0;
2301 	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
2302 		if (pkt->sector == zone) {
2303 			spin_lock(&pkt->lock);
2304 			if ((pkt->state == PACKET_WAITING_STATE) ||
2305 			    (pkt->state == PACKET_READ_WAIT_STATE)) {
2306 				bio_list_add(&pkt->orig_bios, bio);
2307 				pkt->write_size +=
2308 					bio->bi_iter.bi_size / CD_FRAMESIZE;
2309 				if ((pkt->write_size >= pkt->frames) &&
2310 				    (pkt->state == PACKET_WAITING_STATE)) {
2311 					atomic_inc(&pkt->run_sm);
2312 					wake_up(&pd->wqueue);
2313 				}
2314 				spin_unlock(&pkt->lock);
2315 				spin_unlock(&pd->cdrw.active_list_lock);
2316 				return;
2317 			} else {
2318 				blocked_bio = 1;
2319 			}
2320 			spin_unlock(&pkt->lock);
2321 		}
2322 	}
2323 	spin_unlock(&pd->cdrw.active_list_lock);
2324 
2325  	/*
2326 	 * Test if there is enough room left in the bio work queue
2327 	 * (queue size >= congestion on mark).
2328 	 * If not, wait till the work queue size is below the congestion off mark.
2329 	 */
2330 	spin_lock(&pd->lock);
2331 	if (pd->write_congestion_on > 0
2332 	    && pd->bio_queue_size >= pd->write_congestion_on) {
2333 		set_bdi_congested(q->backing_dev_info, BLK_RW_ASYNC);
2334 		do {
2335 			spin_unlock(&pd->lock);
2336 			congestion_wait(BLK_RW_ASYNC, HZ);
2337 			spin_lock(&pd->lock);
		} while (pd->bio_queue_size > pd->write_congestion_off);
2339 	}
2340 	spin_unlock(&pd->lock);
2341 
2342 	/*
2343 	 * No matching packet found. Store the bio in the work queue.
2344 	 */
2345 	node = mempool_alloc(&pd->rb_pool, GFP_NOIO);
2346 	node->bio = bio;
2347 	spin_lock(&pd->lock);
2348 	BUG_ON(pd->bio_queue_size < 0);
2349 	was_empty = (pd->bio_queue_size == 0);
2350 	pkt_rbtree_insert(pd, node);
2351 	spin_unlock(&pd->lock);
2352 
2353 	/*
2354 	 * Wake up the worker thread.
2355 	 */
2356 	atomic_set(&pd->scan_queue, 1);
2357 	if (was_empty) {
2358 		/* This wake_up is required for correct operation */
2359 		wake_up(&pd->wqueue);
2360 	} else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
2361 		/*
2362 		 * This wake up is not required for correct operation,
2363 		 * but improves performance in some cases.
2364 		 */
2365 		wake_up(&pd->wqueue);
2366 	}
2367 }
2368 
2369 static blk_qc_t pkt_submit_bio(struct bio *bio)
2370 {
2371 	struct pktcdvd_device *pd;
2372 	char b[BDEVNAME_SIZE];
2373 	struct bio *split;
2374 
2375 	blk_queue_split(&bio);
2376 
2377 	pd = bio->bi_bdev->bd_disk->queue->queuedata;
2378 	if (!pd) {
		pr_err("%s: incorrect request queue\n", bio_devname(bio, b));
2380 		goto end_io;
2381 	}
2382 
2383 	pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
2384 		(unsigned long long)bio->bi_iter.bi_sector,
2385 		(unsigned long long)bio_end_sector(bio));
2386 
2387 	/*
2388 	 * Clone READ bios so we can have our own bi_end_io callback.
2389 	 */
2390 	if (bio_data_dir(bio) == READ) {
2391 		pkt_make_request_read(pd, bio);
2392 		return BLK_QC_T_NONE;
2393 	}
2394 
2395 	if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
2396 		pkt_notice(pd, "WRITE for ro device (%llu)\n",
2397 			   (unsigned long long)bio->bi_iter.bi_sector);
2398 		goto end_io;
2399 	}
2400 
2401 	if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
2402 		pkt_err(pd, "wrong bio size\n");
2403 		goto end_io;
2404 	}
2405 
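	/*
	 * max_hw_sectors is capped at the packet size for writable devices,
	 * so a write bio straddles at most one zone boundary: split it there
	 * and chain the pieces to the parent.
	 */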
2406 	do {
2407 		sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
2408 		sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);
2409 
2410 		if (last_zone != zone) {
2411 			BUG_ON(last_zone != zone + pd->settings.size);
2412 
2413 			split = bio_split(bio, last_zone -
2414 					  bio->bi_iter.bi_sector,
2415 					  GFP_NOIO, &pkt_bio_set);
2416 			bio_chain(split, bio);
2417 		} else {
2418 			split = bio;
2419 		}
2420 
2421 		pkt_make_request_write(bio->bi_bdev->bd_disk->queue, split);
2422 	} while (split != bio);
2423 
2424 	return BLK_QC_T_NONE;
2425 end_io:
2426 	bio_io_error(bio);
2427 	return BLK_QC_T_NONE;
2428 }
2429 
2430 static void pkt_init_queue(struct pktcdvd_device *pd)
2431 {
2432 	struct request_queue *q = pd->disk->queue;
2433 
2434 	blk_queue_logical_block_size(q, CD_FRAMESIZE);
2435 	blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS);
2436 	q->queuedata = pd;
2437 }
2438 
2439 static int pkt_seq_show(struct seq_file *m, void *p)
2440 {
2441 	struct pktcdvd_device *pd = m->private;
	const char *msg;
2443 	char bdev_buf[BDEVNAME_SIZE];
2444 	int states[PACKET_NUM_STATES];
2445 
2446 	seq_printf(m, "Writer %s mapped to %s:\n", pd->name,
2447 		   bdevname(pd->bdev, bdev_buf));
2448 
2449 	seq_printf(m, "\nSettings:\n");
2450 	seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
2451 
2452 	if (pd->settings.write_type == 0)
2453 		msg = "Packet";
2454 	else
2455 		msg = "Unknown";
2456 	seq_printf(m, "\twrite type:\t\t%s\n", msg);
2457 
2458 	seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
2459 	seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);
2460 
2461 	seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);
2462 
2463 	if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
2464 		msg = "Mode 1";
2465 	else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
2466 		msg = "Mode 2";
2467 	else
2468 		msg = "Unknown";
2469 	seq_printf(m, "\tblock mode:\t\t%s\n", msg);
2470 
2471 	seq_printf(m, "\nStatistics:\n");
2472 	seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
2473 	seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
2474 	seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
2475 	seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
2476 	seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);
2477 
2478 	seq_printf(m, "\nMisc:\n");
2479 	seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
2480 	seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
2481 	seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
2482 	seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
2483 	seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
2484 	seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);
2485 
2486 	seq_printf(m, "\nQueue state:\n");
2487 	seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
2488 	seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
2489 	seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector);
2490 
2491 	pkt_count_states(pd, states);
2492 	seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
2493 		   states[0], states[1], states[2], states[3], states[4], states[5]);
2494 
2495 	seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n",
2496 			pd->write_congestion_off,
2497 			pd->write_congestion_on);
2498 	return 0;
2499 }
2500 
2501 static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
2502 {
2503 	int i;
2504 	char b[BDEVNAME_SIZE];
2505 	struct block_device *bdev;
2506 
2507 	if (pd->pkt_dev == dev) {
2508 		pkt_err(pd, "recursive setup not allowed\n");
2509 		return -EBUSY;
2510 	}
2511 	for (i = 0; i < MAX_WRITERS; i++) {
2512 		struct pktcdvd_device *pd2 = pkt_devs[i];
2513 		if (!pd2)
2514 			continue;
2515 		if (pd2->bdev->bd_dev == dev) {
2516 			pkt_err(pd, "%s already setup\n",
2517 				bdevname(pd2->bdev, b));
2518 			return -EBUSY;
2519 		}
2520 		if (pd2->pkt_dev == dev) {
2521 			pkt_err(pd, "can't chain pktcdvd devices\n");
2522 			return -EBUSY;
2523 		}
2524 	}
2525 
2526 	bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_NDELAY, NULL);
2527 	if (IS_ERR(bdev))
2528 		return PTR_ERR(bdev);
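	/* only queues that accept SCSI passthrough (e.g. sr, ide-cd) will do */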
2529 	if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) {
2530 		blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
2531 		return -EINVAL;
2532 	}
2533 
2534 	/* This is safe, since we have a reference from open(). */
2535 	__module_get(THIS_MODULE);
2536 
2537 	pd->bdev = bdev;
2538 	set_blocksize(bdev, CD_FRAMESIZE);
2539 
2540 	pkt_init_queue(pd);
2541 
2542 	atomic_set(&pd->cdrw.pending_bios, 0);
2543 	pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
2544 	if (IS_ERR(pd->cdrw.thread)) {
2545 		pkt_err(pd, "can't start kernel thread\n");
2546 		goto out_mem;
2547 	}
2548 
2549 	proc_create_single_data(pd->name, 0, pkt_proc, pkt_seq_show, pd);
2550 	pkt_dbg(1, pd, "writer mapped to %s\n", bdevname(bdev, b));
2551 	return 0;
2552 
2553 out_mem:
2554 	blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
2555 	/* This is safe: open() is still holding a reference. */
2556 	module_put(THIS_MODULE);
2557 	return -ENOMEM;
2558 }
2559 
2560 static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
2561 {
2562 	struct pktcdvd_device *pd = bdev->bd_disk->private_data;
2563 	int ret;
2564 
2565 	pkt_dbg(2, pd, "cmd %x, dev %d:%d\n",
2566 		cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
2567 
2568 	mutex_lock(&pktcdvd_mutex);
2569 	switch (cmd) {
2570 	case CDROMEJECT:
2571 		/*
2572 		 * The door gets locked when the device is opened, so we
2573 		 * have to unlock it or else the eject command fails.
2574 		 */
2575 		if (pd->refcnt == 1)
2576 			pkt_lock_door(pd, 0);
2577 		fallthrough;
2578 	/*
2579 	 * forward selected CDROM ioctls to CD-ROM, for UDF
2580 	 */
2581 	case CDROMMULTISESSION:
2582 	case CDROMREADTOCENTRY:
2583 	case CDROM_LAST_WRITTEN:
2584 	case CDROM_SEND_PACKET:
2585 	case SCSI_IOCTL_SEND_COMMAND:
2586 		if (!bdev->bd_disk->fops->ioctl)
2587 			ret = -ENOTTY;
2588 		else
2589 			ret = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
2590 		break;
2591 	default:
2592 		pkt_dbg(2, pd, "Unknown ioctl (%x)\n", cmd);
2593 		ret = -ENOTTY;
2594 	}
2595 	mutex_unlock(&pktcdvd_mutex);
2596 
2597 	return ret;
2598 }
2599 
2600 static unsigned int pkt_check_events(struct gendisk *disk,
2601 				     unsigned int clearing)
2602 {
2603 	struct pktcdvd_device *pd = disk->private_data;
2604 	struct gendisk *attached_disk;
2605 
2606 	if (!pd)
2607 		return 0;
2608 	if (!pd->bdev)
2609 		return 0;
2610 	attached_disk = pd->bdev->bd_disk;
2611 	if (!attached_disk || !attached_disk->fops->check_events)
2612 		return 0;
2613 	return attached_disk->fops->check_events(attached_disk, clearing);
2614 }
2615 
2616 static char *pkt_devnode(struct gendisk *disk, umode_t *mode)
2617 {
2618 	return kasprintf(GFP_KERNEL, "pktcdvd/%s", disk->disk_name);
2619 }
2620 
2621 static const struct block_device_operations pktcdvd_ops = {
2622 	.owner =		THIS_MODULE,
2623 	.submit_bio =		pkt_submit_bio,
2624 	.open =			pkt_open,
2625 	.release =		pkt_close,
2626 	.ioctl =		pkt_ioctl,
2627 	.compat_ioctl =		blkdev_compat_ptr_ioctl,
2628 	.check_events =		pkt_check_events,
2629 	.devnode =		pkt_devnode,
2630 };
2631 
2632 /*
2633  * Set up mapping from pktcdvd device to CD-ROM device.
2634  */
static int pkt_setup_dev(dev_t dev, dev_t *pkt_dev)
2636 {
2637 	int idx;
2638 	int ret = -ENOMEM;
2639 	struct pktcdvd_device *pd;
2640 	struct gendisk *disk;
2641 
2642 	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
2643 
2644 	for (idx = 0; idx < MAX_WRITERS; idx++)
2645 		if (!pkt_devs[idx])
2646 			break;
2647 	if (idx == MAX_WRITERS) {
2648 		pr_err("max %d writers supported\n", MAX_WRITERS);
2649 		ret = -EBUSY;
2650 		goto out_mutex;
2651 	}
2652 
2653 	pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
2654 	if (!pd)
2655 		goto out_mutex;
2656 
2657 	ret = mempool_init_kmalloc_pool(&pd->rb_pool, PKT_RB_POOL_SIZE,
2658 					sizeof(struct pkt_rb_node));
2659 	if (ret)
2660 		goto out_mem;
2661 
2662 	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
2663 	INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
2664 	spin_lock_init(&pd->cdrw.active_list_lock);
2665 
2666 	spin_lock_init(&pd->lock);
2667 	spin_lock_init(&pd->iosched.lock);
2668 	bio_list_init(&pd->iosched.read_queue);
2669 	bio_list_init(&pd->iosched.write_queue);
	sprintf(pd->name, DRIVER_NAME "%d", idx);
2671 	init_waitqueue_head(&pd->wqueue);
2672 	pd->bio_queue = RB_ROOT;
2673 
2674 	pd->write_congestion_on  = write_congestion_on;
2675 	pd->write_congestion_off = write_congestion_off;
2676 
2677 	ret = -ENOMEM;
2678 	disk = alloc_disk(1);
2679 	if (!disk)
2680 		goto out_mem;
2681 	pd->disk = disk;
2682 	disk->major = pktdev_major;
2683 	disk->first_minor = idx;
2684 	disk->fops = &pktcdvd_ops;
2685 	disk->flags = GENHD_FL_REMOVABLE;
2686 	strcpy(disk->disk_name, pd->name);
2687 	disk->private_data = pd;
2688 	disk->queue = blk_alloc_queue(NUMA_NO_NODE);
2689 	if (!disk->queue)
2690 		goto out_mem2;
2691 
2692 	pd->pkt_dev = MKDEV(pktdev_major, idx);
2693 	ret = pkt_new_dev(pd, dev);
2694 	if (ret)
2695 		goto out_mem2;
2696 
2697 	/* inherit events of the host device */
2698 	disk->events = pd->bdev->bd_disk->events;
2699 
2700 	add_disk(disk);
2701 
2702 	pkt_sysfs_dev_new(pd);
2703 	pkt_debugfs_dev_new(pd);
2704 
2705 	pkt_devs[idx] = pd;
2706 	if (pkt_dev)
2707 		*pkt_dev = pd->pkt_dev;
2708 
2709 	mutex_unlock(&ctl_mutex);
2710 	return 0;
2711 
2712 out_mem2:
2713 	put_disk(disk);
2714 out_mem:
2715 	mempool_exit(&pd->rb_pool);
2716 	kfree(pd);
2717 out_mutex:
2718 	mutex_unlock(&ctl_mutex);
2719 	pr_err("setup of pktcdvd device failed\n");
2720 	return ret;
2721 }
2722 
2723 /*
2724  * Tear down mapping from pktcdvd device to CD-ROM device.
2725  */
2726 static int pkt_remove_dev(dev_t pkt_dev)
2727 {
2728 	struct pktcdvd_device *pd;
2729 	int idx;
2730 	int ret = 0;
2731 
2732 	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
2733 
2734 	for (idx = 0; idx < MAX_WRITERS; idx++) {
2735 		pd = pkt_devs[idx];
2736 		if (pd && (pd->pkt_dev == pkt_dev))
2737 			break;
2738 	}
2739 	if (idx == MAX_WRITERS) {
2740 		pr_debug("dev not setup\n");
2741 		ret = -ENXIO;
2742 		goto out;
2743 	}
2744 
2745 	if (pd->refcnt > 0) {
2746 		ret = -EBUSY;
2747 		goto out;
2748 	}
2749 	if (!IS_ERR(pd->cdrw.thread))
2750 		kthread_stop(pd->cdrw.thread);
2751 
2752 	pkt_devs[idx] = NULL;
2753 
2754 	pkt_debugfs_dev_remove(pd);
2755 	pkt_sysfs_dev_remove(pd);
2756 
2757 	blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);
2758 
2759 	remove_proc_entry(pd->name, pkt_proc);
2760 	pkt_dbg(1, pd, "writer unmapped\n");
2761 
2762 	del_gendisk(pd->disk);
2763 	blk_cleanup_queue(pd->disk->queue);
2764 	put_disk(pd->disk);
2765 
2766 	mempool_exit(&pd->rb_pool);
2767 	kfree(pd);
2768 
2769 	/* This is safe: open() is still holding a reference. */
2770 	module_put(THIS_MODULE);
2771 
2772 out:
2773 	mutex_unlock(&ctl_mutex);
2774 	return ret;
2775 }
2776 
2777 static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
2778 {
2779 	struct pktcdvd_device *pd;
2780 
2781 	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
2782 
2783 	pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
2784 	if (pd) {
2785 		ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
2786 		ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
2787 	} else {
2788 		ctrl_cmd->dev = 0;
2789 		ctrl_cmd->pkt_dev = 0;
2790 	}
2791 	ctrl_cmd->num_devices = MAX_WRITERS;
2792 
2793 	mutex_unlock(&ctl_mutex);
2794 }
2795 
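/*
 * Entry point for the pktcdvd control device. Illustrative userspace use
 * (pktsetup(8) from udftools does this for you), given a descriptor for
 * /dev/pktcdvd/control and the definitions from <linux/pktcdvd.h>:
 *
 *	struct pkt_ctrl_command c = {
 *		.command = PKT_CTRL_CMD_SETUP,
 *		.dev = encoded_dev,	// dev_t in new_encode_dev() format
 *	};
 *	ioctl(ctl_fd, PACKET_CTRL_CMD, &c);
 *
 * ctl_fd and encoded_dev are placeholders.
 */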
2796 static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2797 {
2798 	void __user *argp = (void __user *)arg;
2799 	struct pkt_ctrl_command ctrl_cmd;
2800 	int ret = 0;
2801 	dev_t pkt_dev = 0;
2802 
2803 	if (cmd != PACKET_CTRL_CMD)
2804 		return -ENOTTY;
2805 
2806 	if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command)))
2807 		return -EFAULT;
2808 
2809 	switch (ctrl_cmd.command) {
2810 	case PKT_CTRL_CMD_SETUP:
2811 		if (!capable(CAP_SYS_ADMIN))
2812 			return -EPERM;
2813 		ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
2814 		ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
2815 		break;
2816 	case PKT_CTRL_CMD_TEARDOWN:
2817 		if (!capable(CAP_SYS_ADMIN))
2818 			return -EPERM;
2819 		ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
2820 		break;
2821 	case PKT_CTRL_CMD_STATUS:
2822 		pkt_get_status(&ctrl_cmd);
2823 		break;
2824 	default:
2825 		return -ENOTTY;
2826 	}
2827 
2828 	if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command)))
2829 		return -EFAULT;
2830 	return ret;
2831 }
2832 
2833 #ifdef CONFIG_COMPAT
2834 static long pkt_ctl_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2835 {
2836 	return pkt_ctl_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
2837 }
2838 #endif
2839 
2840 static const struct file_operations pkt_ctl_fops = {
2841 	.open		= nonseekable_open,
2842 	.unlocked_ioctl	= pkt_ctl_ioctl,
2843 #ifdef CONFIG_COMPAT
2844 	.compat_ioctl	= pkt_ctl_compat_ioctl,
2845 #endif
2846 	.owner		= THIS_MODULE,
2847 	.llseek		= no_llseek,
2848 };
2849 
2850 static struct miscdevice pkt_misc = {
2851 	.minor 		= MISC_DYNAMIC_MINOR,
2852 	.name  		= DRIVER_NAME,
2853 	.nodename	= "pktcdvd/control",
2854 	.fops  		= &pkt_ctl_fops
2855 };
2856 
2857 static int __init pkt_init(void)
2858 {
2859 	int ret;
2860 
2861 	mutex_init(&ctl_mutex);
2862 
2863 	ret = mempool_init_kmalloc_pool(&psd_pool, PSD_POOL_SIZE,
2864 				    sizeof(struct packet_stacked_data));
2865 	if (ret)
2866 		return ret;
2867 	ret = bioset_init(&pkt_bio_set, BIO_POOL_SIZE, 0, 0);
2868 	if (ret) {
2869 		mempool_exit(&psd_pool);
2870 		return ret;
2871 	}
2872 
2873 	ret = register_blkdev(pktdev_major, DRIVER_NAME);
2874 	if (ret < 0) {
2875 		pr_err("unable to register block device\n");
2876 		goto out2;
2877 	}
2878 	if (!pktdev_major)
2879 		pktdev_major = ret;
2880 
2881 	ret = pkt_sysfs_init();
2882 	if (ret)
2883 		goto out;
2884 
2885 	pkt_debugfs_init();
2886 
2887 	ret = misc_register(&pkt_misc);
2888 	if (ret) {
2889 		pr_err("unable to register misc device\n");
2890 		goto out_misc;
2891 	}
2892 
	pkt_proc = proc_mkdir("driver/" DRIVER_NAME, NULL);
2894 
2895 	return 0;
2896 
2897 out_misc:
2898 	pkt_debugfs_cleanup();
2899 	pkt_sysfs_cleanup();
2900 out:
2901 	unregister_blkdev(pktdev_major, DRIVER_NAME);
2902 out2:
2903 	mempool_exit(&psd_pool);
2904 	bioset_exit(&pkt_bio_set);
2905 	return ret;
2906 }
2907 
2908 static void __exit pkt_exit(void)
2909 {
	remove_proc_entry("driver/" DRIVER_NAME, NULL);
2911 	misc_deregister(&pkt_misc);
2912 
2913 	pkt_debugfs_cleanup();
2914 	pkt_sysfs_cleanup();
2915 
2916 	unregister_blkdev(pktdev_major, DRIVER_NAME);
2917 	mempool_exit(&psd_pool);
2918 	bioset_exit(&pkt_bio_set);
2919 }
2920 
2921 MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
2922 MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
2923 MODULE_LICENSE("GPL");
2924 
2925 module_init(pkt_init);
2926 module_exit(pkt_exit);
2927