/*
 * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
 * Copyright (C) 2006 Thomas Maier <balagi@justmail.de>
 *
 * May be copied or modified under the terms of the GNU General Public
 * License.  See linux/COPYING for more information.
 *
 * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and
 * DVD-RAM devices.
 *
 * Theory of operation:
 *
 * At the lowest level, there is the standard driver for the CD/DVD device,
 * typically ide-cd.c or sr.c. This driver can handle read and write requests,
 * but it doesn't know anything about the special restrictions that apply to
 * packet writing. One restriction is that write requests must be aligned to
 * packet boundaries on the physical media, and the size of a write request
 * must be equal to the packet size. Another restriction is that a
 * GPCMD_FLUSH_CACHE command has to be issued to the drive before a read
 * command, if the previous command was a write.
 *
 * The purpose of the packet writing driver is to hide these restrictions from
 * higher layers, such as file systems, and present a block device that can be
 * randomly read and written using 2kB-sized blocks.
 *
 * The lowest layer in the packet writing driver is the packet I/O scheduler.
 * Its data is defined by the struct packet_iosched and includes two bio
 * queues with pending read and write requests. These queues are processed
 * by the pkt_iosched_process_queue() function. The write requests in this
 * queue are already properly aligned and sized. This layer is responsible for
 * issuing the flush cache commands and scheduling the I/O in a good order.
 *
 * The next layer transforms unaligned write requests to aligned writes. This
 * transformation requires reading missing pieces of data from the underlying
 * block device, assembling the pieces to full packets and queuing them to the
 * packet I/O scheduler.
 *
 * At the top layer there is a custom make_request_fn function that forwards
 * read requests directly to the iosched queue and puts write requests in the
 * unaligned write queue. A kernel thread performs the necessary read
 * gathering to convert the unaligned writes to aligned writes and then feeds
 * them to the packet I/O scheduler.
 *
 *************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/pktcdvd.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/freezer.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi.h>
#include <linux/debugfs.h>
#include <linux/device.h>

#include <asm/uaccess.h>

#define DRIVER_NAME	"pktcdvd"

#define pkt_err(pd, fmt, ...)						\
	pr_err("%s: " fmt, pd->name, ##__VA_ARGS__)
#define pkt_notice(pd, fmt, ...)					\
	pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__)
#define pkt_info(pd, fmt, ...)						\
	pr_info("%s: " fmt, pd->name, ##__VA_ARGS__)

#define pkt_dbg(level, pd, fmt, ...)					\
do {									\
	if (level == 2 && PACKET_DEBUG >= 2)				\
		pr_notice("%s: %s():" fmt,				\
			  pd->name, __func__, ##__VA_ARGS__);		\
	else if (level == 1 && PACKET_DEBUG >= 1)			\
		pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__);		\
} while (0)

#define MAX_SPEED 0xffff

static DEFINE_MUTEX(pktcdvd_mutex);
static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
static struct proc_dir_entry *pkt_proc;
static int pktdev_major;
static int write_congestion_on  = PKT_WRITE_CONGESTION_ON;
static int write_congestion_off = PKT_WRITE_CONGESTION_OFF;
static struct mutex ctl_mutex;	/* Serialize open/close/setup/teardown */
static mempool_t *psd_pool;

static struct class	*class_pktcdvd = NULL;    /* /sys/class/pktcdvd */
static struct dentry	*pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */

/* forward declaration */
static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev);
static int pkt_remove_dev(dev_t pkt_dev);
static int pkt_seq_show(struct seq_file *m, void *p);

static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
{
	return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
}
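
/*
 * A worked example of the zone arithmetic above, using hypothetical
 * numbers: pd->settings.size is kept in 512-byte sectors, so a 32kB
 * packet gives size == 64.  Assuming pd->offset == 0, a bio for sector
 * 1000 belongs to the zone starting at sector 960:
 *
 *	(1000 + 0) & ~(sector_t)(64 - 1) == 960
 *
 * Note that the mask trick relies on the packet size being a power of
 * two.
 */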

/*
 * create and register a pktcdvd kernel object.
 */
static struct pktcdvd_kobj* pkt_kobj_create(struct pktcdvd_device *pd,
					const char* name,
					struct kobject* parent,
					struct kobj_type* ktype)
{
	struct pktcdvd_kobj *p;
	int error;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;
	p->pd = pd;
	error = kobject_init_and_add(&p->kobj, ktype, parent, "%s", name);
	if (error) {
		kobject_put(&p->kobj);
		return NULL;
	}
	kobject_uevent(&p->kobj, KOBJ_ADD);
	return p;
}
/*
 * remove a pktcdvd kernel object.
 */
static void pkt_kobj_remove(struct pktcdvd_kobj *p)
{
	if (p)
		kobject_put(&p->kobj);
}
/*
 * default release function for pktcdvd kernel objects.
 */
static void pkt_kobj_release(struct kobject *kobj)
{
	kfree(to_pktcdvdkobj(kobj));
}


/**********************************************************
 *
 * sysfs interface for pktcdvd
 * by (C) 2006  Thomas Maier <balagi@justmail.de>
 *
 **********************************************************/

#define DEF_ATTR(_obj,_name,_mode) \
	static struct attribute _obj = { .name = _name, .mode = _mode }

/**********************************************************
  /sys/class/pktcdvd/pktcdvd[0-7]/
                     stat/reset
                     stat/packets_started
                     stat/packets_finished
                     stat/kb_written
                     stat/kb_read
                     stat/kb_read_gather
                     write_queue/size
                     write_queue/congestion_off
                     write_queue/congestion_on
 **********************************************************/
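
/*
 * Example use of these attributes from a shell, assuming the first
 * pktcdvd device (pktcdvd0); the paths follow the layout above:
 *
 *	# cat /sys/class/pktcdvd/pktcdvd0/stat/kb_written
 *	# echo 1 > /sys/class/pktcdvd/pktcdvd0/stat/reset
 *	# echo 1000 > /sys/class/pktcdvd/pktcdvd0/write_queue/congestion_on
 *	# echo 900 > /sys/class/pktcdvd/pktcdvd0/write_queue/congestion_off
 */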

DEF_ATTR(kobj_pkt_attr_st1, "reset", 0200);
DEF_ATTR(kobj_pkt_attr_st2, "packets_started", 0444);
DEF_ATTR(kobj_pkt_attr_st3, "packets_finished", 0444);
DEF_ATTR(kobj_pkt_attr_st4, "kb_written", 0444);
DEF_ATTR(kobj_pkt_attr_st5, "kb_read", 0444);
DEF_ATTR(kobj_pkt_attr_st6, "kb_read_gather", 0444);

static struct attribute *kobj_pkt_attrs_stat[] = {
	&kobj_pkt_attr_st1,
	&kobj_pkt_attr_st2,
	&kobj_pkt_attr_st3,
	&kobj_pkt_attr_st4,
	&kobj_pkt_attr_st5,
	&kobj_pkt_attr_st6,
	NULL
};

DEF_ATTR(kobj_pkt_attr_wq1, "size", 0444);
DEF_ATTR(kobj_pkt_attr_wq2, "congestion_off", 0644);
DEF_ATTR(kobj_pkt_attr_wq3, "congestion_on",  0644);

static struct attribute *kobj_pkt_attrs_wqueue[] = {
	&kobj_pkt_attr_wq1,
	&kobj_pkt_attr_wq2,
	&kobj_pkt_attr_wq3,
	NULL
};

static ssize_t kobj_pkt_show(struct kobject *kobj,
			struct attribute *attr, char *data)
{
	struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
	int n = 0;
	int v;
	if (strcmp(attr->name, "packets_started") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.pkt_started);

	} else if (strcmp(attr->name, "packets_finished") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.pkt_ended);

	} else if (strcmp(attr->name, "kb_written") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_w >> 1);

	} else if (strcmp(attr->name, "kb_read") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_r >> 1);

	} else if (strcmp(attr->name, "kb_read_gather") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_rg >> 1);

	} else if (strcmp(attr->name, "size") == 0) {
		spin_lock(&pd->lock);
		v = pd->bio_queue_size;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);

	} else if (strcmp(attr->name, "congestion_off") == 0) {
		spin_lock(&pd->lock);
		v = pd->write_congestion_off;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);

	} else if (strcmp(attr->name, "congestion_on") == 0) {
		spin_lock(&pd->lock);
		v = pd->write_congestion_on;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);
	}
	return n;
}

static void init_write_congestion_marks(int* lo, int* hi)
{
	if (*hi > 0) {
		*hi = max(*hi, 500);
		*hi = min(*hi, 1000000);
		if (*lo <= 0)
			*lo = *hi - 100;
		else {
			*lo = min(*lo, *hi - 100);
			*lo = max(*lo, 100);
		}
	} else {
		*hi = -1;
		*lo = -1;
	}
}
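
/*
 * Two worked examples of the clamping above, with made-up inputs:
 *
 *	*hi = 200,  *lo = 0    ->  *hi = 500,  *lo = 400
 *	  (hi is raised to the 500 minimum; lo defaults to hi - 100)
 *	*hi = 2000, *lo = 1950 ->  *hi = 2000, *lo = 1900
 *	  (lo is forced to stay at least 100 below hi)
 *
 * Any non-positive *hi disables congestion handling entirely; both
 * marks are then set to -1.
 */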

static ssize_t kobj_pkt_store(struct kobject *kobj,
			struct attribute *attr,
			const char *data, size_t len)
{
	struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
	int val;

	if (strcmp(attr->name, "reset") == 0 && len > 0) {
		pd->stats.pkt_started = 0;
		pd->stats.pkt_ended = 0;
		pd->stats.secs_w = 0;
		pd->stats.secs_rg = 0;
		pd->stats.secs_r = 0;

	} else if (strcmp(attr->name, "congestion_off") == 0
		   && sscanf(data, "%d", &val) == 1) {
		spin_lock(&pd->lock);
		pd->write_congestion_off = val;
		init_write_congestion_marks(&pd->write_congestion_off,
					&pd->write_congestion_on);
		spin_unlock(&pd->lock);

	} else if (strcmp(attr->name, "congestion_on") == 0
		   && sscanf(data, "%d", &val) == 1) {
		spin_lock(&pd->lock);
		pd->write_congestion_on = val;
		init_write_congestion_marks(&pd->write_congestion_off,
					&pd->write_congestion_on);
		spin_unlock(&pd->lock);
	}
	return len;
}

static const struct sysfs_ops kobj_pkt_ops = {
	.show = kobj_pkt_show,
	.store = kobj_pkt_store
};
static struct kobj_type kobj_pkt_type_stat = {
	.release = pkt_kobj_release,
	.sysfs_ops = &kobj_pkt_ops,
	.default_attrs = kobj_pkt_attrs_stat
};
static struct kobj_type kobj_pkt_type_wqueue = {
	.release = pkt_kobj_release,
	.sysfs_ops = &kobj_pkt_ops,
	.default_attrs = kobj_pkt_attrs_wqueue
};

static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
{
	if (class_pktcdvd) {
		pd->dev = device_create(class_pktcdvd, NULL, MKDEV(0, 0), NULL,
					"%s", pd->name);
		if (IS_ERR(pd->dev))
			pd->dev = NULL;
	}
	if (pd->dev) {
		pd->kobj_stat = pkt_kobj_create(pd, "stat",
					&pd->dev->kobj,
					&kobj_pkt_type_stat);
		pd->kobj_wqueue = pkt_kobj_create(pd, "write_queue",
					&pd->dev->kobj,
					&kobj_pkt_type_wqueue);
	}
}

static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
{
	pkt_kobj_remove(pd->kobj_stat);
	pkt_kobj_remove(pd->kobj_wqueue);
	if (class_pktcdvd)
		device_unregister(pd->dev);
}


/********************************************************************
  /sys/class/pktcdvd/
                     add            map block device
                     remove         unmap packet dev
                     device_map     show mappings
 *******************************************************************/

static void class_pktcdvd_release(struct class *cls)
{
	kfree(cls);
}
static ssize_t class_pktcdvd_show_map(struct class *c,
					struct class_attribute *attr,
					char *data)
{
	int n = 0;
	int idx;
	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	for (idx = 0; idx < MAX_WRITERS; idx++) {
		struct pktcdvd_device *pd = pkt_devs[idx];
		if (!pd)
			continue;
		n += sprintf(data+n, "%s %u:%u %u:%u\n",
			pd->name,
			MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
			MAJOR(pd->bdev->bd_dev),
			MINOR(pd->bdev->bd_dev));
	}
	mutex_unlock(&ctl_mutex);
	return n;
}

static ssize_t class_pktcdvd_store_add(struct class *c,
					struct class_attribute *attr,
					const char *buf,
					size_t count)
{
	unsigned int major, minor;

	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
		/* pkt_setup_dev() expects caller to hold reference to self */
		if (!try_module_get(THIS_MODULE))
			return -ENODEV;

		pkt_setup_dev(MKDEV(major, minor), NULL);

		module_put(THIS_MODULE);

		return count;
	}

	return -EINVAL;
}

static ssize_t class_pktcdvd_store_remove(struct class *c,
					  struct class_attribute *attr,
					  const char *buf,
					size_t count)
{
	unsigned int major, minor;
	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
		pkt_remove_dev(MKDEV(major, minor));
		return count;
	}
	return -EINVAL;
}

static struct class_attribute class_pktcdvd_attrs[] = {
 __ATTR(add,            0200, NULL, class_pktcdvd_store_add),
 __ATTR(remove,         0200, NULL, class_pktcdvd_store_remove),
 __ATTR(device_map,     0444, class_pktcdvd_show_map, NULL),
 __ATTR_NULL
};
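
/*
 * Example of driving these control files from a shell, assuming the CD
 * writer is /dev/sr0 (block device 11:0) and that device_map reports
 * the resulting pktcdvd device as 253:0:
 *
 *	# echo "11:0" > /sys/class/pktcdvd/add
 *	# cat /sys/class/pktcdvd/device_map
 *	pktcdvd0 253:0 11:0
 *	# echo "253:0" > /sys/class/pktcdvd/remove
 *
 * The major number of the pktcdvd device is allocated dynamically, so
 * the 253:0 shown above is only illustrative.
 */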

static int pkt_sysfs_init(void)
{
	int ret = 0;

	/*
	 * create control files in sysfs
	 * /sys/class/pktcdvd/...
	 */
	class_pktcdvd = kzalloc(sizeof(*class_pktcdvd), GFP_KERNEL);
	if (!class_pktcdvd)
		return -ENOMEM;
	class_pktcdvd->name = DRIVER_NAME;
	class_pktcdvd->owner = THIS_MODULE;
	class_pktcdvd->class_release = class_pktcdvd_release;
	class_pktcdvd->class_attrs = class_pktcdvd_attrs;
	ret = class_register(class_pktcdvd);
	if (ret) {
		kfree(class_pktcdvd);
		class_pktcdvd = NULL;
		pr_err("failed to create class pktcdvd\n");
		return ret;
	}
	return 0;
}

static void pkt_sysfs_cleanup(void)
{
	if (class_pktcdvd)
		class_destroy(class_pktcdvd);
	class_pktcdvd = NULL;
}

/********************************************************************
  entries in debugfs

  /sys/kernel/debug/pktcdvd[0-7]/
			info

 *******************************************************************/

static int pkt_debugfs_seq_show(struct seq_file *m, void *p)
{
	return pkt_seq_show(m, p);
}

static int pkt_debugfs_fops_open(struct inode *inode, struct file *file)
{
	return single_open(file, pkt_debugfs_seq_show, inode->i_private);
}

static const struct file_operations debug_fops = {
	.open		= pkt_debugfs_fops_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.owner		= THIS_MODULE,
};

static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
{
	if (!pkt_debugfs_root)
		return;
	pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root);
	if (!pd->dfs_d_root)
		return;

	pd->dfs_f_info = debugfs_create_file("info", S_IRUGO,
				pd->dfs_d_root, pd, &debug_fops);
}

static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
{
	if (!pkt_debugfs_root)
		return;
	debugfs_remove(pd->dfs_f_info);
	debugfs_remove(pd->dfs_d_root);
	pd->dfs_f_info = NULL;
	pd->dfs_d_root = NULL;
}

static void pkt_debugfs_init(void)
{
	pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL);
}

static void pkt_debugfs_cleanup(void)
{
	debugfs_remove(pkt_debugfs_root);
	pkt_debugfs_root = NULL;
}

/* ----------------------------------------------------------*/


static void pkt_bio_finished(struct pktcdvd_device *pd)
{
	BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
	if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
		pkt_dbg(2, pd, "queue empty\n");
		atomic_set(&pd->iosched.attention, 1);
		wake_up(&pd->wqueue);
	}
}

/*
 * Allocate a packet_data struct
 */
static struct packet_data *pkt_alloc_packet_data(int frames)
{
	int i;
	struct packet_data *pkt;

	pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL);
	if (!pkt)
		goto no_pkt;

	pkt->frames = frames;
	pkt->w_bio = bio_kmalloc(GFP_KERNEL, frames);
	if (!pkt->w_bio)
		goto no_bio;

	for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
		pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (!pkt->pages[i])
			goto no_page;
	}

	spin_lock_init(&pkt->lock);
	bio_list_init(&pkt->orig_bios);

	for (i = 0; i < frames; i++) {
		struct bio *bio = bio_kmalloc(GFP_KERNEL, 1);
		if (!bio)
			goto no_rd_bio;

		pkt->r_bios[i] = bio;
	}

	return pkt;

no_rd_bio:
	for (i = 0; i < frames; i++) {
		struct bio *bio = pkt->r_bios[i];
		if (bio)
			bio_put(bio);
	}

no_page:
	for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
		if (pkt->pages[i])
			__free_page(pkt->pages[i]);
	bio_put(pkt->w_bio);
no_bio:
	kfree(pkt);
no_pkt:
	return NULL;
}

/*
 * Free a packet_data struct
 */
static void pkt_free_packet_data(struct packet_data *pkt)
{
	int i;

	for (i = 0; i < pkt->frames; i++) {
		struct bio *bio = pkt->r_bios[i];
		if (bio)
			bio_put(bio);
	}
	for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
		__free_page(pkt->pages[i]);
	bio_put(pkt->w_bio);
	kfree(pkt);
}

static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));

	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
		pkt_free_packet_data(pkt);
	}
	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
}

static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
{
	struct packet_data *pkt;

	BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));

	while (nr_packets > 0) {
		pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
		if (!pkt) {
			pkt_shrink_pktlist(pd);
			return 0;
		}
		pkt->id = nr_packets;
		pkt->pd = pd;
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
		nr_packets--;
	}
	return 1;
}
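
/*
 * The "pd->settings.size >> 2" above converts the packet size from
 * 512-byte sectors to 2kB frames (CD_FRAMESIZE).  For example, with
 * typical 32kB fixed packets, settings.size is 64 and each packet_data
 * carries 64 >> 2 == 16 frames.
 */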

static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
{
	struct rb_node *n = rb_next(&node->rb_node);
	if (!n)
		return NULL;
	return rb_entry(n, struct pkt_rb_node, rb_node);
}

static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	rb_erase(&node->rb_node, &pd->bio_queue);
	mempool_free(node, pd->rb_pool);
	pd->bio_queue_size--;
	BUG_ON(pd->bio_queue_size < 0);
}

/*
 * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
 */
static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
{
	struct rb_node *n = pd->bio_queue.rb_node;
	struct rb_node *next;
	struct pkt_rb_node *tmp;

	if (!n) {
		BUG_ON(pd->bio_queue_size > 0);
		return NULL;
	}

	for (;;) {
		tmp = rb_entry(n, struct pkt_rb_node, rb_node);
		if (s <= tmp->bio->bi_iter.bi_sector)
			next = n->rb_left;
		else
			next = n->rb_right;
		if (!next)
			break;
		n = next;
	}

	if (s > tmp->bio->bi_iter.bi_sector) {
		tmp = pkt_rbtree_next(tmp);
		if (!tmp)
			return NULL;
	}
	BUG_ON(s > tmp->bio->bi_iter.bi_sector);
	return tmp;
}
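
/*
 * Example of the lookup semantics, with a hypothetical queue holding
 * bios that start at sectors 100, 200 and 300:
 *
 *	pkt_rbtree_find(pd, 150) -> the node for sector 200
 *	pkt_rbtree_find(pd, 300) -> the node for sector 300
 *	pkt_rbtree_find(pd, 350) -> NULL
 */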

/*
 * Insert a node into the pd->bio_queue rb tree.
 */
static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	struct rb_node **p = &pd->bio_queue.rb_node;
	struct rb_node *parent = NULL;
	sector_t s = node->bio->bi_iter.bi_sector;
	struct pkt_rb_node *tmp;

	while (*p) {
		parent = *p;
		tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
		if (s < tmp->bio->bi_iter.bi_sector)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &pd->bio_queue);
	pd->bio_queue_size++;
}

/*
 * Send a packet_command to the underlying block device and
 * wait for completion.
 */
static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
{
	struct request_queue *q = bdev_get_queue(pd->bdev);
	struct request *rq;
	int ret = 0;

	rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
			     WRITE : READ, __GFP_RECLAIM);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	blk_rq_set_block_pc(rq);

	if (cgc->buflen) {
		ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
				      __GFP_RECLAIM);
		if (ret)
			goto out;
	}

	rq->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
	memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);

	rq->timeout = 60*HZ;
	if (cgc->quiet)
		rq->cmd_flags |= REQ_QUIET;

	blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0);
	if (rq->errors)
		ret = -EIO;
out:
	blk_put_request(rq);
	return ret;
}

static const char *sense_key_string(__u8 index)
{
	static const char * const info[] = {
		"No sense", "Recovered error", "Not ready",
		"Medium error", "Hardware error", "Illegal request",
		"Unit attention", "Data protect", "Blank check",
	};

	return index < ARRAY_SIZE(info) ? info[index] : "INVALID";
}

/*
 * A generic sense dump / resolve mechanism should be implemented across
 * all ATAPI + SCSI devices.
 */
static void pkt_dump_sense(struct pktcdvd_device *pd,
			   struct packet_command *cgc)
{
	struct request_sense *sense = cgc->sense;

	if (sense)
		pkt_err(pd, "%*ph - sense %02x.%02x.%02x (%s)\n",
			CDROM_PACKET_SIZE, cgc->cmd,
			sense->sense_key, sense->asc, sense->ascq,
			sense_key_string(sense->sense_key));
	else
		pkt_err(pd, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
}

/*
 * flush the drive cache to media
 */
static int pkt_flush_cache(struct pktcdvd_device *pd)
{
	struct packet_command cgc;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_FLUSH_CACHE;
	cgc.quiet = 1;

	/*
	 * the IMMED bit -- we default to not setting it; although setting
	 * it would allow a much faster close, not setting it is safer
	 */
#if 0
	cgc.cmd[1] = 1 << 1;
#endif
	return pkt_generic_packet(pd, &cgc);
}

/*
 * speed is given as the normal factor, e.g. 4 for 4x
 */
static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
				unsigned write_speed, unsigned read_speed)
{
	struct packet_command cgc;
	struct request_sense sense;
	int ret;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.sense = &sense;
	cgc.cmd[0] = GPCMD_SET_SPEED;
	cgc.cmd[2] = (read_speed >> 8) & 0xff;
	cgc.cmd[3] = read_speed & 0xff;
	cgc.cmd[4] = (write_speed >> 8) & 0xff;
	cgc.cmd[5] = write_speed & 0xff;

	if ((ret = pkt_generic_packet(pd, &cgc)))
		pkt_dump_sense(pd, &cgc);

	return ret;
}

/*
 * Queue a bio for processing by the low-level CD device. Must be called
 * from process context.
 */
static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
{
	spin_lock(&pd->iosched.lock);
	if (bio_data_dir(bio) == READ)
		bio_list_add(&pd->iosched.read_queue, bio);
	else
		bio_list_add(&pd->iosched.write_queue, bio);
	spin_unlock(&pd->iosched.lock);

	atomic_set(&pd->iosched.attention, 1);
	wake_up(&pd->wqueue);
}

/*
 * Process the queued read/write requests. This function handles special
 * requirements for CDRW drives:
 * - A cache flush command must be inserted before a read request if the
 *   previous request was a write.
 * - Switching between reading and writing is slow, so don't do it more often
 *   than necessary.
 * - Optimize for throughput at the expense of latency: streaming writes are
 *   never interrupted by a read, but if the drive would have to seek before
 *   the next write, switch to reading instead, provided there are pending
 *   read requests.
 * - Set the read speed according to current usage pattern. When only reading
 *   from the device, it's best to use the highest possible read speed, but
 *   when switching often between reading and writing, it's better to have the
 *   same read and write speeds.
 */
static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
{

	if (atomic_read(&pd->iosched.attention) == 0)
		return;
	atomic_set(&pd->iosched.attention, 0);

	for (;;) {
		struct bio *bio;
		int reads_queued, writes_queued;

		spin_lock(&pd->iosched.lock);
		reads_queued = !bio_list_empty(&pd->iosched.read_queue);
		writes_queued = !bio_list_empty(&pd->iosched.write_queue);
		spin_unlock(&pd->iosched.lock);

		if (!reads_queued && !writes_queued)
			break;

		if (pd->iosched.writing) {
			int need_write_seek = 1;
			spin_lock(&pd->iosched.lock);
			bio = bio_list_peek(&pd->iosched.write_queue);
			spin_unlock(&pd->iosched.lock);
			if (bio && (bio->bi_iter.bi_sector ==
				    pd->iosched.last_write))
				need_write_seek = 0;
			if (need_write_seek && reads_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					pkt_dbg(2, pd, "write, waiting\n");
					break;
				}
				pkt_flush_cache(pd);
				pd->iosched.writing = 0;
			}
		} else {
			if (!reads_queued && writes_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					pkt_dbg(2, pd, "read, waiting\n");
					break;
				}
				pd->iosched.writing = 1;
			}
		}

		spin_lock(&pd->iosched.lock);
		if (pd->iosched.writing)
			bio = bio_list_pop(&pd->iosched.write_queue);
		else
			bio = bio_list_pop(&pd->iosched.read_queue);
		spin_unlock(&pd->iosched.lock);

		if (!bio)
			continue;

		if (bio_data_dir(bio) == READ)
			pd->iosched.successive_reads +=
				bio->bi_iter.bi_size >> 10;
		else {
			pd->iosched.successive_reads = 0;
			pd->iosched.last_write = bio_end_sector(bio);
		}
		if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
			if (pd->read_speed == pd->write_speed) {
				pd->read_speed = MAX_SPEED;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		} else {
			if (pd->read_speed != pd->write_speed) {
				pd->read_speed = pd->write_speed;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		}

		atomic_inc(&pd->cdrw.pending_bios);
		generic_make_request(bio);
	}
}

/*
 * Special care is needed if the underlying block device has a small
 * max_phys_segments value.
 */
static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
{
	if ((pd->settings.size << 9) / CD_FRAMESIZE
	    <= queue_max_segments(q)) {
		/*
		 * The cdrom device can handle one segment/frame
		 */
		clear_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else if ((pd->settings.size << 9) / PAGE_SIZE
		   <= queue_max_segments(q)) {
		/*
		 * We can handle this case at the expense of some extra memory
		 * copies during write operations
		 */
		set_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else {
		pkt_err(pd, "cdrom max_phys_segments too small\n");
		return -EIO;
	}
}
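
/*
 * A worked example with hypothetical limits, again for 32kB packets
 * (settings.size == 64) and 4kB pages:
 *
 *	(64 << 9) / CD_FRAMESIZE == 16	segments, one per 2kB frame
 *	(64 << 9) / PAGE_SIZE    ==  8	segments, one per page
 *
 * So a queue_max_segments() of at least 16 needs no merging, a value of
 * 8..15 forces PACKET_MERGE_SEGS (one segment per page, at the cost of
 * the extra copy), and anything below 8 cannot be supported.
 */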

/*
 * Copy all data for this packet to pkt->pages[], so that
 * a) The number of required segments for the write bio is minimized, which
 *    is necessary for some scsi controllers.
 * b) The data can be used as cache to avoid read requests if we receive a
 *    new write request for the same zone.
 */
static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec)
{
	int f, p, offs;

	/* Copy all data to pkt->pages[] */
	p = 0;
	offs = 0;
	for (f = 0; f < pkt->frames; f++) {
		if (bvec[f].bv_page != pkt->pages[p]) {
			void *vfrom = kmap_atomic(bvec[f].bv_page) + bvec[f].bv_offset;
			void *vto = page_address(pkt->pages[p]) + offs;
			memcpy(vto, vfrom, CD_FRAMESIZE);
			kunmap_atomic(vfrom);
			bvec[f].bv_page = pkt->pages[p];
			bvec[f].bv_offset = offs;
		} else {
			BUG_ON(bvec[f].bv_offset != offs);
		}
		offs += CD_FRAMESIZE;
		if (offs >= PAGE_SIZE) {
			offs = 0;
			p++;
		}
	}
}
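
/*
 * Layout of pkt->pages[] assumed by the loop above: frames are packed
 * back to back, FRAMES_PER_PAGE (PAGE_SIZE / CD_FRAMESIZE) per page.
 * With 4kB pages that is two 2kB frames per page, so for a 16-frame
 * packet:
 *
 *	frame f  ->  pages[f / 2], offset (f % 2) * CD_FRAMESIZE
 *
 * which is exactly the (p, offs) walk performed above.
 */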

static void pkt_end_io_read(struct bio *bio)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
		bio, (unsigned long long)pkt->sector,
		(unsigned long long)bio->bi_iter.bi_sector, bio->bi_error);

	if (bio->bi_error)
		atomic_inc(&pkt->io_errors);
	if (atomic_dec_and_test(&pkt->io_wait)) {
		atomic_inc(&pkt->run_sm);
		wake_up(&pd->wqueue);
	}
	pkt_bio_finished(pd);
}

static void pkt_end_io_packet_write(struct bio *bio)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_error);

	pd->stats.pkt_ended++;

	pkt_bio_finished(pd);
	atomic_dec(&pkt->io_wait);
	atomic_inc(&pkt->run_sm);
	wake_up(&pd->wqueue);
}

/*
 * Schedule reads for the holes in a packet
 */
static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int frames_read = 0;
	struct bio *bio;
	int f;
	char written[PACKET_MAX_SIZE];

	BUG_ON(bio_list_empty(&pkt->orig_bios));

	atomic_set(&pkt->io_wait, 0);
	atomic_set(&pkt->io_errors, 0);

	/*
	 * Figure out which frames we need to read before we can write.
	 */
	memset(written, 0, sizeof(written));
	spin_lock(&pkt->lock);
	bio_list_for_each(bio, &pkt->orig_bios) {
		int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
			(CD_FRAMESIZE >> 9);
		int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
		pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
		BUG_ON(first_frame < 0);
		BUG_ON(first_frame + num_frames > pkt->frames);
		for (f = first_frame; f < first_frame + num_frames; f++)
			written[f] = 1;
	}
	spin_unlock(&pkt->lock);

	if (pkt->cache_valid) {
		pkt_dbg(2, pd, "zone %llx cached\n",
			(unsigned long long)pkt->sector);
		goto out_account;
	}

	/*
	 * Schedule reads for missing parts of the packet.
	 */
	for (f = 0; f < pkt->frames; f++) {
		int p, offset;

		if (written[f])
			continue;

		bio = pkt->r_bios[f];
		bio_reset(bio);
		bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
		bio->bi_bdev = pd->bdev;
		bio->bi_end_io = pkt_end_io_read;
		bio->bi_private = pkt;

		p = (f * CD_FRAMESIZE) / PAGE_SIZE;
		offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
		pkt_dbg(2, pd, "Adding frame %d, page:%p offs:%d\n",
			f, pkt->pages[p], offset);
		if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
			BUG();

		atomic_inc(&pkt->io_wait);
		bio->bi_rw = READ;
		pkt_queue_bio(pd, bio);
		frames_read++;
	}

out_account:
	pkt_dbg(2, pd, "need %d frames for zone %llx\n",
		frames_read, (unsigned long long)pkt->sector);
	pd->stats.pkt_started++;
	pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
}

/*
 * Find a packet matching zone, or the least recently used packet if
 * there is no match.
 */
static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
{
	struct packet_data *pkt;

	list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
		if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
			list_del_init(&pkt->list);
			if (pkt->sector != zone)
				pkt->cache_valid = 0;
			return pkt;
		}
	}
	BUG();
	return NULL;
}

static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	if (pkt->cache_valid) {
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
	} else {
		list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
	}
}
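
/*
 * Together, these two helpers implement a small LRU-style cache:
 * packets whose pages still hold valid data for their zone are returned
 * to the head of the free list, everything else to the tail.
 * pkt_get_packet_data() prefers an exact zone match and otherwise takes
 * the tail element, so cached packets are the last to be recycled.
 */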

/*
 * recover a failed write, query for relocation if possible
 *
 * returns 1 if recovery is possible, or 0 if not
 *
 */
static int pkt_start_recovery(struct packet_data *pkt)
{
	/*
	 * FIXME. We need help from the file system to implement
	 * recovery handling.
	 */
	return 0;
#if 0
	struct request *rq = pkt->rq;
	struct pktcdvd_device *pd = rq->rq_disk->private_data;
	struct block_device *pkt_bdev;
	struct super_block *sb = NULL;
	unsigned long old_block, new_block;
	sector_t new_sector;

	pkt_bdev = bdget(kdev_t_to_nr(pd->pkt_dev));
	if (pkt_bdev) {
		sb = get_super(pkt_bdev);
		bdput(pkt_bdev);
	}

	if (!sb)
		return 0;

	if (!sb->s_op->relocate_blocks)
		goto out;

	old_block = pkt->sector / (CD_FRAMESIZE >> 9);
	if (sb->s_op->relocate_blocks(sb, old_block, &new_block))
		goto out;

	new_sector = new_block * (CD_FRAMESIZE >> 9);
	pkt->sector = new_sector;

	bio_reset(pkt->bio);
	pkt->bio->bi_bdev = pd->bdev;
	pkt->bio->bi_rw = REQ_WRITE;
	pkt->bio->bi_iter.bi_sector = new_sector;
	pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
	pkt->bio->bi_vcnt = pkt->frames;

	pkt->bio->bi_end_io = pkt_end_io_packet_write;
	pkt->bio->bi_private = pkt;

	drop_super(sb);
	return 1;

out:
	drop_super(sb);
	return 0;
#endif
}

static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state)
{
#if PACKET_DEBUG > 1
	static const char *state_name[] = {
		"IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
	};
	enum packet_data_state old_state = pkt->state;
	pkt_dbg(2, pd, "pkt %2d : s=%6llx %s -> %s\n",
		pkt->id, (unsigned long long)pkt->sector,
		state_name[old_state], state_name[state]);
#endif
	pkt->state = state;
}

/*
 * Scan the work queue to see if we can start a new packet.
 * returns non-zero if any work was done.
 */
static int pkt_handle_queue(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *p;
	struct bio *bio = NULL;
	sector_t zone = 0; /* Suppress gcc warning */
	struct pkt_rb_node *node, *first_node;
	struct rb_node *n;
	int wakeup;

	atomic_set(&pd->scan_queue, 0);

	if (list_empty(&pd->cdrw.pkt_free_list)) {
		pkt_dbg(2, pd, "no pkt\n");
		return 0;
	}

	/*
	 * Try to find a zone we are not already working on.
	 */
	spin_lock(&pd->lock);
	first_node = pkt_rbtree_find(pd, pd->current_sector);
	if (!first_node) {
		n = rb_first(&pd->bio_queue);
		if (n)
			first_node = rb_entry(n, struct pkt_rb_node, rb_node);
	}
	node = first_node;
	while (node) {
		bio = node->bio;
		zone = get_zone(bio->bi_iter.bi_sector, pd);
		list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
			if (p->sector == zone) {
				bio = NULL;
				goto try_next_bio;
			}
		}
		break;
try_next_bio:
		node = pkt_rbtree_next(node);
		if (!node) {
			n = rb_first(&pd->bio_queue);
			if (n)
				node = rb_entry(n, struct pkt_rb_node, rb_node);
		}
		if (node == first_node)
			node = NULL;
	}
	spin_unlock(&pd->lock);
	if (!bio) {
		pkt_dbg(2, pd, "no bio\n");
		return 0;
	}

	pkt = pkt_get_packet_data(pd, zone);

	pd->current_sector = zone + pd->settings.size;
	pkt->sector = zone;
	BUG_ON(pkt->frames != pd->settings.size >> 2);
	pkt->write_size = 0;

	/*
	 * Scan work queue for bios in the same zone and link them
	 * to this packet.
	 */
	spin_lock(&pd->lock);
	pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
	while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
		bio = node->bio;
		pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long)
			get_zone(bio->bi_iter.bi_sector, pd));
		if (get_zone(bio->bi_iter.bi_sector, pd) != zone)
			break;
		pkt_rbtree_erase(pd, node);
		spin_lock(&pkt->lock);
		bio_list_add(&pkt->orig_bios, bio);
		pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
		spin_unlock(&pkt->lock);
	}
	/*
	 * Check the write congestion marks: if bio_queue_size has dropped
	 * below the low mark, wake up any processes waiting on congestion.
	 */
	wakeup = (pd->write_congestion_on > 0
			&& pd->bio_queue_size <= pd->write_congestion_off);
	spin_unlock(&pd->lock);
	if (wakeup) {
		clear_bdi_congested(&pd->disk->queue->backing_dev_info,
					BLK_RW_ASYNC);
	}

	pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
	pkt_set_state(pkt, PACKET_WAITING_STATE);
	atomic_set(&pkt->run_sm, 1);

	spin_lock(&pd->cdrw.active_list_lock);
	list_add(&pkt->list, &pd->cdrw.pkt_active_list);
	spin_unlock(&pd->cdrw.active_list_lock);

	return 1;
}

/*
 * Assemble a bio to write one packet and queue the bio for processing
 * by the underlying block device.
 */
static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int f;
	struct bio_vec *bvec = pkt->w_bio->bi_io_vec;

	bio_reset(pkt->w_bio);
	pkt->w_bio->bi_iter.bi_sector = pkt->sector;
	pkt->w_bio->bi_bdev = pd->bdev;
	pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
	pkt->w_bio->bi_private = pkt;

	/* XXX: locking? */
	for (f = 0; f < pkt->frames; f++) {
		bvec[f].bv_page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
		bvec[f].bv_offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
		if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
			BUG();
	}
	pkt_dbg(2, pd, "vcnt=%d\n", pkt->w_bio->bi_vcnt);

	/*
	 * Fill-in bvec with data from orig_bios.
	 */
	spin_lock(&pkt->lock);
	bio_copy_data(pkt->w_bio, pkt->orig_bios.head);

	pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
	spin_unlock(&pkt->lock);

	pkt_dbg(2, pd, "Writing %d frames for zone %llx\n",
		pkt->write_size, (unsigned long long)pkt->sector);

	if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) {
		pkt_make_local_copy(pkt, bvec);
		pkt->cache_valid = 1;
	} else {
		pkt->cache_valid = 0;
	}

	/* Start the write request */
	atomic_set(&pkt->io_wait, 1);
	pkt->w_bio->bi_rw = WRITE;
	pkt_queue_bio(pd, pkt->w_bio);
}

static void pkt_finish_packet(struct packet_data *pkt, int error)
{
	struct bio *bio;

	if (error)
		pkt->cache_valid = 0;

	/* Finish all bios corresponding to this packet */
	while ((bio = bio_list_pop(&pkt->orig_bios))) {
		bio->bi_error = error;
		bio_endio(bio);
	}
}

static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	pkt_dbg(2, pd, "pkt %d\n", pkt->id);

	for (;;) {
		switch (pkt->state) {
		case PACKET_WAITING_STATE:
			if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
				return;

			pkt->sleep_time = 0;
			pkt_gather_data(pd, pkt);
			pkt_set_state(pkt, PACKET_READ_WAIT_STATE);
			break;

		case PACKET_READ_WAIT_STATE:
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (atomic_read(&pkt->io_errors) > 0) {
				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
			} else {
				pkt_start_write(pd, pkt);
			}
			break;

		case PACKET_WRITE_WAIT_STATE:
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (!pkt->w_bio->bi_error) {
				pkt_set_state(pkt, PACKET_FINISHED_STATE);
			} else {
				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
			}
			break;

		case PACKET_RECOVERY_STATE:
			if (pkt_start_recovery(pkt)) {
				pkt_start_write(pd, pkt);
			} else {
				pkt_dbg(2, pd, "No recovery possible\n");
				pkt_set_state(pkt, PACKET_FINISHED_STATE);
			}
			break;

		case PACKET_FINISHED_STATE:
			pkt_finish_packet(pkt, pkt->w_bio->bi_error);
			return;

		default:
			BUG();
			break;
		}
	}
}

static void pkt_handle_packets(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	/*
	 * Run state machine for active packets
	 */
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		if (atomic_read(&pkt->run_sm) > 0) {
			atomic_set(&pkt->run_sm, 0);
			pkt_run_state_machine(pd, pkt);
		}
	}

	/*
	 * Move no longer active packets to the free list
	 */
	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
		if (pkt->state == PACKET_FINISHED_STATE) {
			list_del(&pkt->list);
			pkt_put_packet_data(pd, pkt);
			pkt_set_state(pkt, PACKET_IDLE_STATE);
			atomic_set(&pd->scan_queue, 1);
		}
	}
	spin_unlock(&pd->cdrw.active_list_lock);
}

static void pkt_count_states(struct pktcdvd_device *pd, int *states)
{
	struct packet_data *pkt;
	int i;

	for (i = 0; i < PACKET_NUM_STATES; i++)
		states[i] = 0;

	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		states[pkt->state]++;
	}
	spin_unlock(&pd->cdrw.active_list_lock);
}

/*
 * kcdrwd is woken up when writes have been queued for one of our
 * registered devices
 */
static int kcdrwd(void *foobar)
{
	struct pktcdvd_device *pd = foobar;
	struct packet_data *pkt;
	long min_sleep_time, residue;

	set_user_nice(current, MIN_NICE);
	set_freezable();

	for (;;) {
		DECLARE_WAITQUEUE(wait, current);

		/*
		 * Wait until there is something to do
		 */
		add_wait_queue(&pd->wqueue, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);

			/* Check if we need to run pkt_handle_queue */
			if (atomic_read(&pd->scan_queue) > 0)
				goto work_to_do;

			/* Check if we need to run the state machine for some packet */
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (atomic_read(&pkt->run_sm) > 0)
					goto work_to_do;
			}

			/* Check if we need to process the iosched queues */
			if (atomic_read(&pd->iosched.attention) != 0)
				goto work_to_do;

			/* Otherwise, go to sleep */
			if (PACKET_DEBUG > 1) {
				int states[PACKET_NUM_STATES];
				pkt_count_states(pd, states);
				pkt_dbg(2, pd, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
					states[0], states[1], states[2],
					states[3], states[4], states[5]);
			}

			min_sleep_time = MAX_SCHEDULE_TIMEOUT;
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
					min_sleep_time = pkt->sleep_time;
			}

			pkt_dbg(2, pd, "sleeping\n");
			residue = schedule_timeout(min_sleep_time);
			pkt_dbg(2, pd, "wake up\n");

			/* make swsusp happy with our thread */
			try_to_freeze();

			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (!pkt->sleep_time)
					continue;
				pkt->sleep_time -= min_sleep_time - residue;
				if (pkt->sleep_time <= 0) {
					pkt->sleep_time = 0;
					atomic_inc(&pkt->run_sm);
				}
			}

			if (kthread_should_stop())
				break;
		}
work_to_do:
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&pd->wqueue, &wait);

		if (kthread_should_stop())
			break;

		/*
		 * if pkt_handle_queue returns true, we can queue
		 * another request.
		 */
		while (pkt_handle_queue(pd))
			;

		/*
		 * Handle packet state machine
		 */
		pkt_handle_packets(pd);

		/*
		 * Handle iosched queues
		 */
		pkt_iosched_process_queue(pd);
	}

	return 0;
}

static void pkt_print_settings(struct pktcdvd_device *pd)
{
	pkt_info(pd, "%s packets, %u blocks, Mode-%c disc\n",
		 pd->settings.fp ? "Fixed" : "Variable",
		 pd->settings.size >> 2,
		 pd->settings.block_mode == 8 ? '1' : '2');
}

static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
{
	memset(cgc->cmd, 0, sizeof(cgc->cmd));

	cgc->cmd[0] = GPCMD_MODE_SENSE_10;
	cgc->cmd[2] = page_code | (page_control << 6);
	cgc->cmd[7] = cgc->buflen >> 8;
	cgc->cmd[8] = cgc->buflen & 0xff;
	cgc->data_direction = CGC_DATA_READ;
	return pkt_generic_packet(pd, cgc);
}

static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
{
	memset(cgc->cmd, 0, sizeof(cgc->cmd));
	memset(cgc->buffer, 0, 2);
	cgc->cmd[0] = GPCMD_MODE_SELECT_10;
	cgc->cmd[1] = 0x10;		/* PF */
	cgc->cmd[7] = cgc->buflen >> 8;
	cgc->cmd[8] = cgc->buflen & 0xff;
	cgc->data_direction = CGC_DATA_WRITE;
	return pkt_generic_packet(pd, cgc);
}

static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
{
	struct packet_command cgc;
	int ret;

	/* set up command and get the disc info */
	init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_DISC_INFO;
	cgc.cmd[8] = cgc.buflen = 2;
	cgc.quiet = 1;

	if ((ret = pkt_generic_packet(pd, &cgc)))
		return ret;

	/* not all drives have the same disc_info length, so requeue
	 * packet with the length the drive tells us it can supply
	 */
	cgc.buflen = be16_to_cpu(di->disc_information_length) +
		     sizeof(di->disc_information_length);

	if (cgc.buflen > sizeof(disc_information))
		cgc.buflen = sizeof(disc_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);
}

static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
{
	struct packet_command cgc;
	int ret;

	init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
	cgc.cmd[1] = type & 3;
	cgc.cmd[4] = (track & 0xff00) >> 8;
	cgc.cmd[5] = track & 0xff;
	cgc.cmd[8] = 8;
	cgc.quiet = 1;

	if ((ret = pkt_generic_packet(pd, &cgc)))
		return ret;

	cgc.buflen = be16_to_cpu(ti->track_information_length) +
		     sizeof(ti->track_information_length);

	if (cgc.buflen > sizeof(track_information))
		cgc.buflen = sizeof(track_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);
}

static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
						long *last_written)
{
	disc_information di;
	track_information ti;
	__u32 last_track;
	int ret = -1;

	if ((ret = pkt_get_disc_info(pd, &di)))
		return ret;

	last_track = (di.last_track_msb << 8) | di.last_track_lsb;
	if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
		return ret;

	/* if this track is blank, try the previous. */
	if (ti.blank) {
		last_track--;
		if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
			return ret;
	}

	/* if last recorded field is valid, return it. */
	if (ti.lra_v) {
		*last_written = be32_to_cpu(ti.last_rec_address);
	} else {
		/* make it up instead */
		*last_written = be32_to_cpu(ti.track_start) +
				be32_to_cpu(ti.track_size);
		if (ti.free_blocks)
			*last_written -= (be32_to_cpu(ti.free_blocks) + 7);
	}
	return 0;
}
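
/*
 * Example of the fallback arithmetic above, with made-up track numbers:
 * for track_start == 0, track_size == 10000 and free_blocks == 500, the
 * estimate becomes 10000 - (500 + 7) == 9493.  The extra 7 blocks
 * presumably allow for the packet link area (cf. the link_size /
 * link_loss value of 7 used elsewhere in this driver).
 */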

/*
 * write mode select package based on pd->settings
 */
static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	struct request_sense sense;
	write_param_page *wp;
	char buffer[128];
	int ret, size;

	/* doesn't apply to DVD+RW or DVD-RAM */
	if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
		return 0;

	memset(buffer, 0, sizeof(buffer));
	init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
	cgc.sense = &sense;
	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
	pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
	if (size > sizeof(buffer))
		size = sizeof(buffer);

	/*
	 * now get it all
	 */
	init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
	cgc.sense = &sense;
	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	/*
	 * write page is offset header + block descriptor length
	 */
	wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];

	wp->fp = pd->settings.fp;
	wp->track_mode = pd->settings.track_mode;
	wp->write_type = pd->settings.write_type;
	wp->data_block_type = pd->settings.block_mode;

	wp->multi_session = 0;

#ifdef PACKET_USE_LS
	wp->link_size = 7;
	wp->ls_v = 1;
#endif

	if (wp->data_block_type == PACKET_BLOCK_MODE1) {
		wp->session_format = 0;
		wp->subhdr2 = 0x20;
	} else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
		wp->session_format = 0x20;
		wp->subhdr2 = 8;
#if 0
		wp->mcn[0] = 0x80;
		memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
#endif
	} else {
		/*
		 * paranoia
		 */
		pkt_err(pd, "write mode wrong %d\n", wp->data_block_type);
		return 1;
	}
	wp->packet_size = cpu_to_be32(pd->settings.size >> 2);

	cgc.buflen = cgc.cmd[8] = size;
	if ((ret = pkt_mode_select(pd, &cgc))) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	pkt_print_settings(pd);
	return 0;
}

/*
 * 1 -- we can write to this track, 0 -- we can't
 */
static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
{
	switch (pd->mmc3_profile) {
		case 0x1a: /* DVD+RW */
		case 0x12: /* DVD-RAM */
			/* The track is always writable on DVD+RW/DVD-RAM */
			return 1;
		default:
			break;
	}

	if (!ti->packet || !ti->fp)
		return 0;

	/*
	 * "good" settings as per Mt Fuji.
	 */
	if (ti->rt == 0 && ti->blank == 0)
		return 1;

	if (ti->rt == 0 && ti->blank == 1)
		return 1;

	if (ti->rt == 1 && ti->blank == 0)
		return 1;

	pkt_err(pd, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
	return 0;
}

/*
 * 1 -- we can write to this disc, 0 -- we can't
 */
static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
{
	switch (pd->mmc3_profile) {
		case 0x0a: /* CD-RW */
		case 0xffff: /* MMC3 not supported */
			break;
		case 0x1a: /* DVD+RW */
		case 0x13: /* DVD-RW */
		case 0x12: /* DVD-RAM */
			return 1;
		default:
			pkt_dbg(2, pd, "Wrong disc profile (%x)\n",
				pd->mmc3_profile);
			return 0;
	}
	/*
	 * For disc type 0xff we should probably reserve a new track,
	 * though that is probably best left to user-space applications.
	 */
	if (di->disc_type == 0xff) {
		pkt_notice(pd, "unknown disc - no track?\n");
		return 0;
	}

	if (di->disc_type != 0x20 && di->disc_type != 0) {
		pkt_err(pd, "wrong disc type (%x)\n", di->disc_type);
		return 0;
	}

	if (di->erasable == 0) {
		pkt_notice(pd, "disc not erasable\n");
		return 0;
	}

	if (di->border_status == PACKET_SESSION_RESERVED) {
		pkt_err(pd, "can't write to last track (reserved)\n");
		return 0;
	}

	return 1;
}

static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	unsigned char buf[12];
	disc_information di;
	track_information ti;
	int ret, track;

	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
	cgc.cmd[8] = 8;
	ret = pkt_generic_packet(pd, &cgc);
	pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7];

	memset(&di, 0, sizeof(disc_information));
	memset(&ti, 0, sizeof(track_information));

	if ((ret = pkt_get_disc_info(pd, &di))) {
		pkt_err(pd, "failed get_disc\n");
		return ret;
	}

	if (!pkt_writable_disc(pd, &di))
		return -EROFS;

	pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;

	track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
	if ((ret = pkt_get_track_info(pd, track, 1, &ti))) {
		pkt_err(pd, "failed get_track\n");
		return ret;
	}

	if (!pkt_writable_track(pd, &ti)) {
		pkt_err(pd, "can't write to this track\n");
		return -EROFS;
	}

	/*
	 * we keep packet size in 512 byte units, makes it easier to
	 * deal with request calculations.
	 */
	pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
	if (pd->settings.size == 0) {
		pkt_notice(pd, "detected zero packet size!\n");
		return -ENXIO;
	}
	if (pd->settings.size > PACKET_MAX_SECTORS) {
		pkt_err(pd, "packet size is too big\n");
		return -EROFS;
	}
	pd->settings.fp = ti.fp;
	pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);

	if (ti.nwa_v) {
		pd->nwa = be32_to_cpu(ti.next_writable);
		set_bit(PACKET_NWA_VALID, &pd->flags);
	}

	/*
	 * In theory we could use the LRA on -RW media as well and just
	 * zero blocks that haven't been written yet, but in practice
	 * that is a no-go.  We do use it for -R media, naturally.
	 */
	if (ti.lra_v) {
		pd->lra = be32_to_cpu(ti.last_rec_address);
		set_bit(PACKET_LRA_VALID, &pd->flags);
	} else {
		pd->lra = 0xffffffff;
		set_bit(PACKET_LRA_VALID, &pd->flags);
	}

	/*
	 * fine for now
	 */
	pd->settings.link_loss = 7;
	pd->settings.write_type = 0;	/* packet */
	pd->settings.track_mode = ti.track_mode;

	/*
	 * mode1 or mode2 disc
	 */
	switch (ti.data_mode) {
		case PACKET_MODE1:
			pd->settings.block_mode = PACKET_BLOCK_MODE1;
			break;
		case PACKET_MODE2:
			pd->settings.block_mode = PACKET_BLOCK_MODE2;
			break;
		default:
			pkt_err(pd, "unknown data mode\n");
			return -EROFS;
	}
	return 0;
}
1936 
1937 /*
1938  * enable/disable write caching on drive
1939  */
1940 static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
1941 						int set)
1942 {
1943 	struct packet_command cgc;
1944 	struct request_sense sense;
1945 	unsigned char buf[64];
1946 	int ret;
1947 
1948 	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
1949 	cgc.sense = &sense;
1950 	cgc.buflen = pd->mode_offset + 12;
1951 
1952 	/*
1953 	 * caching mode page might not be there, so quiet this command
1954 	 */
1955 	cgc.quiet = 1;
1956 
1957 	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0)))
1958 		return ret;
1959 
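	/*
	 * Byte 2, bit 2 of the caching mode page is the WCE (write
	 * cache enable) bit; mask it off first so that disabling
	 * actually clears a previously set bit.
	 */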
1960 	buf[pd->mode_offset + 10] = (buf[pd->mode_offset + 10] & ~0x04) | (!!set << 2);
1961 
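	/*
	 * Bytes 0-1 of the returned mode data hold the mode data
	 * length, which excludes the length field itself, hence the
	 * "+ 2" when sizing the MODE SELECT parameter list.
	 */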
1962 	cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
1963 	ret = pkt_mode_select(pd, &cgc);
1964 	if (ret) {
1965 		pkt_err(pd, "write caching control failed\n");
1966 		pkt_dump_sense(pd, &cgc);
1967 	} else if (set)
1968 		pkt_notice(pd, "enabled write caching\n");
1969 	return ret;
1970 }
1971 
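/*
 * PREVENT/ALLOW MEDIUM REMOVAL: setting bit 0 of byte 4 locks the
 * door, clearing it unlocks.
 */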
1972 static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
1973 {
1974 	struct packet_command cgc;
1975 
1976 	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
1977 	cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
1978 	cgc.cmd[4] = lockflag ? 1 : 0;
1979 	return pkt_generic_packet(pd, &cgc);
1980 }
1981 
1982 /*
1983  * Returns drive maximum write speed
1984  */
1985 static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
1986 						unsigned *write_speed)
1987 {
1988 	struct packet_command cgc;
1989 	struct request_sense sense;
1990 	unsigned char buf[256+18];
1991 	unsigned char *cap_buf;
1992 	int ret, offset;
1993 
1994 	cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
1995 	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
1996 	cgc.sense = &sense;
1997 
1998 	ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
1999 	if (ret) {
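		/*
		 * Some drives reject the large allocation length used
		 * above but still transfer the page header; retry with
		 * the exact length advertised in cap_buf[1].
		 */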
2000 		cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
2001 			     sizeof(struct mode_page_header);
2002 		ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
2003 		if (ret) {
2004 			pkt_dump_sense(pd, &cgc);
2005 			return ret;
2006 		}
2007 	}
2008 
2009 	offset = 20;			    /* Obsoleted field, used by older drives */
2010 	if (cap_buf[1] >= 28)
2011 		offset = 28;		    /* Current write speed selected */
2012 	if (cap_buf[1] >= 30) {
2013 		/* If the drive reports at least one "Logical Unit Write
2014 		 * Speed Performance Descriptor Block", use the information
2015 		 * in the first block, which contains the highest speed.
2016 		 */
2017 		int num_spdb = (cap_buf[30] << 8) + cap_buf[31];
2018 		if (num_spdb > 0)
2019 			offset = 34;
2020 	}
2021 
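	/*
	 * The two-byte speed field is in kB/s, with 1x CD defined as
	 * 176.4 kB/s (the audio rate); e.g. 0x0b06 = 2822 kB/s = 16x.
	 */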
2022 	*write_speed = (cap_buf[offset] << 8) | cap_buf[offset + 1];
2023 	return 0;
2024 }
2025 
2026 /* These tables from cdrecord - I don't have orange book */
2027 /* standard speed CD-RW (1-4x) */
2028 static char clv_to_speed[16] = {
2029 	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
2030 	   0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2031 };
2032 /* high speed CD-RW (up to 10x) */
2033 static char hs_clv_to_speed[16] = {
2034 	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
2035 	   0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2036 };
2037 /* ultra high speed CD-RW */
2038 static char us_clv_to_speed[16] = {
2039 	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
2040 	   0, 2, 4, 8, 0, 0,16, 0,24,32,40,48, 0, 0, 0, 0
2041 };
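/*
 * Example: an Ultra Speed disc reporting sub-type 2 with ATIP A1 speed
 * code 8 maps to us_clv_to_speed[8] = 24, i.e. 24x media.
 */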
2042 
2043 /*
2044  * reads the maximum media speed from ATIP
2045  */
2046 static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
2047 						unsigned *speed)
2048 {
2049 	struct packet_command cgc;
2050 	struct request_sense sense;
2051 	unsigned char buf[64];
2052 	unsigned int size, st, sp;
2053 	int ret;
2054 
2055 	init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
2056 	cgc.sense = &sense;
2057 	cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
2058 	cgc.cmd[1] = 2;
2059 	cgc.cmd[2] = 4; /* READ ATIP */
2060 	cgc.cmd[8] = 2;
2061 	ret = pkt_generic_packet(pd, &cgc);
2062 	if (ret) {
2063 		pkt_dump_sense(pd, &cgc);
2064 		return ret;
2065 	}
2066 	size = ((unsigned int) buf[0]<<8) + buf[1] + 2;
2067 	if (size > sizeof(buf))
2068 		size = sizeof(buf);
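	/*
	 * The first READ TOC/PMA/ATIP above asked for just the two
	 * length bytes; they hold the ATIP data length excluding
	 * themselves, so the full size is that value + 2, clamped to
	 * our buffer. Now fetch the whole structure.
	 */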
2069 
2070 	init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
2071 	cgc.sense = &sense;
2072 	cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
2073 	cgc.cmd[1] = 2;
2074 	cgc.cmd[2] = 4;
2075 	cgc.cmd[8] = size;
2076 	ret = pkt_generic_packet(pd, &cgc);
2077 	if (ret) {
2078 		pkt_dump_sense(pd, &cgc);
2079 		return ret;
2080 	}
2081 
2082 	if (!(buf[6] & 0x40)) {
2083 		pkt_notice(pd, "disc type is not CD-RW\n");
2084 		return 1;
2085 	}
2086 	if (!(buf[6] & 0x4)) {
2087 		pkt_notice(pd, "A1 values on media are not valid, maybe not CD-RW?\n");
2088 		return 1;
2089 	}
2090 
2091 	st = (buf[6] >> 3) & 0x7; /* disc sub-type */
2092 
2093 	sp = buf[16] & 0xf; /* max speed from ATIP A1 field */
2094 
2095 	/* Info from cdrecord */
2096 	switch (st) {
2097 		case 0: /* standard speed */
2098 			*speed = clv_to_speed[sp];
2099 			break;
2100 		case 1: /* high speed */
2101 			*speed = hs_clv_to_speed[sp];
2102 			break;
2103 		case 2: /* ultra high speed */
2104 			*speed = us_clv_to_speed[sp];
2105 			break;
2106 		default:
2107 			pkt_notice(pd, "unknown disc sub-type %d\n", st);
2108 			return 1;
2109 	}
2110 	if (*speed) {
2111 		pkt_info(pd, "maximum media speed: %d\n", *speed);
2112 		return 0;
2113 	} else {
2114 		pkt_notice(pd, "unknown speed %d for sub-type %d\n", sp, st);
2115 		return 1;
2116 	}
2117 }
2118 
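/*
 * OPC (Optimum Power Calibration) has the drive calibrate laser power
 * for the mounted medium; cmd[1] bit 0 is the DoOpc bit, and the long
 * timeout leaves room for the calibration run itself.
 */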
2119 static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
2120 {
2121 	struct packet_command cgc;
2122 	struct request_sense sense;
2123 	int ret;
2124 
2125 	pkt_dbg(2, pd, "Performing OPC\n");
2126 
2127 	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
2128 	cgc.sense = &sense;
2129 	cgc.timeout = 60*HZ;
2130 	cgc.cmd[0] = GPCMD_SEND_OPC;
2131 	cgc.cmd[1] = 1;
2132 	if ((ret = pkt_generic_packet(pd, &cgc)))
2133 		pkt_dump_sense(pd, &cgc);
2134 	return ret;
2135 }
2136 
2137 static int pkt_open_write(struct pktcdvd_device *pd)
2138 {
2139 	int ret;
2140 	unsigned int write_speed, media_write_speed, read_speed;
2141 
2142 	if ((ret = pkt_probe_settings(pd))) {
2143 		pkt_dbg(2, pd, "failed probe\n");
2144 		return ret;
2145 	}
2146 
2147 	if ((ret = pkt_set_write_settings(pd))) {
2148 		pkt_dbg(1, pd, "failed saving write settings\n");
2149 		return -EIO;
2150 	}
2151 
2152 	pkt_write_caching(pd, USE_WCACHING);
2153 
2154 	if ((ret = pkt_get_max_speed(pd, &write_speed)))
2155 		write_speed = 16 * 177;
2156 	switch (pd->mmc3_profile) {
2157 		case 0x13: /* DVD-RW */
2158 		case 0x1a: /* DVD+RW */
2159 		case 0x12: /* DVD-RAM */
2160 			pkt_dbg(1, pd, "write speed %ukB/s\n", write_speed);
2161 			break;
2162 		default:
2163 			if ((ret = pkt_media_speed(pd, &media_write_speed)))
2164 				media_write_speed = 16;
2165 			write_speed = min(write_speed, media_write_speed * 177);
2166 			pkt_dbg(1, pd, "write speed %ux\n", write_speed / 176);
2167 			break;
2168 	}
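	/*
	 * Units note: drive-reported speeds are in kB/s while ATIP
	 * media speeds are in x units, with 1x defined as 176.4 kB/s;
	 * that is why both 176 and 177 appear as conversion factors
	 * above. E.g. a 2822 kB/s drive maximum on 10x rated media
	 * gives min(2822, 10 * 177) = 1770 kB/s, roughly 10x.
	 */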
2169 	read_speed = write_speed;
2170 
2171 	if ((ret = pkt_set_speed(pd, write_speed, read_speed))) {
2172 		pkt_dbg(1, pd, "couldn't set write speed\n");
2173 		return -EIO;
2174 	}
2175 	pd->write_speed = write_speed;
2176 	pd->read_speed = read_speed;
2177 
2178 	if ((ret = pkt_perform_opc(pd)))
2179 		pkt_dbg(1, pd, "Optimum Power Calibration failed\n");
2181 
2182 	return 0;
2183 }
2184 
2185 /*
2186  * called at open time.
2187  */
2188 static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
2189 {
2190 	int ret;
2191 	long lba;
2192 	struct request_queue *q;
2193 
2194 	/*
2195 	 * We need to re-open the cdrom device without O_NONBLOCK to be able
2196 	 * to read/write from/to it. It is already opened in O_NONBLOCK mode
2197 	 * so bdget() can't fail.
2198 	 */
2199 	bdget(pd->bdev->bd_dev);
2200 	if ((ret = blkdev_get(pd->bdev, FMODE_READ | FMODE_EXCL, pd)))
2201 		goto out;
2202 
2203 	if ((ret = pkt_get_last_written(pd, &lba))) {
2204 		pkt_err(pd, "pkt_get_last_written failed\n");
2205 		goto out_putdev;
2206 	}
2207 
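	/*
	 * lba counts 2048-byte frames: << 2 converts to the 512-byte
	 * sectors that set_capacity() expects, << 11 to the byte size
	 * for bd_set_size(); e.g. lba = 1024 means 4096 sectors, 2 MiB.
	 */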
2208 	set_capacity(pd->disk, lba << 2);
2209 	set_capacity(pd->bdev->bd_disk, lba << 2);
2210 	bd_set_size(pd->bdev, (loff_t)lba << 11);
2211 
2212 	q = bdev_get_queue(pd->bdev);
2213 	if (write) {
2214 		if ((ret = pkt_open_write(pd)))
2215 			goto out_putdev;
2216 		/*
2217 		 * Some CD-RW drives cannot handle writes larger than one packet,
2218 		 * even if the size is a multiple of the packet size.
2219 		 */
2220 		spin_lock_irq(q->queue_lock);
2221 		blk_queue_max_hw_sectors(q, pd->settings.size);
2222 		spin_unlock_irq(q->queue_lock);
2223 		set_bit(PACKET_WRITABLE, &pd->flags);
2224 	} else {
2225 		pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
2226 		clear_bit(PACKET_WRITABLE, &pd->flags);
2227 	}
2228 
2229 	if ((ret = pkt_set_segment_merging(pd, q)))
2230 		goto out_putdev;
2231 
2232 	if (write) {
2233 		if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
2234 			pkt_err(pd, "not enough memory for buffers\n");
2235 			ret = -ENOMEM;
2236 			goto out_putdev;
2237 		}
2238 		pkt_info(pd, "%lukB available on disc\n", lba << 1);
2239 	}
2240 
2241 	return 0;
2242 
2243 out_putdev:
2244 	blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);
2245 out:
2246 	return ret;
2247 }
2248 
2249 /*
2250  * called when the device is closed. makes sure that the device flushes
2251  * the internal cache before we close.
2252  */
2253 static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
2254 {
2255 	if (flush && pkt_flush_cache(pd))
2256 		pkt_dbg(1, pd, "not flushing cache\n");
2257 
2258 	pkt_lock_door(pd, 0);
2259 
2260 	pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
2261 	blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);
2262 
2263 	pkt_shrink_pktlist(pd);
2264 }
2265 
2266 static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
2267 {
2268 	if (dev_minor >= MAX_WRITERS)
2269 		return NULL;
2270 	return pkt_devs[dev_minor];
2271 }
2272 
2273 static int pkt_open(struct block_device *bdev, fmode_t mode)
2274 {
2275 	struct pktcdvd_device *pd = NULL;
2276 	int ret;
2277 
2278 	mutex_lock(&pktcdvd_mutex);
2279 	mutex_lock(&ctl_mutex);
2280 	pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
2281 	if (!pd) {
2282 		ret = -ENODEV;
2283 		goto out;
2284 	}
2285 	BUG_ON(pd->refcnt < 0);
2286 
2287 	pd->refcnt++;
2288 	if (pd->refcnt > 1) {
2289 		if ((mode & FMODE_WRITE) &&
2290 		    !test_bit(PACKET_WRITABLE, &pd->flags)) {
2291 			ret = -EBUSY;
2292 			goto out_dec;
2293 		}
2294 	} else {
2295 		ret = pkt_open_dev(pd, mode & FMODE_WRITE);
2296 		if (ret)
2297 			goto out_dec;
2298 		/*
2299 		 * needed here as well, since ext2 (among others) may change
2300 		 * the blocksize at mount time
2301 		 */
2302 		set_blocksize(bdev, CD_FRAMESIZE);
2303 	}
2304 
2305 	mutex_unlock(&ctl_mutex);
2306 	mutex_unlock(&pktcdvd_mutex);
2307 	return 0;
2308 
2309 out_dec:
2310 	pd->refcnt--;
2311 out:
2312 	mutex_unlock(&ctl_mutex);
2313 	mutex_unlock(&pktcdvd_mutex);
2314 	return ret;
2315 }
2316 
2317 static void pkt_close(struct gendisk *disk, fmode_t mode)
2318 {
2319 	struct pktcdvd_device *pd = disk->private_data;
2320 
2321 	mutex_lock(&pktcdvd_mutex);
2322 	mutex_lock(&ctl_mutex);
2323 	pd->refcnt--;
2324 	BUG_ON(pd->refcnt < 0);
2325 	if (pd->refcnt == 0) {
2326 		int flush = test_bit(PACKET_WRITABLE, &pd->flags);
2327 		pkt_release_dev(pd, flush);
2328 	}
2329 	mutex_unlock(&ctl_mutex);
2330 	mutex_unlock(&pktcdvd_mutex);
2331 }
2332 
2333 
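/*
 * Completion handler for the private clone of a READ bio: copy the
 * completion status back to the original bio, end it, and update the
 * per-device pending-I/O accounting through pkt_bio_finished().
 */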
2334 static void pkt_end_io_read_cloned(struct bio *bio)
2335 {
2336 	struct packet_stacked_data *psd = bio->bi_private;
2337 	struct pktcdvd_device *pd = psd->pd;
2338 
2339 	psd->bio->bi_error = bio->bi_error;
2340 	bio_put(bio);
2341 	bio_endio(psd->bio);
2342 	mempool_free(psd, psd_pool);
2343 	pkt_bio_finished(pd);
2344 }
2345 
2346 static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
2347 {
2348 	struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
2349 	struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);
2350 
2351 	psd->pd = pd;
2352 	psd->bio = bio;
2353 	cloned_bio->bi_bdev = pd->bdev;
2354 	cloned_bio->bi_private = psd;
2355 	cloned_bio->bi_end_io = pkt_end_io_read_cloned;
2356 	pd->stats.secs_r += bio_sectors(bio);
2357 	pkt_queue_bio(pd, cloned_bio);
2358 }
2359 
2360 static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
2361 {
2362 	struct pktcdvd_device *pd = q->queuedata;
2363 	sector_t zone;
2364 	struct packet_data *pkt;
2365 	int was_empty, blocked_bio;
2366 	struct pkt_rb_node *node;
2367 
2368 	zone = get_zone(bio->bi_iter.bi_sector, pd);
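	/*
	 * get_zone() rounds the sector down to the start of the packet
	 * ("zone") containing it; e.g. with 128-sector packets and a
	 * zero start offset, sector 300 falls in the zone starting at
	 * sector 256.
	 */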
2369 
2370 	/*
2371 	 * If we find a matching packet in state WAITING or READ_WAIT, we can
2372 	 * just append this bio to that packet.
2373 	 */
2374 	spin_lock(&pd->cdrw.active_list_lock);
2375 	blocked_bio = 0;
2376 	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
2377 		if (pkt->sector == zone) {
2378 			spin_lock(&pkt->lock);
2379 			if ((pkt->state == PACKET_WAITING_STATE) ||
2380 			    (pkt->state == PACKET_READ_WAIT_STATE)) {
2381 				bio_list_add(&pkt->orig_bios, bio);
2382 				pkt->write_size +=
2383 					bio->bi_iter.bi_size / CD_FRAMESIZE;
2384 				if ((pkt->write_size >= pkt->frames) &&
2385 				    (pkt->state == PACKET_WAITING_STATE)) {
2386 					atomic_inc(&pkt->run_sm);
2387 					wake_up(&pd->wqueue);
2388 				}
2389 				spin_unlock(&pkt->lock);
2390 				spin_unlock(&pd->cdrw.active_list_lock);
2391 				return;
2392 			} else {
2393 				blocked_bio = 1;
2394 			}
2395 			spin_unlock(&pkt->lock);
2396 		}
2397 	}
2398 	spin_unlock(&pd->cdrw.active_list_lock);
2399 
2400 	/*
2401 	 * Test if there is enough room left in the bio work queue
2402 	 * (queue size >= congestion-on mark).
2403 	 * If not, wait until the work queue size is below the congestion-off mark.
2404 	 */
2405 	spin_lock(&pd->lock);
2406 	if (pd->write_congestion_on > 0
2407 	    && pd->bio_queue_size >= pd->write_congestion_on) {
2408 		set_bdi_congested(&q->backing_dev_info, BLK_RW_ASYNC);
2409 		do {
2410 			spin_unlock(&pd->lock);
2411 			congestion_wait(BLK_RW_ASYNC, HZ);
2412 			spin_lock(&pd->lock);
2413 		} while (pd->bio_queue_size > pd->write_congestion_off);
2414 	}
2415 	spin_unlock(&pd->lock);
2416 
2417 	/*
2418 	 * No matching packet found. Store the bio in the work queue.
2419 	 */
2420 	node = mempool_alloc(pd->rb_pool, GFP_NOIO);
2421 	node->bio = bio;
2422 	spin_lock(&pd->lock);
2423 	BUG_ON(pd->bio_queue_size < 0);
2424 	was_empty = (pd->bio_queue_size == 0);
2425 	pkt_rbtree_insert(pd, node);
2426 	spin_unlock(&pd->lock);
2427 
2428 	/*
2429 	 * Wake up the worker thread.
2430 	 */
2431 	atomic_set(&pd->scan_queue, 1);
2432 	if (was_empty) {
2433 		/* This wake_up is required for correct operation */
2434 		wake_up(&pd->wqueue);
2435 	} else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
2436 		/*
2437 		 * This wake up is not required for correct operation,
2438 		 * but improves performance in some cases.
2439 		 */
2440 		wake_up(&pd->wqueue);
2441 	}
2442 }
2443 
2444 static blk_qc_t pkt_make_request(struct request_queue *q, struct bio *bio)
2445 {
2446 	struct pktcdvd_device *pd;
2447 	char b[BDEVNAME_SIZE];
2448 	struct bio *split;
2449 
2450 	blk_queue_bounce(q, &bio);
2451 
2452 	blk_queue_split(q, &bio, q->bio_split);
2453 
2454 	pd = q->queuedata;
2455 	if (!pd) {
2456 		pr_err("%s incorrect request queue\n",
2457 		       bdevname(bio->bi_bdev, b));
2458 		goto end_io;
2459 	}
2460 
2461 	pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
2462 		(unsigned long long)bio->bi_iter.bi_sector,
2463 		(unsigned long long)bio_end_sector(bio));
2464 
2465 	/*
2466 	 * Clone READ bios so we can have our own bi_end_io callback.
2467 	 */
2468 	if (bio_data_dir(bio) == READ) {
2469 		pkt_make_request_read(pd, bio);
2470 		return BLK_QC_T_NONE;
2471 	}
2472 
2473 	if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
2474 		pkt_notice(pd, "WRITE for ro device (%llu)\n",
2475 			   (unsigned long long)bio->bi_iter.bi_sector);
2476 		goto end_io;
2477 	}
2478 
2479 	if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
2480 		pkt_err(pd, "wrong bio size\n");
2481 		goto end_io;
2482 	}
2483 
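	/*
	 * Split the bio at zone boundaries so that each fragment falls
	 * entirely within one packet. E.g. with 128-sector zones, a
	 * write covering sectors 200-299 crosses the boundary at 256
	 * and is split into chained fragments 200-255 and 256-299
	 * (again assuming a zero start offset).
	 */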
2484 	do {
2485 		sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
2486 		sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);
2487 
2488 		if (last_zone != zone) {
2489 			BUG_ON(last_zone != zone + pd->settings.size);
2490 
2491 			split = bio_split(bio, last_zone -
2492 					  bio->bi_iter.bi_sector,
2493 					  GFP_NOIO, fs_bio_set);
2494 			bio_chain(split, bio);
2495 		} else {
2496 			split = bio;
2497 		}
2498 
2499 		pkt_make_request_write(q, split);
2500 	} while (split != bio);
2501 
2502 	return BLK_QC_T_NONE;
2503 end_io:
2504 	bio_io_error(bio);
2505 	return BLK_QC_T_NONE;
2506 }
2507 
2508 static void pkt_init_queue(struct pktcdvd_device *pd)
2509 {
2510 	struct request_queue *q = pd->disk->queue;
2511 
2512 	blk_queue_make_request(q, pkt_make_request);
2513 	blk_queue_logical_block_size(q, CD_FRAMESIZE);
2514 	blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS);
2515 	q->queuedata = pd;
2516 }
2517 
2518 static int pkt_seq_show(struct seq_file *m, void *p)
2519 {
2520 	struct pktcdvd_device *pd = m->private;
2521 	char *msg;
2522 	char bdev_buf[BDEVNAME_SIZE];
2523 	int states[PACKET_NUM_STATES];
2524 
2525 	seq_printf(m, "Writer %s mapped to %s:\n", pd->name,
2526 		   bdevname(pd->bdev, bdev_buf));
2527 
2528 	seq_printf(m, "\nSettings:\n");
2529 	seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
2530 
2531 	if (pd->settings.write_type == 0)
2532 		msg = "Packet";
2533 	else
2534 		msg = "Unknown";
2535 	seq_printf(m, "\twrite type:\t\t%s\n", msg);
2536 
2537 	seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
2538 	seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);
2539 
2540 	seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);
2541 
2542 	if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
2543 		msg = "Mode 1";
2544 	else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
2545 		msg = "Mode 2";
2546 	else
2547 		msg = "Unknown";
2548 	seq_printf(m, "\tblock mode:\t\t%s\n", msg);
2549 
2550 	seq_printf(m, "\nStatistics:\n");
2551 	seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
2552 	seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
2553 	seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
2554 	seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
2555 	seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);
2556 
2557 	seq_printf(m, "\nMisc:\n");
2558 	seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
2559 	seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
2560 	seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
2561 	seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
2562 	seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
2563 	seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);
2564 
2565 	seq_printf(m, "\nQueue state:\n");
2566 	seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
2567 	seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
2568 	seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector);
2569 
2570 	pkt_count_states(pd, states);
2571 	seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
2572 		   states[0], states[1], states[2], states[3], states[4], states[5]);
2573 
2574 	seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n",
2575 			pd->write_congestion_off,
2576 			pd->write_congestion_on);
2577 	return 0;
2578 }
2579 
2580 static int pkt_seq_open(struct inode *inode, struct file *file)
2581 {
2582 	return single_open(file, pkt_seq_show, PDE_DATA(inode));
2583 }
2584 
2585 static const struct file_operations pkt_proc_fops = {
2586 	.open	= pkt_seq_open,
2587 	.read	= seq_read,
2588 	.llseek	= seq_lseek,
2589 	.release = single_release
2590 };
2591 
2592 static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
2593 {
2594 	int i;
2595 	int ret = 0;
2596 	char b[BDEVNAME_SIZE];
2597 	struct block_device *bdev;
2598 
2599 	if (pd->pkt_dev == dev) {
2600 		pkt_err(pd, "recursive setup not allowed\n");
2601 		return -EBUSY;
2602 	}
2603 	for (i = 0; i < MAX_WRITERS; i++) {
2604 		struct pktcdvd_device *pd2 = pkt_devs[i];
2605 		if (!pd2)
2606 			continue;
2607 		if (pd2->bdev->bd_dev == dev) {
2608 			pkt_err(pd, "%s already setup\n",
2609 				bdevname(pd2->bdev, b));
2610 			return -EBUSY;
2611 		}
2612 		if (pd2->pkt_dev == dev) {
2613 			pkt_err(pd, "can't chain pktcdvd devices\n");
2614 			return -EBUSY;
2615 		}
2616 	}
2617 
2618 	bdev = bdget(dev);
2619 	if (!bdev)
2620 		return -ENOMEM;
2621 	ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
2622 	if (ret)
2623 		return ret;
2624 
2625 	/* This is safe, since we have a reference from open(). */
2626 	__module_get(THIS_MODULE);
2627 
2628 	pd->bdev = bdev;
2629 	set_blocksize(bdev, CD_FRAMESIZE);
2630 
2631 	pkt_init_queue(pd);
2632 
2633 	atomic_set(&pd->cdrw.pending_bios, 0);
2634 	pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
2635 	if (IS_ERR(pd->cdrw.thread)) {
2636 		pkt_err(pd, "can't start kernel thread\n");
2637 		ret = -ENOMEM;
2638 		goto out_mem;
2639 	}
2640 
2641 	proc_create_data(pd->name, 0, pkt_proc, &pkt_proc_fops, pd);
2642 	pkt_dbg(1, pd, "writer mapped to %s\n", bdevname(bdev, b));
2643 	return 0;
2644 
2645 out_mem:
2646 	blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
2647 	/* This is safe: open() is still holding a reference. */
2648 	module_put(THIS_MODULE);
2649 	return ret;
2650 }
2651 
2652 static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
2653 {
2654 	struct pktcdvd_device *pd = bdev->bd_disk->private_data;
2655 	int ret;
2656 
2657 	pkt_dbg(2, pd, "cmd %x, dev %d:%d\n",
2658 		cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
2659 
2660 	mutex_lock(&pktcdvd_mutex);
2661 	switch (cmd) {
2662 	case CDROMEJECT:
2663 		/*
2664 		 * The door gets locked when the device is opened, so we
2665 		 * have to unlock it or else the eject command fails.
2666 		 */
2667 		if (pd->refcnt == 1)
2668 			pkt_lock_door(pd, 0);
2669 		/* fallthru */
2670 	/*
2671 	 * forward selected CDROM ioctls to CD-ROM, for UDF
2672 	 */
2673 	case CDROMMULTISESSION:
2674 	case CDROMREADTOCENTRY:
2675 	case CDROM_LAST_WRITTEN:
2676 	case CDROM_SEND_PACKET:
2677 	case SCSI_IOCTL_SEND_COMMAND:
2678 		ret = __blkdev_driver_ioctl(pd->bdev, mode, cmd, arg);
2679 		break;
2680 
2681 	default:
2682 		pkt_dbg(2, pd, "Unknown ioctl (%x)\n", cmd);
2683 		ret = -ENOTTY;
2684 	}
2685 	mutex_unlock(&pktcdvd_mutex);
2686 
2687 	return ret;
2688 }
2689 
2690 static unsigned int pkt_check_events(struct gendisk *disk,
2691 				     unsigned int clearing)
2692 {
2693 	struct pktcdvd_device *pd = disk->private_data;
2694 	struct gendisk *attached_disk;
2695 
2696 	if (!pd)
2697 		return 0;
2698 	if (!pd->bdev)
2699 		return 0;
2700 	attached_disk = pd->bdev->bd_disk;
2701 	if (!attached_disk || !attached_disk->fops->check_events)
2702 		return 0;
2703 	return attached_disk->fops->check_events(attached_disk, clearing);
2704 }
2705 
2706 static const struct block_device_operations pktcdvd_ops = {
2707 	.owner =		THIS_MODULE,
2708 	.open =			pkt_open,
2709 	.release =		pkt_close,
2710 	.ioctl =		pkt_ioctl,
2711 	.check_events =		pkt_check_events,
2712 };
2713 
2714 static char *pktcdvd_devnode(struct gendisk *gd, umode_t *mode)
2715 {
2716 	return kasprintf(GFP_KERNEL, "pktcdvd/%s", gd->disk_name);
2717 }
2718 
2719 /*
2720  * Set up mapping from pktcdvd device to CD-ROM device.
2721  */
2722 static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
2723 {
2724 	int idx;
2725 	int ret = -ENOMEM;
2726 	struct pktcdvd_device *pd;
2727 	struct gendisk *disk;
2728 
2729 	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
2730 
2731 	for (idx = 0; idx < MAX_WRITERS; idx++)
2732 		if (!pkt_devs[idx])
2733 			break;
2734 	if (idx == MAX_WRITERS) {
2735 		pr_err("max %d writers supported\n", MAX_WRITERS);
2736 		ret = -EBUSY;
2737 		goto out_mutex;
2738 	}
2739 
2740 	pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
2741 	if (!pd)
2742 		goto out_mutex;
2743 
2744 	pd->rb_pool = mempool_create_kmalloc_pool(PKT_RB_POOL_SIZE,
2745 						  sizeof(struct pkt_rb_node));
2746 	if (!pd->rb_pool)
2747 		goto out_mem;
2748 
2749 	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
2750 	INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
2751 	spin_lock_init(&pd->cdrw.active_list_lock);
2752 
2753 	spin_lock_init(&pd->lock);
2754 	spin_lock_init(&pd->iosched.lock);
2755 	bio_list_init(&pd->iosched.read_queue);
2756 	bio_list_init(&pd->iosched.write_queue);
2757 	sprintf(pd->name, DRIVER_NAME"%d", idx);
2758 	init_waitqueue_head(&pd->wqueue);
2759 	pd->bio_queue = RB_ROOT;
2760 
2761 	pd->write_congestion_on  = write_congestion_on;
2762 	pd->write_congestion_off = write_congestion_off;
2763 
2764 	disk = alloc_disk(1);
2765 	if (!disk)
2766 		goto out_mem;
2767 	pd->disk = disk;
2768 	disk->major = pktdev_major;
2769 	disk->first_minor = idx;
2770 	disk->fops = &pktcdvd_ops;
2771 	disk->flags = GENHD_FL_REMOVABLE;
2772 	strcpy(disk->disk_name, pd->name);
2773 	disk->devnode = pktcdvd_devnode;
2774 	disk->private_data = pd;
2775 	disk->queue = blk_alloc_queue(GFP_KERNEL);
2776 	if (!disk->queue)
2777 		goto out_mem2;
2778 
2779 	pd->pkt_dev = MKDEV(pktdev_major, idx);
2780 	ret = pkt_new_dev(pd, dev);
2781 	if (ret)
2782 		goto out_new_dev;
2783 
2784 	/* inherit events of the host device */
2785 	disk->events = pd->bdev->bd_disk->events;
2786 	disk->async_events = pd->bdev->bd_disk->async_events;
2787 
2788 	add_disk(disk);
2789 
2790 	pkt_sysfs_dev_new(pd);
2791 	pkt_debugfs_dev_new(pd);
2792 
2793 	pkt_devs[idx] = pd;
2794 	if (pkt_dev)
2795 		*pkt_dev = pd->pkt_dev;
2796 
2797 	mutex_unlock(&ctl_mutex);
2798 	return 0;
2799 
2800 out_new_dev:
2801 	blk_cleanup_queue(disk->queue);
2802 out_mem2:
2803 	put_disk(disk);
2804 out_mem:
2805 	mempool_destroy(pd->rb_pool);
2806 	kfree(pd);
2807 out_mutex:
2808 	mutex_unlock(&ctl_mutex);
2809 	pr_err("setup of pktcdvd device failed\n");
2810 	return ret;
2811 }
2812 
2813 /*
2814  * Tear down mapping from pktcdvd device to CD-ROM device.
2815  */
2816 static int pkt_remove_dev(dev_t pkt_dev)
2817 {
2818 	struct pktcdvd_device *pd;
2819 	int idx;
2820 	int ret = 0;
2821 
2822 	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
2823 
2824 	for (idx = 0; idx < MAX_WRITERS; idx++) {
2825 		pd = pkt_devs[idx];
2826 		if (pd && (pd->pkt_dev == pkt_dev))
2827 			break;
2828 	}
2829 	if (idx == MAX_WRITERS) {
2830 		pr_debug("dev not setup\n");
2831 		ret = -ENXIO;
2832 		goto out;
2833 	}
2834 
2835 	if (pd->refcnt > 0) {
2836 		ret = -EBUSY;
2837 		goto out;
2838 	}
2839 	if (!IS_ERR(pd->cdrw.thread))
2840 		kthread_stop(pd->cdrw.thread);
2841 
2842 	pkt_devs[idx] = NULL;
2843 
2844 	pkt_debugfs_dev_remove(pd);
2845 	pkt_sysfs_dev_remove(pd);
2846 
2847 	blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);
2848 
2849 	remove_proc_entry(pd->name, pkt_proc);
2850 	pkt_dbg(1, pd, "writer unmapped\n");
2851 
2852 	del_gendisk(pd->disk);
2853 	blk_cleanup_queue(pd->disk->queue);
2854 	put_disk(pd->disk);
2855 
2856 	mempool_destroy(pd->rb_pool);
2857 	kfree(pd);
2858 
2859 	/* This is safe: open() is still holding a reference. */
2860 	module_put(THIS_MODULE);
2861 
2862 out:
2863 	mutex_unlock(&ctl_mutex);
2864 	return ret;
2865 }
2866 
2867 static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
2868 {
2869 	struct pktcdvd_device *pd;
2870 
2871 	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
2872 
2873 	pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
2874 	if (pd) {
2875 		ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
2876 		ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
2877 	} else {
2878 		ctrl_cmd->dev = 0;
2879 		ctrl_cmd->pkt_dev = 0;
2880 	}
2881 	ctrl_cmd->num_devices = MAX_WRITERS;
2882 
2883 	mutex_unlock(&ctl_mutex);
2884 }
2885 
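/*
 * Rough userspace sketch of driving this ioctl (roughly what the
 * pktsetup tool does), assuming udev creates /dev/pktcdvd/control from
 * the node name in pkt_misc below; cd_dev is the CD device number in
 * the same encoded (new_encode_dev) format used here:
 *
 *	struct pkt_ctrl_command c = {
 *		.command = PKT_CTRL_CMD_SETUP,
 *		.dev = cd_dev,
 *	};
 *	int fd = open("/dev/pktcdvd/control", O_RDONLY);
 *	ioctl(fd, PACKET_CTRL_CMD, &c);
 *
 * On success, c.pkt_dev comes back holding the encoded number of the
 * newly mapped pktcdvd device.
 */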
2886 static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2887 {
2888 	void __user *argp = (void __user *)arg;
2889 	struct pkt_ctrl_command ctrl_cmd;
2890 	int ret = 0;
2891 	dev_t pkt_dev = 0;
2892 
2893 	if (cmd != PACKET_CTRL_CMD)
2894 		return -ENOTTY;
2895 
2896 	if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command)))
2897 		return -EFAULT;
2898 
2899 	switch (ctrl_cmd.command) {
2900 	case PKT_CTRL_CMD_SETUP:
2901 		if (!capable(CAP_SYS_ADMIN))
2902 			return -EPERM;
2903 		ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
2904 		ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
2905 		break;
2906 	case PKT_CTRL_CMD_TEARDOWN:
2907 		if (!capable(CAP_SYS_ADMIN))
2908 			return -EPERM;
2909 		ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
2910 		break;
2911 	case PKT_CTRL_CMD_STATUS:
2912 		pkt_get_status(&ctrl_cmd);
2913 		break;
2914 	default:
2915 		return -ENOTTY;
2916 	}
2917 
2918 	if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command)))
2919 		return -EFAULT;
2920 	return ret;
2921 }
2922 
2923 #ifdef CONFIG_COMPAT
2924 static long pkt_ctl_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2925 {
2926 	return pkt_ctl_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
2927 }
2928 #endif
2929 
2930 static const struct file_operations pkt_ctl_fops = {
2931 	.open		= nonseekable_open,
2932 	.unlocked_ioctl	= pkt_ctl_ioctl,
2933 #ifdef CONFIG_COMPAT
2934 	.compat_ioctl	= pkt_ctl_compat_ioctl,
2935 #endif
2936 	.owner		= THIS_MODULE,
2937 	.llseek		= no_llseek,
2938 };
2939 
2940 static struct miscdevice pkt_misc = {
2941 	.minor 		= MISC_DYNAMIC_MINOR,
2942 	.name  		= DRIVER_NAME,
2943 	.nodename	= "pktcdvd/control",
2944 	.fops  		= &pkt_ctl_fops
2945 };
2946 
2947 static int __init pkt_init(void)
2948 {
2949 	int ret;
2950 
2951 	mutex_init(&ctl_mutex);
2952 
2953 	psd_pool = mempool_create_kmalloc_pool(PSD_POOL_SIZE,
2954 					sizeof(struct packet_stacked_data));
2955 	if (!psd_pool)
2956 		return -ENOMEM;
2957 
2958 	ret = register_blkdev(pktdev_major, DRIVER_NAME);
2959 	if (ret < 0) {
2960 		pr_err("unable to register block device\n");
2961 		goto out2;
2962 	}
2963 	if (!pktdev_major)
2964 		pktdev_major = ret;
2965 
2966 	ret = pkt_sysfs_init();
2967 	if (ret)
2968 		goto out;
2969 
2970 	pkt_debugfs_init();
2971 
2972 	ret = misc_register(&pkt_misc);
2973 	if (ret) {
2974 		pr_err("unable to register misc device\n");
2975 		goto out_misc;
2976 	}
2977 
2978 	pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL);
2979 
2980 	return 0;
2981 
2982 out_misc:
2983 	pkt_debugfs_cleanup();
2984 	pkt_sysfs_cleanup();
2985 out:
2986 	unregister_blkdev(pktdev_major, DRIVER_NAME);
2987 out2:
2988 	mempool_destroy(psd_pool);
2989 	return ret;
2990 }
2991 
2992 static void __exit pkt_exit(void)
2993 {
2994 	remove_proc_entry("driver/"DRIVER_NAME, NULL);
2995 	misc_deregister(&pkt_misc);
2996 
2997 	pkt_debugfs_cleanup();
2998 	pkt_sysfs_cleanup();
2999 
3000 	unregister_blkdev(pktdev_major, DRIVER_NAME);
3001 	mempool_destroy(psd_pool);
3002 }
3003 
3004 MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
3005 MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
3006 MODULE_LICENSE("GPL");
3007 
3008 module_init(pkt_init);
3009 module_exit(pkt_exit);
3010