Lines Matching full:pd
98 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd) in get_zone() argument
100 return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1); in get_zone()
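get_zone() rounds a sector down to the start of its packet ("zone"); the masking only works if the packet size is a power of two. A minimal runnable userspace sketch of the same arithmetic (the 128-sector size and the sample sectors are made up for illustration):

	#include <stdio.h>
	#include <stdint.h>

	typedef uint64_t sector_t;

	/* Same masking as get_zone(): align (sector + offset) down to a
	 * packet-size boundary. Assumes size is a power of two. */
	static sector_t get_zone(sector_t sector, sector_t offset, sector_t size)
	{
		return (sector + offset) & ~(sector_t)(size - 1);
	}

	int main(void)
	{
		sector_t size = 128;	/* hypothetical packet size in sectors */

		printf("%llu\n", (unsigned long long)get_zone(0,   0, size));	/* 0   */
		printf("%llu\n", (unsigned long long)get_zone(127, 0, size));	/* 0   */
		printf("%llu\n", (unsigned long long)get_zone(128, 0, size));	/* 128 */
		printf("%llu\n", (unsigned long long)get_zone(300, 0, size));	/* 256 */
		return 0;
	}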
122 struct pktcdvd_device *pd = dev_get_drvdata(dev); in packets_started_show() local
124 return sysfs_emit(buf, "%lu\n", pd->stats.pkt_started); in packets_started_show()
131 struct pktcdvd_device *pd = dev_get_drvdata(dev); in packets_finished_show() local
133 return sysfs_emit(buf, "%lu\n", pd->stats.pkt_ended); in packets_finished_show()
140 struct pktcdvd_device *pd = dev_get_drvdata(dev); in kb_written_show() local
142 return sysfs_emit(buf, "%lu\n", pd->stats.secs_w >> 1); in kb_written_show()
149 struct pktcdvd_device *pd = dev_get_drvdata(dev); in kb_read_show() local
151 return sysfs_emit(buf, "%lu\n", pd->stats.secs_r >> 1); in kb_read_show()
158 struct pktcdvd_device *pd = dev_get_drvdata(dev); in kb_read_gather_show() local
160 return sysfs_emit(buf, "%lu\n", pd->stats.secs_rg >> 1); in kb_read_gather_show()
167 struct pktcdvd_device *pd = dev_get_drvdata(dev); in reset_store() local
170 pd->stats.pkt_started = 0; in reset_store()
171 pd->stats.pkt_ended = 0; in reset_store()
172 pd->stats.secs_w = 0; in reset_store()
173 pd->stats.secs_rg = 0; in reset_store()
174 pd->stats.secs_r = 0; in reset_store()
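Filling in the lines elided above, reset_store() most plausibly zeroes the counters for any non-empty write and returns the consumed length. A hedged reconstruction (the signature and the len check are inferred, not quoted):

	static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
				   const char *buf, size_t len)
	{
		struct pktcdvd_device *pd = dev_get_drvdata(dev);

		if (len > 0) {
			pd->stats.pkt_started = 0;
			pd->stats.pkt_ended = 0;
			pd->stats.secs_w = 0;
			pd->stats.secs_rg = 0;
			pd->stats.secs_r = 0;
		}
		return len;
	}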
198 struct pktcdvd_device *pd = dev_get_drvdata(dev); in size_show() local
201 spin_lock(&pd->lock); in size_show()
202 n = sysfs_emit(buf, "%d\n", pd->bio_queue_size); in size_show()
203 spin_unlock(&pd->lock); in size_show()
228 struct pktcdvd_device *pd = dev_get_drvdata(dev); in congestion_off_show() local
231 spin_lock(&pd->lock); in congestion_off_show()
232 n = sysfs_emit(buf, "%d\n", pd->write_congestion_off); in congestion_off_show()
233 spin_unlock(&pd->lock); in congestion_off_show()
241 struct pktcdvd_device *pd = dev_get_drvdata(dev); in congestion_off_store() local
248 spin_lock(&pd->lock); in congestion_off_store()
249 pd->write_congestion_off = val; in congestion_off_store()
250 init_write_congestion_marks(&pd->write_congestion_off, &pd->write_congestion_on); in congestion_off_store()
251 spin_unlock(&pd->lock); in congestion_off_store()
259 struct pktcdvd_device *pd = dev_get_drvdata(dev); in congestion_on_show() local
262 spin_lock(&pd->lock); in congestion_on_show()
263 n = sysfs_emit(buf, "%d\n", pd->write_congestion_on); in congestion_on_show()
264 spin_unlock(&pd->lock); in congestion_on_show()
272 struct pktcdvd_device *pd = dev_get_drvdata(dev); in congestion_on_store() local
279 spin_lock(&pd->lock); in congestion_on_store()
280 pd->write_congestion_on = val; in congestion_on_store()
281 init_write_congestion_marks(&pd->write_congestion_off, &pd->write_congestion_on); in congestion_on_store()
282 spin_unlock(&pd->lock); in congestion_on_store()
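The two *_store() handlers share one shape: parse an integer, update the watermark under pd->lock, then re-balance both marks. Reconstructing congestion_off_store() from the fragments above (the kstrtoint() parse on the elided lines 242-247 is an assumption):

	static ssize_t congestion_off_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t len)
	{
		struct pktcdvd_device *pd = dev_get_drvdata(dev);
		int val, ret = kstrtoint(buf, 10, &val);

		if (ret)
			return ret;
		spin_lock(&pd->lock);
		pd->write_congestion_off = val;
		/* keep the on/off watermarks mutually consistent */
		init_write_congestion_marks(&pd->write_congestion_off,
					    &pd->write_congestion_on);
		spin_unlock(&pd->lock);
		return len;
	}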
305 static void pkt_sysfs_dev_new(struct pktcdvd_device *pd) in pkt_sysfs_dev_new() argument
308 pd->dev = device_create_with_groups(&class_pktcdvd, NULL, in pkt_sysfs_dev_new()
309 MKDEV(0, 0), pd, pkt_groups, in pkt_sysfs_dev_new()
310 "%s", pd->disk->disk_name); in pkt_sysfs_dev_new()
311 if (IS_ERR(pd->dev)) in pkt_sysfs_dev_new()
312 pd->dev = NULL; in pkt_sysfs_dev_new()
316 static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd) in pkt_sysfs_dev_remove() argument
319 device_unregister(pd->dev); in pkt_sysfs_dev_remove()
337 struct pktcdvd_device *pd = pkt_devs[idx]; in device_map_show() local
338 if (!pd) in device_map_show()
341 pd->disk->disk_name, in device_map_show()
342 MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev), in device_map_show()
343 MAJOR(pd->bdev->bd_dev), in device_map_show()
344 MINOR(pd->bdev->bd_dev)); in device_map_show()
419 static void pkt_count_states(struct pktcdvd_device *pd, int *states) in pkt_count_states() argument
427 spin_lock(&pd->cdrw.active_list_lock); in pkt_count_states()
428 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { in pkt_count_states()
431 spin_unlock(&pd->cdrw.active_list_lock); in pkt_count_states()
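From the locking fragments above, pkt_count_states() is almost certainly a simple histogram over the active packet list; a sketch under that assumption (PACKET_NUM_STATES and pkt->state come from the driver's per-packet state machine):

	static void pkt_count_states(struct pktcdvd_device *pd, int *states)
	{
		struct packet_data *pkt;
		int i;

		for (i = 0; i < PACKET_NUM_STATES; i++)
			states[i] = 0;

		spin_lock(&pd->cdrw.active_list_lock);
		list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list)
			states[pkt->state]++;
		spin_unlock(&pd->cdrw.active_list_lock);
	}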
436 struct pktcdvd_device *pd = m->private; in pkt_seq_show() local
440 seq_printf(m, "Writer %s mapped to %pg:\n", pd->disk->disk_name, pd->bdev); in pkt_seq_show()
443 seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2); in pkt_seq_show()
445 if (pd->settings.write_type == 0) in pkt_seq_show()
451 seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable"); in pkt_seq_show()
452 seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss); in pkt_seq_show()
454 seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode); in pkt_seq_show()
456 if (pd->settings.block_mode == PACKET_BLOCK_MODE1) in pkt_seq_show()
458 else if (pd->settings.block_mode == PACKET_BLOCK_MODE2) in pkt_seq_show()
465 seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started); in pkt_seq_show()
466 seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended); in pkt_seq_show()
467 seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1); in pkt_seq_show()
468 seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1); in pkt_seq_show()
469 seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1); in pkt_seq_show()
472 seq_printf(m, "\treference count:\t%d\n", pd->refcnt); in pkt_seq_show()
473 seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags); in pkt_seq_show()
474 seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed); in pkt_seq_show()
475 seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed); in pkt_seq_show()
476 seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset); in pkt_seq_show()
477 seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset); in pkt_seq_show()
480 seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size); in pkt_seq_show()
481 seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios)); in pkt_seq_show()
482 seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", pd->current_sector); in pkt_seq_show()
484 pkt_count_states(pd, states); in pkt_seq_show()
489 pd->write_congestion_off, in pkt_seq_show()
490 pd->write_congestion_on); in pkt_seq_show()
495 static void pkt_debugfs_dev_new(struct pktcdvd_device *pd) in pkt_debugfs_dev_new() argument
499 pd->dfs_d_root = debugfs_create_dir(pd->disk->disk_name, pkt_debugfs_root); in pkt_debugfs_dev_new()
500 if (!pd->dfs_d_root) in pkt_debugfs_dev_new()
503 pd->dfs_f_info = debugfs_create_file("info", 0444, pd->dfs_d_root, in pkt_debugfs_dev_new()
504 pd, &pkt_seq_fops); in pkt_debugfs_dev_new()
507 static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd) in pkt_debugfs_dev_remove() argument
511 debugfs_remove(pd->dfs_f_info); in pkt_debugfs_dev_remove()
512 debugfs_remove(pd->dfs_d_root); in pkt_debugfs_dev_remove()
513 pd->dfs_f_info = NULL; in pkt_debugfs_dev_remove()
514 pd->dfs_d_root = NULL; in pkt_debugfs_dev_remove()
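pkt_debugfs_dev_new() exposes pkt_seq_show() as a read-only per-device "info" file, so the same report is readable at runtime, presumably under /sys/kernel/debug/pktcdvd/<disk_name>/info once debugfs is mounted (the "pktcdvd" directory name assumes pkt_debugfs_root is created from the driver name).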
531 static void pkt_bio_finished(struct pktcdvd_device *pd) in pkt_bio_finished() argument
533 struct device *ddev = disk_to_dev(pd->disk); in pkt_bio_finished()
535 BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0); in pkt_bio_finished()
536 if (atomic_dec_and_test(&pd->cdrw.pending_bios)) { in pkt_bio_finished()
538 atomic_set(&pd->iosched.attention, 1); in pkt_bio_finished()
539 wake_up(&pd->wqueue); in pkt_bio_finished()
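pkt_bio_finished() is a last-one-out wakeup: only the caller that drops cdrw.pending_bios to zero raises the attention flag and wakes the worker thread. The matching atomic_inc() is visible in pkt_iosched_process_queue() at line 932 below. A runnable userspace analogue of the dec-and-test semantics, using C11 atomics:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_int pending_bios;

	/* Mirrors atomic_dec_and_test(): true only for the caller that
	 * brings the counter to zero. */
	static bool dec_and_test(atomic_int *v)
	{
		return atomic_fetch_sub(v, 1) == 1;
	}

	int main(void)
	{
		atomic_store(&pending_bios, 3);
		for (int i = 0; i < 3; i++) {
			if (dec_and_test(&pending_bios))
				printf("completion %d: last bio, wake worker\n", i);
			else
				printf("completion %d: still pending\n", i);
		}
		return 0;
	}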
606 static void pkt_shrink_pktlist(struct pktcdvd_device *pd) in pkt_shrink_pktlist() argument
610 BUG_ON(!list_empty(&pd->cdrw.pkt_active_list)); in pkt_shrink_pktlist()
612 list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) { in pkt_shrink_pktlist()
615 INIT_LIST_HEAD(&pd->cdrw.pkt_free_list); in pkt_shrink_pktlist()
618 static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets) in pkt_grow_pktlist() argument
622 BUG_ON(!list_empty(&pd->cdrw.pkt_free_list)); in pkt_grow_pktlist()
625 pkt = pkt_alloc_packet_data(pd->settings.size >> 2); in pkt_grow_pktlist()
627 pkt_shrink_pktlist(pd); in pkt_grow_pktlist()
631 pkt->pd = pd; in pkt_grow_pktlist()
632 list_add(&pkt->list, &pd->cdrw.pkt_free_list); in pkt_grow_pktlist()
646 static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node) in pkt_rbtree_erase() argument
648 rb_erase(&node->rb_node, &pd->bio_queue); in pkt_rbtree_erase()
649 mempool_free(node, &pd->rb_pool); in pkt_rbtree_erase()
650 pd->bio_queue_size--; in pkt_rbtree_erase()
651 BUG_ON(pd->bio_queue_size < 0); in pkt_rbtree_erase()
655 * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
657 static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s) in pkt_rbtree_find() argument
659 struct rb_node *n = pd->bio_queue.rb_node; in pkt_rbtree_find()
664 BUG_ON(pd->bio_queue_size > 0); in pkt_rbtree_find()
689 * Insert a node into the pd->bio_queue rb tree.
691 static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node) in pkt_rbtree_insert() argument
693 struct rb_node **p = &pd->bio_queue.rb_node; in pkt_rbtree_insert()
707 rb_insert_color(&node->rb_node, &pd->bio_queue); in pkt_rbtree_insert()
708 pd->bio_queue_size++; in pkt_rbtree_insert()
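The listing omits the descent loop between lines 693 and 707; based on the standard kernel rb-tree idiom, pkt_rbtree_insert() very likely reads as follows (the pkt_rb_node layout is inferred from the surrounding fragments):

	static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
	{
		struct rb_node **p = &pd->bio_queue.rb_node;
		struct rb_node *parent = NULL;
		sector_t s = node->bio->bi_iter.bi_sector;
		struct pkt_rb_node *tmp;

		/* walk down to the leaf slot that keeps the tree sorted
		 * by starting sector */
		while (*p) {
			parent = *p;
			tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
			if (s < tmp->bio->bi_iter.bi_sector)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
		rb_link_node(&node->rb_node, parent, p);
		rb_insert_color(&node->rb_node, &pd->bio_queue);
		pd->bio_queue_size++;
	}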
715 static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc) in pkt_generic_packet() argument
717 struct request_queue *q = bdev_get_queue(pd->bdev); in pkt_generic_packet()
765 static void pkt_dump_sense(struct pktcdvd_device *pd, in pkt_dump_sense() argument
768 struct device *ddev = disk_to_dev(pd->disk); in pkt_dump_sense()
783 static int pkt_flush_cache(struct pktcdvd_device *pd) in pkt_flush_cache() argument
798 return pkt_generic_packet(pd, &cgc); in pkt_flush_cache()
804 static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd, in pkt_set_speed() argument
817 ret = pkt_generic_packet(pd, &cgc); in pkt_set_speed()
819 pkt_dump_sense(pd, &cgc); in pkt_set_speed()
828 static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio) in pkt_queue_bio() argument
830 spin_lock(&pd->iosched.lock); in pkt_queue_bio()
832 bio_list_add(&pd->iosched.read_queue, bio); in pkt_queue_bio()
834 bio_list_add(&pd->iosched.write_queue, bio); in pkt_queue_bio()
835 spin_unlock(&pd->iosched.lock); in pkt_queue_bio()
837 atomic_set(&pd->iosched.attention, 1); in pkt_queue_bio()
838 wake_up(&pd->wqueue); in pkt_queue_bio()
857 static void pkt_iosched_process_queue(struct pktcdvd_device *pd) in pkt_iosched_process_queue() argument
859 struct device *ddev = disk_to_dev(pd->disk); in pkt_iosched_process_queue()
861 if (atomic_read(&pd->iosched.attention) == 0) in pkt_iosched_process_queue()
863 atomic_set(&pd->iosched.attention, 0); in pkt_iosched_process_queue()
869 spin_lock(&pd->iosched.lock); in pkt_iosched_process_queue()
870 reads_queued = !bio_list_empty(&pd->iosched.read_queue); in pkt_iosched_process_queue()
871 writes_queued = !bio_list_empty(&pd->iosched.write_queue); in pkt_iosched_process_queue()
872 spin_unlock(&pd->iosched.lock); in pkt_iosched_process_queue()
877 if (pd->iosched.writing) { in pkt_iosched_process_queue()
879 spin_lock(&pd->iosched.lock); in pkt_iosched_process_queue()
880 bio = bio_list_peek(&pd->iosched.write_queue); in pkt_iosched_process_queue()
881 spin_unlock(&pd->iosched.lock); in pkt_iosched_process_queue()
883 pd->iosched.last_write)) in pkt_iosched_process_queue()
886 if (atomic_read(&pd->cdrw.pending_bios) > 0) { in pkt_iosched_process_queue()
890 pkt_flush_cache(pd); in pkt_iosched_process_queue()
891 pd->iosched.writing = 0; in pkt_iosched_process_queue()
895 if (atomic_read(&pd->cdrw.pending_bios) > 0) { in pkt_iosched_process_queue()
899 pd->iosched.writing = 1; in pkt_iosched_process_queue()
903 spin_lock(&pd->iosched.lock); in pkt_iosched_process_queue()
904 if (pd->iosched.writing) in pkt_iosched_process_queue()
905 bio = bio_list_pop(&pd->iosched.write_queue); in pkt_iosched_process_queue()
907 bio = bio_list_pop(&pd->iosched.read_queue); in pkt_iosched_process_queue()
908 spin_unlock(&pd->iosched.lock); in pkt_iosched_process_queue()
914 pd->iosched.successive_reads += in pkt_iosched_process_queue()
917 pd->iosched.successive_reads = 0; in pkt_iosched_process_queue()
918 pd->iosched.last_write = bio_end_sector(bio); in pkt_iosched_process_queue()
920 if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) { in pkt_iosched_process_queue()
921 if (pd->read_speed == pd->write_speed) { in pkt_iosched_process_queue()
922 pd->read_speed = MAX_SPEED; in pkt_iosched_process_queue()
923 pkt_set_speed(pd, pd->write_speed, pd->read_speed); in pkt_iosched_process_queue()
926 if (pd->read_speed != pd->write_speed) { in pkt_iosched_process_queue()
927 pd->read_speed = pd->write_speed; in pkt_iosched_process_queue()
928 pkt_set_speed(pd, pd->write_speed, pd->read_speed); in pkt_iosched_process_queue()
932 atomic_inc(&pd->cdrw.pending_bios); in pkt_iosched_process_queue()
941 static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q) in pkt_set_segment_merging() argument
943 struct device *ddev = disk_to_dev(pd->disk); in pkt_set_segment_merging()
945 if ((pd->settings.size << 9) / CD_FRAMESIZE <= queue_max_segments(q)) { in pkt_set_segment_merging()
949 clear_bit(PACKET_MERGE_SEGS, &pd->flags); in pkt_set_segment_merging()
953 if ((pd->settings.size << 9) / PAGE_SIZE <= queue_max_segments(q)) { in pkt_set_segment_merging()
958 set_bit(PACKET_MERGE_SEGS, &pd->flags); in pkt_set_segment_merging()
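Both tests above convert the packet size from 512-byte sectors into a segment count. Since CD_FRAMESIZE is 2048 bytes, one frame is four sectors, which is why pd->settings.size >> 2 appears as a frame count elsewhere in this listing (e.g. lines 625 above and 1177 below). A runnable check of that identity (the 64-sector packet size is a made-up example):

	#include <assert.h>
	#include <stdio.h>

	#define CD_FRAMESIZE 2048	/* bytes per CD frame */

	int main(void)
	{
		/* hypothetical fixed packet: 64 sectors of 512 bytes = 32 KiB */
		unsigned int size = 64;
		unsigned int frames = (size << 9) / CD_FRAMESIZE;

		assert(frames == size >> 2);	/* one 2 KiB frame == four sectors */
		printf("%u sectors -> %u frames of %u bytes\n",
		       size, frames, CD_FRAMESIZE);
		return 0;
	}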
969 struct pktcdvd_device *pd = pkt->pd; in pkt_end_io_read() local
970 BUG_ON(!pd); in pkt_end_io_read()
972 dev_dbg(disk_to_dev(pd->disk), "bio=%p sec0=%llx sec=%llx err=%d\n", in pkt_end_io_read()
980 wake_up(&pd->wqueue); in pkt_end_io_read()
982 pkt_bio_finished(pd); in pkt_end_io_read()
988 struct pktcdvd_device *pd = pkt->pd; in pkt_end_io_packet_write() local
989 BUG_ON(!pd); in pkt_end_io_packet_write()
991 dev_dbg(disk_to_dev(pd->disk), "id=%d, err=%d\n", pkt->id, bio->bi_status); in pkt_end_io_packet_write()
993 pd->stats.pkt_ended++; in pkt_end_io_packet_write()
996 pkt_bio_finished(pd); in pkt_end_io_packet_write()
999 wake_up(&pd->wqueue); in pkt_end_io_packet_write()
1005 static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) in pkt_gather_data() argument
1007 struct device *ddev = disk_to_dev(pd->disk); in pkt_gather_data()
1027 pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9); in pkt_gather_data()
1050 bio_init(bio, pd->bdev, bio->bi_inline_vecs, 1, REQ_OP_READ); in pkt_gather_data()
1063 pkt_queue_bio(pd, bio); in pkt_gather_data()
1069 pd->stats.pkt_started++; in pkt_gather_data()
1070 pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9); in pkt_gather_data()
1077 static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone) in pkt_get_packet_data() argument
1081 list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) { in pkt_get_packet_data()
1082 if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) { in pkt_get_packet_data()
1093 static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt) in pkt_put_packet_data() argument
1096 list_add(&pkt->list, &pd->cdrw.pkt_free_list); in pkt_put_packet_data()
1098 list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list); in pkt_put_packet_data()
1120 static int pkt_handle_queue(struct pktcdvd_device *pd) in pkt_handle_queue() argument
1122 struct device *ddev = disk_to_dev(pd->disk); in pkt_handle_queue()
1129 atomic_set(&pd->scan_queue, 0); in pkt_handle_queue()
1131 if (list_empty(&pd->cdrw.pkt_free_list)) { in pkt_handle_queue()
1139 spin_lock(&pd->lock); in pkt_handle_queue()
1140 first_node = pkt_rbtree_find(pd, pd->current_sector); in pkt_handle_queue()
1142 n = rb_first(&pd->bio_queue); in pkt_handle_queue()
1149 zone = get_zone(bio->bi_iter.bi_sector, pd); in pkt_handle_queue()
1150 list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) { in pkt_handle_queue()
1160 n = rb_first(&pd->bio_queue); in pkt_handle_queue()
1167 spin_unlock(&pd->lock); in pkt_handle_queue()
1173 pkt = pkt_get_packet_data(pd, zone); in pkt_handle_queue()
1175 pd->current_sector = zone + pd->settings.size; in pkt_handle_queue()
1177 BUG_ON(pkt->frames != pd->settings.size >> 2); in pkt_handle_queue()
1184 spin_lock(&pd->lock); in pkt_handle_queue()
1186 while ((node = pkt_rbtree_find(pd, zone)) != NULL) { in pkt_handle_queue()
1187 sector_t tmp = get_zone(node->bio->bi_iter.bi_sector, pd); in pkt_handle_queue()
1193 pkt_rbtree_erase(pd, node); in pkt_handle_queue()
1202 if (pd->congested && in pkt_handle_queue()
1203 pd->bio_queue_size <= pd->write_congestion_off) { in pkt_handle_queue()
1204 pd->congested = false; in pkt_handle_queue()
1205 wake_up_var(&pd->congested); in pkt_handle_queue()
1207 spin_unlock(&pd->lock); in pkt_handle_queue()
1213 spin_lock(&pd->cdrw.active_list_lock); in pkt_handle_queue()
1214 list_add(&pkt->list, &pd->cdrw.pkt_active_list); in pkt_handle_queue()
1215 spin_unlock(&pd->cdrw.active_list_lock); in pkt_handle_queue()
1260 static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) in pkt_start_write() argument
1262 struct device *ddev = disk_to_dev(pd->disk); in pkt_start_write()
1265 bio_init(pkt->w_bio, pd->bdev, pkt->w_bio->bi_inline_vecs, pkt->frames, in pkt_start_write()
1292 if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) in pkt_start_write()
1299 pkt_queue_bio(pd, pkt->w_bio); in pkt_start_write()
1316 static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt) in pkt_run_state_machine() argument
1318 struct device *ddev = disk_to_dev(pd->disk); in pkt_run_state_machine()
1329 pkt_gather_data(pd, pkt); in pkt_run_state_machine()
1340 pkt_start_write(pd, pkt); in pkt_run_state_machine()
1371 static void pkt_handle_packets(struct pktcdvd_device *pd) in pkt_handle_packets() argument
1373 struct device *ddev = disk_to_dev(pd->disk); in pkt_handle_packets()
1379 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { in pkt_handle_packets()
1382 pkt_run_state_machine(pd, pkt); in pkt_handle_packets()
1389 spin_lock(&pd->cdrw.active_list_lock); in pkt_handle_packets()
1390 list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) { in pkt_handle_packets()
1393 pkt_put_packet_data(pd, pkt); in pkt_handle_packets()
1395 atomic_set(&pd->scan_queue, 1); in pkt_handle_packets()
1398 spin_unlock(&pd->cdrw.active_list_lock); in pkt_handle_packets()
1407 struct pktcdvd_device *pd = foobar; in kcdrwd() local
1408 struct device *ddev = disk_to_dev(pd->disk); in kcdrwd()
1422 add_wait_queue(&pd->wqueue, &wait); in kcdrwd()
1427 if (atomic_read(&pd->scan_queue) > 0) in kcdrwd()
1431 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { in kcdrwd()
1437 if (atomic_read(&pd->iosched.attention) != 0) in kcdrwd()
1441 pkt_count_states(pd, states); in kcdrwd()
1446 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { in kcdrwd()
1458 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { in kcdrwd()
1473 remove_wait_queue(&pd->wqueue, &wait); in kcdrwd()
1482 while (pkt_handle_queue(pd)) in kcdrwd()
1488 pkt_handle_packets(pd); in kcdrwd()
1493 pkt_iosched_process_queue(pd); in kcdrwd()
1499 static void pkt_print_settings(struct pktcdvd_device *pd) in pkt_print_settings() argument
1501 dev_info(disk_to_dev(pd->disk), "%s packets, %u blocks, Mode-%c disc\n", in pkt_print_settings()
1502 pd->settings.fp ? "Fixed" : "Variable", in pkt_print_settings()
1503 pd->settings.size >> 2, in pkt_print_settings()
1504 pd->settings.block_mode == 8 ? '1' : '2'); in pkt_print_settings()
1507 static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int… in pkt_mode_sense() argument
1515 return pkt_generic_packet(pd, cgc); in pkt_mode_sense()
1518 static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc) in pkt_mode_select() argument
1526 return pkt_generic_packet(pd, cgc); in pkt_mode_select()
1529 static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di) in pkt_get_disc_info() argument
1540 ret = pkt_generic_packet(pd, &cgc); in pkt_get_disc_info()
1554 return pkt_generic_packet(pd, &cgc); in pkt_get_disc_info()
1557 static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information … in pkt_get_track_info() argument
1569 ret = pkt_generic_packet(pd, &cgc); in pkt_get_track_info()
1580 return pkt_generic_packet(pd, &cgc); in pkt_get_track_info()
1583 static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd, in pkt_get_last_written() argument
1591 ret = pkt_get_disc_info(pd, &di); in pkt_get_last_written()
1596 ret = pkt_get_track_info(pd, last_track, 1, &ti); in pkt_get_last_written()
1603 ret = pkt_get_track_info(pd, last_track, 1, &ti); in pkt_get_last_written()
1622 * write mode select packet based on pd->settings
1624 static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd) in pkt_set_write_settings() argument
1626 struct device *ddev = disk_to_dev(pd->disk); in pkt_set_write_settings()
1634 if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12)) in pkt_set_write_settings()
1640 ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0); in pkt_set_write_settings()
1642 pkt_dump_sense(pd, &cgc); in pkt_set_write_settings()
1647 pd->mode_offset = get_unaligned_be16(&buffer[6]); in pkt_set_write_settings()
1656 ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0); in pkt_set_write_settings()
1658 pkt_dump_sense(pd, &cgc); in pkt_set_write_settings()
1665 wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset]; in pkt_set_write_settings()
1667 wp->fp = pd->settings.fp; in pkt_set_write_settings()
1668 wp->track_mode = pd->settings.track_mode; in pkt_set_write_settings()
1669 wp->write_type = pd->settings.write_type; in pkt_set_write_settings()
1670 wp->data_block_type = pd->settings.block_mode; in pkt_set_write_settings()
1696 wp->packet_size = cpu_to_be32(pd->settings.size >> 2); in pkt_set_write_settings()
1699 ret = pkt_mode_select(pd, &cgc); in pkt_set_write_settings()
1701 pkt_dump_sense(pd, &cgc); in pkt_set_write_settings()
1705 pkt_print_settings(pd); in pkt_set_write_settings()
1712 static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti) in pkt_writable_track() argument
1714 struct device *ddev = disk_to_dev(pd->disk); in pkt_writable_track()
1716 switch (pd->mmc3_profile) { in pkt_writable_track()
1747 static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di) in pkt_writable_disc() argument
1749 struct device *ddev = disk_to_dev(pd->disk); in pkt_writable_disc()
1751 switch (pd->mmc3_profile) { in pkt_writable_disc()
1760 dev_dbg(ddev, "Wrong disc profile (%x)\n", pd->mmc3_profile); in pkt_writable_disc()
1791 static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd) in pkt_probe_settings() argument
1793 struct device *ddev = disk_to_dev(pd->disk); in pkt_probe_settings()
1803 ret = pkt_generic_packet(pd, &cgc); in pkt_probe_settings()
1804 pd->mmc3_profile = ret ? 0xffff : get_unaligned_be16(&buf[6]); in pkt_probe_settings()
1809 ret = pkt_get_disc_info(pd, &di); in pkt_probe_settings()
1815 if (!pkt_writable_disc(pd, &di)) in pkt_probe_settings()
1818 pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR; in pkt_probe_settings()
1821 ret = pkt_get_track_info(pd, track, 1, &ti); in pkt_probe_settings()
1827 if (!pkt_writable_track(pd, &ti)) { in pkt_probe_settings()
1836 pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2; in pkt_probe_settings()
1837 if (pd->settings.size == 0) { in pkt_probe_settings()
1841 if (pd->settings.size > PACKET_MAX_SECTORS) { in pkt_probe_settings()
1845 pd->settings.fp = ti.fp; in pkt_probe_settings()
1846 pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1); in pkt_probe_settings()
1849 pd->nwa = be32_to_cpu(ti.next_writable); in pkt_probe_settings()
1850 set_bit(PACKET_NWA_VALID, &pd->flags); in pkt_probe_settings()
1859 pd->lra = be32_to_cpu(ti.last_rec_address); in pkt_probe_settings()
1860 set_bit(PACKET_LRA_VALID, &pd->flags); in pkt_probe_settings()
1862 pd->lra = 0xffffffff; in pkt_probe_settings()
1863 set_bit(PACKET_LRA_VALID, &pd->flags); in pkt_probe_settings()
1869 pd->settings.link_loss = 7; in pkt_probe_settings()
1870 pd->settings.write_type = 0; /* packet */ in pkt_probe_settings()
1871 pd->settings.track_mode = ti.track_mode; in pkt_probe_settings()
1878 pd->settings.block_mode = PACKET_BLOCK_MODE1; in pkt_probe_settings()
1881 pd->settings.block_mode = PACKET_BLOCK_MODE2; in pkt_probe_settings()
1893 static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd) in pkt_write_caching() argument
1895 struct device *ddev = disk_to_dev(pd->disk); in pkt_write_caching()
1904 cgc.buflen = pd->mode_offset + 12; in pkt_write_caching()
1911 ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0); in pkt_write_caching()
1920 buf[pd->mode_offset + 10] |= (set << 2); in pkt_write_caching()
1923 ret = pkt_mode_select(pd, &cgc); in pkt_write_caching()
1926 pkt_dump_sense(pd, &cgc); in pkt_write_caching()
1932 static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag) in pkt_lock_door() argument
1939 return pkt_generic_packet(pd, &cgc); in pkt_lock_door()
1945 static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd, in pkt_get_max_speed() argument
1954 cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset]; in pkt_get_max_speed()
1958 ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0); in pkt_get_max_speed()
1960 cgc.buflen = pd->mode_offset + cap_buf[1] + 2 + in pkt_get_max_speed()
1962 ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0); in pkt_get_max_speed()
1964 pkt_dump_sense(pd, &cgc); in pkt_get_max_speed()
2006 static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd, in pkt_media_speed() argument
2009 struct device *ddev = disk_to_dev(pd->disk); in pkt_media_speed()
2022 ret = pkt_generic_packet(pd, &cgc); in pkt_media_speed()
2024 pkt_dump_sense(pd, &cgc); in pkt_media_speed()
2037 ret = pkt_generic_packet(pd, &cgc); in pkt_media_speed()
2039 pkt_dump_sense(pd, &cgc); in pkt_media_speed()
2080 static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd) in pkt_perform_opc() argument
2082 struct device *ddev = disk_to_dev(pd->disk); in pkt_perform_opc()
2094 ret = pkt_generic_packet(pd, &cgc); in pkt_perform_opc()
2096 pkt_dump_sense(pd, &cgc); in pkt_perform_opc()
2100 static int pkt_open_write(struct pktcdvd_device *pd) in pkt_open_write() argument
2102 struct device *ddev = disk_to_dev(pd->disk); in pkt_open_write()
2106 ret = pkt_probe_settings(pd); in pkt_open_write()
2112 ret = pkt_set_write_settings(pd); in pkt_open_write()
2118 pkt_write_caching(pd); in pkt_open_write()
2120 ret = pkt_get_max_speed(pd, &write_speed); in pkt_open_write()
2123 switch (pd->mmc3_profile) { in pkt_open_write()
2130 ret = pkt_media_speed(pd, &media_write_speed); in pkt_open_write()
2139 ret = pkt_set_speed(pd, write_speed, read_speed); in pkt_open_write()
2144 pd->write_speed = write_speed; in pkt_open_write()
2145 pd->read_speed = read_speed; in pkt_open_write()
2147 ret = pkt_perform_opc(pd); in pkt_open_write()
2157 static int pkt_open_dev(struct pktcdvd_device *pd, bool write) in pkt_open_dev() argument
2159 struct device *ddev = disk_to_dev(pd->disk); in pkt_open_dev()
2170 bdev = blkdev_get_by_dev(pd->bdev->bd_dev, BLK_OPEN_READ, pd, NULL); in pkt_open_dev()
2176 ret = pkt_get_last_written(pd, &lba); in pkt_open_dev()
2182 set_capacity(pd->disk, lba << 2); in pkt_open_dev()
2183 set_capacity_and_notify(pd->bdev->bd_disk, lba << 2); in pkt_open_dev()
2185 q = bdev_get_queue(pd->bdev); in pkt_open_dev()
2187 ret = pkt_open_write(pd); in pkt_open_dev()
2194 blk_queue_max_hw_sectors(q, pd->settings.size); in pkt_open_dev()
2195 set_bit(PACKET_WRITABLE, &pd->flags); in pkt_open_dev()
2197 pkt_set_speed(pd, MAX_SPEED, MAX_SPEED); in pkt_open_dev()
2198 clear_bit(PACKET_WRITABLE, &pd->flags); in pkt_open_dev()
2201 ret = pkt_set_segment_merging(pd, q); in pkt_open_dev()
2206 if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) { in pkt_open_dev()
2217 blkdev_put(bdev, pd); in pkt_open_dev()
2226 static void pkt_release_dev(struct pktcdvd_device *pd, int flush) in pkt_release_dev() argument
2228 struct device *ddev = disk_to_dev(pd->disk); in pkt_release_dev()
2230 if (flush && pkt_flush_cache(pd)) in pkt_release_dev()
2233 pkt_lock_door(pd, 0); in pkt_release_dev()
2235 pkt_set_speed(pd, MAX_SPEED, MAX_SPEED); in pkt_release_dev()
2236 blkdev_put(pd->bdev, pd); in pkt_release_dev()
2238 pkt_shrink_pktlist(pd); in pkt_release_dev()
2252 struct pktcdvd_device *pd = NULL; in pkt_open() local
2257 pd = pkt_find_dev_from_minor(disk->first_minor); in pkt_open()
2258 if (!pd) { in pkt_open()
2262 BUG_ON(pd->refcnt < 0); in pkt_open()
2264 pd->refcnt++; in pkt_open()
2265 if (pd->refcnt > 1) { in pkt_open()
2267 !test_bit(PACKET_WRITABLE, &pd->flags)) { in pkt_open()
2272 ret = pkt_open_dev(pd, mode & BLK_OPEN_WRITE); in pkt_open()
2286 pd->refcnt--; in pkt_open()
2295 struct pktcdvd_device *pd = disk->private_data; in pkt_release() local
2299 pd->refcnt--; in pkt_release()
2300 BUG_ON(pd->refcnt < 0); in pkt_release()
2301 if (pd->refcnt == 0) { in pkt_release()
2302 int flush = test_bit(PACKET_WRITABLE, &pd->flags); in pkt_release()
2303 pkt_release_dev(pd, flush); in pkt_release()
2313 struct pktcdvd_device *pd = psd->pd; in pkt_end_io_read_cloned() local
2319 pkt_bio_finished(pd); in pkt_end_io_read_cloned()
2322 static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio) in pkt_make_request_read() argument
2325 bio_alloc_clone(pd->bdev, bio, GFP_NOIO, &pkt_bio_set); in pkt_make_request_read()
2328 psd->pd = pd; in pkt_make_request_read()
2332 pd->stats.secs_r += bio_sectors(bio); in pkt_make_request_read()
2333 pkt_queue_bio(pd, cloned_bio); in pkt_make_request_read()
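Reading the gaps around lines 2322-2333, the read path likely clones the bio, stashes the original bio plus the pd pointer in a packet_stacked_data, and redirects completion to pkt_end_io_read_cloned(); a hedged reconstruction (the psd_pool mempool name and the bi_private/bi_end_io wiring are inferred from the psd usage above):

	static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
	{
		struct packet_stacked_data *psd = mempool_alloc(&psd_pool, GFP_NOIO);
		struct bio *cloned_bio =
			bio_alloc_clone(pd->bdev, bio, GFP_NOIO, &pkt_bio_set);

		psd->pd = pd;
		psd->bio = bio;
		cloned_bio->bi_private = psd;
		cloned_bio->bi_end_io = pkt_end_io_read_cloned;
		pd->stats.secs_r += bio_sectors(bio);
		pkt_queue_bio(pd, cloned_bio);
	}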
2338 struct pktcdvd_device *pd = q->queuedata; in pkt_make_request_write() local
2344 zone = get_zone(bio->bi_iter.bi_sector, pd); in pkt_make_request_write()
2350 spin_lock(&pd->cdrw.active_list_lock); in pkt_make_request_write()
2352 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { in pkt_make_request_write()
2363 wake_up(&pd->wqueue); in pkt_make_request_write()
2366 spin_unlock(&pd->cdrw.active_list_lock); in pkt_make_request_write()
2374 spin_unlock(&pd->cdrw.active_list_lock); in pkt_make_request_write()
2381 spin_lock(&pd->lock); in pkt_make_request_write()
2382 if (pd->write_congestion_on > 0 in pkt_make_request_write()
2383 && pd->bio_queue_size >= pd->write_congestion_on) { in pkt_make_request_write()
2386 init_wait_var_entry(&wqe, &pd->congested, 0); in pkt_make_request_write()
2388 prepare_to_wait_event(__var_waitqueue(&pd->congested), in pkt_make_request_write()
2391 if (pd->bio_queue_size <= pd->write_congestion_off) in pkt_make_request_write()
2393 pd->congested = true; in pkt_make_request_write()
2394 spin_unlock(&pd->lock); in pkt_make_request_write()
2396 spin_lock(&pd->lock); in pkt_make_request_write()
2399 spin_unlock(&pd->lock); in pkt_make_request_write()
2404 node = mempool_alloc(&pd->rb_pool, GFP_NOIO); in pkt_make_request_write()
2406 spin_lock(&pd->lock); in pkt_make_request_write()
2407 BUG_ON(pd->bio_queue_size < 0); in pkt_make_request_write()
2408 was_empty = (pd->bio_queue_size == 0); in pkt_make_request_write()
2409 pkt_rbtree_insert(pd, node); in pkt_make_request_write()
2410 spin_unlock(&pd->lock); in pkt_make_request_write()
2415 atomic_set(&pd->scan_queue, 1); in pkt_make_request_write()
2418 wake_up(&pd->wqueue); in pkt_make_request_write()
2419 } else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) { in pkt_make_request_write()
2424 wake_up(&pd->wqueue); in pkt_make_request_write()
2430 struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->queue->queuedata; in pkt_submit_bio() local
2431 struct device *ddev = disk_to_dev(pd->disk); in pkt_submit_bio()
2445 pkt_make_request_read(pd, bio); in pkt_submit_bio()
2449 if (!test_bit(PACKET_WRITABLE, &pd->flags)) { in pkt_submit_bio()
2460 sector_t zone = get_zone(bio->bi_iter.bi_sector, pd); in pkt_submit_bio()
2461 sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd); in pkt_submit_bio()
2464 BUG_ON(last_zone != zone + pd->settings.size); in pkt_submit_bio()
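The BUG_ON() at line 2464 encodes the invariant that a write bio spans at most two adjacent zones, so a single split at the zone boundary always suffices. The split itself sits on the elided lines; a sketch of what it presumably does with bio_split()/bio_chain():

	struct bio *split;

	if (last_zone != zone) {
		BUG_ON(last_zone != zone + pd->settings.size);
		/* cut at the zone boundary, chain the remainder back */
		split = bio_split(bio, last_zone - bio->bi_iter.bi_sector,
				  GFP_NOIO, &pkt_bio_set);
		bio_chain(split, bio);
	} else {
		split = bio;
	}
	pkt_make_request_write(bio->bi_bdev->bd_disk->queue, split);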
2482 static void pkt_init_queue(struct pktcdvd_device *pd) in pkt_init_queue() argument
2484 struct request_queue *q = pd->disk->queue; in pkt_init_queue()
2488 q->queuedata = pd; in pkt_init_queue()
2491 static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) in pkt_new_dev() argument
2493 struct device *ddev = disk_to_dev(pd->disk); in pkt_new_dev()
2498 if (pd->pkt_dev == dev) { in pkt_new_dev()
2530 pd->bdev = bdev; in pkt_new_dev()
2533 pkt_init_queue(pd); in pkt_new_dev()
2535 atomic_set(&pd->cdrw.pending_bios, 0); in pkt_new_dev()
2536 pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->disk->disk_name); in pkt_new_dev()
2537 if (IS_ERR(pd->cdrw.thread)) { in pkt_new_dev()
2542 proc_create_single_data(pd->disk->disk_name, 0, pkt_proc, pkt_seq_show, pd); in pkt_new_dev()
2556 struct pktcdvd_device *pd = bdev->bd_disk->private_data; in pkt_ioctl() local
2557 struct device *ddev = disk_to_dev(pd->disk); in pkt_ioctl()
2569 if (pd->refcnt == 1) in pkt_ioctl()
2570 pkt_lock_door(pd, 0); in pkt_ioctl()
2597 struct pktcdvd_device *pd = disk->private_data; in pkt_check_events() local
2600 if (!pd) in pkt_check_events()
2602 if (!pd->bdev) in pkt_check_events()
2604 attached_disk = pd->bdev->bd_disk; in pkt_check_events()
2633 struct pktcdvd_device *pd; in pkt_setup_dev() local
2647 pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL); in pkt_setup_dev()
2648 if (!pd) in pkt_setup_dev()
2651 ret = mempool_init_kmalloc_pool(&pd->rb_pool, PKT_RB_POOL_SIZE, in pkt_setup_dev()
2656 INIT_LIST_HEAD(&pd->cdrw.pkt_free_list); in pkt_setup_dev()
2657 INIT_LIST_HEAD(&pd->cdrw.pkt_active_list); in pkt_setup_dev()
2658 spin_lock_init(&pd->cdrw.active_list_lock); in pkt_setup_dev()
2660 spin_lock_init(&pd->lock); in pkt_setup_dev()
2661 spin_lock_init(&pd->iosched.lock); in pkt_setup_dev()
2662 bio_list_init(&pd->iosched.read_queue); in pkt_setup_dev()
2663 bio_list_init(&pd->iosched.write_queue); in pkt_setup_dev()
2664 init_waitqueue_head(&pd->wqueue); in pkt_setup_dev()
2665 pd->bio_queue = RB_ROOT; in pkt_setup_dev()
2667 pd->write_congestion_on = write_congestion_on; in pkt_setup_dev()
2668 pd->write_congestion_off = write_congestion_off; in pkt_setup_dev()
2674 pd->disk = disk; in pkt_setup_dev()
2681 disk->private_data = pd; in pkt_setup_dev()
2683 pd->pkt_dev = MKDEV(pktdev_major, idx); in pkt_setup_dev()
2684 ret = pkt_new_dev(pd, dev); in pkt_setup_dev()
2689 disk->events = pd->bdev->bd_disk->events; in pkt_setup_dev()
2695 pkt_sysfs_dev_new(pd); in pkt_setup_dev()
2696 pkt_debugfs_dev_new(pd); in pkt_setup_dev()
2698 pkt_devs[idx] = pd; in pkt_setup_dev()
2700 *pkt_dev = pd->pkt_dev; in pkt_setup_dev()
2708 mempool_exit(&pd->rb_pool); in pkt_setup_dev()
2709 kfree(pd); in pkt_setup_dev()
2721 struct pktcdvd_device *pd; in pkt_remove_dev() local
2729 pd = pkt_devs[idx]; in pkt_remove_dev()
2730 if (pd && (pd->pkt_dev == pkt_dev)) in pkt_remove_dev()
2739 if (pd->refcnt > 0) { in pkt_remove_dev()
2744 ddev = disk_to_dev(pd->disk); in pkt_remove_dev()
2746 if (!IS_ERR(pd->cdrw.thread)) in pkt_remove_dev()
2747 kthread_stop(pd->cdrw.thread); in pkt_remove_dev()
2751 pkt_debugfs_dev_remove(pd); in pkt_remove_dev()
2752 pkt_sysfs_dev_remove(pd); in pkt_remove_dev()
2754 blkdev_put(pd->bdev, NULL); in pkt_remove_dev()
2756 remove_proc_entry(pd->disk->disk_name, pkt_proc); in pkt_remove_dev()
2759 del_gendisk(pd->disk); in pkt_remove_dev()
2760 put_disk(pd->disk); in pkt_remove_dev()
2762 mempool_exit(&pd->rb_pool); in pkt_remove_dev()
2763 kfree(pd); in pkt_remove_dev()
2775 struct pktcdvd_device *pd; in pkt_get_status() local
2779 pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index); in pkt_get_status()
2780 if (pd) { in pkt_get_status()
2781 ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev); in pkt_get_status()
2782 ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev); in pkt_get_status()