blk-sysfs.c: diff from 858a0d7eb5300b5f620d98ab3c4b96c9d5f19131 (old) to dc3b17cc8bf21307c7e076e7c778d5db756f7871 (new).
Changed lines are shown in old/new pairs: "-" marks the old line, "+" the new one; unchanged context appears once with its file line numbers.
1/*
2 * Functions related to sysfs handling
3 */
4#include <linux/kernel.h>
5#include <linux/slab.h>
6#include <linux/module.h>
7#include <linux/bio.h>
8#include <linux/blkdev.h>

--- 75 unchanged lines hidden ---

84 if (err)
85 return err;
86
87 return ret;
88}
89
90static ssize_t queue_ra_show(struct request_queue *q, char *page)
91{
- 92 unsigned long ra_kb = q->backing_dev_info.ra_pages <<
+ 92 unsigned long ra_kb = q->backing_dev_info->ra_pages <<
93 (PAGE_SHIFT - 10);
94
95 return queue_var_show(ra_kb, (page));
96}
97
98static ssize_t
99queue_ra_store(struct request_queue *q, const char *page, size_t count)
100{
101 unsigned long ra_kb;
102 ssize_t ret = queue_var_store(&ra_kb, page, count);
103
104 if (ret < 0)
105 return ret;
106
- 107 q->backing_dev_info.ra_pages = ra_kb >> (PAGE_SHIFT - 10);
+ 107 q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
108
109 return ret;
110}
111
112static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
113{
114 int max_sectors_kb = queue_max_sectors(q) >> 1;
115

--- 115 unchanged lines hidden ---
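Note (not part of the diff): judging from the "." -> "->" accessor changes above, and the bdi_exit() change further down, struct request_queue apparently stops embedding its backing_dev_info and instead holds a pointer to a separately allocated one. A minimal user-space sketch of that difference follows; the struct names here are invented for the illustration and are not kernel code.

#include <stdio.h>
#include <stdlib.h>

struct backing_dev_info { unsigned long ra_pages; };

/* before: the bdi is embedded in the queue and accessed with "." */
struct queue_old { struct backing_dev_info backing_dev_info; };

/* after: the queue only points at a bdi allocated on its own,
 * accessed with "->" */
struct queue_new { struct backing_dev_info *backing_dev_info; };

int main(void)
{
	struct queue_old oldq = { .backing_dev_info = { .ra_pages = 32 } };
	struct queue_new newq;

	newq.backing_dev_info = malloc(sizeof(*newq.backing_dev_info));
	if (!newq.backing_dev_info)
		return 1;
	newq.backing_dev_info->ra_pages = 32;

	printf("%lu %lu\n", oldq.backing_dev_info.ra_pages,
	       newq.backing_dev_info->ra_pages);

	free(newq.backing_dev_info);
	return 0;
}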

231 max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
232 q->limits.max_dev_sectors >> 1);
233
234 if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
235 return -EINVAL;
236
237 spin_lock_irq(q->queue_lock);
238 q->limits.max_sectors = max_sectors_kb << 1;
- 239 q->backing_dev_info.io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
+ 239 q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
240 spin_unlock_irq(q->queue_lock);
241
242 return ret;
243}
244
245static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
246{
247 int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

--- 546 unchanged lines hidden ---
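Note (not part of the diff): the shifts used in queue_ra_show()/queue_ra_store() and queue_max_sectors_store() are plain unit conversions: a sector is 512 bytes, so ">> 1" turns sectors into KiB, and ">> (PAGE_SHIFT - 10)" turns KiB into pages. A small stand-alone sketch, assuming 4 KiB pages (PAGE_SHIFT == 12, which is architecture-dependent):

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB pages */

int main(void)
{
	unsigned long max_sectors    = 2560;			/* 512-byte sectors */
	unsigned long max_sectors_kb = max_sectors >> 1;	/* 2 sectors per KiB -> 1280 */
	unsigned long io_pages       = max_sectors_kb >> (PAGE_SHIFT - 10); /* KiB -> pages: 320 */
	unsigned long ra_kb          = io_pages << (PAGE_SHIFT - 10);	/* pages -> KiB: 1280 */

	printf("%lu KiB = %lu pages = %lu KiB\n", max_sectors_kb, io_pages, ra_kb);
	return 0;
}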

794 * via blk_cleanup_queue().
795 **/
796static void blk_release_queue(struct kobject *kobj)
797{
798 struct request_queue *q =
799 container_of(kobj, struct request_queue, kobj);
800
801 wbt_exit(q);
- 802 bdi_exit(&q->backing_dev_info);
+ 802 bdi_exit(q->backing_dev_info);
803 blkcg_exit_queue(q);
804
805 if (q->elevator) {
806 spin_lock_irq(q->queue_lock);
807 ioc_clear_queue(q);
808 spin_unlock_irq(q->queue_lock);
809 elevator_exit(q->elevator);
810 }
811
812 blk_exit_rl(&q->root_rl);
813
814 if (q->queue_tags)
815 __blk_queue_free_tags(q);
816
- 817 if (!q->mq_ops)
- 818 blk_free_flush_queue(q->fq);
- 819 else
- 820 blk_mq_release(q);
+ 817 if (!q->mq_ops) {
+ 818 if (q->exit_rq_fn)
+ 819 q->exit_rq_fn(q, q->fq->flush_rq);
+ 820 blk_free_flush_queue(q->fq);
+ 821 } else {
+ 822 blk_mq_release(q);
+ 823 }
824
825 blk_trace_shutdown(q);
826
827 if (q->bio_split)
828 bioset_free(q->bio_split);
829
830 ida_simple_remove(&blk_queue_ida, q->id);
831 call_rcu(&q->rcu_head, blk_free_queue_rcu);

--- 104 unchanged lines hidden ---
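Note (not part of the diff): in the new blk_release_queue(), the non-mq branch now invokes q->exit_rq_fn on the flush request before blk_free_flush_queue(), presumably so an owner that attached extra per-request data can release it first. A toy illustration of that optional teardown-callback pattern; all names below are invented for the sketch and are not the kernel API.

#include <stdio.h>
#include <stdlib.h>

struct toy_request {
	void *extra;				/* owner-private data */
};

struct toy_queue {
	struct toy_request *flush_rq;
	void (*exit_rq_fn)(struct toy_queue *q, struct toy_request *rq);
};

static void toy_exit_rq(struct toy_queue *q, struct toy_request *rq)
{
	(void)q;
	free(rq->extra);			/* tear down per-request state */
	rq->extra = NULL;
}

static void toy_release_queue(struct toy_queue *q)
{
	/* let the owner clean up before the request itself is freed */
	if (q->exit_rq_fn)
		q->exit_rq_fn(q, q->flush_rq);
	free(q->flush_rq);
	q->flush_rq = NULL;
}

int main(void)
{
	struct toy_queue q = { 0 };

	q.flush_rq = calloc(1, sizeof(*q.flush_rq));
	if (!q.flush_rq)
		return 1;
	q.flush_rq->extra = malloc(64);
	q.exit_rq_fn = toy_exit_rq;

	toy_release_queue(&q);
	printf("queue released\n");
	return 0;
}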