// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

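/*
 * Helpers shared by most of the attributes below: a queue sysfs attribute
 * is shown and parsed as a single unsigned long. queue_var_store() rejects
 * input that does not parse as a decimal number or that exceeds UINT_MAX.
 */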
static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, page);
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!queue_is_mq(q))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(q, nr);
	if (err)
		return err;

	return ret;
}

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb;

	if (!q->disk)
		return -EINVAL;
	ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
	return queue_var_show(ra_kb, page);
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret;

	if (!q->disk)
		return -EINVAL;
	ret = queue_var_store(&ra_kb, page, count);
	if (ret < 0)
		return ret;
	q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
	return ret;
}

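/*
 * Note on units in the show methods below: sector-count limits are exposed
 * in kilobytes, and a sector is 512 bytes, so sectors convert to KB with a
 * ">> 1" (and back with "<< 1"); byte-valued limits instead shift sector
 * counts left by 9 (SECTOR_SHIFT).
 */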
static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), page);
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(queue_max_discard_segments(q), page);
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, page);
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segment_size(q), page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

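/*
 * discard_max_bytes is the only writable discard limit: the value must be a
 * multiple of discard_granularity and is clamped to the hardware limit
 * exposed through discard_max_hw_bytes.
 */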
static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
						 char *page)
{
	return queue_var_show(queue_zone_write_granularity(q), page);
}

static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
{
	unsigned long long max_sectors = q->limits.max_zone_append_sectors;

	return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
}

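/*
 * max_sectors_kb must lie between the page size and the hardware/device
 * limit; writing 0 clears the user-set limit and falls back to the default
 * cap.
 */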
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long var;
	unsigned int max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
		page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&var, page, count);

	if (ret < 0)
		return ret;

	max_sectors_kb = (unsigned int)var;
	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb,
					 q->limits.max_dev_sectors >> 1);
	if (max_sectors_kb == 0) {
		q->limits.max_user_sectors = 0;
		max_sectors_kb = min(max_hw_sectors_kb,
				     BLK_DEF_MAX_SECTORS >> 1);
	} else {
		if (max_sectors_kb > max_hw_sectors_kb ||
		    max_sectors_kb < page_kb)
			return -EINVAL;
		q->limits.max_user_sectors = max_sectors_kb << 1;
	}

	spin_lock_irq(&q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	if (q->disk)
		q->disk->bdi->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(&q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, page);
}

static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.virt_boundary_mask, page);
}

static ssize_t queue_dma_alignment_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_dma_alignment(q), page);
}

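/*
 * QUEUE_SYSFS_BIT_FNS() generates a show/store pair for a single queue flag.
 * A non-zero @neg inverts the exposed value, e.g. "rotational" is stored
 * internally as QUEUE_FLAG_NONROT.
 */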
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg) \
static ssize_t \
queue_##name##_show(struct request_queue *q, char *page) \
{ \
	int bit; \
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags); \
	return queue_var_show(neg ? !bit : bit, page); \
} \
static ssize_t \
queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
{ \
	unsigned long val; \
	ssize_t ret; \
	ret = queue_var_store(&val, page, count); \
	if (ret < 0) \
		return ret; \
	if (neg) \
		val = !val; \
 \
	if (val) \
		blk_queue_flag_set(QUEUE_FLAG_##flag, q); \
	else \
		blk_queue_flag_clear(QUEUE_FLAG_##flag, q); \
	return ret; \
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
#undef QUEUE_SYSFS_BIT_FNS

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}

static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(disk_nr_zones(q->disk), page);
}

static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(bdev_max_open_zones(q->disk->part0), page);
}

static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(bdev_max_active_zones(q->disk->part0), page);
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	return ret;
}

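/*
 * rq_affinity takes three values: 0 clears completion affinity, 1 completes
 * a request near the CPU that submitted it (SAME_COMP), and 2 additionally
 * forces completion onto the exact submitting CPU (SAME_FORCE).
 */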
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
#endif
	return ret;
}

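/*
 * io_poll_delay is a compatibility stub: with hybrid polling gone, reads
 * always report -1 (classic polling) and writes are accepted but ignored.
 */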
static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%d\n", -1);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				      size_t count)
{
	return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return -EINVAL;
	pr_info_ratelimited("writes to the poll attribute are ignored.\n");
	pr_info_ratelimited("please use driver specific parameters instead.\n");
	return count;
}

static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
				      size_t count)
{
	unsigned int val;
	int err;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	blk_queue_rq_timeout(q, msecs_to_jiffies(val));

	return count;
}

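/*
 * "write_cache" accepts "write back" only when the device actually has a
 * volatile writeback cache (QUEUE_FLAG_HW_WC); "write through" and "none"
 * both clear the writeback flag.
 */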
static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	if (!strncmp(page, "write back", 10)) {
		if (!test_bit(QUEUE_FLAG_HW_WC, &q->queue_flags))
			return -EINVAL;
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	} else if (!strncmp(page, "write through", 13) ||
		   !strncmp(page, "none", 4)) {
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
	} else {
		return -EINVAL;
	}

	return count;
}

static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}

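/*
 * QUEUE_RO_ENTRY()/QUEUE_RW_ENTRY() bind a sysfs file name to the matching
 * show (and, for RW, store) handler above; the mode carries the permission
 * bits (0444 read-only, 0644 read-write).
 */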
#define QUEUE_RO_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr = { .name = _name, .mode = 0444 },	\
	.show = _prefix##_show,				\
};

#define QUEUE_RW_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr = { .name = _name, .mode = 0644 },	\
	.show = _prefix##_show,				\
	.store = _prefix##_store,			\
};

QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment");

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
#endif

/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

QUEUE_RW_ENTRY(queue_nonrot, "rotational");
QUEUE_RW_ENTRY(queue_iostats, "iostats");
QUEUE_RW_ENTRY(queue_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");

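/*
 * wbt_lat_usec takes a signed value: -1 restores the default latency target,
 * 0 effectively disables writeback throttling, and a positive value sets the
 * target in microseconds (converted to nanoseconds internally).
 */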
#ifdef CONFIG_BLK_WBT
static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!wbt_rq_qos(q))
		return -EINVAL;

	if (wbt_disabled(q))
		return sprintf(page, "0\n");

	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(q->disk);
		if (ret)
			return ret;
	}

	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	if (wbt_get_min_lat(q) == val)
		return count;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO in flight if that happens.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return count;
}

QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
#endif

static struct attribute *queue_attrs[] = {
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_zone_append_max_entry.attr,
	&queue_zone_write_granularity_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_max_open_zones_entry.attr,
	&queue_max_active_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_iostats_entry.attr,
	&queue_stable_writes_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_poll_delay_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&blk_throtl_sample_time_entry.attr,
#endif
	&queue_virt_boundary_mask_entry.attr,
	&queue_dma_alignment_entry.attr,
	NULL,
};

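/*
 * Attributes that only make sense for blk-mq queues are kept in a separate
 * group so the whole group can be hidden on bio-based queues.
 */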
static struct attribute *blk_mq_queue_attrs[] = {
	&queue_requests_entry.attr,
	&elv_iosched_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_WBT
	&queue_wb_lat_entry.attr,
#endif
	NULL,
};

static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
				  int n)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if ((attr == &queue_max_open_zones_entry.attr ||
	     attr == &queue_max_active_zones_entry.attr) &&
	    !blk_queue_is_zoned(q))
		return 0;

	return attr->mode;
}

static umode_t blk_mq_queue_attr_visible(struct kobject *kobj,
					 struct attribute *attr, int n)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if (!queue_is_mq(q))
		return 0;

	if (attr == &queue_io_timeout_entry.attr && !q->mq_ops->timeout)
		return 0;

	return attr->mode;
}

static struct attribute_group queue_attr_group = {
	.attrs = queue_attrs,
	.is_visible = queue_attr_visible,
};

static struct attribute_group blk_mq_queue_attr_group = {
	.attrs = blk_mq_queue_attrs,
	.is_visible = blk_mq_queue_attr_visible,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

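/*
 * Generic dispatch for all queue attributes: every show/store handler runs
 * under q->sysfs_lock, serializing attribute access against queue
 * registration and teardown.
 */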
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show = queue_attr_show,
	.store = queue_attr_store,
};

static const struct attribute_group *blk_queue_attr_groups[] = {
	&queue_attr_group,
	&blk_mq_queue_attr_group,
	NULL
};

static void blk_queue_release(struct kobject *kobj)
{
	/* nothing to do here, all data is associated with the parent gendisk */
}

static const struct kobj_type blk_queue_ktype = {
	.default_groups = blk_queue_attr_groups,
	.sysfs_ops = &queue_sysfs_ops,
	.release = blk_queue_release,
};

static void blk_debugfs_remove(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	mutex_lock(&q->debugfs_mutex);
	blk_trace_shutdown(q);
	debugfs_remove_recursive(q->debugfs_dir);
	q->debugfs_dir = NULL;
	q->sched_debugfs_dir = NULL;
	q->rqos_debugfs_dir = NULL;
	mutex_unlock(&q->debugfs_mutex);
}

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	int ret;

	mutex_lock(&q->sysfs_dir_lock);
	kobject_init(&disk->queue_kobj, &blk_queue_ktype);
	ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
	if (ret < 0)
		goto out_put_queue_kobj;

	if (queue_is_mq(q)) {
		ret = blk_mq_sysfs_register(disk);
		if (ret)
			goto out_put_queue_kobj;
	}
	mutex_lock(&q->sysfs_lock);

	mutex_lock(&q->debugfs_mutex);
	q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root);
	if (queue_is_mq(q))
		blk_mq_debugfs_register(q);
	mutex_unlock(&q->debugfs_mutex);

	ret = disk_register_independent_access_ranges(disk);
	if (ret)
		goto out_debugfs_remove;

	if (q->elevator) {
		ret = elv_register_queue(q, false);
		if (ret)
			goto out_unregister_ia_ranges;
	}

	ret = blk_crypto_sysfs_register(disk);
	if (ret)
		goto out_elv_unregister;

	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
	wbt_enable_default(disk);
	blk_throtl_register(disk);

	/* Now everything is ready; send out the KOBJ_ADD uevent. */
	kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
	if (q->elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices. Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved. To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
	percpu_ref_switch_to_percpu(&q->q_usage_counter);

	return ret;

out_elv_unregister:
	elv_unregister_queue(q);
out_unregister_ia_ranges:
	disk_unregister_independent_access_ranges(disk);
out_debugfs_remove:
	blk_debugfs_remove(disk);
	mutex_unlock(&q->sysfs_lock);
out_put_queue_kobj:
	kobject_put(&disk->queue_kobj);
	mutex_unlock(&q->sysfs_dir_lock);
	return ret;
}

/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!blk_queue_registered(q))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	mutex_lock(&q->sysfs_dir_lock);
	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_sysfs_unregister(disk);
	blk_crypto_sysfs_unregister(disk);

	mutex_lock(&q->sysfs_lock);
	elv_unregister_queue(q);
	disk_unregister_independent_access_ranges(disk);
	mutex_unlock(&q->sysfs_lock);

	/* Now that we've deleted all child objects, we can delete the queue. */
	kobject_uevent(&disk->queue_kobj, KOBJ_REMOVE);
	kobject_del(&disk->queue_kobj);
	mutex_unlock(&q->sysfs_dir_lock);

	blk_debugfs_remove(disk);
}