Lines matching the identifier q in the block layer's request_queue sysfs code (blk-sysfs.c); each entry gives the original source line number followed by the matching line.

50 static ssize_t queue_requests_show(struct request_queue *q, char *page)
52 return queue_var_show(q->nr_requests, page);
56 queue_requests_store(struct request_queue *q, const char *page, size_t count)
61 if (!queue_is_mq(q))
71 err = blk_mq_update_nr_requests(q, nr);
78 static ssize_t queue_ra_show(struct request_queue *q, char *page)
82 if (!q->disk)
84 ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
89 queue_ra_store(struct request_queue *q, const char *page, size_t count)
94 if (!q->disk)
99 q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
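For context on the arithmetic in queue_ra_show/queue_ra_store above: read_ahead_kb is exposed in kilobytes while the BDI stores it in pages, so the code shifts by (PAGE_SHIFT - 10) in each direction. A minimal standalone sketch of that conversion, assuming 4 KiB pages (PAGE_SHIFT = 12) and hypothetical helper names:

    #include <stdio.h>

    #define PAGE_SHIFT 12  /* assumption: 4 KiB pages, as on most x86-64 configs */

    /* pages -> kilobytes, mirrors "ra_pages << (PAGE_SHIFT - 10)" */
    static unsigned long ra_pages_to_kb(unsigned long ra_pages)
    {
            return ra_pages << (PAGE_SHIFT - 10);
    }

    /* kilobytes -> pages, mirrors "ra_kb >> (PAGE_SHIFT - 10)" */
    static unsigned long ra_kb_to_pages(unsigned long ra_kb)
    {
            return ra_kb >> (PAGE_SHIFT - 10);
    }

    int main(void)
    {
            unsigned long pages = 32;                  /* 32 pages of readahead */
            unsigned long kb = ra_pages_to_kb(pages);  /* 32 * 4 KiB = 128 KiB */
            printf("%lu pages = %lu KiB, back to %lu pages\n",
                   pages, kb, ra_kb_to_pages(kb));
            return 0;
    }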
103 static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
105 int max_sectors_kb = queue_max_sectors(q) >> 1;
110 static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
112 return queue_var_show(queue_max_segments(q), page);
115 static ssize_t queue_max_discard_segments_show(struct request_queue *q,
118 return queue_var_show(queue_max_discard_segments(q), page);
121 static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
123 return queue_var_show(q->limits.max_integrity_segments, page);
126 static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
128 return queue_var_show(queue_max_segment_size(q), page);
131 static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
133 return queue_var_show(queue_logical_block_size(q), page);
136 static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
138 return queue_var_show(queue_physical_block_size(q), page);
141 static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
143 return queue_var_show(q->limits.chunk_sectors, page);
146 static ssize_t queue_io_min_show(struct request_queue *q, char *page)
148 return queue_var_show(queue_io_min(q), page);
151 static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
153 return queue_var_show(queue_io_opt(q), page);
156 static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
158 return queue_var_show(q->limits.discard_granularity, page);
161 static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
165 (unsigned long long)q->limits.max_hw_discard_sectors << 9);
168 static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
171 (unsigned long long)q->limits.max_discard_sectors << 9);
174 static ssize_t queue_discard_max_store(struct request_queue *q,
183 if (max_discard & (q->limits.discard_granularity - 1))
190 if (max_discard > q->limits.max_hw_discard_sectors)
191 max_discard = q->limits.max_hw_discard_sectors;
193 q->limits.max_discard_sectors = max_discard;
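The queue_discard_max_store path above rejects values that are not aligned to the discard granularity and clamps the rest to the hardware limit. A small standalone sketch of that validation, with made-up numbers; the granularity is assumed to be a power of two, which is what the bitmask test requires:

    #include <stdio.h>

    /* Returns the value actually applied, or -1 if it is not granularity-aligned.
     * Mirrors the "max_discard & (granularity - 1)" check and the clamp to the
     * hardware limit. */
    static long long apply_discard_max(unsigned long long max_discard,
                                       unsigned long long granularity,
                                       unsigned long long hw_max)
    {
            if (max_discard & (granularity - 1))
                    return -1;              /* not a multiple of the granularity */
            if (max_discard > hw_max)
                    max_discard = hw_max;   /* never exceed what the device reports */
            return (long long)max_discard;
    }

    int main(void)
    {
            /* hypothetical values: 512-sector granularity, 131072-sector hardware cap */
            printf("%lld\n", apply_discard_max(1ULL << 20, 512, 131072)); /* clamped */
            printf("%lld\n", apply_discard_max(1000, 512, 131072));       /* rejected */
            return 0;
    }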
197 static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
202 static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
207 static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
210 (unsigned long long)q->limits.max_write_zeroes_sectors << 9);
213 static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
216 return queue_var_show(queue_zone_write_granularity(q), page);
219 static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
221 unsigned long long max_sectors = q->limits.max_zone_append_sectors;
227 queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
231 max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
240 q->limits.max_dev_sectors >> 1);
242 q->limits.max_user_sectors = 0;
249 q->limits.max_user_sectors = max_sectors_kb << 1;
252 spin_lock_irq(&q->queue_lock);
253 q->limits.max_sectors = max_sectors_kb << 1;
254 if (q->disk)
255 q->disk->bdi->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
256 spin_unlock_irq(&q->queue_lock);
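max_sectors_kb is stored internally in 512-byte sectors, hence the ">> 1" on read and "<< 1" on write in queue_max_sectors_store above (one KiB is two sectors); the new value is also bounded by the hardware and device limits before q->limits.max_sectors is updated under queue_lock. A minimal sketch of just the unit handling and clamp, with hypothetical helper names:

    #include <stdio.h>

    /* 1 KiB = 2 x 512-byte sectors, so KiB <-> sectors is a shift by one. */
    static unsigned int kb_to_sectors(unsigned int kb)      { return kb << 1; }
    static unsigned int sectors_to_kb(unsigned int sectors) { return sectors >> 1; }

    int main(void)
    {
            unsigned int max_hw_sectors = 2048;                 /* hypothetical: 1 MiB */
            unsigned int wanted_kb = 4096;                      /* user asks for 4 MiB */
            unsigned int wanted_sectors = kb_to_sectors(wanted_kb);

            if (wanted_sectors > max_hw_sectors)
                    wanted_sectors = max_hw_sectors;            /* clamp to hardware */

            printf("effective max_sectors_kb = %u\n", sectors_to_kb(wanted_sectors));
            return 0;
    }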
261 static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
263 int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;
268 static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
270 return queue_var_show(q->limits.virt_boundary_mask, page);
273 static ssize_t queue_dma_alignment_show(struct request_queue *q, char *page)
275 return queue_var_show(queue_dma_alignment(q), page);
280 queue_##name##_show(struct request_queue *q, char *page) \
283 bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags); \
287 queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
298 blk_queue_flag_set(QUEUE_FLAG_##flag, q); \
300 blk_queue_flag_clear(QUEUE_FLAG_##flag, q); \
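The fragments above come from a macro that stamps out a show/store pair per queue flag: show reports test_bit(QUEUE_FLAG_##flag, ...) and store calls blk_queue_flag_set() or blk_queue_flag_clear(). Below is a userspace approximation of the same code-generation idea, not the kernel's macro; the macro name, flag names, and storage are invented for illustration:

    #include <stdio.h>

    /* Hypothetical flag bits standing in for QUEUE_FLAG_* values. */
    enum { FLAG_NONROT = 0, FLAG_STABLE_WRITES = 1 };

    static unsigned long queue_flags;

    /* Generate a show/store pair per flag: show reads the bit, store sets or
     * clears it depending on the written value. */
    #define DEFINE_FLAG_FNS(name, flag)                                   \
    static int queue_##name##_show(void)                                  \
    {                                                                     \
            return !!(queue_flags & (1UL << FLAG_##flag));                \
    }                                                                     \
    static void queue_##name##_store(int val)                             \
    {                                                                     \
            if (val)                                                      \
                    queue_flags |= (1UL << FLAG_##flag);                  \
            else                                                          \
                    queue_flags &= ~(1UL << FLAG_##flag);                 \
    }

    DEFINE_FLAG_FNS(nonrot, NONROT)
    DEFINE_FLAG_FNS(stable_writes, STABLE_WRITES)

    int main(void)
    {
            queue_nonrot_store(1);
            printf("nonrot=%d stable_writes=%d\n",
                   queue_nonrot_show(), queue_stable_writes_show());
            return 0;
    }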
310 static ssize_t queue_zoned_show(struct request_queue *q, char *page)
312 switch (blk_queue_zoned_model(q)) {
322 static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
324 return queue_var_show(disk_nr_zones(q->disk), page);
327 static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
329 return queue_var_show(bdev_max_open_zones(q->disk->part0), page);
332 static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
334 return queue_var_show(bdev_max_active_zones(q->disk->part0), page);
337 static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
339 return queue_var_show((blk_queue_nomerges(q) << 1) |
340 blk_queue_noxmerges(q), page);
343 static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
352 blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
353 blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
355 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
357 blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
362 static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
364 bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
365 bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);
371 queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
382 blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
383 blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
385 blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
386 blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
388 blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
389 blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
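The two stores above pack related flags into one small integer: nomerges reports (NOMERGES << 1) | NOXMERGES, so 0 leaves merging enabled, 1 skips only the extended merge lookups, and 2 disables merging entirely; rq_affinity similarly maps 0/1/2 onto combinations of SAME_COMP and SAME_FORCE. A small sketch of the nomerges encoding, as an illustration rather than kernel code:

    #include <stdio.h>

    /* Illustrative flag bits; the kernel uses QUEUE_FLAG_NOMERGES / NOXMERGES. */
    #define NOMERGES  (1u << 0)
    #define NOXMERGES (1u << 1)

    /* Mirrors "(nomerges << 1) | noxmerges" from the show path. */
    static unsigned int nomerges_show(unsigned int flags)
    {
            return (!!(flags & NOMERGES) << 1) | !!(flags & NOXMERGES);
    }

    /* Mirrors the store path: clear both bits, then set one based on the value. */
    static unsigned int nomerges_store(unsigned int flags, unsigned long val)
    {
            flags &= ~(NOMERGES | NOXMERGES);
            if (val == 2)
                    flags |= NOMERGES;      /* no merging at all */
            else if (val)
                    flags |= NOXMERGES;     /* skip extended merge lookups only */
            return flags;
    }

    int main(void)
    {
            unsigned int flags = nomerges_store(0, 2);
            printf("nomerges reads back as %u\n", nomerges_show(flags)); /* 2 */
            return 0;
    }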
395 static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
400 static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
406 static ssize_t queue_poll_show(struct request_queue *q, char *page)
408 return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
411 static ssize_t queue_poll_store(struct request_queue *q, const char *page,
414 if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
421 static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
423 return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
426 static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
436 blk_queue_rq_timeout(q, msecs_to_jiffies(val));
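The io_timeout attribute converts between milliseconds (the sysfs unit) and jiffies (the internal unit) via jiffies_to_msecs()/msecs_to_jiffies(). A minimal illustration of that conversion with simplified helpers, assuming a hypothetical HZ of 250; the real kernel helpers handle more corner cases:

    #include <stdio.h>

    #define HZ 250  /* assumption: 250 timer ticks per second; real configs vary */

    static unsigned int msecs_to_jiffies_approx(unsigned int msecs)
    {
            return (msecs * HZ + 999) / 1000;   /* round up so the timeout never shrinks */
    }

    static unsigned int jiffies_to_msecs_approx(unsigned int jiffies)
    {
            return jiffies * 1000 / HZ;
    }

    int main(void)
    {
            unsigned int j = msecs_to_jiffies_approx(30000);   /* e.g. a 30 s I/O timeout */
            printf("30000 ms = %u jiffies = %u ms back\n", j, jiffies_to_msecs_approx(j));
            return 0;
    }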
441 static ssize_t queue_wc_show(struct request_queue *q, char *page)
443 if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
449 static ssize_t queue_wc_store(struct request_queue *q, const char *page,
453 if (!test_bit(QUEUE_FLAG_HW_WC, &q->queue_flags))
455 blk_queue_flag_set(QUEUE_FLAG_WC, q);
458 blk_queue_flag_clear(QUEUE_FLAG_WC, q);
466 static ssize_t queue_fua_show(struct request_queue *q, char *page)
468 return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
471 static ssize_t queue_dax_show(struct request_queue *q, char *page)
473 return queue_var_show(blk_queue_dax(q), page);
560 static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
562 if (!wbt_rq_qos(q))
565 if (wbt_disabled(q))
568 return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
571 static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
584 rqos = wbt_rq_qos(q);
586 ret = wbt_init(q->disk);
592 val = wbt_default_latency_nsec(q);
596 if (wbt_get_min_lat(q) == val)
604 blk_mq_freeze_queue(q);
605 blk_mq_quiesce_queue(q);
607 wbt_set_min_lat(q, val);
609 blk_mq_unquiesce_queue(q);
610 blk_mq_unfreeze_queue(q);
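queue_wb_lat_store above changes writeback-throttling state only after freezing and quiescing the queue, then reverses both once the new minimum latency is in place; the show path divides the nanosecond value by 1000 to report microseconds. A schematic sketch of that bracketed-update ordering, using hypothetical stub functions in place of the real freeze/quiesce primitives:

    #include <stdio.h>

    /* Hypothetical stubs standing in for the real freeze/quiesce primitives. */
    static void freeze(void)    { printf("freeze queue (drain in-flight requests)\n"); }
    static void quiesce(void)   { printf("quiesce queue (stop dispatch)\n"); }
    static void unquiesce(void) { printf("unquiesce queue\n"); }
    static void unfreeze(void)  { printf("unfreeze queue\n"); }

    static unsigned long long min_lat_nsec;

    /* Update the throttling target only while no I/O can race with it,
     * mirroring the freeze -> quiesce -> update -> unquiesce -> unfreeze order. */
    static void set_min_lat_usec(unsigned long long usec)
    {
            freeze();
            quiesce();
            min_lat_nsec = usec * 1000;     /* sysfs value is in microseconds */
            unquiesce();
            unfreeze();
    }

    int main(void)
    {
            set_min_lat_usec(2000);         /* e.g. a 2 ms latency target */
            printf("min latency now %llu us\n", min_lat_nsec / 1000);
            return 0;
    }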
677 struct request_queue *q = disk->queue;
681 !blk_queue_is_zoned(q))
691 struct request_queue *q = disk->queue;
693 if (!queue_is_mq(q))
696 if (attr == &queue_io_timeout_entry.attr && !q->mq_ops->timeout)
719 struct request_queue *q = disk->queue;
724 mutex_lock(&q->sysfs_lock);
725 res = entry->show(q, page);
726 mutex_unlock(&q->sysfs_lock);
736 struct request_queue *q = disk->queue;
742 mutex_lock(&q->sysfs_lock);
743 res = entry->store(q, page, length);
744 mutex_unlock(&q->sysfs_lock);
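The wrappers above are what sysfs ultimately invokes: they run the attribute's show or store callback with q->sysfs_lock held, so individual attribute handlers never race with each other. A loose userspace analogue of calling a table-driven show/store callback under a mutex; the attribute table, lookup-by-name dispatch, and value are invented for illustration:

    #include <stdio.h>
    #include <string.h>
    #include <pthread.h>

    struct queue_attr {
            const char *name;
            int (*show)(char *buf, size_t len);
            int (*store)(const char *buf);
    };

    static pthread_mutex_t sysfs_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long nr_requests = 128;

    static int nr_requests_show(char *buf, size_t len) { return snprintf(buf, len, "%lu\n", nr_requests); }
    static int nr_requests_store(const char *buf)      { return sscanf(buf, "%lu", &nr_requests) == 1 ? 0 : -1; }

    static const struct queue_attr attrs[] = {
            { "nr_requests", nr_requests_show, nr_requests_store },
    };

    /* Dispatch wrapper: find the attribute, then call its handler under the lock. */
    static int attr_store(const char *name, const char *val)
    {
            for (size_t i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
                    if (strcmp(attrs[i].name, name))
                            continue;
                    pthread_mutex_lock(&sysfs_lock);
                    int ret = attrs[i].store(val);
                    pthread_mutex_unlock(&sysfs_lock);
                    return ret;
            }
            return -1;
    }

    int main(void)
    {
            char buf[32];
            attr_store("nr_requests", "256");
            pthread_mutex_lock(&sysfs_lock);
            attrs[0].show(buf, sizeof(buf));
            pthread_mutex_unlock(&sysfs_lock);
            fputs(buf, stdout);
            return 0;
    }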
772 struct request_queue *q = disk->queue;
774 mutex_lock(&q->debugfs_mutex);
775 blk_trace_shutdown(q);
776 debugfs_remove_recursive(q->debugfs_dir);
777 q->debugfs_dir = NULL;
778 q->sched_debugfs_dir = NULL;
779 q->rqos_debugfs_dir = NULL;
780 mutex_unlock(&q->debugfs_mutex);
789 struct request_queue *q = disk->queue;
792 mutex_lock(&q->sysfs_dir_lock);
798 if (queue_is_mq(q)) {
803 mutex_lock(&q->sysfs_lock);
805 mutex_lock(&q->debugfs_mutex);
806 q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root);
807 if (queue_is_mq(q))
808 blk_mq_debugfs_register(q);
809 mutex_unlock(&q->debugfs_mutex);
815 if (q->elevator) {
816 ret = elv_register_queue(q, false);
825 blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
831 if (q->elevator)
832 kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
833 mutex_unlock(&q->sysfs_lock);
834 mutex_unlock(&q->sysfs_dir_lock);
845 blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
846 percpu_ref_switch_to_percpu(&q->q_usage_counter);
851 elv_unregister_queue(q);
856 mutex_unlock(&q->sysfs_lock);
859 mutex_unlock(&q->sysfs_dir_lock);
872 struct request_queue *q = disk->queue;
874 if (WARN_ON(!q))
878 if (!blk_queue_registered(q))
886 mutex_lock(&q->sysfs_lock);
887 blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
888 mutex_unlock(&q->sysfs_lock);
890 mutex_lock(&q->sysfs_dir_lock);
895 if (queue_is_mq(q))
899 mutex_lock(&q->sysfs_lock);
900 elv_unregister_queue(q);
902 mutex_unlock(&q->sysfs_lock);
907 mutex_unlock(&q->sysfs_dir_lock);
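Registration and teardown above are largely symmetric: registration builds the debugfs directory, registers the elevator, and marks the queue registered under sysfs_dir_lock/sysfs_lock, while unregistration clears the registered flag first and then undoes the rest. A schematic sketch of that pairing with hypothetical stubs and a single lock, not the kernel's exact locking or ordering:

    #include <stdio.h>
    #include <pthread.h>

    static pthread_mutex_t sysfs_lock = PTHREAD_MUTEX_INITIALIZER;
    static int registered;

    /* Hypothetical stubs for the pieces the real code wires up. */
    static void create_debugfs(void)     { printf("create debugfs dir\n"); }
    static void remove_debugfs(void)     { printf("remove debugfs dir\n"); }
    static void register_elevator(void)  { printf("register elevator\n"); }
    static void unregister_elevator(void){ printf("unregister elevator\n"); }

    static void register_queue(void)
    {
            pthread_mutex_lock(&sysfs_lock);
            create_debugfs();
            register_elevator();
            registered = 1;                 /* mark registered only once setup is done */
            pthread_mutex_unlock(&sysfs_lock);
    }

    static void unregister_queue(void)
    {
            pthread_mutex_lock(&sysfs_lock);
            if (!registered) {              /* mirrors the blk_queue_registered() bail-out */
                    pthread_mutex_unlock(&sysfs_lock);
                    return;
            }
            registered = 0;                 /* stop treating the queue as registered first */
            unregister_elevator();
            remove_debugfs();
            pthread_mutex_unlock(&sysfs_lock);
    }

    int main(void)
    {
            register_queue();
            unregister_queue();
            return 0;
    }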