// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

#include "blk.h"
#include "blk-wbt.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

/**
 * blk_set_default_limits - reset limits to default values
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
	lim->max_segments = BLK_MAX_SEGMENTS;
	lim->max_discard_segments = 1;
	lim->max_integrity_segments = 0;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	lim->virt_boundary_mask = 0;
	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
	lim->max_dev_sectors = 0;
	lim->chunk_sectors = 0;
	lim->max_write_same_sectors = 0;
	lim->max_write_zeroes_sectors = 0;
	lim->max_zone_append_sectors = 0;
	lim->max_discard_sectors = 0;
	lim->max_hw_discard_sectors = 0;
	lim->discard_granularity = 0;
	lim->discard_alignment = 0;
	lim->discard_misaligned = 0;
	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
	lim->alignment_offset = 0;
	lim->io_opt = 0;
	lim->misaligned = 0;
	lim->zoned = BLK_ZONED_NONE;
}
EXPORT_SYMBOL(blk_set_default_limits);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state. Should be used
 *   by stacking drivers like DM that have no internal limits.
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
	blk_set_default_limits(lim);

	/* Inherit limits from component devices */
	lim->max_segments = USHRT_MAX;
	lim->max_discard_segments = USHRT_MAX;
	lim->max_hw_sectors = UINT_MAX;
	lim->max_segment_size = UINT_MAX;
	lim->max_sectors = UINT_MAX;
	lim->max_dev_sectors = UINT_MAX;
	lim->max_write_same_sectors = UINT_MAX;
	lim->max_write_zeroes_sectors = UINT_MAX;
	lim->max_zone_append_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
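/*
 * Editor-added illustration (not part of the original file): a minimal
 * sketch of how a driver might seed its limits at probe time.  The
 * function name and the 30 second timeout are assumptions chosen for
 * the example.
 */
static void __maybe_unused example_seed_queue_limits(struct request_queue *q)
{
	blk_set_default_limits(&q->limits);	/* conservative starting point */
	blk_queue_rq_timeout(q, 30 * HZ);	/* abort commands after 30s */
}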
/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @max_addr: the maximum address the device can handle
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @max_addr.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
{
	unsigned long b_pfn = max_addr >> PAGE_SHIFT;
	int dma = 0;

	q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
	/*
	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
	 * some IOMMUs can handle everything, but I don't know of a
	 * way to test this here.
	 */
	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
		dma = 1;
	q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
#else
	if (b_pfn < blk_max_low_pfn)
		dma = 1;
	q->limits.bounce_pfn = b_pfn;
#endif
	if (dma) {
		init_emergency_isa_pool();
		q->bounce_gfp = GFP_NOIO | GFP_DMA;
		q->limits.bounce_pfn = b_pfn;
	}
}
EXPORT_SYMBOL(blk_queue_bounce_limit);

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *    the device driver based upon the capabilities of the I/O
 *    controller.
 *
 *    max_dev_sectors is a hard limit imposed by the storage device for
 *    READ/WRITE requests.  It is set by the disk driver.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests.  This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit cannot exceed max_hw_sectors.
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
	struct queue_limits *limits = &q->limits;
	unsigned int max_sectors;

	if ((max_hw_sectors << 9) < PAGE_SIZE) {
		max_hw_sectors = 1 << (PAGE_SHIFT - 9);
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_hw_sectors);
	}

	limits->max_hw_sectors = max_hw_sectors;
	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
	max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
	limits->max_sectors = max_sectors;
	q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
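/*
 * Editor-added illustration (hypothetical driver code, not used by the
 * block core): a controller that cannot address memory above 4GB and
 * moves at most 1MB per request.  The specific values are assumptions
 * for the example.
 */
static void __maybe_unused example_cap_transfers(struct request_queue *q)
{
	blk_queue_bounce_limit(q, DMA_BIT_MASK(32));	/* bounce pages above 4GB */
	blk_queue_max_hw_sectors(q, 2048);		/* 2048 * 512b = 1MB */
}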
/**
 * blk_queue_chunk_sectors - set size of the chunk for this queue
 * @q:  the request queue for the device
 * @chunk_sectors:  chunk sectors in the usual 512b unit
 *
 * Description:
 *    If a driver doesn't want IOs to cross a given chunk size, it can set
 *    this limit and prevent merging across chunks. Note that the block layer
 *    must accept a page worth of data at any offset. So if the crossing of
 *    chunks is a hard limitation in the driver, it must still be prepared
 *    to split single page bios.
 **/
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
{
	q->limits.chunk_sectors = chunk_sectors;
}
EXPORT_SYMBOL(blk_queue_chunk_sectors);

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors)
{
	q->limits.max_hw_discard_sectors = max_discard_sectors;
	q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_write_same_sectors - set max sectors for a single write same
 * @q:  the request queue for the device
 * @max_write_same_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_same_sectors(struct request_queue *q,
				      unsigned int max_write_same_sectors)
{
	q->limits.max_write_same_sectors = max_write_same_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_same_sectors);

/**
 * blk_queue_max_write_zeroes_sectors - set max sectors for a single
 *                                      write zeroes
 * @q:  the request queue for the device
 * @max_write_zeroes_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_zeroes_sectors)
{
	q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);

/**
 * blk_queue_max_zone_append_sectors - set max sectors for a single zone append
 * @q:  the request queue for the device
 * @max_zone_append_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_zone_append_sectors(struct request_queue *q,
		unsigned int max_zone_append_sectors)
{
	unsigned int max_sectors;

	if (WARN_ON(!blk_queue_is_zoned(q)))
		return;

	max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);
	max_sectors = min(q->limits.chunk_sectors, max_sectors);

	/*
	 * Signal eventual driver bugs resulting in the max_zone_append
	 * sectors limit being 0 due to a 0 argument, the chunk_sectors limit
	 * (zone size) not set, or the max_hw_sectors limit not set.
	 */
	WARN_ON(!max_sectors);

	q->limits.max_zone_append_sectors = max_sectors;
}
EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);
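/*
 * Editor-added illustration (hypothetical, not part of the original
 * file): a thin-provisioned device advertising discard and write-zeroes
 * support.  The 4MB-per-command figure is an assumption for the
 * example.
 */
static void __maybe_unused example_enable_discard(struct request_queue *q)
{
	blk_queue_max_discard_sectors(q, 8192);		/* 8192 * 512b = 4MB */
	blk_queue_max_write_zeroes_sectors(q, 8192);
	blk_queue_max_discard_segments(q, 1);		/* one range per command */
}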
/**
 * blk_queue_max_discard_segments - set max segments for discard requests
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    segments in a discard request.
 **/
void blk_queue_max_discard_segments(struct request_queue *q,
		unsigned short max_segments)
{
	q->limits.max_discard_segments = max_segments;
}
EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_SIZE) {
		max_size = PAGE_SIZE;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_size);
	}

	/* see blk_queue_virt_boundary() for the explanation */
	WARN_ON_ONCE(q->limits.virt_boundary_mask);

	q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
{
	q->limits.logical_block_size = size;

	if (q->limits.physical_block_size < size)
		q->limits.physical_block_size = size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:  the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);
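/*
 * Editor-added illustration (hypothetical): a 512-byte-emulation drive
 * with 4KB physical sectors whose LBA 0 starts part-way into a physical
 * sector.  The 3584-byte offset (63 * 512 mod 4096, i.e. the legacy DOS
 * 63-sector layout) is an assumption for the example.
 */
static void __maybe_unused example_set_512e_geometry(struct request_queue *q)
{
	blk_queue_logical_block_size(q, 512);
	blk_queue_physical_block_size(q, 4096);
	blk_queue_alignment_offset(q, 3584);
}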
void blk_queue_update_readahead(struct request_queue *q)
{
	/*
	 * For read-ahead of large files to be effective, we need to read
	 * ahead at least twice the optimal I/O size.
	 */
	q->backing_dev_info->ra_pages =
		max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
	q->backing_dev_info->io_pages =
		queue_max_sectors(q) >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL_GPL(blk_queue_update_readahead);

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:  the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
	limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);
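/*
 * Editor-added illustration (hypothetical): I/O hints for a striped
 * array with 64KB chunks across four data disks.  The geometry is an
 * assumption for the example.
 */
static void __maybe_unused example_set_raid_hints(struct queue_limits *lim)
{
	blk_limits_io_min(lim, 64 * 1024);	/* one chunk */
	blk_limits_io_opt(lim, 4 * 64 * 1024);	/* one full stripe */
}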
/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:  the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
	blk_limits_io_opt(&q->limits, opt);
	q->backing_dev_info->ra_pages =
		max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
}
EXPORT_SYMBOL(blk_queue_io_opt);
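/*
 * Editor-added worked example of the read-ahead sizing above: with
 * io_opt = 1MB and 4KB pages, ra_pages becomes
 * max(2 * 1048576 / 4096, VM_READAHEAD_PAGES) = max(512, 32) = 512
 * pages, i.e. 2MB of read-ahead (VM_READAHEAD_PAGES is 32 with 4KB
 * pages).
 */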
/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:  the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
	t->max_write_same_sectors = min(t->max_write_same_sectors,
					b->max_write_same_sectors);
	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
					  b->max_write_zeroes_sectors);
	t->max_zone_append_sectors = min(t->max_zone_append_sectors,
					 b->max_zone_append_sectors);
	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);
	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
					     b->virt_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_discard_segments = min_not_zero(t->max_discard_segments,
					       b->max_discard_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->misaligned |= b->misaligned;

	alignment = queue_limit_alignment_offset(b, start);

	/*
	 * Bottom device has different alignment.  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) % min(top, bottom)) {
			t->misaligned = 1;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
	t->chunk_sectors = lcm_not_zero(t->chunk_sectors, b->chunk_sectors);

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->misaligned = 1;
		ret = -1;
	}

	/* chunk_sectors a multiple of the physical block size? */
	if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
		t->chunk_sectors = 0;
		t->misaligned = 1;
		ret = -1;
	}

	t->raid_partial_stripes_expensive =
		max(t->raid_partial_stripes_expensive,
		    b->raid_partial_stripes_expensive);

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
		% max(t->physical_block_size, t->io_min);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		ret = -1;
	}

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		if (t->discard_granularity != 0 &&
		    t->discard_alignment != alignment) {
			top = t->discard_granularity + t->discard_alignment;
			bottom = b->discard_granularity + alignment;

			/* Verify that top and bottom intervals line up */
			if ((max(top, bottom) % min(top, bottom)) != 0)
				t->discard_misaligned = 1;
		}

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
							 b->max_hw_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
			t->discard_granularity;
	}

	t->zoned = max(t->zoned, b->zoned);
	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
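/*
 * Editor-added illustration (hypothetical): how a stacking driver
 * combines limits across two component queues.  The component queues
 * and zero start sectors are assumptions for the example; a negative
 * return means no compatible alignment was found and ->misaligned has
 * been set.
 */
static int __maybe_unused example_stack_components(struct queue_limits *top,
						   struct request_queue *c0,
						   struct request_queue *c1)
{
	int ret = 0;

	blk_set_stacking_limits(top);	/* start permissive, then narrow */
	ret |= blk_stack_limits(top, &c0->limits, 0);
	ret |= blk_stack_limits(top, &c1->limits, 0);
	return ret;
}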
/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for a top level gendisk and a bottom level
 *    block_device.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
		       sector_t offset)
{
	struct request_queue *t = disk->queue;

	if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits,
			get_start_sect(bdev) + (offset >> 9)) < 0) {
		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

		disk_name(disk, 0, top);
		bdevname(bdev, bottom);

		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
		       top, bottom);
	}

	blk_queue_update_readahead(disk->queue);
}
EXPORT_SYMBOL(disk_stack_limits);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_SIZE - 1) {
		mask = PAGE_SIZE - 1;
		printk(KERN_INFO "%s: set to minimum %lx\n",
		       __func__, mask);
	}

	q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_virt_boundary - set boundary rules for bio merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
{
	q->limits.virt_boundary_mask = mask;

	/*
	 * Devices that require a virtual boundary do not support scatter/gather
	 * I/O natively, but instead require a descriptor list entry for each
	 * page (which might not be identical to the Linux PAGE_SIZE).  Because
	 * of that they are not limited by our notion of "segment size".
	 */
	if (mask)
		q->limits.max_segment_size = UINT_MAX;
}
EXPORT_SYMBOL(blk_queue_virt_boundary);

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Set required memory and length alignment for direct dma transactions.
 *    This is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);
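/*
 * Editor-added illustration (hypothetical): a controller that walks a
 * PRP-style page list, so each descriptor entry maps one 4KB page and
 * data buffers must be 4-byte aligned.  The values are assumptions for
 * the example.
 */
static void __maybe_unused example_set_boundaries(struct request_queue *q)
{
	blk_queue_virt_boundary(q, 4096 - 1);	/* no gaps within 4KB pages */
	blk_queue_dma_alignment(q, 3);		/* 4-byte aligned buffers */
}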
/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->dma_alignment)
		q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q:      the request queue for the device
 * @depth:  queue depth
 *
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
	q->queue_depth = depth;
	rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);

/**
 * blk_queue_write_cache - configure queue's write cache
 * @q:    the request queue for the device
 * @wc:   write back cache on or off
 * @fua:  device supports FUA writes, if true
 *
 * Tell the block layer about the write cache of @q.
 */
void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
{
	if (wc)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
	if (fua)
		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);

	wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);

/**
 * blk_queue_required_elevator_features - set the required elevator features
 * @q:         the request queue for the target device
 * @features:  required elevator features OR'ed together
 *
 * Tell the block layer that for the device controlled through @q, the only
 * elevators that can be used are those that implement at least the set of
 * features specified by @features.
 */
void blk_queue_required_elevator_features(struct request_queue *q,
					  unsigned int features)
{
	q->required_elevator_features = features;
}
EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);

/**
 * blk_queue_can_use_dma_map_merging - configure queue for merging segments.
 * @q:    the request queue for the device
 * @dev:  the device pointer for dma
 *
 * Tell the block layer whether the segments of @q can be merged when DMA
 * mapping, based on the merge boundary reported for @dev.
 */
bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
				       struct device *dev)
{
	unsigned long boundary = dma_get_merge_boundary(dev);

	if (!boundary)
		return false;

	/* No need to update max_segment_size.  see blk_queue_virt_boundary() */
	blk_queue_virt_boundary(q, boundary);

	return true;
}
EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
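/*
 * Editor-added illustration (hypothetical): a SATA-style device with a
 * volatile write-back cache, no FUA support, and 32 outstanding
 * commands.  The values are assumptions for the example.
 */
static void __maybe_unused example_set_caching(struct request_queue *q)
{
	blk_queue_write_cache(q, true, false);	/* writeback cache, no FUA */
	blk_set_queue_depth(q, 32);
}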
/**
 * blk_queue_set_zoned - configure a disk queue zoned model.
 * @disk:   the gendisk of the queue to configure
 * @model:  the zoned model to set
 *
 * Set the zoned model of the request queue of @disk according to @model.
 * When @model is BLK_ZONED_HM (host managed), this should be called only
 * if zoned block device support is enabled (CONFIG_BLK_DEV_ZONED option).
 * If @model specifies BLK_ZONED_HA (host aware), the effective model used
 * depends on CONFIG_BLK_DEV_ZONED settings and on the existence of partitions
 * on the disk.
 */
void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
{
	switch (model) {
	case BLK_ZONED_HM:
		/*
		 * Host managed devices are supported only if
		 * CONFIG_BLK_DEV_ZONED is enabled.
		 */
		WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));
		break;
	case BLK_ZONED_HA:
		/*
		 * Host aware devices can be treated either as regular block
		 * devices (similar to drive managed devices) or as zoned block
		 * devices to take advantage of the zone command set, similarly
		 * to host managed devices.  We try the latter if there are no
		 * partitions and zoned block device support is enabled, else
		 * we do nothing special as far as the block layer is concerned.
		 */
		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) ||
		    disk_has_partitions(disk))
			model = BLK_ZONED_NONE;
		break;
	case BLK_ZONED_NONE:
	default:
		if (WARN_ON_ONCE(model != BLK_ZONED_NONE))
			model = BLK_ZONED_NONE;
		break;
	}

	disk->queue->limits.zoned = model;
}
EXPORT_SYMBOL_GPL(blk_queue_set_zoned);

static int __init blk_settings_init(void)
{
	blk_max_low_pfn = max_low_pfn - 1;
	blk_max_pfn = max_pfn - 1;
	return 0;
}
subsys_initcall(blk_settings_init);
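/*
 * Editor-added illustration (hypothetical): a driver reporting a
 * host-managed SMR disk.  With CONFIG_BLK_DEV_ZONED disabled, this
 * would trigger the WARN_ON_ONCE() in blk_queue_set_zoned() above.
 */
static void __maybe_unused example_mark_host_managed(struct gendisk *disk)
{
	blk_queue_set_zoned(disk, BLK_ZONED_HM);
}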