// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

#include "blk.h"
#include "blk-wbt.h"

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

/**
 * blk_set_default_limits - reset limits to default values
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
	lim->max_segments = BLK_MAX_SEGMENTS;
	lim->max_discard_segments = 1;
	lim->max_integrity_segments = 0;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	lim->virt_boundary_mask = 0;
	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
	lim->max_dev_sectors = 0;
	lim->chunk_sectors = 0;
	lim->max_write_same_sectors = 0;
	lim->max_write_zeroes_sectors = 0;
	lim->max_zone_append_sectors = 0;
	lim->max_discard_sectors = 0;
	lim->max_hw_discard_sectors = 0;
	lim->discard_granularity = 0;
	lim->discard_alignment = 0;
	lim->discard_misaligned = 0;
	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
	lim->bounce = BLK_BOUNCE_NONE;
	lim->alignment_offset = 0;
	lim->io_opt = 0;
	lim->misaligned = 0;
	lim->zoned = BLK_ZONED_NONE;
	lim->zone_write_granularity = 0;
}
EXPORT_SYMBOL(blk_set_default_limits);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state. Should be used
 *   by stacking drivers like DM that have no internal limits.
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
	blk_set_default_limits(lim);

	/* Inherit limits from component devices */
	lim->max_segments = USHRT_MAX;
	lim->max_discard_segments = USHRT_MAX;
	lim->max_hw_sectors = UINT_MAX;
	lim->max_segment_size = UINT_MAX;
	lim->max_sectors = UINT_MAX;
	lim->max_dev_sectors = UINT_MAX;
	lim->max_write_same_sectors = UINT_MAX;
	lim->max_write_zeroes_sectors = UINT_MAX;
	lim->max_zone_append_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
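
/*
 * Illustrative sketch (hypothetical stacking driver, not a prescribed
 * sequence): such a driver starts from the permissive stacking defaults
 * so that component device limits are not clipped by the conservative
 * blk_set_default_limits() values, and then folds in each component
 * with blk_stack_limits()/disk_stack_limits() (see further below):
 *
 *	struct queue_limits lim;
 *
 *	blk_set_stacking_limits(&lim);
 *	// ... then call blk_stack_limits(&lim, &component_limits, start)
 *	// once per component device ...
 */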
/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @bounce: bounce limit to enforce
 *
 * Description:
 *    Force bouncing for ISA DMA ranges or highmem.
 *
 *    DEPRECATED, don't use in new code.
 **/
void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
{
	q->limits.bounce = bounce;
}
EXPORT_SYMBOL(blk_queue_bounce_limit);

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *    the device driver based upon the capabilities of the I/O
 *    controller.
 *
 *    max_dev_sectors is a hard limit imposed by the storage device for
 *    READ/WRITE requests.  It is set by the disk driver.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests.  This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit cannot exceed max_hw_sectors.
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
	struct queue_limits *limits = &q->limits;
	unsigned int max_sectors;

	if ((max_hw_sectors << 9) < PAGE_SIZE) {
		max_hw_sectors = 1 << (PAGE_SHIFT - 9);
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_hw_sectors);
	}

	max_hw_sectors = round_down(max_hw_sectors,
				    limits->logical_block_size >> SECTOR_SHIFT);
	limits->max_hw_sectors = max_hw_sectors;

	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
	max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
	max_sectors = round_down(max_sectors,
				 limits->logical_block_size >> SECTOR_SHIFT);
	limits->max_sectors = max_sectors;

	q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
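
/*
 * Illustrative sketch (hypothetical values): a driver whose controller
 * can transfer at most 1 MiB per request would set
 *
 *	blk_queue_max_hw_sectors(q, 2048);	// 2048 * 512 bytes = 1 MiB
 *
 * max_hw_sectors then becomes the hard limit, while max_sectors (the
 * soft limit used for filesystem requests) is additionally capped at
 * BLK_DEF_MAX_SECTORS and can later be lowered further through
 * /sys/block/<device>/queue/max_sectors_kb.
 */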
/**
 * blk_queue_chunk_sectors - set size of the chunk for this queue
 * @q:  the request queue for the device
 * @chunk_sectors:  chunk sectors in the usual 512b unit
 *
 * Description:
 *    If a driver doesn't want IOs to cross a given chunk size, it can set
 *    this limit and prevent merging across chunks. Note that the block layer
 *    must accept a page worth of data at any offset. So if the crossing of
 *    chunks is a hard limitation in the driver, it must still be prepared
 *    to split single page bios.
 **/
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
{
	q->limits.chunk_sectors = chunk_sectors;
}
EXPORT_SYMBOL(blk_queue_chunk_sectors);

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors)
{
	q->limits.max_hw_discard_sectors = max_discard_sectors;
	q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_write_same_sectors - set max sectors for a single write same
 * @q:  the request queue for the device
 * @max_write_same_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_same_sectors(struct request_queue *q,
				      unsigned int max_write_same_sectors)
{
	q->limits.max_write_same_sectors = max_write_same_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_same_sectors);

/**
 * blk_queue_max_write_zeroes_sectors - set max sectors for a single
 *                                      write zeroes
 * @q:  the request queue for the device
 * @max_write_zeroes_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_zeroes_sectors)
{
	q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);

/**
 * blk_queue_max_zone_append_sectors - set max sectors for a single zone append
 * @q:  the request queue for the device
 * @max_zone_append_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_zone_append_sectors(struct request_queue *q,
		unsigned int max_zone_append_sectors)
{
	unsigned int max_sectors;

	if (WARN_ON(!blk_queue_is_zoned(q)))
		return;

	max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);
	max_sectors = min(q->limits.chunk_sectors, max_sectors);

	/*
	 * Signal eventual driver bugs resulting in the max_zone_append
	 * sectors limit being 0 due to a 0 argument, the chunk_sectors limit
	 * (zone size) not set, or the max_hw_sectors limit not set.
	 */
	WARN_ON(!max_sectors);

	q->limits.max_zone_append_sectors = max_sectors;
}
EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);
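
/*
 * Illustrative sketch (hypothetical values): once a queue's zoned model
 * has been set (see blk_queue_set_zoned() at the end of this file), a
 * zoned driver typically sets chunk_sectors to its zone size so that no
 * request crosses a zone, and only then caps zone append, since the
 * zone append limit above is clamped to both chunk_sectors and
 * max_hw_sectors:
 *
 *	blk_queue_chunk_sectors(q, 524288);		// 256 MiB zones
 *	blk_queue_max_zone_append_sectors(q, 1024);	// 512 KiB appends
 */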
/**
 * blk_queue_max_discard_segments - set max segments for discard requests
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    segments in a discard request.
 **/
void blk_queue_max_discard_segments(struct request_queue *q,
		unsigned short max_segments)
{
	q->limits.max_discard_segments = max_segments;
}
EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment.
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_SIZE) {
		max_size = PAGE_SIZE;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_size);
	}

	/* see blk_queue_virt_boundary() for the explanation */
	WARN_ON_ONCE(q->limits.virt_boundary_mask);

	q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
{
	struct queue_limits *limits = &q->limits;

	limits->logical_block_size = size;

	if (limits->physical_block_size < size)
		limits->physical_block_size = size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;

	limits->max_hw_sectors =
		round_down(limits->max_hw_sectors, size >> SECTOR_SHIFT);
	limits->max_sectors =
		round_down(limits->max_sectors, size >> SECTOR_SHIFT);
}
EXPORT_SYMBOL(blk_queue_logical_block_size);

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);
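
/*
 * Illustrative sketch (hypothetical values): a 512-byte emulation drive
 * with 4096-byte physical sectors reports both sizes, and the helpers
 * above keep logical <= physical <= io_min:
 *
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_physical_block_size(q, 4096);	// io_min is raised to 4096 too
 */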
/**
 * blk_queue_zone_write_granularity - set zone write granularity for the queue
 * @q:  the request queue for the zoned device
 * @size:  the zone write granularity size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible size allowing writes in
 *   sequential zones of a zoned block device.
 */
void blk_queue_zone_write_granularity(struct request_queue *q,
				      unsigned int size)
{
	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return;

	q->limits.zone_write_granularity = size;

	if (q->limits.zone_write_granularity < q->limits.logical_block_size)
		q->limits.zone_write_granularity = q->limits.logical_block_size;
}
EXPORT_SYMBOL_GPL(blk_queue_zone_write_granularity);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:	the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

void blk_queue_update_readahead(struct request_queue *q)
{
	/*
	 * For read-ahead of large files to be effective, we need to read
	 * ahead at least twice the optimal I/O size.
	 */
	q->backing_dev_info->ra_pages =
		max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
	q->backing_dev_info->io_pages =
		queue_max_sectors(q) >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL_GPL(blk_queue_update_readahead);

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:	the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);
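
/*
 * Illustrative sketch (hypothetical values): a RAID driver using a
 * 64 KiB stripe chunk would advertise that chunk as the minimum I/O
 * size, so that well-behaved users issue chunk-sized, chunk-aligned
 * requests:
 *
 *	blk_queue_io_min(q, 64 * 1024);
 */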
/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
	limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:	the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
	blk_limits_io_opt(&q->limits, opt);
	q->backing_dev_info->ra_pages =
		max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
}
EXPORT_SYMBOL(blk_queue_io_opt);
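
/*
 * Illustrative sketch (hypothetical values): continuing the RAID
 * example above, with a 64 KiB chunk striped across 4 data disks the
 * full stripe width is the natural unit for sustained throughput:
 *
 *	blk_queue_io_opt(q, 4 * 64 * 1024);	// 256 KiB stripe width
 */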
static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
{
	sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
	if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
		sectors = PAGE_SIZE >> SECTOR_SHIFT;
	return sectors;
}

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
	t->max_write_same_sectors = min(t->max_write_same_sectors,
					b->max_write_same_sectors);
	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
					  b->max_write_zeroes_sectors);
	t->max_zone_append_sectors = min(t->max_zone_append_sectors,
					 b->max_zone_append_sectors);
	t->bounce = max(t->bounce, b->bounce);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);
	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
					     b->virt_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_discard_segments = min_not_zero(t->max_discard_segments,
					       b->max_discard_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->misaligned |= b->misaligned;

	alignment = queue_limit_alignment_offset(b, start);

	/* Bottom device has different alignment.  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) % min(top, bottom)) {
			t->misaligned = 1;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);

	/* Set non-power-of-2 compatible chunk_sectors boundary */
	if (b->chunk_sectors)
		t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->misaligned = 1;
		ret = -1;
	}

	/* chunk_sectors a multiple of the physical block size? */
	if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
		t->chunk_sectors = 0;
		t->misaligned = 1;
		ret = -1;
	}

	t->raid_partial_stripes_expensive =
		max(t->raid_partial_stripes_expensive,
		    b->raid_partial_stripes_expensive);

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
		% max(t->physical_block_size, t->io_min);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		ret = -1;
	}

	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		if (t->discard_granularity != 0 &&
		    t->discard_alignment != alignment) {
			top = t->discard_granularity + t->discard_alignment;
			bottom = b->discard_granularity + alignment;

			/* Verify that top and bottom intervals line up */
			if ((max(top, bottom) % min(top, bottom)) != 0)
				t->discard_misaligned = 1;
		}

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
							 b->max_hw_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
			t->discard_granularity;
	}

	t->zone_write_granularity = max(t->zone_write_granularity,
					b->zone_write_granularity);
	t->zoned = max(t->zoned, b->zoned);
	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
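
/*
 * Worked example (hypothetical numbers) for the alignment check above:
 * with a top device whose io_min is 64 KiB at alignment_offset 0 and a
 * bottom device whose io_min is 128 KiB at alignment 0, the intervals
 * are 65536 and 131072, and 131072 % 65536 == 0, so the devices stack
 * cleanly.  If the bottom device instead used a 96 KiB io_min, then
 * 98304 % 65536 == 32768 and the combined limits would be flagged as
 * misaligned with a -1 return.
 */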
/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for a top level gendisk and a bottom level
 *    block_device.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
		       sector_t offset)
{
	struct request_queue *t = disk->queue;

	if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits,
			get_start_sect(bdev) + (offset >> 9)) < 0) {
		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

		disk_name(disk, 0, top);
		bdevname(bdev, bottom);

		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
		       top, bottom);
	}

	blk_queue_update_readahead(disk->queue);
}
EXPORT_SYMBOL(disk_stack_limits);
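
/*
 * Illustrative sketch (hypothetical driver code): a stacking driver
 * would typically call disk_stack_limits() once per component device
 * while assembling the array or table, e.g.
 *
 *	list_for_each_entry(dev, &component_devices, list)
 *		disk_stack_limits(stacked_disk, dev->bdev, dev->data_offset);
 *
 * where component_devices, dev->bdev and dev->data_offset are
 * driver-specific.
 */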
/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_SIZE - 1) {
		mask = PAGE_SIZE - 1;
		printk(KERN_INFO "%s: set to minimum %lx\n",
		       __func__, mask);
	}

	q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_virt_boundary - set boundary rules for bio merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
{
	q->limits.virt_boundary_mask = mask;

	/*
	 * Devices that require a virtual boundary do not support scatter/gather
	 * I/O natively, but instead require a descriptor list entry for each
	 * page (which might not be identical to the Linux PAGE_SIZE).  Because
	 * of that they are not limited by our notion of "segment size".
	 */
	if (mask)
		q->limits.max_segment_size = UINT_MAX;
}
EXPORT_SYMBOL(blk_queue_virt_boundary);
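
/*
 * Illustrative sketch (hypothetical values): a controller that consumes
 * one descriptor per 4 KiB page, in the style of NVMe PRP lists, would
 * set a 4 KiB virtual boundary so that no segment straddles such a
 * page:
 *
 *	blk_queue_virt_boundary(q, 4096 - 1);
 */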
/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Set required memory and length alignment for direct dma transactions.
 *    This is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->dma_alignment)
		q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q:		the request queue for the device
 * @depth:	queue depth
 *
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
	q->queue_depth = depth;
	rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);

/**
 * blk_queue_write_cache - configure queue's write cache
 * @q:		the request queue for the device
 * @wc:		write back cache on or off
 * @fua:	device supports FUA writes, if true
 *
 * Tell the block layer about the write cache of @q.
 */
void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
{
	if (wc)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
	if (fua)
		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);

	wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);
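
/*
 * Illustrative sketch (hypothetical device): a driver for a device with
 * a volatile write back cache that also honours FUA writes would report
 * both, so the block layer issues cache flushes and may use REQ_FUA:
 *
 *	blk_queue_write_cache(q, true, true);
 *
 * A device without a volatile cache would pass (q, false, false), and
 * flush requests then need not be sent to the device at all.
 */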
/**
 * blk_queue_required_elevator_features - Set a queue's required elevator features
 * @q:		the request queue for the target device
 * @features:	Required elevator features OR'ed together
 *
 * Tell the block layer that for the device controlled through @q, the only
 * elevators that can be used are those that implement at least the set of
 * features specified by @features.
 */
void blk_queue_required_elevator_features(struct request_queue *q,
					  unsigned int features)
{
	q->required_elevator_features = features;
}
EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);

/**
 * blk_queue_can_use_dma_map_merging - configure queue for merging segments.
 * @q:		the request queue for the device
 * @dev:	the device pointer for dma
 *
 * Tell the block layer that the segments of @q can be merged by the DMA map
 * of @dev.
 */
bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
				       struct device *dev)
{
	unsigned long boundary = dma_get_merge_boundary(dev);

	if (!boundary)
		return false;

	/* No need to update max_segment_size. see blk_queue_virt_boundary() */
	blk_queue_virt_boundary(q, boundary);

	return true;
}
EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);

/**
 * blk_queue_set_zoned - configure a disk queue zoned model.
 * @disk:	the gendisk of the queue to configure
 * @model:	the zoned model to set
 *
 * Set the zoned model of the request queue of @disk according to @model.
 * When @model is BLK_ZONED_HM (host managed), this should be called only
 * if zoned block device support is enabled (CONFIG_BLK_DEV_ZONED option).
 * If @model specifies BLK_ZONED_HA (host aware), the effective model used
 * depends on CONFIG_BLK_DEV_ZONED settings and on the existence of partitions
 * on the disk.
 */
void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
{
	struct request_queue *q = disk->queue;

	switch (model) {
	case BLK_ZONED_HM:
		/*
		 * Host managed devices are supported only if
		 * CONFIG_BLK_DEV_ZONED is enabled.
		 */
		WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));
		break;
	case BLK_ZONED_HA:
		/*
		 * Host aware devices can be treated either as regular block
		 * devices (similar to drive managed devices) or as zoned block
		 * devices to take advantage of the zone command set, similarly
		 * to host managed devices. We try the latter if there are no
		 * partitions and zoned block device support is enabled, else
		 * we do nothing special as far as the block layer is concerned.
		 */
		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) ||
		    !xa_empty(&disk->part_tbl))
			model = BLK_ZONED_NONE;
		break;
	case BLK_ZONED_NONE:
	default:
		if (WARN_ON_ONCE(model != BLK_ZONED_NONE))
			model = BLK_ZONED_NONE;
		break;
	}

	q->limits.zoned = model;
	if (model != BLK_ZONED_NONE) {
		/*
		 * Set the zone write granularity to the device logical block
		 * size by default. The driver can change this value if needed.
		 */
		blk_queue_zone_write_granularity(q,
						queue_logical_block_size(q));
	} else {
		blk_queue_clear_zone_settings(q);
	}
}
EXPORT_SYMBOL_GPL(blk_queue_set_zoned);