// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

#include "blk.h"
#include "blk-wbt.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
        q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

/**
 * blk_set_default_limits - reset limits to default values
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
        lim->max_segments = BLK_MAX_SEGMENTS;
        lim->max_discard_segments = 1;
        lim->max_integrity_segments = 0;
        lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
        lim->virt_boundary_mask = 0;
        lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
        lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
        lim->max_dev_sectors = 0;
        lim->chunk_sectors = 0;
        lim->max_write_same_sectors = 0;
        lim->max_write_zeroes_sectors = 0;
        lim->max_discard_sectors = 0;
        lim->max_hw_discard_sectors = 0;
        lim->discard_granularity = 0;
        lim->discard_alignment = 0;
        lim->discard_misaligned = 0;
        lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
        lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
        lim->alignment_offset = 0;
        lim->io_opt = 0;
        lim->misaligned = 0;
        lim->zoned = BLK_ZONED_NONE;
}
EXPORT_SYMBOL(blk_set_default_limits);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state. Should be used
 *   by stacking drivers like DM that have no internal limits.
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
        blk_set_default_limits(lim);

        /* Inherit limits from component devices */
        lim->max_segments = USHRT_MAX;
        lim->max_discard_segments = USHRT_MAX;
        lim->max_hw_sectors = UINT_MAX;
        lim->max_segment_size = UINT_MAX;
        lim->max_sectors = UINT_MAX;
        lim->max_dev_sectors = UINT_MAX;
        lim->max_write_same_sectors = UINT_MAX;
        lim->max_write_zeroes_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);

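/*
 * Illustrative sketch (not part of the original file): a stacking driver
 * such as DM typically resets its limits with blk_set_stacking_limits()
 * and then narrows them by folding in each component device with
 * bdev_stack_limits() (defined later in this file).  The function and
 * variable names below are hypothetical, for illustration only.
 */
#if 0
static void example_stacking_driver_setup(struct request_queue *q,
                                          struct block_device **parts,
                                          sector_t *starts, int nr_parts)
{
        struct queue_limits lim;
        int i;

        blk_set_stacking_limits(&lim);          /* start from "no limits" */

        for (i = 0; i < nr_parts; i++)          /* shrink to what all parts support */
                if (bdev_stack_limits(&lim, parts[i], starts[i]) < 0)
                        pr_warn("example: component %d is misaligned\n", i);

        q->limits = lim;                        /* publish the combined limits */
}
#endif
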
/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @max_addr: the maximum address the device can handle
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @max_addr.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
{
        unsigned long b_pfn = max_addr >> PAGE_SHIFT;
        int dma = 0;

        q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
        /*
         * Assume anything <= 4GB can be handled by IOMMU.  Actually
         * some IOMMUs can handle everything, but I don't know of a
         * way to test this here.
         */
        if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
                dma = 1;
        q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
#else
        if (b_pfn < blk_max_low_pfn)
                dma = 1;
        q->limits.bounce_pfn = b_pfn;
#endif
        if (dma) {
                init_emergency_isa_pool();
                q->bounce_gfp = GFP_NOIO | GFP_DMA;
                q->limits.bounce_pfn = b_pfn;
        }
}
EXPORT_SYMBOL(blk_queue_bounce_limit);

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *    the device driver based upon the capabilities of the I/O
 *    controller.
 *
 *    max_dev_sectors is a hard limit imposed by the storage device for
 *    READ/WRITE requests.  It is set by the disk driver.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests.  This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit can not exceed max_hw_sectors.
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
        struct queue_limits *limits = &q->limits;
        unsigned int max_sectors;

        if ((max_hw_sectors << 9) < PAGE_SIZE) {
                max_hw_sectors = 1 << (PAGE_SHIFT - 9);
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_hw_sectors);
        }

        limits->max_hw_sectors = max_hw_sectors;
        max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
        max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
        limits->max_sectors = max_sectors;
        q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);

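/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * driver whose controller can transfer at most 1 MiB per request would
 * pass 2048 (512-byte units) here.  max_hw_sectors becomes the hard cap,
 * while max_sectors is clamped to min(2048, BLK_DEF_MAX_SECTORS) and can
 * later be tuned through /sys/block/<device>/queue/max_sectors_kb.
 */
#if 0
static void example_set_transfer_limit(struct request_queue *q)
{
        blk_queue_max_hw_sectors(q, 2048);      /* 2048 * 512 B = 1 MiB per request */
}
#endif
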
/**
 * blk_queue_chunk_sectors - set size of the chunk for this queue
 * @q:  the request queue for the device
 * @chunk_sectors:  chunk sectors in the usual 512b unit
 *
 * Description:
 *    If a driver doesn't want IOs to cross a given chunk size, it can set
 *    this limit and prevent merging across chunks. Note that the chunk size
 *    must currently be a power-of-2 in sectors. Also note that the block
 *    layer must accept a page worth of data at any offset. So if the
 *    crossing of chunks is a hard limitation in the driver, it must still be
 *    prepared to split single page bios.
 **/
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
{
        BUG_ON(!is_power_of_2(chunk_sectors));
        q->limits.chunk_sectors = chunk_sectors;
}
EXPORT_SYMBOL(blk_queue_chunk_sectors);

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
                unsigned int max_discard_sectors)
{
        q->limits.max_hw_discard_sectors = max_discard_sectors;
        q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_write_same_sectors - set max sectors for a single write same
 * @q:  the request queue for the device
 * @max_write_same_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_same_sectors(struct request_queue *q,
                                      unsigned int max_write_same_sectors)
{
        q->limits.max_write_same_sectors = max_write_same_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_same_sectors);

/**
 * blk_queue_max_write_zeroes_sectors - set max sectors for a single
 *                                      write zeroes
 * @q:  the request queue for the device
 * @max_write_zeroes_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
                unsigned int max_write_zeroes_sectors)
{
        q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
        if (!max_segments) {
                max_segments = 1;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_segments);
        }

        q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

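/*
 * Illustrative sketch (not part of the original file): a hypothetical HBA
 * with a 128-entry scatter/gather table and a 64 KiB limit per S/G entry
 * might advertise those constraints as follows.  The values are made up
 * for illustration only.
 */
#if 0
static void example_set_sg_limits(struct request_queue *q)
{
        blk_queue_max_segments(q, 128);           /* at most 128 S/G entries */
        blk_queue_max_segment_size(q, 64 * 1024); /* at most 64 KiB per entry */
}
#endif
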
/**
 * blk_queue_max_discard_segments - set max segments for discard requests
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    segments in a discard request.
 **/
void blk_queue_max_discard_segments(struct request_queue *q,
                unsigned short max_segments)
{
        q->limits.max_discard_segments = max_segments;
}
EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
        if (max_size < PAGE_SIZE) {
                max_size = PAGE_SIZE;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_size);
        }

        /* see blk_queue_virt_boundary() for the explanation */
        WARN_ON_ONCE(q->limits.virt_boundary_mask);

        q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
{
        q->limits.logical_block_size = size;

        if (q->limits.physical_block_size < size)
                q->limits.physical_block_size = size;

        if (q->limits.io_min < q->limits.physical_block_size)
                q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
        q->limits.physical_block_size = size;

        if (q->limits.physical_block_size < q->limits.logical_block_size)
                q->limits.physical_block_size = q->limits.logical_block_size;

        if (q->limits.io_min < q->limits.physical_block_size)
                q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:  the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
        q->limits.alignment_offset =
                offset & (q->limits.physical_block_size - 1);
        q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

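/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * "512e" drive exposes 512-byte logical sectors on top of 4096-byte
 * physical sectors.  If its first logical sector sits 3584 bytes into a
 * physical block (the classic 63-sector legacy layout), it would report
 * that as the alignment offset.  All values are assumptions for
 * illustration only.
 */
#if 0
static void example_set_block_sizes(struct request_queue *q)
{
        blk_queue_logical_block_size(q, 512);    /* smallest addressable unit */
        blk_queue_physical_block_size(q, 4096);  /* no RMW below 4 KiB */
        blk_queue_alignment_offset(q, 3584);     /* LBA 0 is 3584 B into a physical block */
}
#endif
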
/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
        limits->io_min = min;

        if (limits->io_min < limits->logical_block_size)
                limits->io_min = limits->logical_block_size;

        if (limits->io_min < limits->physical_block_size)
                limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:  the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
        blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
        limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:  the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
        blk_limits_io_opt(&q->limits, opt);
}
EXPORT_SYMBOL(blk_queue_io_opt);

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:  the stacking driver (top)
 * @b:  the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
        blk_stack_limits(&t->limits, &b->limits, 0);
}
EXPORT_SYMBOL(blk_queue_stack_limits);

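/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * RAID5-style array with a 64 KiB chunk and three data disks might report
 * the chunk size as its minimum I/O and the full stripe width as its
 * optimal I/O.  The numbers are assumptions for illustration only.
 */
#if 0
static void example_set_io_hints(struct request_queue *q)
{
        blk_queue_io_min(q, 64 * 1024);         /* one chunk: avoids read-modify-write */
        blk_queue_io_opt(q, 3 * 64 * 1024);     /* full stripe: best sustained throughput */
}
#endif
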
/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:  the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                     sector_t start)
{
        unsigned int top, bottom, alignment, ret = 0;

        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
        t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
        t->max_write_same_sectors = min(t->max_write_same_sectors,
                                        b->max_write_same_sectors);
        t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
                                          b->max_write_zeroes_sectors);
        t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
                                            b->seg_boundary_mask);
        t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
                                             b->virt_boundary_mask);

        t->max_segments = min_not_zero(t->max_segments, b->max_segments);
        t->max_discard_segments = min_not_zero(t->max_discard_segments,
                                               b->max_discard_segments);
        t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
                                                 b->max_integrity_segments);

        t->max_segment_size = min_not_zero(t->max_segment_size,
                                           b->max_segment_size);

        t->misaligned |= b->misaligned;

        alignment = queue_limit_alignment_offset(b, start);

        /* Bottom device has different alignment.  Check that it is
         * compatible with the current top alignment.
         */
        if (t->alignment_offset != alignment) {

                top = max(t->physical_block_size, t->io_min)
                        + t->alignment_offset;
                bottom = max(b->physical_block_size, b->io_min) + alignment;

                /* Verify that top and bottom intervals line up */
                if (max(top, bottom) % min(top, bottom)) {
                        t->misaligned = 1;
                        ret = -1;
                }
        }

        t->logical_block_size = max(t->logical_block_size,
                                    b->logical_block_size);

        t->physical_block_size = max(t->physical_block_size,
                                     b->physical_block_size);

        t->io_min = max(t->io_min, b->io_min);
        t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);

        /* Physical block size a multiple of the logical block size? */
        if (t->physical_block_size & (t->logical_block_size - 1)) {
                t->physical_block_size = t->logical_block_size;
                t->misaligned = 1;
                ret = -1;
        }

        /* Minimum I/O a multiple of the physical block size? */
        if (t->io_min & (t->physical_block_size - 1)) {
                t->io_min = t->physical_block_size;
                t->misaligned = 1;
                ret = -1;
        }

        /* Optimal I/O a multiple of the physical block size? */
        if (t->io_opt & (t->physical_block_size - 1)) {
                t->io_opt = 0;
                t->misaligned = 1;
                ret = -1;
        }

        t->raid_partial_stripes_expensive =
                max(t->raid_partial_stripes_expensive,
                    b->raid_partial_stripes_expensive);

        /* Find lowest common alignment_offset */
        t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
                % max(t->physical_block_size, t->io_min);

        /* Verify that new alignment_offset is on a logical block boundary */
        if (t->alignment_offset & (t->logical_block_size - 1)) {
                t->misaligned = 1;
                ret = -1;
        }

        /* Discard alignment and granularity */
        if (b->discard_granularity) {
                alignment = queue_limit_discard_alignment(b, start);

                if (t->discard_granularity != 0 &&
                    t->discard_alignment != alignment) {
                        top = t->discard_granularity + t->discard_alignment;
                        bottom = b->discard_granularity + alignment;

                        /* Verify that top and bottom intervals line up */
                        if ((max(top, bottom) % min(top, bottom)) != 0)
                                t->discard_misaligned = 1;
                }

                t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
                                                      b->max_discard_sectors);
                t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
                                                         b->max_hw_discard_sectors);
                t->discard_granularity = max(t->discard_granularity,
                                             b->discard_granularity);
                t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
                        t->discard_granularity;
        }

        if (b->chunk_sectors)
                t->chunk_sectors = min_not_zero(t->chunk_sectors,
                                                b->chunk_sectors);

        return ret;
}
EXPORT_SYMBOL(blk_stack_limits);

/**
 * bdev_stack_limits - adjust queue limits for stacked drivers
 * @t:  the stacking driver limits (top device)
 * @bdev:  the component block_device (bottom)
 * @start:  first data sector within component device
 *
 * Description:
 *    Merges queue limits for a top device and a block_device.  Returns
 *    0 if alignment didn't change.  Returns -1 if adding the bottom
 *    device caused misalignment.
 */
int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
                      sector_t start)
{
        struct request_queue *bq = bdev_get_queue(bdev);

        start += get_start_sect(bdev);

        return blk_stack_limits(t, &bq->limits, start);
}
EXPORT_SYMBOL(bdev_stack_limits);

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for a top level gendisk and a bottom level
 *    block_device.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
                       sector_t offset)
{
        struct request_queue *t = disk->queue;

        if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
                char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

                disk_name(disk, 0, top);
                bdevname(bdev, bottom);

                printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
                       top, bottom);
        }

        t->backing_dev_info->io_pages =
                t->limits.max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(disk_stack_limits);

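/*
 * Illustrative sketch (not part of the original file): a top-level
 * MD/DM-style driver that already has a gendisk can simply call
 * disk_stack_limits() for each component at the byte offset where its
 * data begins; misalignment is then reported by the helper itself.
 * The names below are hypothetical.
 */
#if 0
static void example_add_component(struct gendisk *top_disk,
                                  struct block_device *component,
                                  sector_t data_offset)
{
        /* data_offset is in bytes; disk_stack_limits() converts to sectors */
        disk_stack_limits(top_disk, component, data_offset);
}
#endif
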
/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
        if (mask > q->dma_pad_mask)
                q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q:  the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf:   physically contiguous buffer
 * @size:  size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer.  They have to have a
 * real area of memory to transfer it into.  The use case for this is
 * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer.  What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_hw_segments to make room for appending
 * the drain buffer.  If you call blk_queue_max_segments() after calling
 * this routine, you must set the limit to one fewer than your device
 * can support otherwise there won't be room for the drain buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
                        dma_drain_needed_fn *dma_drain_needed,
                        void *buf, unsigned int size)
{
        if (queue_max_segments(q) < 2)
                return -EINVAL;
        /* make room for appending the drain */
        blk_queue_max_segments(q, queue_max_segments(q) - 1);
        q->dma_drain_needed = dma_drain_needed;
        q->dma_drain_buffer = buf;
        q->dma_drain_size = size;

        return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
        if (mask < PAGE_SIZE - 1) {
                mask = PAGE_SIZE - 1;
                printk(KERN_INFO "%s: set to minimum %lx\n",
                       __func__, mask);
        }

        q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_virt_boundary - set boundary rules for bio merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
{
        q->limits.virt_boundary_mask = mask;

        /*
         * Devices that require a virtual boundary do not support scatter/gather
         * I/O natively, but instead require a descriptor list entry for each
         * page (which might not be identical to the Linux PAGE_SIZE).  Because
         * of that they are not limited by our notion of "segment size".
         */
        if (mask)
                q->limits.max_segment_size = UINT_MAX;
}
EXPORT_SYMBOL(blk_queue_virt_boundary);

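/*
 * Illustrative sketch (not part of the original file): an NVMe-like
 * controller whose data pointers cannot cross a 4 KiB boundary would set
 * a virtual boundary mask of 4095; the segment size limit is then lifted
 * automatically as described above.  The value is an assumption for
 * illustration only.
 */
#if 0
static void example_set_virt_boundary(struct request_queue *q)
{
        blk_queue_virt_boundary(q, 4096 - 1);   /* each element confined to a 4 KiB page */
}
#endif
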
/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    set required memory and length alignment for direct dma transactions.
 *    this is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
        q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
        BUG_ON(mask > PAGE_SIZE);

        if (mask > q->dma_alignment)
                q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q:      the request queue for the device
 * @depth:  queue depth
 *
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
        q->queue_depth = depth;
        rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);

/**
 * blk_queue_write_cache - configure queue's write cache
 * @q:    the request queue for the device
 * @wc:   write back cache on or off
 * @fua:  device supports FUA writes, if true
 *
 * Tell the block layer about the write cache of @q.
 */
void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
{
        if (wc)
                blk_queue_flag_set(QUEUE_FLAG_WC, q);
        else
                blk_queue_flag_clear(QUEUE_FLAG_WC, q);
        if (fua)
                blk_queue_flag_set(QUEUE_FLAG_FUA, q);
        else
                blk_queue_flag_clear(QUEUE_FLAG_FUA, q);

        wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);

/**
 * blk_queue_required_elevator_features - Set a queue's required elevator features
 * @q:         the request queue for the target device
 * @features:  Required elevator features OR'ed together
 *
 * Tell the block layer that for the device controlled through @q, the only
 * elevators that can be used are those that implement at least the set of
 * features specified by @features.
 */
void blk_queue_required_elevator_features(struct request_queue *q,
                                          unsigned int features)
{
        q->required_elevator_features = features;
}
EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);

/**
 * blk_queue_can_use_dma_map_merging - configure queue for merging segments.
 * @q:    the request queue for the device
 * @dev:  the device pointer for dma
 *
 * Tell the block layer that segments of @q may be merged according to the
 * DMA merge boundary reported by @dev.
 */
bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
                                       struct device *dev)
{
        unsigned long boundary = dma_get_merge_boundary(dev);

        if (!boundary)
                return false;

        /* No need to update max_segment_size. see blk_queue_virt_boundary() */
        blk_queue_virt_boundary(q, boundary);

        return true;
}
EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);

static int __init blk_settings_init(void)
{
        blk_max_low_pfn = max_low_pfn - 1;
        blk_max_pfn = max_pfn - 1;
        return 0;
}
subsys_initcall(blk_settings_init);

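/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * driver probe routine tying several of the above setters together for a
 * device with a volatile write cache, FUA support, a queue depth of 64
 * and a requirement that buffers be 4-byte aligned.  All values are
 * assumptions for illustration only.
 */
#if 0
static void example_probe_configure(struct request_queue *q)
{
        blk_queue_write_cache(q, true, true);   /* volatile cache + FUA writes */
        blk_set_queue_depth(q, 64);             /* device-side queue depth */
        blk_queue_dma_alignment(q, 4 - 1);      /* buffers must be 4-byte aligned */
}
#endif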