/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>

#include "blk.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q: queue
 * @pfn: prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O; it can be used, for
 * instance, to build a CDB from the request data.
 *
 */
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
        q->prep_rq_fn = pfn;
}
EXPORT_SYMBOL(blk_queue_prep_rq);

/**
 * blk_queue_unprep_rq - set an unprepare_request function for queue
 * @q: queue
 * @ufn: unprepare_request function
 *
 * It's possible for a queue to register an unprepare_request callback
 * which is invoked before the request is finally completed. The goal
 * of the function is to deallocate any data that was allocated in the
 * prepare_request callback.
 *
 */
void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
{
        q->unprep_rq_fn = ufn;
}
EXPORT_SYMBOL(blk_queue_unprep_rq);

void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
        q->softirq_done_fn = fn;
}
EXPORT_SYMBOL(blk_queue_softirq_done);

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
        q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
{
        q->rq_timed_out_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);

void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
{
        q->lld_busy_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
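
/*
 * Illustrative sketch (not part of this file's API): a request-based
 * driver would typically wire up the hooks above right after setting up
 * its queue.  The callback names below are hypothetical.
 *
 *	q = blk_init_queue(my_request_fn, &my_lock);
 *	blk_queue_prep_rq(q, my_prep_rq);
 *	blk_queue_softirq_done(q, my_softirq_done);
 *	blk_queue_rq_timeout(q, 30 * HZ);
 *	blk_queue_lld_busy(q, my_lld_busy);
 */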

/**
 * blk_set_default_limits - reset limits to default values
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
        lim->max_segments = BLK_MAX_SEGMENTS;
        lim->max_integrity_segments = 0;
        lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
        lim->virt_boundary_mask = 0;
        lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
        lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
        lim->chunk_sectors = 0;
        lim->max_write_same_sectors = 0;
        lim->max_discard_sectors = 0;
        lim->max_hw_discard_sectors = 0;
        lim->discard_granularity = 0;
        lim->discard_alignment = 0;
        lim->discard_misaligned = 0;
        lim->discard_zeroes_data = 0;
        lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
        lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
        lim->alignment_offset = 0;
        lim->io_opt = 0;
        lim->misaligned = 0;
        lim->cluster = 1;
}
EXPORT_SYMBOL(blk_set_default_limits);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state. Should be used
 *   by stacking drivers like DM that have no internal limits.
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
        blk_set_default_limits(lim);

        /* Inherit limits from component devices */
        lim->discard_zeroes_data = 1;
        lim->max_segments = USHRT_MAX;
        lim->max_hw_sectors = UINT_MAX;
        lim->max_segment_size = UINT_MAX;
        lim->max_sectors = UINT_MAX;
        lim->max_write_same_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
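
/*
 * Illustrative sketch (hypothetical stacking driver): a DM/MD-style
 * driver with no internal limits starts from these permissive stacking
 * defaults and then narrows them by stacking each component device:
 *
 *	struct queue_limits lim;
 *
 *	blk_set_stacking_limits(&lim);
 *	(then one bdev_stack_limits(&lim, component_bdev, start) per component)
 */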

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:  the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *   The normal way for &struct bios to be passed to a device
 *   driver is for them to be collected into requests on a request
 *   queue, and then to allow the device driver to select requests
 *   off that queue when it is ready.  This works well for many block
 *   devices. However some block devices (typically virtual devices
 *   such as md or lvm) do not benefit from the processing on the
 *   request queue, and are served best by having the requests passed
 *   directly to them.  This can be achieved by providing a function
 *   to blk_queue_make_request().
 *
 * Caveat:
 *   The driver that does this *must* be able to deal appropriately
 *   with buffers in "highmemory". This can be accomplished by either calling
 *   __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 *   blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
        /*
         * set defaults
         */
        q->nr_requests = BLKDEV_MAX_RQ;

        q->make_request_fn = mfn;
        blk_queue_dma_alignment(q, 511);
        blk_queue_congestion_threshold(q);
        q->nr_batching = BLK_BATCH_REQ;

        blk_set_default_limits(&q->limits);

        /*
         * by default assume old behaviour and bounce for any highmem page
         */
        blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
EXPORT_SYMBOL(blk_queue_make_request);

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @max_addr: the maximum address the device can handle
 *
 * Description:
 *   Different hardware can have different requirements as to what pages
 *   it can do I/O directly to. A low level driver can call
 *   blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *   buffers for doing I/O to pages residing above @max_addr.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
{
        unsigned long b_pfn = max_addr >> PAGE_SHIFT;
        int dma = 0;

        q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
        /*
         * Assume anything <= 4GB can be handled by IOMMU.  Actually
         * some IOMMUs can handle everything, but I don't know of a
         * way to test this here.
         */
        if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
                dma = 1;
        q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
#else
        if (b_pfn < blk_max_low_pfn)
                dma = 1;
        q->limits.bounce_pfn = b_pfn;
#endif
        if (dma) {
                init_emergency_isa_pool();
                q->bounce_gfp = GFP_NOIO | GFP_DMA;
                q->limits.bounce_pfn = b_pfn;
        }
}
EXPORT_SYMBOL(blk_queue_bounce_limit);

/**
 * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request
 * @limits: the queue limits
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *   Enables a low level driver to set a hard upper limit,
 *   max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *   the device driver based upon the capabilities of the I/O
 *   controller.
 *
 *   max_sectors is a soft limit imposed by the block layer for
 *   filesystem type requests.  This value can be overridden on a
 *   per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *   The soft limit can not exceed max_hw_sectors.
 **/
void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors)
{
        if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
                max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_hw_sectors);
        }

        limits->max_hw_sectors = max_hw_sectors;
        limits->max_sectors = min_t(unsigned int, max_hw_sectors,
                                    BLK_DEF_MAX_SECTORS);
}
EXPORT_SYMBOL(blk_limits_max_hw_sectors);
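
/*
 * Illustrative sketch (hypothetical values): a controller limited to
 * 1 MiB per command would advertise 2048 512-byte sectors, e.g.
 *
 *	blk_queue_max_hw_sectors(q, 2048);
 *
 * The block layer then caps max_sectors for filesystem requests at
 * min(2048, BLK_DEF_MAX_SECTORS), as described above.
 */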

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *   See description for blk_limits_max_hw_sectors().
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
        blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);

/**
 * blk_queue_chunk_sectors - set size of the chunk for this queue
 * @q:  the request queue for the device
 * @chunk_sectors:  chunk sectors in the usual 512b unit
 *
 * Description:
 *   If a driver doesn't want IOs to cross a given chunk size, it can set
 *   this limit and prevent merging across chunks. Note that the chunk size
 *   must currently be a power-of-2 in sectors. Also note that the block
 *   layer must accept a page worth of data at any offset. So if the
 *   crossing of chunks is a hard limitation in the driver, it must still be
 *   prepared to split single page bios.
 **/
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
{
        BUG_ON(!is_power_of_2(chunk_sectors));
        q->limits.chunk_sectors = chunk_sectors;
}
EXPORT_SYMBOL(blk_queue_chunk_sectors);

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
                unsigned int max_discard_sectors)
{
        q->limits.max_hw_discard_sectors = max_discard_sectors;
        q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_write_same_sectors - set max sectors for a single write same
 * @q:  the request queue for the device
 * @max_write_same_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_same_sectors(struct request_queue *q,
                                      unsigned int max_write_same_sectors)
{
        q->limits.max_write_same_sectors = max_write_same_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_same_sectors);
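
/*
 * Illustrative sketch (hypothetical values): a device that discards in
 * 4 KiB granules and accepts up to 8 MiB per discard command might set
 *
 *	q->limits.discard_granularity = 4096;
 *	blk_queue_max_discard_sectors(q, 16384);	(8 MiB in 512b sectors)
 */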

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *   Enables a low level driver to set an upper limit on the number of
 *   hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
        if (!max_segments) {
                max_segments = 1;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_segments);
        }

        q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *   Enables a low level driver to set an upper limit on the size of a
 *   coalesced segment
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
        if (max_size < PAGE_CACHE_SIZE) {
                max_size = PAGE_CACHE_SIZE;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_size);
        }

        q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
{
        q->limits.logical_block_size = size;

        if (q->limits.physical_block_size < size)
                q->limits.physical_block_size = size;

        if (q->limits.io_min < q->limits.physical_block_size)
                q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
        q->limits.physical_block_size = size;

        if (q->limits.physical_block_size < q->limits.logical_block_size)
                q->limits.physical_block_size = q->limits.logical_block_size;

        if (q->limits.io_min < q->limits.physical_block_size)
                q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:  the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
        q->limits.alignment_offset =
                offset & (q->limits.physical_block_size - 1);
        q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);
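
/*
 * Illustrative sketch (hypothetical drive): a 512-byte emulation disk
 * with 4 KiB physical sectors whose LBA 0 starts 3584 bytes into a
 * physical sector could be described as
 *
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_physical_block_size(q, 4096);
 *	blk_queue_alignment_offset(q, 3584);
 */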

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
        limits->io_min = min;

        if (limits->io_min < limits->logical_block_size)
                limits->io_min = limits->logical_block_size;

        if (limits->io_min < limits->physical_block_size)
                limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:  the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
        blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
        limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:  the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
        blk_limits_io_opt(&q->limits, opt);
}
EXPORT_SYMBOL(blk_queue_io_opt);

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:  the stacking driver (top)
 * @b:  the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
        blk_stack_limits(&t->limits, &b->limits, 0);
}
EXPORT_SYMBOL(blk_queue_stack_limits);
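
/*
 * Illustrative sketch (hypothetical array): a RAID5 set with a 64 KiB
 * chunk and four data disks would report a 64 KiB minimum and a 256 KiB
 * optimal I/O size:
 *
 *	blk_queue_io_min(q, 65536);
 *	blk_queue_io_opt(q, 262144);
 */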

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:  the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *   This function is used by stacking drivers like MD and DM to ensure
 *   that all component devices have compatible block sizes and
 *   alignments.  The stacking driver must provide a queue_limits
 *   struct (top) and then iteratively call the stacking function for
 *   all component (bottom) devices.  The stacking function will
 *   attempt to combine the values and ensure proper alignment.
 *
 *   Returns 0 if the top and bottom queue_limits are compatible.  The
 *   top device's block sizes and alignment offsets may be adjusted to
 *   ensure alignment with the bottom device.  If no compatible sizes
 *   and alignments exist, -1 is returned and the resulting top
 *   queue_limits will have the misaligned flag set to indicate that
 *   the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                     sector_t start)
{
        unsigned int top, bottom, alignment, ret = 0;

        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
        t->max_write_same_sectors = min(t->max_write_same_sectors,
                                        b->max_write_same_sectors);
        t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
                                            b->seg_boundary_mask);
        t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
                                             b->virt_boundary_mask);

        t->max_segments = min_not_zero(t->max_segments, b->max_segments);
        t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
                                                 b->max_integrity_segments);

        t->max_segment_size = min_not_zero(t->max_segment_size,
                                           b->max_segment_size);

        t->misaligned |= b->misaligned;

        alignment = queue_limit_alignment_offset(b, start);

        /* Bottom device has different alignment.  Check that it is
         * compatible with the current top alignment.
         */
        if (t->alignment_offset != alignment) {

                top = max(t->physical_block_size, t->io_min)
                        + t->alignment_offset;
                bottom = max(b->physical_block_size, b->io_min) + alignment;

                /* Verify that top and bottom intervals line up */
                if (max(top, bottom) % min(top, bottom)) {
                        t->misaligned = 1;
                        ret = -1;
                }
        }

        t->logical_block_size = max(t->logical_block_size,
                                    b->logical_block_size);

        t->physical_block_size = max(t->physical_block_size,
                                     b->physical_block_size);

        t->io_min = max(t->io_min, b->io_min);
        t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);

        t->cluster &= b->cluster;
        t->discard_zeroes_data &= b->discard_zeroes_data;

        /* Physical block size a multiple of the logical block size? */
        if (t->physical_block_size & (t->logical_block_size - 1)) {
                t->physical_block_size = t->logical_block_size;
                t->misaligned = 1;
                ret = -1;
        }

        /* Minimum I/O a multiple of the physical block size? */
        if (t->io_min & (t->physical_block_size - 1)) {
                t->io_min = t->physical_block_size;
                t->misaligned = 1;
                ret = -1;
        }

        /* Optimal I/O a multiple of the physical block size? */
        if (t->io_opt & (t->physical_block_size - 1)) {
                t->io_opt = 0;
                t->misaligned = 1;
                ret = -1;
        }

        t->raid_partial_stripes_expensive =
                max(t->raid_partial_stripes_expensive,
                    b->raid_partial_stripes_expensive);

        /* Find lowest common alignment_offset */
        t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
                % max(t->physical_block_size, t->io_min);

        /* Verify that new alignment_offset is on a logical block boundary */
        if (t->alignment_offset & (t->logical_block_size - 1)) {
                t->misaligned = 1;
                ret = -1;
        }

        /* Discard alignment and granularity */
        if (b->discard_granularity) {
                alignment = queue_limit_discard_alignment(b, start);

                if (t->discard_granularity != 0 &&
                    t->discard_alignment != alignment) {
                        top = t->discard_granularity + t->discard_alignment;
                        bottom = b->discard_granularity + alignment;

                        /* Verify that top and bottom intervals line up */
                        if ((max(top, bottom) % min(top, bottom)) != 0)
                                t->discard_misaligned = 1;
                }

                t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
                                                      b->max_discard_sectors);
                t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
                                                         b->max_hw_discard_sectors);
                t->discard_granularity = max(t->discard_granularity,
                                             b->discard_granularity);
                t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
                        t->discard_granularity;
        }

        return ret;
}
EXPORT_SYMBOL(blk_stack_limits);

/**
 * bdev_stack_limits - adjust queue limits for stacked drivers
 * @t:  the stacking driver limits (top device)
 * @bdev:  the component block_device (bottom)
 * @start:  first data sector within component device
 *
 * Description:
 *   Merges queue limits for a top device and a block_device.  Returns
 *   0 if alignment didn't change.  Returns -1 if adding the bottom
 *   device caused misalignment.
 */
int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
                      sector_t start)
{
        struct request_queue *bq = bdev_get_queue(bdev);

        start += get_start_sect(bdev);

        return blk_stack_limits(t, &bq->limits, start);
}
EXPORT_SYMBOL(bdev_stack_limits);

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *   Merges the limits for a top level gendisk and a bottom level
 *   block_device.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
                       sector_t offset)
{
        struct request_queue *t = disk->queue;

        if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
                char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

                disk_name(disk, 0, top);
                bdevname(bdev, bottom);

                printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
                       top, bottom);
        }
}
EXPORT_SYMBOL(disk_stack_limits);
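
/*
 * Illustrative sketch (hypothetical stacking driver): an MD/DM-style
 * driver merges every component device into its gendisk's limits, one
 * call per component; the names below are made up.
 *
 *	disk_stack_limits(my_disk, component_bdev, data_start_sector << 9);
 *
 * A warning is logged if a component leaves the top device misaligned.
 */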

/**
 * blk_queue_dma_pad - set pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Set dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
        q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
        if (mask > q->dma_pad_mask)
                q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q:  the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf:  physically contiguous buffer
 * @size:  size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer.  They have to have a
 * real area of memory to transfer it into.  The use case for this is
 * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer.  What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_segments to make room for appending
 * the drain buffer.  If you call blk_queue_max_segments() after calling
 * this routine, you must set the limit to one fewer than your device
 * can support otherwise there won't be room for the drain buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
                        dma_drain_needed_fn *dma_drain_needed,
                        void *buf, unsigned int size)
{
        if (queue_max_segments(q) < 2)
                return -EINVAL;
        /* make room for appending the drain */
        blk_queue_max_segments(q, queue_max_segments(q) - 1);
        q->dma_drain_needed = dma_drain_needed;
        q->dma_drain_buffer = buf;
        q->dma_drain_size = size;

        return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
        if (mask < PAGE_CACHE_SIZE - 1) {
                mask = PAGE_CACHE_SIZE - 1;
                printk(KERN_INFO "%s: set to minimum %lx\n",
                       __func__, mask);
        }

        q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_virt_boundary - set boundary rules for bio merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
{
        q->limits.virt_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_virt_boundary);
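
/*
 * Illustrative sketch (hypothetical values): a controller whose DMA
 * engine cannot cross a 64 KiB boundary within a segment would set
 *
 *	blk_queue_segment_boundary(q, 0xffffUL);
 *
 * while hardware that requires each scatter/gather element to start on
 * a page boundary, NVMe-style, would set
 *
 *	blk_queue_virt_boundary(q, PAGE_SIZE - 1);
 */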

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *   set required memory and length alignment for direct dma transactions.
 *   this is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
        q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *   update required memory and length alignment for direct dma transactions.
 *   If the requested alignment is larger than the current alignment, then
 *   the current queue alignment is updated to the new value, otherwise it
 *   is left alone.  The design of this is to allow multiple objects
 *   (driver, device, transport etc) to set their respective
 *   alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
        BUG_ON(mask > PAGE_SIZE);

        if (mask > q->dma_alignment)
                q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);

/**
 * blk_queue_flush - configure queue's cache flush capability
 * @q:      the request queue for the device
 * @flush:  0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
 *
 * Tell block layer cache flush capability of @q.  If it supports
 * flushing, REQ_FLUSH should be set.  If it supports bypassing
 * write cache for individual writes, REQ_FUA should be set.
 */
void blk_queue_flush(struct request_queue *q, unsigned int flush)
{
        WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));

        if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
                flush &= ~REQ_FUA;

        q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
}
EXPORT_SYMBOL_GPL(blk_queue_flush);

void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
{
        q->flush_not_queueable = !queueable;
}
EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);

static int __init blk_settings_init(void)
{
        blk_max_low_pfn = max_low_pfn - 1;
        blk_max_pfn = max_pfn - 1;
        return 0;
}
subsys_initcall(blk_settings_init);