blk-mq.h: diff between commits edd1dbc83b1de3b98590b76e09b86ddf6887fce7 (old) and 6deacb3bfac2b720e707c566549a7041f17db9c8 (new)
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef INT_BLK_MQ_H
 #define INT_BLK_MQ_H

 #include "blk-stat.h"
 #include "blk-mq-tag.h"

 struct blk_mq_tag_set;

--- 280 unchanged lines hidden ---

 	int cpu;

 	for_each_possible_cpu(cpu)
 		qmap->mq_map[cpu] = 0;
 }

 /*
  * blk_mq_plug() - Get caller context plug
- * @q: request queue
  * @bio: the bio being submitted by the caller context
  *
  * Plugging, by design, may delay the insertion of BIOs into the elevator in
  * order to increase BIO merging opportunities. This however can cause BIO
  * insertion order to change from the order in which submit_bio() is being
  * executed in the case of multiple contexts concurrently issuing BIOs to a
  * device, even if these contexts are synchronized to tightly control BIO issuing
  * order. While this is not a problem with regular block devices, this ordering
  * change can cause write BIO failures with zoned block devices as these
  * require sequential write patterns to zones. Prevent this from happening by
- * ignoring the plug state of a BIO issuing context if the target request queue
- * is for a zoned block device and the BIO to plug is a write operation.
+ * ignoring the plug state of a BIO issuing context if it is for a zoned block
+ * device and the BIO to plug is a write operation.
  *
  * Return current->plug if the bio can be plugged and NULL otherwise
  */
-static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
-					   struct bio *bio)
+static inline struct blk_plug *blk_mq_plug(struct bio *bio)
 {
+	/* Zoned block device write operation case: do not plug the BIO */
+	if (bdev_is_zoned(bio->bi_bdev) && op_is_write(bio_op(bio)))
+		return NULL;
+
 	/*
 	 * For regular block devices or read operations, use the context plug
 	 * which may be NULL if blk_start_plug() was not executed.
 	 */
-	if (!bdev_is_zoned(bio->bi_bdev) || !op_is_write(bio_op(bio)))
-		return current->plug;
-
-	/* Zoned block device write operation case: do not plug the BIO */
-	return NULL;
+	return current->plug;
 }
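Two notes on this hunk. First, the reordering does not change behavior: by De Morgan's law, !(bdev_is_zoned() && op_is_write()) is exactly !bdev_is_zoned() || !op_is_write(), so the new early NULL return covers precisely the cases the old code excluded from plugging; the only functional change is the dropped, unused request_queue argument. Second, "Return current->plug if the bio can be plugged" works because current->plug is non-NULL only while the submitting task holds a plug open. A minimal caller-side sketch of that pattern, assuming a hypothetical function submit_batch() (blk_start_plug(), submit_bio() and blk_finish_plug() are the real interfaces; everything else here is illustrative):

	/* Hypothetical submitter showing when current->plug is non-NULL. */
	static void submit_batch(struct bio **bios, int nr)
	{
		struct blk_plug plug;
		int i;

		blk_start_plug(&plug);		/* sets current->plug = &plug */
		for (i = 0; i < nr; i++)
			submit_bio(bios[i]);	/* blk_mq_plug() returns &plug here,
						 * except for a write to a zoned
						 * device, where it returns NULL */
		blk_finish_plug(&plug);		/* flush the batched requests */
	}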
 /* Free all requests on the list */
 static inline void blk_mq_free_requests(struct list_head *list)
 {
 	while (!list_empty(list)) {
 		struct request *rq = list_entry_rq(list->next);


--- 66 unchanged lines hidden ---
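The rest of blk_mq_free_requests() is hidden above; the hidden remainder presumably unlinks each entry and hands it to blk_mq_free_request(). The visible loop is the standard intrusive-list drain pattern: peek at list->next, map it back to its containing request with list_entry_rq(), then unlink and free it until the list is empty. A self-contained userspace mock of that pattern, assuming nothing from the kernel (list_head, container_of and free_requests below are local stand-ins, with free() in place of blk_mq_free_request()):

	#include <stdio.h>
	#include <stdlib.h>
	#include <stddef.h>

	/* Minimal circular doubly-linked list, modeled on the kernel's list_head. */
	struct list_head { struct list_head *next, *prev; };

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct request {
		int tag;
		struct list_head queuelist;
	};

	static int list_empty(const struct list_head *h)
	{
		return h->next == h;
	}

	static void list_del_init(struct list_head *e)
	{
		e->prev->next = e->next;
		e->next->prev = e->prev;
		e->next = e->prev = e;
	}

	static void list_add_tail(struct list_head *e, struct list_head *h)
	{
		e->prev = h->prev;
		e->next = h;
		h->prev->next = e;
		h->prev = e;
	}

	/* Same shape as blk_mq_free_requests(): pop the head entry and free it
	 * until the list is empty. */
	static void free_requests(struct list_head *list)
	{
		while (!list_empty(list)) {
			struct request *rq =
				container_of(list->next, struct request, queuelist);

			list_del_init(&rq->queuelist);
			free(rq);
		}
	}

	int main(void)
	{
		struct list_head list = { &list, &list };
		int i;

		for (i = 0; i < 3; i++) {
			struct request *rq = malloc(sizeof(*rq));

			rq->tag = i;
			list_add_tail(&rq->queuelist, &list);
		}
		free_requests(&list);
		printf("list empty after drain: %d\n", list_empty(&list));
		return 0;
	}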