blk-map.c (8c57a5e7b2820f349c95b8c8393fec1e0f4070d2) | blk-map.c (98d61d5b1a65a9df7cb3d9605f5d37d3dbbb4b5e) |
---|---|
1/* 2 * Functions related to mapping data to requests 3 */ 4#include <linux/kernel.h> 5#include <linux/module.h> 6#include <linux/bio.h> 7#include <linux/blkdev.h> 8#include <linux/uio.h> 9 10#include "blk.h" 11 | 1/* 2 * Functions related to mapping data to requests 3 */ 4#include <linux/kernel.h> 5#include <linux/module.h> 6#include <linux/bio.h> 7#include <linux/blkdev.h> 8#include <linux/uio.h> 9 10#include "blk.h" 11 |
12int blk_rq_append_bio(struct request_queue *q, struct request *rq, 13 struct bio *bio) | 12/* 13 * Append a bio to a passthrough request. Only works if the bio can be 14 * merged into the request based on the driver constraints. 15 */ 16int blk_rq_append_bio(struct request *rq, struct bio *bio) |
14{ | 17{ |
15 if (!rq->bio) 16 blk_rq_bio_prep(q, rq, bio); 17 else if (!ll_back_merge_fn(q, rq, bio)) 18 return -EINVAL; 19 else { | 18 if (!rq->bio) { 19 blk_rq_bio_prep(rq->q, rq, bio); 20 } else { 21 if (!ll_back_merge_fn(rq->q, rq, bio)) 22 return -EINVAL; 23 |
20 rq->biotail->bi_next = bio; 21 rq->biotail = bio; | 24 rq->biotail->bi_next = bio; 25 rq->biotail = bio; |
22 | |
23 rq->__data_len += bio->bi_iter.bi_size; 24 } | 26 rq->__data_len += bio->bi_iter.bi_size; 27 } |
| 28 |
25 return 0; 26} | 29 return 0; 30} |
| 31EXPORT_SYMBOL(blk_rq_append_bio); |
|
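The first hunk above drops the explicit request_queue argument, since the queue is always reachable as rq->q, and exports the helper so drivers can append their own bios to passthrough requests. A minimal sketch of a caller adapted to the new interface (example_attach_bio and its error handling are illustrative, not part of this change):

```c
#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical caller of the newly exported helper. */
static int example_attach_bio(struct request *rq, struct bio *bio)
{
	int ret;

	/* Before this change: blk_rq_append_bio(rq->q, rq, bio); */
	ret = blk_rq_append_bio(rq, bio);
	if (ret) {
		/*
		 * -EINVAL: merging the bio would violate the driver's
		 * limits. The caller still owns the bio and must free it.
		 */
		bio_put(bio);
		return ret;
	}
	return 0;
}
```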
27 28static int __blk_rq_unmap_user(struct bio *bio) 29{ 30 int ret = 0; 31 32 if (bio) { 33 if (bio_flagged(bio, BIO_USER_MAPPED)) 34 bio_unmap_user(bio); --- 31 unchanged lines hidden --- 66 blk_queue_bounce(q, &bio); 67 68 /* 69 * We link the bounce buffer in and could have to traverse it 70 * later so we have to get a ref to prevent it from being freed 71 */ 72 bio_get(bio); 73 | 32 33static int __blk_rq_unmap_user(struct bio *bio) 34{ 35 int ret = 0; 36 37 if (bio) { 38 if (bio_flagged(bio, BIO_USER_MAPPED)) 39 bio_unmap_user(bio); --- 31 unchanged lines hidden --- 71 blk_queue_bounce(q, &bio); 72 73 /* 74 * We link the bounce buffer in and could have to traverse it 75 * later so we have to get a ref to prevent it from being freed 76 */ 77 bio_get(bio); 78 |
74 ret = blk_rq_append_bio(q, rq, bio); | 79 ret = blk_rq_append_bio(rq, bio); |
75 if (ret) { 76 bio_endio(bio); 77 __blk_rq_unmap_user(orig_bio); 78 bio_put(bio); 79 return ret; 80 } 81 82 return 0; --- 136 unchanged lines hidden --- 219 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading); 220 else 221 bio = bio_map_kern(q, kbuf, len, gfp_mask); 222 223 if (IS_ERR(bio)) 224 return PTR_ERR(bio); 225 226 if (!reading) | 80 if (ret) { 81 bio_endio(bio); 82 __blk_rq_unmap_user(orig_bio); 83 bio_put(bio); 84 return ret; 85 } 86 87 return 0; --- 136 unchanged lines hidden --- 224 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading); 225 else 226 bio = bio_map_kern(q, kbuf, len, gfp_mask); 227 228 if (IS_ERR(bio)) 229 return PTR_ERR(bio); 230 231 if (!reading) |
227 bio->bi_rw |= REQ_WRITE; | 232 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); |
228 229 if (do_copy) 230 rq->cmd_flags |= REQ_COPY_USER; 231 | 233 234 if (do_copy) 235 rq->cmd_flags |= REQ_COPY_USER; 236 |
232 ret = blk_rq_append_bio(q, rq, bio); | 237 ret = blk_rq_append_bio(rq, bio); |
233 if (unlikely(ret)) { 234 /* request is too big */ 235 bio_put(bio); 236 return ret; 237 } 238 239 blk_queue_bounce(q, &rq->bio); 240 return 0; 241} 242EXPORT_SYMBOL(blk_rq_map_kern); | 238 if (unlikely(ret)) { 239 /* request is too big */ 240 bio_put(bio); 241 return ret; 242 } 243 244 blk_queue_bounce(q, &rq->bio); 245 return 0; 246} 247EXPORT_SYMBOL(blk_rq_map_kern); |
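The blk_rq_map_kern() hunk also picks up the block layer's op/flags split: the direct `bio->bi_rw |= REQ_WRITE` becomes bio_set_op_attrs(bio, REQ_OP_WRITE, 0), which records the operation and its flags together. For reference, a hedged sketch of how a caller of this era might use blk_rq_map_kern() for a synchronous passthrough write; send_buf_to_disk and the omitted command setup are hypothetical:

```c
#include <linux/blkdev.h>

/* Hypothetical synchronous passthrough write built on blk_rq_map_kern(). */
static int send_buf_to_disk(struct request_queue *q, void *kbuf,
			    unsigned int len)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, WRITE, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/*
	 * Maps (or bounce-copies, for unaligned buffers) kbuf into rq.
	 * Since the request is a write, blk_rq_map_kern() marks the bio
	 * with bio_set_op_attrs(bio, REQ_OP_WRITE, 0).
	 */
	ret = blk_rq_map_kern(q, rq, kbuf, len, GFP_KERNEL);
	if (ret)
		goto out;

	ret = blk_execute_rq(q, NULL, rq, 0);
out:
	blk_put_request(rq);
	return ret;
}
```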