/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#include "blk.h"

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->__data_len += bio->bi_iter.bi_size;
	}
	return 0;
}

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iov:	pointer to the iovec
 * @iov_count:	number of elements in the iovec
 * @len:	I/O byte count
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data, const struct sg_iovec *iov,
			int iov_count, unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;
	int i, read = rq_data_dir(rq) == READ;
	int unaligned = 0;

	if (!iov || iov_count <= 0)
		return -EINVAL;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;

		if (!iov[i].iov_len)
			return -EINVAL;

		/*
		 * Keep going so we check length of all segments
		 */
		if (uaddr & queue_dma_alignment(q))
			unaligned = 1;
	}

	if (unaligned || (q->dma_pad_mask & len) || map_data)
		bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
					gfp_mask);
	else
		bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_iter.bi_size != len) {
		/*
		 * Grab an extra reference to this bio, as bio_unmap_user()
		 * expects to be able to drop it twice as it happens on the
		 * normal IO completion path
		 */
		bio_get(bio);
		bio_endio(bio, 0);
		__blk_rq_unmap_user(bio);
		return -EINVAL;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	blk_queue_bounce(q, &bio);
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct sg_iovec iov;

	iov.iov_base = (void __user *)ubuf;
	iov.iov_len = len;

	return blk_rq_map_user_iov(q, rq, map_data, &iov, 1, len, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);
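/*
 * Example (illustrative sketch, not part of this file): an SG_IO-style
 * caller typically pairs blk_rq_map_user() with blk_rq_unmap_user().
 * The helper below is hypothetical; the command bytes, timeout and the
 * NULL vs. ERR_PTR failure convention of blk_get_request() depend on the
 * caller and kernel version.  Note that rq->bio is saved before the
 * request is executed, since completion may change it.
 *
 *	static int example_send_user_cmd(struct request_queue *q,
 *					 struct gendisk *disk,
 *					 unsigned char *cdb, unsigned int cdb_len,
 *					 void __user *ubuf, unsigned long len)
 *	{
 *		struct request *rq;
 *		struct bio *bio;
 *		int ret;
 *
 *		rq = blk_get_request(q, READ, GFP_KERNEL);
 *		if (IS_ERR_OR_NULL(rq))
 *			return rq ? PTR_ERR(rq) : -ENOMEM;
 *		rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *		memcpy(rq->cmd, cdb, cdb_len);
 *		rq->cmd_len = cdb_len;
 *		rq->timeout = 60 * HZ;
 *
 *		ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *		if (ret) {
 *			blk_put_request(rq);
 *			return ret;
 *		}
 *
 *		bio = rq->bio;			/* keep the original bio *\/
 *		blk_execute_rq(q, disk, rq, 0);
 *		ret = rq->errors ? -EIO : 0;
 *
 *		blk_rq_unmap_user(bio);		/* always unmap, even on error *\/
 *		blk_put_request(rq);
 *		return ret;
 *	}
 */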
/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (!reading)
		bio->bi_rw |= REQ_WRITE;

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	ret = blk_rq_append_bio(q, rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
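/*
 * Example (illustrative sketch, not part of this file): drivers that need
 * to issue a command against a kernel buffer typically pair
 * blk_rq_map_kern() with blk_execute_rq(), along the lines of the SCSI
 * midlayer's scsi_execute().  The helper below is hypothetical and elides
 * command setup and sense handling; the GFP flags and timeout are
 * placeholders.  No unmap call is needed here: bounce-buffer copy-back is
 * handled on bio completion.
 *
 *	static int example_send_kern_cmd(struct request_queue *q,
 *					 struct gendisk *disk,
 *					 void *buf, unsigned int buflen)
 *	{
 *		struct request *rq;
 *		int ret;
 *
 *		rq = blk_get_request(q, READ, GFP_NOIO);
 *		if (IS_ERR_OR_NULL(rq))
 *			return rq ? PTR_ERR(rq) : -ENOMEM;
 *		rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *		rq->timeout = 30 * HZ;
 *
 *		if (buflen) {
 *			ret = blk_rq_map_kern(q, rq, buf, buflen, GFP_NOIO);
 *			if (ret) {
 *				blk_put_request(rq);
 *				return ret;
 *			}
 *		}
 *
 *		blk_execute_rq(q, disk, rq, 0);
 *		ret = rq->errors ? -EIO : 0;
 *
 *		blk_put_request(rq);
 *		return ret;
 *	}
 */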