/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#include "blk.h"

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->__data_len += bio->bi_iter.bi_size;
	}
	return 0;
}

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
			     struct rq_map_data *map_data, void __user *ubuf,
			     unsigned int len, gfp_t gfp_mask)
{
	unsigned long uaddr;
	struct bio *bio, *orig_bio;
	int reading, ret;

	reading = rq_data_dir(rq) == READ;

	/*
	 * If the alignment requirement is satisfied, map in the user pages
	 * for direct DMA. Otherwise, set up kernel bounce buffers.
	 */
	uaddr = (unsigned long) ubuf;
	if (blk_rq_aligned(q, uaddr, len) && !map_data)
		bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
	else
		bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio->bi_flags |= (1 << BIO_NULL_MAPPED);

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later, so we have to get a ref to prevent it from being freed.
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (!ret)
		return bio->bi_iter.bi_size;

	/* if it was bounced we must call the end io function */
	bio_endio(bio, 0);
	__blk_rq_unmap_user(orig_bio);
	bio_put(bio);
	return ret;
}

/**
 * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request structure to fill
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @ubuf:	the user buffer
 * @len:	length of user data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	unsigned long bytes_read = 0;
	struct bio *bio = NULL;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len)
		return -EINVAL;

	if (!ubuf && (!map_data || !map_data->null_mapped))
		return -EINVAL;

	while (bytes_read != len) {
		unsigned long map_len, end, start;

		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
		start = (unsigned long)ubuf >> PAGE_SHIFT;

		/*
		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
		 * pages. If this happens we just lower the requested
		 * mapping len by a page so that we can fit.
		 */
		if (end - start > BIO_MAX_PAGES)
			map_len -= PAGE_SIZE;

		ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
					gfp_mask);
		if (ret < 0)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
		bytes_read += ret;
		ubuf += ret;

		if (map_data)
			map_data->offset += ret;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	return 0;
unmap_rq:
	blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
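
/*
 * Usage sketch (illustrative only, not part of this file's logic): a
 * passthrough ioctl handler would typically pair blk_rq_map_user() with
 * blk_rq_unmap_user() roughly as below. The request setup, the locals
 * (q, ubuf, len, write, bd_disk) and the error labels are assumptions made
 * for the example; see the SG_IO handling in block/scsi_ioctl.c for a
 * complete in-tree user.
 *
 *	struct request *rq;
 *	struct bio *bio;
 *	int ret;
 *
 *	rq = blk_get_request(q, write ? WRITE : READ, GFP_KERNEL);
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *
 *	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *	if (ret)
 *		goto out_put;
 *
 *	bio = rq->bio;			// remember the original bio chain
 *	blk_execute_rq(q, bd_disk, rq, 0);
 *	ret = blk_rq_unmap_user(bio);	// must still be in process context
 * out_put:
 *	blk_put_request(rq);
 */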

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iov:	pointer to the iovec
 * @iov_count:	number of elements in the iovec
 * @len:	I/O byte count
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data, const struct sg_iovec *iov,
			int iov_count, unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;
	int i, read = rq_data_dir(rq) == READ;
	int unaligned = 0;

	if (!iov || iov_count <= 0)
		return -EINVAL;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;

		if (!iov[i].iov_len)
			return -EINVAL;

		/*
		 * Keep going so we check the length of all segments.
		 */
		if (uaddr & queue_dma_alignment(q))
			unaligned = 1;
	}

	if (unaligned || (q->dma_pad_mask & len) || map_data)
		bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
					gfp_mask);
	else
		bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_iter.bi_size != len) {
		/*
		 * Grab an extra reference to this bio, as bio_unmap_user()
		 * expects to be able to drop it twice as it happens on the
		 * normal IO completion path.
		 */
		bio_get(bio);
		bio_endio(bio, 0);
		__blk_rq_unmap_user(bio);
		return -EINVAL;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	blk_queue_bounce(q, &bio);
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
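
/*
 * Usage sketch (illustrative only): when the caller already holds a
 * kernel-resident sg_iovec array describing a scattered user buffer, the
 * iovec variant maps the whole transfer in a single call and pairs with the
 * same blk_rq_unmap_user() as above. The names iov, iov_count and dxfer_len
 * are assumptions made for the example; the SG_IO iovec path in
 * block/scsi_ioctl.c follows essentially this shape.
 *
 *	ret = blk_rq_map_user_iov(q, rq, NULL, iov, iov_count,
 *				  dxfer_len, GFP_KERNEL);
 *	if (ret)
 *		goto out_put;
 *
 *	bio = rq->bio;
 *	blk_execute_rq(q, bd_disk, rq, 0);
 *	ret = blk_rq_unmap_user(bio);
 */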

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (!reading)
		bio->bi_rw |= REQ_WRITE;

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	ret = blk_rq_append_bio(q, rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
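
/*
 * Usage sketch (illustrative only): drivers issuing internal commands with a
 * kernel buffer typically map it like this before executing the request. The
 * surrounding setup and the locals (q, buffer, bufflen, write) are
 * assumptions made for the example, loosely modelled on scsi_execute() in
 * drivers/scsi/scsi_lib.c. Note that no unmap step is needed for kernel
 * buffers.
 *
 *	rq = blk_get_request(q, write ? WRITE : READ, GFP_KERNEL);
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *
 *	if (bufflen) {
 *		ret = blk_rq_map_kern(q, rq, buffer, bufflen, GFP_KERNEL);
 *		if (ret)
 *			goto out_put;
 *	}
 *
 *	blk_execute_rq(q, NULL, rq, 0);
 * out_put:
 *	blk_put_request(rq);
 */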