/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#include "blk.h"

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
                      struct bio *bio)
{
        if (!rq->bio)
                blk_rq_bio_prep(q, rq, bio);
        else if (!ll_back_merge_fn(q, rq, bio))
                return -EINVAL;
        else {
                rq->biotail->bi_next = bio;
                rq->biotail = bio;

                rq->data_len += bio->bi_size;
        }
        return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);

static int __blk_rq_unmap_user(struct bio *bio)
{
        int ret = 0;

        if (bio) {
                if (bio_flagged(bio, BIO_USER_MAPPED))
                        bio_unmap_user(bio);
                else
                        ret = bio_uncopy_user(bio);
        }

        return ret;
}

static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
                             void __user *ubuf, unsigned int len)
{
        unsigned long uaddr;
        unsigned int alignment;
        struct bio *bio, *orig_bio;
        int reading, ret;

        reading = rq_data_dir(rq) == READ;

        /*
         * if alignment requirement is satisfied, map in user pages for
         * direct dma. else, set up kernel bounce buffers
         */
        uaddr = (unsigned long) ubuf;
        alignment = queue_dma_alignment(q) | q->dma_pad_mask;
        if (!(uaddr & alignment) && !(len & alignment))
                bio = bio_map_user(q, NULL, uaddr, len, reading);
        else
                bio = bio_copy_user(q, uaddr, len, reading);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        orig_bio = bio;
        blk_queue_bounce(q, &bio);

        /*
         * We link the bounce buffer in and could have to traverse it
         * later so we have to get a ref to prevent it from being freed
         */
        bio_get(bio);

        ret = blk_rq_append_bio(q, rq, bio);
        if (!ret)
                return bio->bi_size;

        /* if it was bounced we must call the end io function */
        bio_endio(bio, 0);
        __blk_rq_unmap_user(orig_bio);
        bio_put(bio);
        return ret;
}

/**
 * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
 * @q: request queue where request should be inserted
 * @rq: request structure to fill
 * @ubuf: the user buffer
 * @len: length of user data
 *
 * Description:
 *    Data will be mapped directly for zero copy io, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of io, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
                    void __user *ubuf, unsigned long len)
{
        unsigned long bytes_read = 0;
        struct bio *bio = NULL;
        int ret;

        if (len > (q->max_hw_sectors << 9))
                return -EINVAL;
        if (!len || !ubuf)
                return -EINVAL;

        while (bytes_read != len) {
                unsigned long map_len, end, start;

                map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
                end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
                                                                >> PAGE_SHIFT;
                start = (unsigned long)ubuf >> PAGE_SHIFT;

                /*
                 * A bad offset could cause us to require BIO_MAX_PAGES + 1
                 * pages. If this happens we just lower the requested
                 * mapping len by a page so that we can fit
                 */
                if (end - start > BIO_MAX_PAGES)
                        map_len -= PAGE_SIZE;

                ret = __blk_rq_map_user(q, rq, ubuf, map_len);
                if (ret < 0)
                        goto unmap_rq;
                if (!bio)
                        bio = rq->bio;
                bytes_read += ret;
                ubuf += ret;
        }

        if (!bio_flagged(bio, BIO_USER_MAPPED))
                rq->cmd_flags |= REQ_COPY_USER;

        rq->buffer = rq->data = NULL;
        return 0;
unmap_rq:
        blk_rq_unmap_user(bio);
        rq->bio = NULL;
        return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
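
/*
 * Illustrative sketch (not part of this file): a REQ_BLOCK_PC passthrough
 * caller, e.g. an SG_IO-style ioctl handler, would typically pair
 * blk_rq_map_user() with blk_rq_unmap_user() roughly as below. The names
 * hdr->dxferp, hdr->dxfer_len, writing and bd_disk are placeholders for
 * the caller's own state; error handling is trimmed to the essentials.
 *
 *	struct request *rq = blk_get_request(q, writing ? WRITE : READ,
 *					     GFP_KERNEL);
 *	struct bio *bio;
 *	int ret;
 *
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);
 *	if (ret) {
 *		blk_put_request(rq);
 *		return ret;
 *	}
 *	bio = rq->bio;			// remember the original bio
 *
 *	blk_execute_rq(q, bd_disk, rq, 0);
 *
 *	ret = blk_rq_unmap_user(bio);	// must be the original bio
 *	blk_put_request(rq);
 */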

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
 * @q: request queue where request should be inserted
 * @rq: request to map data to
 * @iov: pointer to the iovec
 * @iov_count: number of elements in the iovec
 * @len: I/O byte count
 *
 * Description:
 *    Data will be mapped directly for zero copy io, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of io, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                        struct sg_iovec *iov, int iov_count, unsigned int len)
{
        struct bio *bio;
        int i, read = rq_data_dir(rq) == READ;
        int unaligned = 0;

        if (!iov || iov_count <= 0)
                return -EINVAL;

        for (i = 0; i < iov_count; i++) {
                unsigned long uaddr = (unsigned long)iov[i].iov_base;

                if (uaddr & queue_dma_alignment(q)) {
                        unaligned = 1;
                        break;
                }
        }

        if (unaligned || (q->dma_pad_mask & len))
                bio = bio_copy_user_iov(q, iov, iov_count, read);
        else
                bio = bio_map_user_iov(q, NULL, iov, iov_count, read);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (bio->bi_size != len) {
                bio_endio(bio, 0);
                bio_unmap_user(bio);
                return -EINVAL;
        }

        if (!bio_flagged(bio, BIO_USER_MAPPED))
                rq->cmd_flags |= REQ_COPY_USER;

        bio_get(bio);
        blk_rq_bio_prep(q, rq, bio);
        rq->buffer = rq->data = NULL;
        return 0;
}
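
/*
 * Illustrative sketch (not part of this file): a scatter/gather aware
 * caller would first copy the user-supplied iovec array into kernel
 * memory and then hand it to blk_rq_map_user_iov(); the iov_base
 * pointers inside the array still refer to user addresses. hdr->dxferp,
 * hdr->iovec_count and hdr->dxfer_len are placeholders for the caller's
 * own state.
 *
 *	size_t size = sizeof(struct sg_iovec) * hdr->iovec_count;
 *	struct sg_iovec *iov = kmalloc(size, GFP_KERNEL);
 *	int ret;
 *
 *	if (!iov)
 *		return -ENOMEM;
 *	if (copy_from_user(iov, hdr->dxferp, size)) {
 *		kfree(iov);
 *		return -EFAULT;
 *	}
 *	ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count,
 *				  hdr->dxfer_len);
 *	kfree(iov);
 *
 * Teardown is the same as for blk_rq_map_user(): save rq->bio before the
 * request is executed and pass it to blk_rq_unmap_user() afterwards.
 */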

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio: start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the io completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
        struct bio *mapped_bio;
        int ret = 0, ret2;

        while (bio) {
                mapped_bio = bio;
                if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
                        mapped_bio = bio->bi_private;

                ret2 = __blk_rq_unmap_user(mapped_bio);
                if (ret2 && !ret)
                        ret = ret2;

                mapped_bio = bio;
                bio = bio->bi_next;
                bio_put(mapped_bio);
        }

        return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
 * @q: request queue where request should be inserted
 * @rq: request to fill
 * @kbuf: the kernel buffer
 * @len: length of kernel data
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
                    unsigned int len, gfp_t gfp_mask)
{
        unsigned long kaddr;
        unsigned int alignment;
        int reading = rq_data_dir(rq) == READ;
        int do_copy = 0;
        struct bio *bio;

        if (len > (q->max_hw_sectors << 9))
                return -EINVAL;
        if (!len || !kbuf)
                return -EINVAL;

        kaddr = (unsigned long)kbuf;
        alignment = queue_dma_alignment(q) | q->dma_pad_mask;
        do_copy = ((kaddr & alignment) || (len & alignment));

        if (do_copy)
                bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
        else
                bio = bio_map_kern(q, kbuf, len, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (rq_data_dir(rq) == WRITE)
                bio->bi_rw |= (1 << BIO_RW);

        if (do_copy)
                rq->cmd_flags |= REQ_COPY_USER;

        blk_rq_bio_prep(q, rq, bio);
        blk_queue_bounce(q, &rq->bio);
        rq->buffer = rq->data = NULL;
        return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
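
/*
 * Illustrative sketch (not part of this file): a driver issuing an
 * internal command backed by a kernel buffer, e.g. an INQUIRY-style
 * probe command, would use blk_rq_map_kern() roughly as below; buffer
 * and bufflen are placeholders. There is no explicit unmap step, the
 * mapped (or copied) bio is torn down when the request completes.
 *
 *	struct request *rq = blk_get_request(q, READ, __GFP_WAIT);
 *	int err;
 *
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	err = blk_rq_map_kern(q, rq, buffer, bufflen, __GFP_WAIT);
 *	if (err) {
 *		blk_put_request(rq);
 *		return err;
 *	}
 *	err = blk_execute_rq(q, NULL, rq, 0);
 *	blk_put_request(rq);
 */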