/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

/*
 * Append a bio to a passthrough request.  Only works if the bio can be
 * merged into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
	if (!rq->bio) {
		blk_rq_bio_prep(rq->q, rq, bio);
	} else {
		if (!ll_back_merge_fn(rq->q, rq, bio))
			return -EINVAL;

		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->__data_len += bio->bi_iter.bi_size;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio_set_flag(bio, BIO_NULL_MAPPED);

	iov_iter_advance(iter, bio->bi_iter.bi_size);
	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(rq, bio);
	if (ret) {
		bio_endio(bio);
		__blk_rq_unmap_user(orig_bio);
		bio_put(bio);
		return ret;
	}

	return 0;
}
/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret;

	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;
	return 0;

unmap_rq:
	__blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return -EINVAL;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
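/*
 * Example: a minimal, purely illustrative sketch of how a passthrough
 * caller might pair blk_rq_map_user() with blk_rq_unmap_user().  The
 * function below is not used by the block layer; the function name, the
 * request direction and allocation flags are assumptions of the example,
 * not requirements of the API.  The key points are that the mapped bio is
 * saved right after mapping (completion may change rq->bio) and that
 * unmapping happens in process context.
 */
static int __maybe_unused blk_rq_map_user_example(struct request_queue *q,
						  struct gendisk *disk,
						  void __user *ubuf,
						  unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* driver-specific command setup for the passthrough request omitted */

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		goto out;

	/* save the mapped bio: completion may change rq->bio */
	bio = rq->bio;

	blk_execute_rq(q, disk, rq, 0);

	/* must be called with the bio saved at map time */
	ret = blk_rq_unmap_user(bio);
out:
	blk_put_request(rq);
	return ret;
}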
/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (!reading)
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	ret = blk_rq_append_bio(rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
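/*
 * Example: a minimal, purely illustrative sketch of mapping a kernel buffer
 * into a passthrough request with blk_rq_map_kern().  Not used by the block
 * layer; the function name, direction and allocation flags are assumptions
 * of the example.  @kbuf is assumed to be a heap buffer: a stack buffer (or
 * one misaligned for the queue) forces the copy path above.  No explicit
 * unmap is needed; the bio is torn down at I/O completion.
 */
static int __maybe_unused blk_rq_map_kern_example(struct request_queue *q,
						  struct gendisk *disk,
						  void *kbuf, unsigned int len)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* driver-specific command setup for the passthrough request omitted */

	ret = blk_rq_map_kern(q, rq, kbuf, len, GFP_KERNEL);
	if (!ret)
		blk_execute_rq(q, disk, rq, 0);

	blk_put_request(rq);
	return ret;
}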