blk-map.c: 710027a48ede75428cc68eaa8ae2269b1e356e2c (old) → a3bce90edd8f6cafe3f63b1a943800792e830178 (new)

The newer revision threads a gfp_t gfp_mask argument through blk_rq_map_user(), blk_rq_map_user_iov() and the internal __blk_rq_map_user() helper, passing it on to bio_map_user()/bio_copy_user() and their iov variants so that callers choose the memory allocation flags used for the mapping. Unchanged context lines are shown with a leading space; removed lines with "-", added lines with "+".
 /*
  * Functions related to mapping data to requests
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
 #include <scsi/sg.h>		/* for struct sg_iovec */

--- 27 unchanged lines hidden ---

 		else
 			ret = bio_uncopy_user(bio);
 	}

 	return ret;
 }

 static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
-			     void __user *ubuf, unsigned int len)
+			     void __user *ubuf, unsigned int len,
+			     gfp_t gfp_mask)
 {
 	unsigned long uaddr;
 	unsigned int alignment;
 	struct bio *bio, *orig_bio;
 	int reading, ret;

 	reading = rq_data_dir(rq) == READ;

 	/*
 	 * if alignment requirement is satisfied, map in user pages for
 	 * direct dma. else, set up kernel bounce buffers
 	 */
 	uaddr = (unsigned long) ubuf;
 	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
 	if (!(uaddr & alignment) && !(len & alignment))
-		bio = bio_map_user(q, NULL, uaddr, len, reading);
+		bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
 	else
-		bio = bio_copy_user(q, uaddr, len, reading);
+		bio = bio_copy_user(q, uaddr, len, reading, gfp_mask);

 	if (IS_ERR(bio))
 		return PTR_ERR(bio);

 	orig_bio = bio;
 	blk_queue_bounce(q, &bio);

 	/*

--- 14 unchanged lines hidden ---

 }
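As a side note on the branch above: whether __blk_rq_map_user() takes the zero-copy bio_map_user() path or the bounce-copy bio_copy_user() path depends only on the combined alignment mask. The sketch below restates that test as a standalone predicate; the helper name would_use_direct_mapping() is hypothetical and not part of this file or of the block layer.

```c
#include <linux/blkdev.h>

/*
 * Illustrative sketch only: mirrors the alignment test used by
 * __blk_rq_map_user() above. The helper name is hypothetical.
 */
static inline bool would_use_direct_mapping(struct request_queue *q,
					    void __user *ubuf,
					    unsigned int len)
{
	unsigned long uaddr = (unsigned long) ubuf;
	unsigned long mask = queue_dma_alignment(q) | q->dma_pad_mask;

	/* both the buffer address and its length must satisfy the mask */
	return !(uaddr & mask) && !(len & mask);
}
```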
 /**
  * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q:		request queue where request should be inserted
  * @rq:		request structure to fill
  * @ubuf:	the user buffer
  * @len:	length of user data
+ * @gfp_mask:	memory allocation flags
  *
  * Description:
  *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
  *    a kernel bounce buffer is used.
  *
  *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
  *    still in process context.
  *
  *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
  *    before being submitted to the device, as pages mapped may be out of
  *    reach. It's the callers responsibility to make sure this happens. The
  *    original bio must be passed back in to blk_rq_unmap_user() for proper
  *    unmapping.
  */
 int blk_rq_map_user(struct request_queue *q, struct request *rq,
-		    void __user *ubuf, unsigned long len)
+		    void __user *ubuf, unsigned long len, gfp_t gfp_mask)
 {
 	unsigned long bytes_read = 0;
 	struct bio *bio = NULL;
 	int ret;

 	if (len > (q->max_hw_sectors << 9))
 		return -EINVAL;
 	if (!len || !ubuf)

--- 10 unchanged lines hidden ---

 		/*
 		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
 		 * pages. If this happens we just lower the requested
 		 * mapping len by a page so that we can fit
 		 */
 		if (end - start > BIO_MAX_PAGES)
 			map_len -= PAGE_SIZE;

-		ret = __blk_rq_map_user(q, rq, ubuf, map_len);
+		ret = __blk_rq_map_user(q, rq, ubuf, map_len, gfp_mask);
 		if (ret < 0)
 			goto unmap_rq;
 		if (!bio)
 			bio = rq->bio;
 		bytes_read += ret;
 		ubuf += ret;
 	}

--- 11 unchanged lines hidden ---
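To show the extended interface from a caller's perspective, here is a minimal sketch of a REQ_TYPE_BLOCK_PC submission that maps a user buffer with the new gfp_mask argument and unmaps it afterwards. The function example_map_and_submit() and its parameters are hypothetical, CDB/sense/timeout setup is omitted, and GFP_KERNEL is simply the typical choice for a caller that may sleep; only the blk_rq_map_user()/blk_rq_unmap_user() usage reflects the interface shown in this diff.

```c
#include <linux/blkdev.h>
#include <linux/gfp.h>

/* Hypothetical caller sketch, not part of blk-map.c. */
static int example_map_and_submit(struct request_queue *q,
				  struct gendisk *disk,
				  void __user *ubuf, unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	/* CDB, sense buffer and timeout setup omitted from this sketch */

	/* the mapping's allocation flags are now chosen by the caller */
	ret = blk_rq_map_user(q, rq, ubuf, len, GFP_KERNEL);
	if (ret) {
		blk_put_request(rq);
		return ret;
	}

	/* save the original bio; it must be handed back for unmapping */
	bio = rq->bio;

	ret = blk_execute_rq(q, disk, rq, 0);

	blk_rq_unmap_user(bio);
	blk_put_request(rq);
	return ret;
}
```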
 /**
  * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q:		request queue where request should be inserted
  * @rq:		request to map data to
  * @iov:	pointer to the iovec
  * @iov_count:	number of elements in the iovec
  * @len:	I/O byte count
+ * @gfp_mask:	memory allocation flags
  *
  * Description:
  *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
  *    a kernel bounce buffer is used.
  *
  *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
  *    still in process context.
  *
  *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
  *    before being submitted to the device, as pages mapped may be out of
  *    reach. It's the callers responsibility to make sure this happens. The
  *    original bio must be passed back in to blk_rq_unmap_user() for proper
  *    unmapping.
  */
 int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
-			struct sg_iovec *iov, int iov_count, unsigned int len)
+			struct sg_iovec *iov, int iov_count, unsigned int len,
+			gfp_t gfp_mask)
 {
 	struct bio *bio;
 	int i, read = rq_data_dir(rq) == READ;
 	int unaligned = 0;

 	if (!iov || iov_count <= 0)
 		return -EINVAL;

 	for (i = 0; i < iov_count; i++) {
 		unsigned long uaddr = (unsigned long)iov[i].iov_base;

 		if (uaddr & queue_dma_alignment(q)) {
 			unaligned = 1;
 			break;
 		}
 	}

 	if (unaligned || (q->dma_pad_mask & len))
-		bio = bio_copy_user_iov(q, iov, iov_count, read);
+		bio = bio_copy_user_iov(q, iov, iov_count, read, gfp_mask);
 	else
-		bio = bio_map_user_iov(q, NULL, iov, iov_count, read);
+		bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);

 	if (IS_ERR(bio))
 		return PTR_ERR(bio);

 	if (bio->bi_size != len) {
 		bio_endio(bio, 0);
 		bio_unmap_user(bio);
 		return -EINVAL;

--- 95 unchanged lines hidden ---
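Along the same lines, here is a minimal sketch of a blk_rq_map_user_iov() caller using the additional gfp_mask argument. The helper example_map_iov() and its two-segment buffer layout are purely illustrative; as with blk_rq_map_user(), a matching blk_rq_unmap_user() on the request's original bio (omitted here) is still required after I/O completes.

```c
#include <linux/blkdev.h>
#include <scsi/sg.h>	/* struct sg_iovec */

/* Hypothetical caller sketch, not part of blk-map.c. */
static int example_map_iov(struct request_queue *q, struct request *rq,
			   void __user *hdr, size_t hdr_len,
			   void __user *data, size_t data_len)
{
	struct sg_iovec iov[2];

	iov[0].iov_base = hdr;
	iov[0].iov_len  = hdr_len;
	iov[1].iov_base = data;
	iov[1].iov_len  = data_len;

	/*
	 * @len must match the total iovec length; otherwise the mapping
	 * is undone and -EINVAL is returned (see the bio->bi_size check
	 * above).
	 */
	return blk_rq_map_user_iov(q, rq, iov, 2, hdr_len + data_len,
				   GFP_KERNEL);
}
```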