blk-map.c: changes from 144177991ca624841ddbd1e7edff958fc0f6d1fe (old) to a45dc2d2b8d1afa57c91dcfac224e50ffcd3f805 (new)
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

--- 40 unchanged lines hidden ---


        reading = rq_data_dir(rq) == READ;

        /*
         * if alignment requirement is satisfied, map in user pages for
         * direct dma. else, set up kernel bounce buffers
         */
        uaddr = (unsigned long) ubuf;
-       if (blk_rq_aligned(q, uaddr, len) && !map_data)
+       if (blk_rq_aligned(q, ubuf, len) && !map_data)
                bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
        else
                bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (map_data && map_data->null_mapped)
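The comment in this hunk describes the choice being made: when the user buffer already meets the queue's DMA alignment, it is mapped directly with bio_map_user(); otherwise bio_copy_user() builds a kernel bounce buffer. For orientation, the alignment test itself looks roughly like the sketch below. This is an assumed reconstruction, not part of this diff; the real helper lives in include/linux/blkdev.h, and whether it takes an unsigned long or the buffer pointer directly is what appears to change at the call site above.

/*
 * Assumed sketch of blk_rq_aligned() (not from this diff): a buffer can be
 * mapped directly only if both its address and its length satisfy the
 * queue's DMA alignment and padding mask.
 */
static inline int blk_rq_aligned(struct request_queue *q, void *addr,
                                 unsigned long len)
{
        unsigned long alignment = queue_dma_alignment(q) | q->dma_pad_mask;

        return !((unsigned long)addr & alignment) && !(len & alignment);
}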

--- 217 unchanged lines hidden ---

 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
                    unsigned int len, gfp_t gfp_mask)
{
        int reading = rq_data_dir(rq) == READ;
-       unsigned long addr = (unsigned long) kbuf;
        int do_copy = 0;
        struct bio *bio;
        int ret;

        if (len > (queue_max_hw_sectors(q) << 9))
                return -EINVAL;
        if (!len || !kbuf)
                return -EINVAL;

-       do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
+       do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
        if (do_copy)
                bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
        else
                bio = bio_map_kern(q, kbuf, len, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (rq_data_dir(rq) == WRITE)
-               bio->bi_rw |= (1 << REQ_WRITE);
+               bio->bi_rw |= REQ_WRITE;

        if (do_copy)
                rq->cmd_flags |= REQ_COPY_USER;

        ret = blk_rq_append_bio(q, rq, bio);
        if (unlikely(ret)) {
                /* request is too big */
                bio_put(bio);
                return ret;
        }

        blk_queue_bounce(q, &rq->bio);
        rq->buffer = NULL;
        return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
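The kernel-doc tail above describes blk_rq_map_kern() as the way to attach an existing kernel buffer to an already-allocated request, bouncing automatically when the buffer is misaligned or lives on the stack. As a rough usage sketch only (not part of this file or this diff), a passthrough-style caller against this era of the block API might look like the following; the function name, command bytes, and buffer size are invented for illustration.

#include <linux/blkdev.h>
#include <linux/slab.h>
#include <scsi/scsi.h>

/* Illustrative sketch: issue a SCSI INQUIRY through a BLOCK_PC request,
 * using blk_rq_map_kern() to attach a kernel buffer.  Hypothetical helper,
 * not taken from blk-map.c.
 */
static int example_inquiry(struct request_queue *q, struct gendisk *disk)
{
        unsigned char *buf;
        struct request *rq;
        int err;

        /* Heap allocation: a stack buffer would still work, but it would be
         * bounced, since blk_rq_map_kern() copies anything on the stack. */
        buf = kmalloc(96, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        rq = blk_get_request(q, READ, GFP_KERNEL);
        if (!rq) {
                kfree(buf);
                return -ENOMEM;
        }
        rq->cmd_type = REQ_TYPE_BLOCK_PC;
        rq->cmd[0] = INQUIRY;
        rq->cmd[4] = 96;
        rq->cmd_len = 6;
        rq->timeout = 10 * HZ;

        /* Attach the buffer to the request; a bounce buffer is set up
         * transparently if the alignment check fails. */
        err = blk_rq_map_kern(q, rq, buf, 96, GFP_KERNEL);
        if (!err)
                err = blk_execute_rq(q, disk, rq, 0);

        blk_put_request(rq);
        kfree(buf);
        return err;
}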