/* bounce buffer handling for block devices
 *
 * - Split from highmem.c
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/printk.h>
#include <asm/tlbflush.h>

#include <trace/events/block.h>

#define POOL_SIZE	64
#define ISA_POOL_SIZE	16

static mempool_t *page_pool, *isa_page_pool;

#if defined(CONFIG_HIGHMEM) || defined(CONFIG_NEED_BOUNCE_POOL)
static __init int init_emergency_pool(void)
{
#if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG)
	if (max_pfn <= max_low_pfn)
		return 0;
#endif

	page_pool = mempool_create_page_pool(POOL_SIZE, 0);
	BUG_ON(!page_pool);
	pr_info("pool size: %d pages\n", POOL_SIZE);

	return 0;
}

__initcall(init_emergency_pool);
#endif

#ifdef CONFIG_HIGHMEM
/*
 * highmem version, map in to vec
 */
static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
{
	unsigned long flags;
	unsigned char *vto;

	local_irq_save(flags);
	vto = kmap_atomic(to->bv_page);
	memcpy(vto + to->bv_offset, vfrom, to->bv_len);
	kunmap_atomic(vto);
	local_irq_restore(flags);
}

#else /* CONFIG_HIGHMEM */

#define bounce_copy_vec(to, vfrom)	\
	memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)

#endif /* CONFIG_HIGHMEM */

/*
 * allocate pages in the DMA region for the ISA pool
 */
static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
{
	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
}

/*
 * gets called "every" time someone init's a queue with BLK_BOUNCE_ISA
 * as the max address, so check if the pool has already been created.
 */
int init_emergency_isa_pool(void)
{
	if (isa_page_pool)
		return 0;

	isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
				       mempool_free_pages, (void *) 0);
	BUG_ON(!isa_page_pool);

	pr_info("isa pool size: %d pages\n", ISA_POOL_SIZE);
	return 0;
}

/*
 * Simple bounce buffer support for highmem pages. Depending on the
 * queue gfp mask set, *to may or may not be a highmem page. kmap it
 * always, it will do the Right Thing
 */
static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
{
	unsigned char *vfrom;
	struct bio_vec tovec, *fromvec = from->bi_io_vec;
	struct bvec_iter iter;

	bio_for_each_segment(tovec, to, iter) {
		if (tovec.bv_page != fromvec->bv_page) {
			/*
			 * fromvec->bv_offset and fromvec->bv_len might have
			 * been modified by the block layer, so use the original
			 * copy, bounce_copy_vec already uses tovec->bv_len
			 */
			vfrom = page_address(fromvec->bv_page) +
				tovec.bv_offset;

			bounce_copy_vec(&tovec, vfrom);
			flush_dcache_page(tovec.bv_page);
		}

		fromvec++;
	}
}

/*
 * Completion handler shared by the bounce paths: return any bounce pages
 * to @pool, then propagate the clone's status to the original bio and
 * complete it.
 */
static void bounce_end_io(struct bio *bio, mempool_t *pool)
{
	struct bio *bio_orig = bio->bi_private;
	struct bio_vec *bvec, *org_vec;
	int i;
	int start = bio_orig->bi_iter.bi_idx;

	/*
	 * free up bounce indirect pages used
	 */
	bio_for_each_segment_all(bvec, bio, i) {
		org_vec = bio_orig->bi_io_vec + i + start;

		if (bvec->bv_page == org_vec->bv_page)
			continue;

		dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
		mempool_free(bvec->bv_page, pool);
	}

	bio_orig->bi_status = bio->bi_status;
	bio_endio(bio_orig);
	bio_put(bio);
}

static void bounce_end_io_write(struct bio *bio)
{
	bounce_end_io(bio, page_pool);
}

static void bounce_end_io_write_isa(struct bio *bio)
{
	bounce_end_io(bio, isa_page_pool);
}

static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
{
	struct bio *bio_orig = bio->bi_private;

	if (!bio->bi_status)
		copy_to_high_bio_irq(bio_orig, bio);

	bounce_end_io(bio, pool);
}

static void bounce_end_io_read(struct bio *bio)
{
	__bounce_end_io_read(bio, page_pool);
}

static void bounce_end_io_read_isa(struct bio *bio)
{
	__bounce_end_io_read(bio, isa_page_pool);
}

/*
 * Clone *bio_orig and replace every segment that lies above the queue's
 * bounce pfn with a page from @pool. For writes the data is copied into
 * the bounce page up front; for reads it is copied back on completion.
 */
static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
			       mempool_t *pool)
{
	struct bio *bio;
	int rw = bio_data_dir(*bio_orig);
	struct bio_vec *to, from;
	struct bvec_iter iter;
	unsigned i;

	bio_for_each_segment(from, *bio_orig, iter)
		if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q))
			goto bounce;

	return;
bounce:
	bio = bio_clone_bioset(*bio_orig, GFP_NOIO, fs_bio_set);

	bio_for_each_segment_all(to, bio, i) {
		struct page *page = to->bv_page;

		if (page_to_pfn(page) <= queue_bounce_pfn(q))
			continue;

		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
		inc_zone_page_state(to->bv_page, NR_BOUNCE);

		if (rw == WRITE) {
			char *vto, *vfrom;

			flush_dcache_page(page);

			vto = page_address(to->bv_page) + to->bv_offset;
			vfrom = kmap_atomic(page) + to->bv_offset;
			memcpy(vto, vfrom, to->bv_len);
			kunmap_atomic(vfrom);
		}
	}

	trace_block_bio_bounce(q, *bio_orig);

	bio->bi_flags |= (1 << BIO_BOUNCED);

	if (pool == page_pool) {
		bio->bi_end_io = bounce_end_io_write;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read;
	} else {
		bio->bi_end_io = bounce_end_io_write_isa;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read_isa;
	}

	bio->bi_private = *bio_orig;
	*bio_orig = bio;
}

void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
{
	mempool_t *pool;

	/*
	 * Data-less bio, nothing to bounce
	 */
	if (!bio_has_data(*bio_orig))
		return;

	/*
	 * for non-isa bounce case, just check if the bounce pfn is equal
	 * to or bigger than the highest pfn in the system -- in that case,
	 * don't waste time iterating over bio segments
	 */
	if (!(q->bounce_gfp & GFP_DMA)) {
		if (queue_bounce_pfn(q) >= blk_max_pfn)
			return;
		pool = page_pool;
	} else {
		BUG_ON(!isa_page_pool);
		pool = isa_page_pool;
	}

	/*
	 * slow path
	 */
	__blk_queue_bounce(q, bio_orig, pool);
}

EXPORT_SYMBOL(blk_queue_bounce);
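
/*
 * Illustrative sketch (not part of this file): a driver opts into bounce
 * buffering by lowering the queue's bounce limit before submitting I/O.
 * Passing BLK_BOUNCE_ISA makes blk_queue_bounce_limit() call
 * init_emergency_isa_pool() and add GFP_DMA to q->bounce_gfp, so
 * blk_queue_bounce() above routes any segment over the bounce pfn through
 * the ISA pool. my_driver_probe, my_device and my_queue are hypothetical
 * names used only for this example:
 *
 *	static int my_driver_probe(struct my_device *dev)
 *	{
 *		struct request_queue *my_queue;
 *
 *		my_queue = blk_alloc_queue(GFP_KERNEL);
 *		if (!my_queue)
 *			return -ENOMEM;
 *
 *		blk_queue_bounce_limit(my_queue, BLK_BOUNCE_ISA);
 *		return 0;
 *	}
 *
 * With BLK_BOUNCE_HIGH, only pages above max_low_pfn are bounced through
 * page_pool; with BLK_BOUNCE_ANY, blk_queue_bounce() does nothing.
 */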