/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/iomap.h>
#include "internal.h"

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
{
	struct request_queue *q = bdev->bd_queue;
	long rc = -EIO;

	dax->addr = ERR_PTR(-EIO);
	if (blk_queue_enter(q, true) != 0)
		return rc;

	rc = bdev_direct_access(bdev, dax);
	if (rc < 0) {
		dax->addr = ERR_PTR(rc);
		blk_queue_exit(q);
		return rc;
	}
	return rc;
}
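/*
 * Usage pattern for dax_map_atomic()/dax_unmap_atomic() (a sketch; this is
 * how the helpers further down in this file use the pair):
 *
 *	struct blk_dax_ctl dax = { .sector = sector, .size = size };
 *
 *	if (dax_map_atomic(bdev, &dax) < 0)
 *		return PTR_ERR(dax.addr);
 *	...use dax.addr and dax.pfn...
 *	dax_unmap_atomic(bdev, &dax);
 *
 * dax_map_atomic() takes a reference on the request queue so the device
 * cannot be torn down while the mapping is in use; dax_unmap_atomic()
 * drops that reference.
 */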
static void dax_unmap_atomic(struct block_device *bdev,
		const struct blk_dax_ctl *dax)
{
	if (IS_ERR(dax->addr))
		return;
	blk_queue_exit(bdev->bd_queue);
}

static int dax_is_pmd_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_PMD;
}

static int dax_is_pte_entry(void *entry)
{
	return !((unsigned long)entry & RADIX_DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_HZP;
}

static int dax_is_empty_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_EMPTY;
}
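/*
 * Background for the predicates above: DAX radix tree entries are
 * exceptional entries encoding the sector of the backing storage plus flag
 * bits - RADIX_DAX_PMD marks a 2MiB entry, RADIX_DAX_HZP a huge zero page,
 * RADIX_DAX_EMPTY an entry without storage behind it yet, and
 * RADIX_DAX_ENTRY_LOCK doubles as a per-entry bit lock.  The exact bit
 * layout lives in the dax header; the helpers above only test the flags.
 */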
struct page *read_dax_sector(struct block_device *bdev, sector_t n)
{
	struct page *page = alloc_pages(GFP_KERNEL, 0);
	struct blk_dax_ctl dax = {
		.size = PAGE_SIZE,
		.sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
	};
	long rc;

	if (!page)
		return ERR_PTR(-ENOMEM);

	rc = dax_map_atomic(bdev, &dax);
	if (rc < 0)
		return ERR_PTR(rc);
	memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
	dax_unmap_atomic(bdev, &dax);
	return page;
}

/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
	struct address_space *mapping;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_t wait;
	struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
		pgoff_t index, void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1);

	key->mapping = mapping;
	key->entry_start = index;

	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}
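/*
 * Worked example for the alignment above: with 4k pages and 2MiB PMDs,
 * PMD_SHIFT - PAGE_SHIFT is 9, so a PMD covers 512 pages.  Waiters on any
 * index in [0x200, 0x3ff] of a PMD entry therefore all use entry_start
 * 0x200 and hash to the same waitqueue.
 */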
static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
				       int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->mapping != ewait->key.mapping ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * Check whether the given slot is locked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
	return entry & RADIX_DAX_ENTRY_LOCK;
}

/*
 * Mark the given slot as locked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry |= RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Mark the given slot as unlocked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}
/*
 * Lookup entry in radix tree, wait for it to become unlocked if it is
 * an exceptional entry and return it.  The caller must call
 * put_unlocked_mapping_entry() when it has decided not to lock the entry or
 * put_locked_mapping_entry() when it has locked the entry and now wants to
 * unlock it.
 *
 * The function must be called with mapping->tree_lock held.
 */
static void *get_unlocked_mapping_entry(struct address_space *mapping,
					pgoff_t index, void ***slotp)
{
	void *entry, **slot;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
					  &slot);
		if (!entry || !radix_tree_exceptional_entry(entry) ||
		    !slot_locked(mapping, slot)) {
			if (slotp)
				*slotp = slot;
			return entry;
		}

		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mapping->tree_lock);
		schedule();
		finish_wait(wq, &ewait.wait);
		spin_lock_irq(&mapping->tree_lock);
	}
}

static void dax_unlock_mapping_entry(struct address_space *mapping,
				     pgoff_t index)
{
	void *entry, **slot;

	spin_lock_irq(&mapping->tree_lock);
	entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
			 !slot_locked(mapping, slot))) {
		spin_unlock_irq(&mapping->tree_lock);
		return;
	}
	unlock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

static void put_locked_mapping_entry(struct address_space *mapping,
				     pgoff_t index, void *entry)
{
	if (!radix_tree_exceptional_entry(entry)) {
		unlock_page(entry);
		put_page(entry);
	} else {
		dax_unlock_mapping_entry(mapping, index);
	}
}

/*
 * Called when we are done with the radix tree entry we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
{
	if (!radix_tree_exceptional_entry(entry))
		return;

	/* We have to wake up next waiter for the radix tree entry lock */
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}
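/*
 * Illustrative sketch of the lookup protocol above (it mirrors what
 * dax_writeback_one() does below):
 *
 *	spin_lock_irq(&mapping->tree_lock);
 *	entry = get_unlocked_mapping_entry(mapping, index, &slot);
 *	if (!suitable(entry)) {
 *		put_unlocked_mapping_entry(mapping, index, entry);
 *		spin_unlock_irq(&mapping->tree_lock);
 *		return;
 *	}
 *	entry = lock_slot(mapping, slot);
 *	spin_unlock_irq(&mapping->tree_lock);
 *	...work on the entry without tree_lock...
 *	put_locked_mapping_entry(mapping, index, entry);
 *
 * "suitable()" stands in for whatever check the caller needs to make.
 */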
/*
 * Find radix tree entry at given index. If it points to a page, return with
 * the page locked. If it points to the exceptional entry, return with the
 * radix tree entry locked. If the radix tree doesn't contain given index,
 * create empty exceptional entry for the index and return with it locked.
 *
 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return an error.  This error will
 * happen if there are any 4k entries (either zero pages or DAX entries)
 * within the 2MiB range that we are requesting.
 *
 * We always favor 4k entries over 2MiB entries. There isn't a flow where we
 * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
 * insertion will fail if it finds any 4k entries already in the tree, and a
 * 4k insertion will cause an existing 2MiB entry to be unmapped and
 * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
 * well as 2MiB empty entries.
 *
 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
 * real storage backing them.  We will leave these real 2MiB DAX entries in
 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
		unsigned long size_flag)
{
	bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
	void *entry, **slot;

restart:
	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);

	if (entry) {
		if (size_flag & RADIX_DAX_PMD) {
			if (!radix_tree_exceptional_entry(entry) ||
			    dax_is_pte_entry(entry)) {
				put_unlocked_mapping_entry(mapping, index,
						entry);
				entry = ERR_PTR(-EEXIST);
				goto out_unlock;
			}
		} else { /* trying to grab a PTE entry */
			if (radix_tree_exceptional_entry(entry) &&
			    dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	/* No entry for given index? Make sure radix tree is big enough. */
	if (!entry || pmd_downgrade) {
		int err;

		if (pmd_downgrade) {
			/*
			 * Make sure 'entry' remains valid while we drop
			 * mapping->tree_lock.
			 */
			entry = lock_slot(mapping, slot);
		}

		spin_unlock_irq(&mapping->tree_lock);
		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (pmd_downgrade && dax_is_zero_entry(entry))
			unmap_mapping_range(mapping,
				(index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);

		err = radix_tree_preload(
				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
		if (err) {
			if (pmd_downgrade)
				put_locked_mapping_entry(mapping, index, entry);
			return ERR_PTR(err);
		}
		spin_lock_irq(&mapping->tree_lock);

		if (pmd_downgrade) {
			radix_tree_delete(&mapping->page_tree, index);
			mapping->nrexceptional--;
			dax_wake_mapping_entry_waiter(mapping, index, entry,
					true);
		}

		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);

		err = __radix_tree_insert(&mapping->page_tree, index,
				dax_radix_order(entry), entry);
		radix_tree_preload_end();
		if (err) {
			spin_unlock_irq(&mapping->tree_lock);
			/*
			 * Someone already created the entry?  This is a
			 * normal failure when inserting PMDs in a range
			 * that already contains PTEs.  In that case we want
			 * to return -EEXIST immediately.
			 */
			if (err == -EEXIST && !(size_flag & RADIX_DAX_PMD))
				goto restart;
			/*
			 * Our insertion of a DAX PMD entry failed, most
			 * likely because it collided with a PTE sized entry
			 * at a different index in the PMD range.  We haven't
			 * inserted anything into the radix tree and have no
			 * waiters to wake.
			 */
			return ERR_PTR(err);
		}
		/* Good, we have inserted empty locked entry into the tree. */
		mapping->nrexceptional++;
		spin_unlock_irq(&mapping->tree_lock);
		return entry;
	}
	/* Normal page in radix tree? */
	if (!radix_tree_exceptional_entry(entry)) {
		struct page *page = entry;

		get_page(page);
		spin_unlock_irq(&mapping->tree_lock);
		lock_page(page);
		/* Page got truncated? Retry... */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			put_page(page);
			goto restart;
		}
		return page;
	}
	entry = lock_slot(mapping, slot);
out_unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return entry;
}
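/*
 * Sketch of how the fault paths below use grab_mapping_entry() (see
 * dax_iomap_fault() for the real thing):
 *
 *	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
 *	if (IS_ERR(entry))
 *		return PTR_ERR(entry);
 *	...map the block and update the entry...
 *	put_locked_mapping_entry(mapping, vmf->pgoff, entry);
 */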
/*
 * We do not necessarily hold the mapping->tree_lock when we call this
 * function so it is possible that 'entry' is no longer a valid item in the
 * radix tree.  This is okay because all we really need to do is to find the
 * correct waitqueue where tasks might be waiting for that old 'entry' and
 * wake them.
 */
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
		pgoff_t index, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(mapping, index, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under mapping->tree_lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}
/*
 * Delete exceptional DAX entry at @index from @mapping.  Wait for the radix
 * tree entry to get unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	void *entry;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * radix tree (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen an exceptional entry for this index, we better find
	 * it at that index as well...
	 */
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry))) {
		spin_unlock_irq(&mapping->tree_lock);
		return 0;
	}
	radix_tree_delete(&mapping->page_tree, index);
	mapping->nrexceptional--;
	spin_unlock_irq(&mapping->tree_lock);
	dax_wake_mapping_entry_waiter(mapping, index, entry, true);

	return 1;
}
/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, void *entry,
			 struct vm_fault *vmf)
{
	struct page *page;

	/* Hole page already exists? Return it... */
	if (!radix_tree_exceptional_entry(entry)) {
		vmf->page = entry;
		return VM_FAULT_LOCKED;
	}

	/* This will replace locked radix tree entry with a hole page */
	page = find_or_create_page(mapping, vmf->pgoff,
				   vmf->gfp_mask | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;
	vmf->page = page;
	return VM_FAULT_LOCKED;
}
static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,
		struct page *to, unsigned long vaddr)
{
	struct blk_dax_ctl dax = {
		.sector = sector,
		.size = size,
	};
	void *vto;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
	kunmap_atomic(vto);
	dax_unmap_atomic(bdev, &dax);
	return 0;
}
/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_mapping_entry(struct address_space *mapping,
				      struct vm_fault *vmf,
				      void *entry, sector_t sector,
				      unsigned long flags)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	int error = 0;
	bool hole_fill = false;
	void *new_entry;
	pgoff_t index = vmf->pgoff;

	if (vmf->flags & FAULT_FLAG_WRITE)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	/* Replacing hole page with block mapping? */
	if (!radix_tree_exceptional_entry(entry)) {
		hole_fill = true;
		/*
		 * Unmap the page now before we remove it from page cache below.
		 * The page is locked so it cannot be faulted in again.
		 */
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
				    PAGE_SIZE, 0);
		error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
		if (error)
			return ERR_PTR(error);
	} else if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_HZP)) {
		/* replacing huge zero page with PMD block mapping */
		unmap_mapping_range(mapping,
			(vmf->pgoff << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
	}

	spin_lock_irq(&mapping->tree_lock);
	new_entry = dax_radix_locked_entry(sector, flags);

	if (hole_fill) {
		__delete_from_page_cache(entry, NULL);
		/* Drop pagecache reference */
		put_page(entry);
		error = __radix_tree_insert(page_tree, index,
				dax_radix_order(new_entry), new_entry);
		if (error) {
			new_entry = ERR_PTR(error);
			goto unlock;
		}
		mapping->nrexceptional++;
	} else if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		/*
		 * Only swap our new entry into the radix tree if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the tree, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		struct radix_tree_node *node;
		void **slot;
		void *ret;

		ret = __radix_tree_lookup(page_tree, index, &node, &slot);
		WARN_ON_ONCE(ret != entry);
		__radix_tree_replace(page_tree, node, slot,
				     new_entry, NULL, NULL);
	}
	if (vmf->flags & FAULT_FLAG_WRITE)
		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
unlock:
	spin_unlock_irq(&mapping->tree_lock);
	if (hole_fill) {
		radix_tree_preload_end();
		/*
		 * We don't need the hole page anymore, it has been replaced
		 * with a locked radix tree entry now.
		 */
		if (mapping->a_ops->freepage)
			mapping->a_ops->freepage(entry);
		unlock_page(entry);
		put_page(entry);
	}
	return new_entry;
}
static int dax_writeback_one(struct block_device *bdev,
		struct address_space *mapping, pgoff_t index, void *entry)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	struct blk_dax_ctl dax;
	void *entry2, **slot;
	int ret = 0;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!radix_tree_exceptional_entry(entry)))
		return -EIO;

	spin_lock_irq(&mapping->tree_lock);
	entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
	/* Entry got punched out / reallocated? */
	if (!entry2 || !radix_tree_exceptional_entry(entry2))
		goto put_unlocked;
	/*
	 * Entry got reallocated elsewhere? No need to writeback. We have to
	 * compare sectors as we must not bail out due to difference in lockbit
	 * or entry type.
	 */
	if (dax_radix_sector(entry2) != dax_radix_sector(entry))
		goto put_unlocked;
	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
				dax_is_zero_entry(entry))) {
		ret = -EIO;
		goto put_unlocked;
	}

	/* Another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto put_unlocked;
	/* Lock the entry to serialize with page faults */
	entry = lock_slot(mapping, slot);
	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under tree_lock and once they do that they will
	 * see the entry locked and wait for it to unlock.
	 */
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
	spin_unlock_irq(&mapping->tree_lock);

	/*
	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we are given will be aligned to
	 * the start index of the PMD, as will the sector we pull from
	 * 'entry'.  This allows us to flush for PMD_SIZE and not have to
	 * worry about partial PMD writebacks.
	 */
	dax.sector = dax_radix_sector(entry);
	dax.size = PAGE_SIZE << dax_radix_order(entry);

	/*
	 * We cannot hold tree_lock while calling dax_map_atomic() because it
	 * eventually calls cond_resched().
	 */
	ret = dax_map_atomic(bdev, &dax);
	if (ret < 0) {
		put_locked_mapping_entry(mapping, index, entry);
		return ret;
	}

	if (WARN_ON_ONCE(ret < dax.size)) {
		ret = -EIO;
		goto unmap;
	}

	wb_cache_pmem(dax.addr, dax.size);
unmap:
	dax_unmap_atomic(bdev, &dax);
	put_locked_mapping_entry(mapping, index, entry);
	return ret;

put_unlocked:
	put_unlocked_mapping_entry(mapping, index, entry2);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}
/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	pgoff_t start_index, end_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	start_index = wbc->range_start >> PAGE_SHIFT;
	end_index = wbc->range_end >> PAGE_SHIFT;

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec, 0);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(bdev, mapping, indices[i],
					pvec.pages[i]);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
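/*
 * A filesystem typically calls dax_writeback_mapping_range() from its
 * ->writepages() method for DAX inodes (sketch only, "foo_" names
 * hypothetical):
 *
 *	static int foo_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		return dax_writeback_mapping_range(mapping,
 *				mapping->host->i_sb->s_bdev, wbc);
 *	}
 */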
static int dax_insert_mapping(struct address_space *mapping,
		struct block_device *bdev, sector_t sector, size_t size,
		void **entryp, struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = vmf->address;
	struct blk_dax_ctl dax = {
		.sector = sector,
		.size = size,
	};
	void *ret;
	void *entry = *entryp;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	dax_unmap_atomic(bdev, &dax);

	ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector, 0);
	if (IS_ERR(ret))
		return PTR_ERR(ret);
	*entryp = ret;

	return vm_insert_mixed(vma, vaddr, dax.pfn);
}
/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	void *entry;
	pgoff_t index = vmf->pgoff;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	if (!entry || !radix_tree_exceptional_entry(entry))
		goto out;
	radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
	put_unlocked_mapping_entry(mapping, index, entry);
out:
	spin_unlock_irq(&mapping->tree_lock);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
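/*
 * dax_pfn_mkwrite() is installed directly as the VMA's ->pfn_mkwrite
 * handler so the radix tree entry gets re-tagged dirty on the first write
 * through an existing read-only mapping and a later fsync flushes it,
 * e.g. (sketch, "foo_" names hypothetical):
 *
 *	static const struct vm_operations_struct foo_dax_vm_ops = {
 *		.fault		= foo_dax_fault,
 *		.pfn_mkwrite	= dax_pfn_mkwrite,
 *	};
 */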
static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}

int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
		unsigned int offset, unsigned int length)
{
	struct blk_dax_ctl dax = {
		.sector		= sector,
		.size		= PAGE_SIZE,
	};

	if (dax_range_is_aligned(bdev, offset, length)) {
		sector_t start_sector = dax.sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				length >> 9, GFP_NOFS, true);
	} else {
		if (dax_map_atomic(bdev, &dax) < 0)
			return PTR_ERR(dax.addr);
		clear_pmem(dax.addr + offset, length);
		dax_unmap_atomic(bdev, &dax);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);
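/*
 * Callers use __dax_zero_page_range() to zero sub-page ranges during
 * truncate and hole punch.  When the range is logical-block aligned the
 * zeroing is pushed down to the block layer, otherwise it goes through the
 * direct mapping: for example, zeroing 512 bytes at byte offset 512 of a
 * page on a 512-byte-sector device takes the blkdev_issue_zeroout() path,
 * while a 100-byte range falls back to clear_pmem().
 */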
#ifdef CONFIG_FS_IOMAP
static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
}

static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		struct blk_dax_ctl dax = { 0 };
		ssize_t map_len;

		dax.sector = dax_iomap_sector(iomap, pos);
		dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
		map_len = dax_map_atomic(iomap->bdev, &dax);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		dax.addr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		if (iov_iter_rw(iter) == WRITE)
			map_len = copy_from_iter_pmem(dax.addr, map_len, iter);
		else
			map_len = copy_to_iter(dax.addr, map_len, iter);
		dax_unmap_atomic(iomap->bdev, &dax);
		if (map_len <= 0) {
			ret = map_len ? map_len : -EFAULT;
			break;
		}

		pos += map_len;
		length -= map_len;
		done += map_len;
	}

	return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:	The control block for this I/O
 * @iter:	The addresses to do I/O from or to
 * @ops:	iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE)
		flags |= IOMAP_WRITE;

	/*
	 * Yes, even DAX files can have page cache attached to them:  A zeroed
	 * page is inserted into the pagecache when we have to serve a write
	 * fault on a hole.  It should never be dirtied and can simply be
	 * dropped from the pagecache once we get real data for the page.
	 *
	 * XXX: This is racy against mmap, and there's nothing we can do about
	 * it. We'll eventually need to shift this down even further so that
	 * we can check if we allocated blocks over a hole first.
	 */
	if (mapping->nrpages) {
		ret = invalidate_inode_pages2_range(mapping,
				pos >> PAGE_SHIFT,
				(pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT);
		WARN_ON_ONCE(ret);
	}

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);
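/*
 * Sketch of a filesystem ->read_iter() built on dax_iomap_rw() ("foo_"
 * names hypothetical; a real caller provides the read/write exclusion
 * noted above):
 *
 *	static ssize_t foo_dax_read_iter(struct kiocb *iocb,
 *			struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &foo_iomap_ops);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 */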
/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @ops: iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in their fault
 * or mkwrite handler for DAX files. Assumes the caller has done all the
 * necessary locking for the page fault to proceed successfully.
 */
int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			struct iomap_ops *ops)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	sector_t sector;
	struct iomap iomap = { 0 };
	unsigned flags = IOMAP_FAULT;
	int error, major = 0;
	int vmf_ret = 0;
	void *entry;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	if (pos >= i_size_read(inode))
		return VM_FAULT_SIGBUS;

	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
	if (IS_ERR(entry)) {
		error = PTR_ERR(entry);
		goto out;
	}

	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	/*
	 * Note that we don't bother to use iomap_apply here: DAX requires
	 * the file system block size to be equal to the page size, which means
	 * that we never have to deal with more than a single extent here.
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
	if (error)
		goto unlock_entry;
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		error = -EIO;		/* fs corruption? */
		goto finish_iomap;
	}

	sector = dax_iomap_sector(&iomap, pos);

	if (vmf->cow_page) {
		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			error = copy_user_dax(iomap.bdev, sector, PAGE_SIZE,
					vmf->cow_page, vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
			goto finish_iomap;

		__SetPageUptodate(vmf->cow_page);
		vmf_ret = finish_fault(vmf);
		if (!vmf_ret)
			vmf_ret = VM_FAULT_DONE_COW;
		goto finish_iomap;
	}

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_insert_mapping(mapping, iomap.bdev, sector,
				PAGE_SIZE, &entry, vma, vmf);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (!(vmf->flags & FAULT_FLAG_WRITE)) {
			vmf_ret = dax_load_hole(mapping, entry, vmf);
			break;
		}
		/*FALLTHRU*/
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

finish_iomap:
	if (ops->iomap_end) {
		if (error || (vmf_ret & VM_FAULT_ERROR)) {
			/* keep previous error */
			ops->iomap_end(inode, pos, PAGE_SIZE, 0, flags,
					&iomap);
		} else {
			error = ops->iomap_end(inode, pos, PAGE_SIZE,
					PAGE_SIZE, flags, &iomap);
		}
	}
unlock_entry:
	if (vmf_ret != VM_FAULT_LOCKED || error)
		put_locked_mapping_entry(mapping, vmf->pgoff, entry);
out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if (error < 0 && error != -EBUSY)
		return VM_FAULT_SIGBUS | major;
	if (vmf_ret) {
		WARN_ON_ONCE(error); /* -EBUSY from ops->iomap_end? */
		return vmf_ret;
	}
	return VM_FAULT_NOPAGE | major;
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);
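/*
 * Sketch of a filesystem fault handler delegating to dax_iomap_fault()
 * ("foo_" names hypothetical; the handler is assumed to take whatever lock
 * the filesystem uses to serialize faults against truncate):
 *
 *	static int foo_dax_fault(struct vm_area_struct *vma,
 *			struct vm_fault *vmf)
 *	{
 *		int ret;
 *
 *		foo_fault_lock(vma->vm_file);
 *		ret = dax_iomap_fault(vma, vmf, &foo_iomap_ops);
 *		foo_fault_unlock(vma->vm_file);
 *		return ret;
 *	}
 */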
#ifdef CONFIG_FS_DAX_PMD
/*
 * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below functions.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
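/*
 * Worked example: with 4k pages and 2MiB PMDs, PG_PMD_COLOUR is 511
 * (0x1ff).  A page offset is PMD-aligned iff (pgoff & PG_PMD_COLOUR) == 0,
 * and (pgoff | PG_PMD_COLOUR) gives the last page offset covered by the
 * same PMD, which is what the file-size check below relies on.
 */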
static int dax_pmd_load_hole(struct vm_area_struct *vma, pmd_t *pmd,
		struct vm_fault *vmf, unsigned long address,
		struct iomap *iomap, void **entryp)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long pmd_addr = address & PMD_MASK;
	struct page *zero_page;
	spinlock_t *ptl;
	pmd_t pmd_entry;
	void *ret;

	zero_page = mm_get_huge_zero_page(vma->vm_mm);

	if (unlikely(!zero_page))
		return VM_FAULT_FALLBACK;

	ret = dax_insert_mapping_entry(mapping, vmf, *entryp, 0,
			RADIX_DAX_PMD | RADIX_DAX_HZP);
	if (IS_ERR(ret))
		return VM_FAULT_FALLBACK;
	*entryp = ret;

	ptl = pmd_lock(vma->vm_mm, pmd);
	if (!pmd_none(*pmd)) {
		spin_unlock(ptl);
		return VM_FAULT_FALLBACK;
	}

	pmd_entry = mk_pmd(zero_page, vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vma->vm_mm, pmd_addr, pmd, pmd_entry);
	spin_unlock(ptl);
	return VM_FAULT_NOPAGE;
}

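/*
 * Note the ordering in dax_pmd_load_hole() above: the radix tree entry is
 * updated first, and only then is the page table touched under pmd_lock().
 * The pmd_none() recheck under the lock is the usual check-under-lock
 * pattern, since a racing fault may already have populated the PMD:
 *
 *	CPU 0					CPU 1
 *	fault, sees pmd_none() == true
 *						fault, installs a PMD
 *	pmd_lock()
 *	pmd_none() now false
 *	-> VM_FAULT_FALLBACK (fault is retried, nothing is corrupted)
 */
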
int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd, unsigned int flags, struct iomap_ops *ops)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long pmd_addr = address & PMD_MASK;
	bool write = flags & FAULT_FLAG_WRITE;
	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
	struct inode *inode = mapping->host;
	int result = VM_FAULT_FALLBACK;
	struct iomap iomap = { 0 };
	pgoff_t max_pgoff, pgoff;
	struct vm_fault vmf;
	void *entry;
	loff_t pos;
	int error;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		goto fallback;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		goto fallback;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		goto fallback;

	/*
	 * Check whether the offset is beyond the end of the file.  The
	 * caller is supposed to hold locks serializing us with truncate /
	 * punch hole, so this is a reliable test.
	 */
	pgoff = linear_page_index(vma, pmd_addr);
	max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	if (pgoff > max_pgoff)
		return VM_FAULT_SIGBUS;

	/* If the PMD would extend beyond the file size */
	if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
		goto fallback;

	/*
	 * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
	 * PMD, or a HZP entry.  If it can't (because a 4k page is already in
	 * the tree, for instance), it will return -EEXIST and we just fall
	 * back to 4k entries.
	 */
	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
	if (IS_ERR(entry))
		goto fallback;

	/*
	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
	 * setting up a mapping, so really we're using iomap_begin() as a way
	 * to look up our filesystem block.
	 */
	pos = (loff_t)pgoff << PAGE_SHIFT;
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
	if (error)
		goto unlock_entry;
	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;

	vmf.pgoff = pgoff;
	vmf.flags = flags;
	vmf.gfp_mask = mapping_gfp_mask(mapping) | __GFP_IO;

	switch (iomap.type) {
	case IOMAP_MAPPED:
		result = dax_pmd_insert_mapping(vma, pmd, &vmf, address,
				&iomap, pos, write, &entry);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(write))
			goto finish_iomap;
		result = dax_pmd_load_hole(vma, pmd, &vmf, address, &iomap,
				&entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

 finish_iomap:
	if (ops->iomap_end) {
		if (result == VM_FAULT_FALLBACK) {
			ops->iomap_end(inode, pos, PMD_SIZE, 0, iomap_flags,
					&iomap);
		} else {
			error = ops->iomap_end(inode, pos, PMD_SIZE, PMD_SIZE,
					iomap_flags, &iomap);
			if (error)
				result = VM_FAULT_FALLBACK;
		}
	}
 unlock_entry:
	put_locked_mapping_entry(mapping, pgoff, entry);
 fallback:
	if (result == VM_FAULT_FALLBACK) {
		split_huge_pmd(vma, pmd, address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
	return result;
}
EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault);
#endif /* CONFIG_FS_DAX_PMD */
#endif /* CONFIG_FS_IOMAP */
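/*
 * Usage sketch: dax_iomap_pmd_fault() is meant to be called from a
 * filesystem's vm_operations_struct ->pmd_fault handler with that
 * filesystem's iomap_ops.  Roughly (fs_dax_pmd_fault and fs_iomap_ops are
 * hypothetical stand-ins; real callers also take their own locks, e.g. a
 * mmap lock serializing against truncate, around the call):
 *
 *	static int fs_dax_pmd_fault(struct vm_area_struct *vma,
 *			unsigned long addr, pmd_t *pmd, unsigned int flags)
 *	{
 *		return dax_iomap_pmd_fault(vma, addr, pmd, flags,
 *				&fs_iomap_ops);
 *	}
 */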