pmem-dax.c
--- pmem-dax.c	(f295e53b60eb93ee53ed5ac610374ed293caa57b)
+++ pmem-dax.c	(ee8520fe8cd4cd2658ca555781eefeb4914c4ef9)
 /*
  * Copyright (c) 2014-2016, Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  * version 2, as published by the Free Software Foundation.
  *
  * This program is distributed in the hope it will be useful, but WITHOUT
 [... 7 unchanged lines hidden ...]
 #include <nd.h>
 
 long pmem_direct_access(struct block_device *bdev, sector_t sector,
 		void __pmem **kaddr, pfn_t *pfn, long size)
 {
 	struct pmem_device *pmem = bdev->bd_queue->queuedata;
 	resource_size_t offset = sector * 512 + pmem->data_offset;
 
-	/* disable DAX for nfit_test pmem devices */
-	if (get_nfit_res(pmem->phys_addr + offset)) {
-		dev_info_once(pmem->bb.dev, "dax is disabled for nfit_test\n");
+	if (unlikely(is_bad_pmem(&pmem->bb, sector, size)))
 		return -EIO;
+
+	/*
+	 * Limit dax to a single page at a time given vmalloc()-backed
+	 * in the nfit_test case.
+	 */
+	if (get_nfit_res(pmem->phys_addr + offset)) {
+		struct page *page;
+
+		*kaddr = pmem->virt_addr + offset;
+		page = vmalloc_to_page(pmem->virt_addr + offset);
+		*pfn = page_to_pfn_t(page);
+		dev_dbg_ratelimited(disk_to_dev(bdev->bd_disk)->parent,
+				"%s: sector: %#llx pfn: %#lx\n", __func__,
+				(unsigned long long) sector, page_to_pfn(page));
+
+		return PAGE_SIZE;
 	}
 
-	if (unlikely(is_bad_pmem(&pmem->bb, sector, size)))
-		return -EIO;
 	*kaddr = pmem->virt_addr + offset;
 	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
 
 	/*
 	 * If badblocks are present, limit known good range to the
 	 * requested range.
 	 */
 	if (unlikely(pmem->bb.count))
 		return size;
 	return pmem->size - pmem->pfn_pad - offset;
 }
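The behavioral change worth noting: the old version simply refused DAX for nfit_test devices (dev_info_once() plus -EIO), while the new version permits it but only vouches for PAGE_SIZE per call, because the test device's backing memory comes from vmalloc() and is contiguous one page at a time rather than as a single physical range. A caller that wants a larger extent therefore has to repeat the direct_access call, one page at a time. The following standalone C sketch is an illustration only, with made-up names (fake_direct_access, FAKE_PAGE_SIZE); it is not kernel code from this patch, it just shows the caller-side pattern that a short return value implies.

#include <stdio.h>

#define FAKE_PAGE_SIZE 4096L

/*
 * Stand-in for pmem_direct_access(): a vmalloc()-backed test device can
 * only vouch for one page at a time, so at most FAKE_PAGE_SIZE is
 * returned no matter how much the caller asked for.
 */
static long fake_direct_access(long sector, long size)
{
        (void)sector;
        return size < FAKE_PAGE_SIZE ? size : FAKE_PAGE_SIZE;
}

int main(void)
{
        long want = 5 * FAKE_PAGE_SIZE; /* caller wants a five-page extent */
        long sector = 0;
        long calls = 0;

        while (want > 0) {
                long got = fake_direct_access(sector, want);

                if (got <= 0)
                        return 1;       /* error, e.g. bad pmem */
                want -= got;
                sector += got / 512;    /* advance in 512-byte sectors */
                calls++;
        }
        printf("mapped the extent in %ld direct_access calls\n", calls);
        return 0;
}

In the kernel the real callers sit behind the block layer's DAX helpers rather than a hand-rolled loop like this; the sketch only demonstrates why a return value smaller than the requested size is legal and must be handled.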