// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/scatterlist.h>
#include <linux/memregion.h>
#include <linux/highmem.h>
#include <linux/kstrtox.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

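/*
 * Per-CPU cursor used to spread deep-flush writes across a region's
 * write-pending-queue (WPQ) flush hints; the flush implementation later
 * in this file advances it on each call so repeated flushes rotate
 * through the available hints.
 */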
static DEFINE_PER_CPU(int, flush_idx);

static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
		struct nd_region_data *ndrd)
{
	int i, j;

	dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
			nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
	for (i = 0; i < (1 << ndrd->hints_shift); i++) {
		struct resource *res = &nvdimm->flush_wpq[i];
		unsigned long pfn = PHYS_PFN(res->start);
		void __iomem *flush_page;

		/* check if flush hints share a page */
		for (j = 0; j < i; j++) {
			struct resource *res_j = &nvdimm->flush_wpq[j];
			unsigned long pfn_j = PHYS_PFN(res_j->start);

			if (pfn == pfn_j)
				break;
		}

		if (j < i)
			flush_page = (void __iomem *) ((unsigned long)
					ndrd_get_flush_wpq(ndrd, dimm, j)
					& PAGE_MASK);
		else
			flush_page = devm_nvdimm_ioremap(dev,
					PFN_PHYS(pfn), PAGE_SIZE);
		if (!flush_page)
			return -ENXIO;
		ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
				+ (res->start & ~PAGE_MASK));
	}

	return 0;
}
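
/*
 * Note: flush hints whose addresses fall within the same page share a
 * single ioremap() mapping.  For example (hypothetical addresses), hints
 * at 0x10000040 and 0x10000080 both live in the page at 0x10000000, so
 * the second hint reuses the first mapping at a different page offset.
 */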

static int nd_region_invalidate_memregion(struct nd_region *nd_region)
{
	int i, incoherent = 0;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		if (test_bit(NDD_INCOHERENT, &nvdimm->flags)) {
			incoherent++;
			break;
		}
	}

	if (!incoherent)
		return 0;

	if (!cpu_cache_has_invalidate_memregion()) {
		if (IS_ENABLED(CONFIG_NVDIMM_SECURITY_TEST)) {
			dev_warn(
				&nd_region->dev,
				"Bypassing cpu_cache_invalidate_memregion() for testing!\n");
			goto out;
		} else {
			dev_err(&nd_region->dev,
				"Failed to synchronize CPU cache state\n");
			return -ENXIO;
		}
	}

	cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
out:
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		clear_bit(NDD_INCOHERENT, &nvdimm->flags);
	}

	return 0;
}
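
/*
 * A DIMM is flagged NDD_INCOHERENT when a security operation (e.g. an
 * unlock or overwrite) may have changed media contents behind the CPU's
 * back, so any stale cache lines must be invalidated before the region
 * is activated.
 */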

int nd_region_activate(struct nd_region *nd_region)
{
	int i, j, rc, num_flush = 0;
	struct nd_region_data *ndrd;
	struct device *dev = &nd_region->dev;
	size_t flush_data_size = sizeof(void *);

	nvdimm_bus_lock(&nd_region->dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
			nvdimm_bus_unlock(&nd_region->dev);
			return -EBUSY;
		}

		/* at least one null hint slot per-dimm for the "no-hint" case */
		flush_data_size += sizeof(void *);
		num_flush = min_not_zero(num_flush, nvdimm->num_flush);
		if (!nvdimm->num_flush)
			continue;
		flush_data_size += nvdimm->num_flush * sizeof(void *);
	}
	nvdimm_bus_unlock(&nd_region->dev);

	rc = nd_region_invalidate_memregion(nd_region);
	if (rc)
		return rc;

	ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
	if (!ndrd)
		return -ENOMEM;
	dev_set_drvdata(dev, ndrd);

	if (!num_flush)
		return 0;

	ndrd->hints_shift = ilog2(num_flush);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);

		if (rc)
			return rc;
	}

	/*
	 * Clear out entries that are duplicates to prevent redundant
	 * flushes.
	 */
	for (i = 0; i < nd_region->ndr_mappings - 1; i++) {
		/* ignore if NULL already */
		if (!ndrd_get_flush_wpq(ndrd, i, 0))
			continue;

		for (j = i + 1; j < nd_region->ndr_mappings; j++)
			if (ndrd_get_flush_wpq(ndrd, i, 0) ==
			    ndrd_get_flush_wpq(ndrd, j, 0))
				ndrd_set_flush_wpq(ndrd, j, 0, NULL);
	}

	return 0;
}
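
/*
 * After activation, ndrd->flush_wpq acts as a flattened 2D table of
 * mapped flush-hint addresses indexed by (dimm, hint).  hints_shift,
 * the ilog2 of the smallest non-zero per-DIMM hint count, supplies the
 * per-DIMM stride used by the ndrd_{get,set}_flush_wpq() accessors.
 */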

static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	if (!test_bit(ND_REGION_CXL, &nd_region->flags))
		memregion_free(nd_region->id);
	kfree(nd_region);
}

struct nd_region *to_nd_region(struct device *dev)
{
	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

	WARN_ON(dev->type->release != nd_region_release);
	return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct device *nd_region_dev(struct nd_region *nd_region)
{
	if (!nd_region)
		return NULL;
	return &nd_region->dev;
}
EXPORT_SYMBOL_GPL(nd_region_dev);

void *nd_region_provider_data(struct nd_region *nd_region)
{
	return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is the 'nstype' attribute of a region as well as an input to the
 * MODALIAS for namespace devices, and the bit number for an nvdimm_bus to
 * match namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
	if (is_memory(&nd_region->dev)) {
		u16 i, label;

		for (i = 0, label = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			if (test_bit(NDD_LABELING, &nvdimm->flags))
				label++;
		}
		if (label)
			return ND_DEVICE_NAMESPACE_PMEM;
		else
			return ND_DEVICE_NAMESPACE_IO;
	}

	return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);

static unsigned long long region_size(struct nd_region *nd_region)
{
	if (is_memory(&nd_region->dev)) {
		return nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		return nd_mapping->size;
	}

	return 0;
}

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%llu\n", region_size(nd_region));
}
static DEVICE_ATTR_RO(size);

static ssize_t deep_flush_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	/*
	 * NOTE: in the nvdimm_has_flush() error case this attribute is
	 * not visible.
	 */
	return sprintf(buf, "%d\n", nvdimm_has_flush(nd_region));
}

static ssize_t deep_flush_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	bool flush;
	int rc = kstrtobool(buf, &flush);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;
	if (!flush)
		return -EINVAL;
	rc = nvdimm_flush(nd_region, NULL);
	if (rc)
		return rc;

	return len;
}
static DEVICE_ATTR_RW(deep_flush);
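
/*
 * Example usage from userspace (hypothetical region and output):
 *
 *	# cat /sys/bus/nd/devices/region0/deep_flush
 *	1
 *	# echo 1 > /sys/bus/nd/devices/region0/deep_flush
 *
 * Writing '1' triggers an explicit write-pending-queue flush; writing
 * '0' is rejected with -EINVAL.
 */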

static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	ssize_t rc = 0;

	if (is_memory(dev) && nd_set)
		/* pass, should be precluded by region_visible */;
	else
		return -ENXIO;

	/*
	 * The cookie to show depends on which specification of the
	 * labels we are using. If there are no labels then default to
	 * the v1.1 namespace label cookie definition. To read all this
	 * data we need to wait for probing to settle.
	 */
	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	if (nd_region->ndr_mappings) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		if (ndd) {
			struct nd_namespace_index *nsindex;

			nsindex = to_namespace_index(ndd, ndd->ns_current);
			rc = sprintf(buf, "%#llx\n",
					nd_region_interleave_set_cookie(nd_region,
						nsindex));
		}
	}
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	if (rc)
		return rc;
	return sprintf(buf, "%#llx\n", nd_set->cookie1);
}
static DEVICE_ATTR_RO(set_cookie);

resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
	resource_size_t available;
	int i;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

	available = 0;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		/* if a dimm is disabled the available capacity is zero */
		if (!ndd)
			return 0;

		available += nd_pmem_available_dpa(nd_region, nd_mapping);
	}

	return available;
}

resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region)
{
	resource_size_t avail = 0;
	int i;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		avail = min_not_zero(avail, nd_pmem_max_contiguous_dpa(
						    nd_region, nd_mapping));
	}
	return avail * nd_region->ndr_mappings;
}
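
/*
 * The allocatable extent is limited by the most constrained DIMM in the
 * interleave set.  For example (hypothetical numbers), a 4-way
 * interleaved region whose tightest member has 1GiB of contiguous free
 * DPA can back at most a 4GiB contiguous namespace extent.
 */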

static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size.  Of course, this value is potentially invalidated the
	 * moment the nvdimm_bus_lock() is dropped, but that's userspace's
	 * problem to not race itself.
	 */
	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t max_available_extent_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_allocatable_dpa(nd_region);
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(max_available_extent);

static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region_data *ndrd = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (ndrd)
		rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->btt_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->pfn_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->dax_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(dax_seed);

static ssize_t read_only_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ro);
}

static int revalidate_read_only(struct device *dev, void *data)
{
	nd_device_notify(dev, NVDIMM_REVALIDATE_REGION);
	return 0;
}

static ssize_t read_only_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool ro;
	int rc = kstrtobool(buf, &ro);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;

	nd_region->ro = ro;
	device_for_each_child(dev, NULL, revalidate_read_only);
	return len;
}
static DEVICE_ATTR_RW(read_only);

static ssize_t align_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%#lx\n", nd_region->align);
}

static ssize_t align_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long val, dpa;
	u32 mappings, remainder;
	int rc;

	rc = kstrtoul(buf, 0, &val);
	if (rc)
		return rc;

	/*
	 * Ensure space-align is evenly divisible by the region
	 * interleave-width because the kernel typically has no facility
	 * to determine which DIMM(s), dimm-physical-addresses, would
	 * contribute to the tail capacity in system-physical-address
	 * space for the namespace.
	 */
	mappings = max_t(u32, 1, nd_region->ndr_mappings);
	dpa = div_u64_rem(val, mappings, &remainder);
	if (!is_power_of_2(dpa) || dpa < PAGE_SIZE
			|| val > region_size(nd_region) || remainder)
		return -EINVAL;

	/*
	 * Given that space allocation consults this value multiple
	 * times ensure it does not change for the duration of the
	 * allocation.
	 */
	nvdimm_bus_lock(dev);
	nd_region->align = val;
	nvdimm_bus_unlock(dev);

	return len;
}
static DEVICE_ATTR_RW(align);
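
/*
 * Worked example of the validation above (hypothetical numbers):
 * writing 0x1000000 (16MiB) to 'align' on a 4-way interleaved region
 * yields dpa = 4MiB per DIMM, a power of 2 that is >= PAGE_SIZE, so the
 * value is accepted (provided it does not exceed the region size).  The
 * same 16MiB on a 3-way region leaves a remainder and is rejected with
 * -EINVAL.
 */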

static ssize_t region_badblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	device_lock(dev);
	if (dev->driver)
		rc = badblocks_show(&nd_region->bb, buf, 0);
	else
		rc = -ENXIO;
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%#llx\n", nd_region->ndr_start);
}
static DEVICE_ATTR_ADMIN_RO(resource);

static ssize_t persistence_domain_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	if (test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags))
		return sprintf(buf, "cpu_cache\n");
	else if (test_bit(ND_REGION_PERSIST_MEMCTRL, &nd_region->flags))
		return sprintf(buf, "memory_controller\n");
	else
		return sprintf(buf, "\n");
}
static DEVICE_ATTR_RO(persistence_domain);

static struct attribute *nd_region_attributes[] = {
	&dev_attr_size.attr,
	&dev_attr_align.attr,
	&dev_attr_nstype.attr,
	&dev_attr_mappings.attr,
	&dev_attr_btt_seed.attr,
	&dev_attr_pfn_seed.attr,
	&dev_attr_dax_seed.attr,
	&dev_attr_deep_flush.attr,
	&dev_attr_read_only.attr,
	&dev_attr_set_cookie.attr,
	&dev_attr_available_size.attr,
	&dev_attr_max_available_extent.attr,
	&dev_attr_namespace_seed.attr,
	&dev_attr_init_namespaces.attr,
	&dev_attr_badblocks.attr,
	&dev_attr_resource.attr,
	&dev_attr_persistence_domain.attr,
	NULL,
};

static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	int type = nd_region_to_nstype(nd_region);

	if (!is_memory(dev) && a == &dev_attr_pfn_seed.attr)
		return 0;

	if (!is_memory(dev) && a == &dev_attr_dax_seed.attr)
		return 0;

	if (!is_memory(dev) && a == &dev_attr_badblocks.attr)
		return 0;

	if (a == &dev_attr_resource.attr && !is_memory(dev))
		return 0;

	if (a == &dev_attr_deep_flush.attr) {
		int has_flush = nvdimm_has_flush(nd_region);

		if (has_flush == 1)
			return a->mode;
		else if (has_flush == 0)
			return 0444;
		else
			return 0;
	}

	if (a == &dev_attr_persistence_domain.attr) {
		if ((nd_region->flags & (BIT(ND_REGION_PERSIST_CACHE)
					| BIT(ND_REGION_PERSIST_MEMCTRL))) == 0)
			return 0;
		return a->mode;
	}

	if (a == &dev_attr_align.attr)
		return a->mode;

	if (a != &dev_attr_set_cookie.attr
			&& a != &dev_attr_available_size.attr)
		return a->mode;

	if (type == ND_DEVICE_NAMESPACE_PMEM &&
	    a == &dev_attr_available_size.attr)
		return a->mode;
	else if (is_memory(dev) && nd_set)
		return a->mode;

	return 0;
}

static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

	return sprintf(buf, "%s,%llu,%llu,%d\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size,
			nd_mapping->position);
}
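
/*
 * Each mappingN attribute reads as "<dimm>,<dpa-start>,<size>,<position>",
 * e.g. (hypothetical values) "nmem0,0,17179869184,0".
 */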

#define REGION_MAPPING(idx) \
static ssize_t mapping##idx##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	return mappingN(dev, buf, idx);				\
}								\
static DEVICE_ATTR_RO(mapping##idx)

/*
 * 32 should be enough for a while; even in the presence of socket
 * interleave, a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);

static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;
	return 0;
}

static struct attribute *mapping_attributes[] = {
	&dev_attr_mapping0.attr,
	&dev_attr_mapping1.attr,
	&dev_attr_mapping2.attr,
	&dev_attr_mapping3.attr,
	&dev_attr_mapping4.attr,
	&dev_attr_mapping5.attr,
	&dev_attr_mapping6.attr,
	&dev_attr_mapping7.attr,
	&dev_attr_mapping8.attr,
	&dev_attr_mapping9.attr,
	&dev_attr_mapping10.attr,
	&dev_attr_mapping11.attr,
	&dev_attr_mapping12.attr,
	&dev_attr_mapping13.attr,
	&dev_attr_mapping14.attr,
	&dev_attr_mapping15.attr,
	&dev_attr_mapping16.attr,
	&dev_attr_mapping17.attr,
	&dev_attr_mapping18.attr,
	&dev_attr_mapping19.attr,
	&dev_attr_mapping20.attr,
	&dev_attr_mapping21.attr,
	&dev_attr_mapping22.attr,
	&dev_attr_mapping23.attr,
	&dev_attr_mapping24.attr,
	&dev_attr_mapping25.attr,
	&dev_attr_mapping26.attr,
	&dev_attr_mapping27.attr,
	&dev_attr_mapping28.attr,
	&dev_attr_mapping29.attr,
	&dev_attr_mapping30.attr,
	&dev_attr_mapping31.attr,
	NULL,
};

static const struct attribute_group nd_mapping_attribute_group = {
	.is_visible = mapping_visible,
	.attrs = mapping_attributes,
};

static const struct attribute_group nd_region_attribute_group = {
	.attrs = nd_region_attributes,
	.is_visible = region_visible,
};

static const struct attribute_group *nd_region_attribute_groups[] = {
	&nd_device_attribute_group,
	&nd_region_attribute_group,
	&nd_numa_attribute_group,
	&nd_mapping_attribute_group,
	NULL,
};

static const struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.release = nd_region_release,
	.groups = nd_region_attribute_groups,
};

static const struct device_type nd_volatile_device_type = {
	.name = "nd_volatile",
	.release = nd_region_release,
	.groups = nd_region_attribute_groups,
};

bool is_nd_pmem(const struct device *dev)
{
	return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_volatile(const struct device *dev)
{
	return dev ? dev->type == &nd_volatile_device_type : false;
}

u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
		struct nd_namespace_index *nsindex)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (!nd_set)
		return 0;

	if (nsindex && __le16_to_cpu(nsindex->major) == 1
			&& __le16_to_cpu(nsindex->minor) == 1)
		return nd_set->cookie1;
	return nd_set->cookie2;
}
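
/*
 * cookie1 corresponds to the original v1.1 namespace-label interleave-set
 * cookie definition; cookie2 is the revised definition used by later label
 * formats.  When no labels are present, set_cookie_show() above falls back
 * to cookie1.
 */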

u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->altcookie;
	return 0;
}

void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
{
	struct nd_label_ent *label_ent, *e;

	lockdep_assert_held(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		list_del(&label_ent->list);
		kfree(label_ent);
	}
}

/*
 * When a namespace is activated create new seeds for the next
 * namespace, or namespace-personality to be configured.
 */
void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev)
{
	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed == dev) {
		nd_region_create_ns_seed(nd_region);
	} else if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		if (nd_region->btt_seed == dev)
			nd_region_create_btt_seed(nd_region);
		if (nd_region->ns_seed == &nd_btt->ndns->dev)
			nd_region_create_ns_seed(nd_region);
	} else if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);

		if (nd_region->pfn_seed == dev)
			nd_region_create_pfn_seed(nd_region);
		if (nd_region->ns_seed == &nd_pfn->ndns->dev)
			nd_region_create_ns_seed(nd_region);
	} else if (is_nd_dax(dev)) {
		struct nd_dax *nd_dax = to_nd_dax(dev);

		if (nd_region->dax_seed == dev)
			nd_region_create_dax_seed(nd_region);
		if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
			nd_region_create_ns_seed(nd_region);
	}
	nvdimm_bus_unlock(dev);
}

/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu.  For larger systems we need to lock to share lanes.  For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively.  We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
	unsigned int cpu, lane;

	migrate_disable();
	cpu = smp_processor_id();
	if (nd_region->num_lanes < nr_cpu_ids) {
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		lane = cpu % nd_region->num_lanes;
		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (ndl_count->count++ == 0)
			spin_lock(&ndl_lock->lock);
	} else
		lane = cpu;

	return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);
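
/*
 * Typical calling pattern (a sketch; the real callers live in btt.c):
 *
 *	lane = nd_region_acquire_lane(nd_region);
 *	... perform per-lane serialized I/O or a BTT log update ...
 *	nd_region_release_lane(nd_region, lane);
 *
 * Migration is disabled between acquire and release so the CPU-derived
 * lane index stays stable across the critical section.
 */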

void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
	if (nd_region->num_lanes < nr_cpu_ids) {
		unsigned int cpu = smp_processor_id();
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (--ndl_count->count == 0)
			spin_unlock(&ndl_lock->lock);
	}
	migrate_enable();
}
EXPORT_SYMBOL(nd_region_release_lane);

/*
 * PowerPC requires this alignment for memremap_pages(). All other archs
 * should be ok with SUBSECTION_SIZE (see memremap_compat_align()).
 */
#define MEMREMAP_COMPAT_ALIGN_MAX SZ_16M

static unsigned long default_align(struct nd_region *nd_region)
{
	unsigned long align;
	u32 remainder;
	int mappings;

	align = MEMREMAP_COMPAT_ALIGN_MAX;
	if (nd_region->ndr_size < MEMREMAP_COMPAT_ALIGN_MAX)
		align = PAGE_SIZE;

	mappings = max_t(u16, 1, nd_region->ndr_mappings);
	div_u64_rem(align, mappings, &remainder);
	if (remainder)
		align *= mappings;

	return align;
}
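
/*
 * For example (hypothetical geometries): a 2-way interleaved region
 * keeps the 16MiB default because 16MiB divides evenly across the
 * mappings, while a 3-way region is bumped to 48MiB so each DIMM
 * contributes a whole number of bytes per alignment unit.
 */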
9972522afb8SDan Williams 
9984a0079bcSDan Williams static struct lock_class_key nvdimm_region_key;
9994a0079bcSDan Williams 
nd_region_create(struct nvdimm_bus * nvdimm_bus,struct nd_region_desc * ndr_desc,const struct device_type * dev_type,const char * caller)10001f7df6f8SDan Williams static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
1001adbb6829SDan Williams 		struct nd_region_desc *ndr_desc,
1002adbb6829SDan Williams 		const struct device_type *dev_type, const char *caller)
10031f7df6f8SDan Williams {
10041f7df6f8SDan Williams 	struct nd_region *nd_region;
10051f7df6f8SDan Williams 	struct device *dev;
10065212e11fSVishal Verma 	unsigned int i;
100758138820SDan Williams 	int ro = 0;
10081f7df6f8SDan Williams 
10091f7df6f8SDan Williams 	for (i = 0; i < ndr_desc->num_mappings; i++) {
101044c462ebSDan Williams 		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
101144c462ebSDan Williams 		struct nvdimm *nvdimm = mapping->nvdimm;
10121f7df6f8SDan Williams 
10135b26db95SAneesh Kumar K.V 		if ((mapping->start | mapping->size) % PAGE_SIZE) {
10145b26db95SAneesh Kumar K.V 			dev_err(&nvdimm_bus->dev,
10155b26db95SAneesh Kumar K.V 				"%s: %s mapping%d is not %ld aligned\n",
10165b26db95SAneesh Kumar K.V 				caller, dev_name(&nvdimm->dev), i, PAGE_SIZE);
10171f7df6f8SDan Williams 			return NULL;
10181f7df6f8SDan Williams 		}
101958138820SDan Williams 
10208f078b38SDan Williams 		if (test_bit(NDD_UNARMED, &nvdimm->flags))
102158138820SDan Williams 			ro = 1;
1022d5d30d5aSDan Williams 
10231f7df6f8SDan Williams 	}
10241f7df6f8SDan Williams 
10253b6c6c03SDan Williams 	nd_region =
10263b6c6c03SDan Williams 		kzalloc(struct_size(nd_region, mapping, ndr_desc->num_mappings),
10271f7df6f8SDan Williams 			GFP_KERNEL);
1028047fc8a1SRoss Zwisler 
10293b6c6c03SDan Williams 	if (!nd_region)
10301f7df6f8SDan Williams 		return NULL;
103104ad63f0SDan Williams 	/* CXL pre-assigns memregion ids before creating nvdimm regions */
103204ad63f0SDan Williams 	if (test_bit(ND_REGION_CXL, &ndr_desc->flags)) {
103304ad63f0SDan Williams 		nd_region->id = ndr_desc->memregion;
103404ad63f0SDan Williams 	} else {
103533dd7075SDan Williams 		nd_region->id = memregion_alloc(GFP_KERNEL);
10365212e11fSVishal Verma 		if (nd_region->id < 0)
10375212e11fSVishal Verma 			goto err_id;
103804ad63f0SDan Williams 	}
10395212e11fSVishal Verma 
10405212e11fSVishal Verma 	nd_region->lane = alloc_percpu(struct nd_percpu_lane);
10415212e11fSVishal Verma 	if (!nd_region->lane)
10425212e11fSVishal Verma 		goto err_percpu;
10435212e11fSVishal Verma 
10445212e11fSVishal Verma         for (i = 0; i < nr_cpu_ids; i++) {
10455212e11fSVishal Verma 		struct nd_percpu_lane *ndl;
10465212e11fSVishal Verma 
10475212e11fSVishal Verma 		ndl = per_cpu_ptr(nd_region->lane, i);
10485212e11fSVishal Verma 		spin_lock_init(&ndl->lock);
10495212e11fSVishal Verma 		ndl->count = 0;
10501f7df6f8SDan Williams 	}
10511f7df6f8SDan Williams 
10521f7df6f8SDan Williams 	for (i = 0; i < ndr_desc->num_mappings; i++) {
105344c462ebSDan Williams 		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
105444c462ebSDan Williams 		struct nvdimm *nvdimm = mapping->nvdimm;
105544c462ebSDan Williams 
105644c462ebSDan Williams 		nd_region->mapping[i].nvdimm = nvdimm;
105744c462ebSDan Williams 		nd_region->mapping[i].start = mapping->start;
105844c462ebSDan Williams 		nd_region->mapping[i].size = mapping->size;
1059401c0a19SDan Williams 		nd_region->mapping[i].position = mapping->position;
1060ae8219f1SDan Williams 		INIT_LIST_HEAD(&nd_region->mapping[i].labels);
1061ae8219f1SDan Williams 		mutex_init(&nd_region->mapping[i].lock);
10621f7df6f8SDan Williams 
10631f7df6f8SDan Williams 		get_device(&nvdimm->dev);
10641f7df6f8SDan Williams 	}
10651f7df6f8SDan Williams 	nd_region->ndr_mappings = ndr_desc->num_mappings;
10661f7df6f8SDan Williams 	nd_region->provider_data = ndr_desc->provider_data;
1067eaf96153SDan Williams 	nd_region->nd_set = ndr_desc->nd_set;
10685212e11fSVishal Verma 	nd_region->num_lanes = ndr_desc->num_lanes;
1069004f1afbSDan Williams 	nd_region->flags = ndr_desc->flags;
107058138820SDan Williams 	nd_region->ro = ro;
107141d7a6d6SToshi Kani 	nd_region->numa_node = ndr_desc->numa_node;
10728fc5c735SDan Williams 	nd_region->target_node = ndr_desc->target_node;
10731b40e09aSDan Williams 	ida_init(&nd_region->ns_ida);
10748c2f7e86SDan Williams 	ida_init(&nd_region->btt_ida);
1075e1455744SDan Williams 	ida_init(&nd_region->pfn_ida);
1076cd03412aSDan Williams 	ida_init(&nd_region->dax_ida);
10771f7df6f8SDan Williams 	dev = &nd_region->dev;
10781f7df6f8SDan Williams 	dev_set_name(dev, "region%d", nd_region->id);
10791f7df6f8SDan Williams 	dev->parent = &nvdimm_bus->dev;
10801f7df6f8SDan Williams 	dev->type = dev_type;
10811f7df6f8SDan Williams 	dev->groups = ndr_desc->attr_groups;
10821ff19f48SOliver O'Halloran 	dev->of_node = ndr_desc->of_node;
10831f7df6f8SDan Williams 	nd_region->ndr_size = resource_size(ndr_desc->res);
10841f7df6f8SDan Williams 	nd_region->ndr_start = ndr_desc->res->start;
10852522afb8SDan Williams 	nd_region->align = default_align(nd_region);
1086c5d4355dSPankaj Gupta 	/* kzalloc() zeroed ->flush, so it remains NULL unless the descriptor set one */
1087c5d4355dSPankaj Gupta 	nd_region->flush = ndr_desc->flush;
1090c5d4355dSPankaj Gupta 
10914a0079bcSDan Williams 	device_initialize(dev);
10924a0079bcSDan Williams 	lockdep_set_class(&dev->mutex, &nvdimm_region_key);
10931f7df6f8SDan Williams 	nd_device_register(dev);
10941f7df6f8SDan Williams 
10951f7df6f8SDan Williams 	return nd_region;
10965212e11fSVishal Verma 
10975212e11fSVishal Verma err_percpu:
109804ad63f0SDan Williams 	if (!test_bit(ND_REGION_CXL, &ndr_desc->flags))
109933dd7075SDan Williams 		memregion_free(nd_region->id);
11005212e11fSVishal Verma err_id:
11013b6c6c03SDan Williams 	kfree(nd_region);
11025212e11fSVishal Verma 	return NULL;
11031f7df6f8SDan Williams }
11041f7df6f8SDan Williams 
nvdimm_pmem_region_create(struct nvdimm_bus * nvdimm_bus,struct nd_region_desc * ndr_desc)11051f7df6f8SDan Williams struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
11061f7df6f8SDan Williams 		struct nd_region_desc *ndr_desc)
11071f7df6f8SDan Williams {
11085212e11fSVishal Verma 	ndr_desc->num_lanes = ND_MAX_LANES;
11091f7df6f8SDan Williams 	return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
11101f7df6f8SDan Williams 			__func__);
11111f7df6f8SDan Williams }
11121f7df6f8SDan Williams EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);
11131f7df6f8SDan Williams 
nvdimm_volatile_region_create(struct nvdimm_bus * nvdimm_bus,struct nd_region_desc * ndr_desc)11141f7df6f8SDan Williams struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
11151f7df6f8SDan Williams 		struct nd_region_desc *ndr_desc)
11161f7df6f8SDan Williams {
11175212e11fSVishal Verma 	ndr_desc->num_lanes = ND_MAX_LANES;
11181f7df6f8SDan Williams 	return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
11191f7df6f8SDan Williams 			__func__);
11201f7df6f8SDan Williams }
11211f7df6f8SDan Williams EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);
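
/*
 * A minimal sketch of the caller side: a bus provider fills out an
 * nd_region_desc and hands it to one of the helpers above. The single,
 * non-interleaved mapping and all names here are illustrative only.
 *
 *	struct nd_mapping_desc mapping = {
 *		.nvdimm = nvdimm,
 *		.start = res->start,		(PAGE_SIZE aligned)
 *		.size = resource_size(res),	(PAGE_SIZE aligned)
 *		.position = 0,
 *	};
 *	struct nd_region_desc ndr_desc = {
 *		.res = res,
 *		.mapping = &mapping,
 *		.num_mappings = 1,
 *		.numa_node = NUMA_NO_NODE,
 *		.target_node = NUMA_NO_NODE,
 *	};
 *
 *	nd_region = nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc);
 *	if (!nd_region)
 *		return -ENXIO;
 */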
1122b354aba0SDan Williams 
nvdimm_region_delete(struct nd_region * nd_region)112304ad63f0SDan Williams void nvdimm_region_delete(struct nd_region *nd_region)
112404ad63f0SDan Williams {
112504ad63f0SDan Williams 	if (nd_region)
112604ad63f0SDan Williams 		nd_device_unregister(&nd_region->dev, ND_SYNC);
112704ad63f0SDan Williams }
112804ad63f0SDan Williams EXPORT_SYMBOL_GPL(nvdimm_region_delete);
112904ad63f0SDan Williams 
nvdimm_flush(struct nd_region * nd_region,struct bio * bio)1130c5d4355dSPankaj Gupta int nvdimm_flush(struct nd_region *nd_region, struct bio *bio)
1131c5d4355dSPankaj Gupta {
1132c5d4355dSPankaj Gupta 	int rc = 0;
1133c5d4355dSPankaj Gupta 
1134c5d4355dSPankaj Gupta 	if (!nd_region->flush)
1135c5d4355dSPankaj Gupta 		rc = generic_nvdimm_flush(nd_region);
1136c5d4355dSPankaj Gupta 	else {
1137c5d4355dSPankaj Gupta 		if (nd_region->flush(nd_region, bio))
1138c5d4355dSPankaj Gupta 			rc = -EIO;
1139c5d4355dSPankaj Gupta 	}
1140c5d4355dSPankaj Gupta 
1141c5d4355dSPankaj Gupta 	return rc;
1142c5d4355dSPankaj Gupta }
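
/*
 * Sketch of how a block-device consumer drives nvdimm_flush(), modeled
 * on the REQ_PREFLUSH / REQ_FUA handling in drivers/nvdimm/pmem.c:
 *
 *	if (bio->bi_opf & REQ_PREFLUSH)
 *		ret = nvdimm_flush(nd_region, bio);
 *
 *	(transfer bio data to / from pmem)
 *
 *	if (bio->bi_opf & REQ_FUA)
 *		ret = nvdimm_flush(nd_region, bio);
 *
 *	if (ret)
 *		bio->bi_status = BLK_STS_IOERR;
 *	bio_endio(bio);
 */
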
1143f284a4f2SDan Williams /**
1144a84b280fSJiapeng Chong  * generic_nvdimm_flush() - flush any posted write queues between the cpu and pmem media
11453b6c6c03SDan Williams  * @nd_region: interleaved pmem region
1146f284a4f2SDan Williams  */
generic_nvdimm_flush(struct nd_region * nd_region)1147c5d4355dSPankaj Gupta int generic_nvdimm_flush(struct nd_region *nd_region)
1148f284a4f2SDan Williams {
1149f284a4f2SDan Williams 	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
11500c27af60SDan Williams 	int i, idx;
11510c27af60SDan Williams 
11520c27af60SDan Williams 	/*
11530c27af60SDan Williams 	 * Try to encourage some diversity in flush hint addresses
11540c27af60SDan Williams 	 * across cpus assuming a limited number of flush hints.
11550c27af60SDan Williams 	 */
11560c27af60SDan Williams 	idx = this_cpu_read(flush_idx);
11570c27af60SDan Williams 	idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));
1158f284a4f2SDan Williams 
1159f284a4f2SDan Williams 	/*
11603e79f082SAneesh Kumar K.V 	 * The pmem_wmb() is needed to 'sfence' all previous writes such
11613e79f082SAneesh Kumar K.V 	 * that they are architecturally visible for the platform buffer
11623e79f082SAneesh Kumar K.V 	 * flush. Note that we've already arranged for pmem writes to
11630aed55afSDan Williams 	 * avoid the cache via memcpy_flushcache().  The final wmb()
11640aed55afSDan Williams 	 * ensures ordering for the NVDIMM flush write.
1165f284a4f2SDan Williams 	 */
11663e79f082SAneesh Kumar K.V 	pmem_wmb();
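	/*
	 * Note that idx is deliberately not bounded here;
	 * ndrd_get_flush_wpq() masks it down to the number of hints
	 * mapped for each dimm.
	 */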
1167f284a4f2SDan Williams 	for (i = 0; i < nd_region->ndr_mappings; i++)
1168595c7307SDan Williams 		if (ndrd_get_flush_wpq(ndrd, i, 0))
1169595c7307SDan Williams 			writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
1170f284a4f2SDan Williams 	wmb();
1171c5d4355dSPankaj Gupta 
1172c5d4355dSPankaj Gupta 	return 0;
1173f284a4f2SDan Williams }
1174f284a4f2SDan Williams EXPORT_SYMBOL_GPL(nvdimm_flush);
1175f284a4f2SDan Williams 
1176f284a4f2SDan Williams /**
1177f284a4f2SDan Williams  * nvdimm_has_flush - determine write flushing requirements
11783b6c6c03SDan Williams  * @nd_region: interleaved pmem region
1179f284a4f2SDan Williams  *
1180f284a4f2SDan Williams  * Returns 1 if writes require flushing
1181f284a4f2SDan Williams  * Returns 0 if writes do not require flushing
1182f284a4f2SDan Williams  * Returns -ENXIO if flushing capability cannot be determined
1183f284a4f2SDan Williams  */
nvdimm_has_flush(struct nd_region * nd_region)1184f284a4f2SDan Williams int nvdimm_has_flush(struct nd_region *nd_region)
1185f284a4f2SDan Williams {
1186f284a4f2SDan Williams 	int i;
1187f284a4f2SDan Williams 
1188c00b396eSDan Williams 	/* no nvdimm or pmem api == flushing capability unknown */
1189c00b396eSDan Williams 	if (nd_region->ndr_mappings == 0
1190c00b396eSDan Williams 			|| !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
1191f284a4f2SDan Williams 		return -ENXIO;
1192f284a4f2SDan Williams 
1193a2948b17SVaibhav Jain 	/* Test if an explicit flush function is defined */
1194a2948b17SVaibhav Jain 	if (test_bit(ND_REGION_ASYNC, &nd_region->flags) && nd_region->flush)
1195a2948b17SVaibhav Jain 		return 1;
1196a2948b17SVaibhav Jain 
1197a2948b17SVaibhav Jain 	/* Test if any flush hints for the region are available */
1198bc042fdfSDan Williams 	for (i = 0; i < nd_region->ndr_mappings; i++) {
1199bc042fdfSDan Williams 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1200bc042fdfSDan Williams 		struct nvdimm *nvdimm = nd_mapping->nvdimm;
1201bc042fdfSDan Williams 
1202bc042fdfSDan Williams 		/* flush hints present / available */
1203bc042fdfSDan Williams 		if (nvdimm->num_flush)
1204f284a4f2SDan Williams 			return 1;
1205bc042fdfSDan Williams 	}
1206f284a4f2SDan Williams 
1207f284a4f2SDan Williams 	/*
1208a2948b17SVaibhav Jain 	 * The platform defines dimm devices without hints or an explicit
1209a2948b17SVaibhav Jain 	 * flush, so assume a platform persistence mechanism like ADR.
1210f284a4f2SDan Williams 	 */
1211f284a4f2SDan Williams 	return 0;
1212f284a4f2SDan Williams }
1213f284a4f2SDan Williams EXPORT_SYMBOL_GPL(nvdimm_has_flush);
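
/*
 * Acting on the tri-state result, modeled on the setup code in
 * drivers/nvdimm/pmem.c:
 *
 *	fua = nvdimm_has_flush(nd_region);
 *	if (fua < 0) {
 *		dev_warn(dev, "unable to guarantee persistence of writes\n");
 *		fua = 0;
 *	}
 *	(then advertise write-cache / FUA support to the block layer)
 */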
1214f284a4f2SDan Williams 
nvdimm_has_cache(struct nd_region * nd_region)12150b277961SDan Williams int nvdimm_has_cache(struct nd_region *nd_region)
12160b277961SDan Williams {
1217546eb031SRoss Zwisler 	return is_nd_pmem(&nd_region->dev) &&
1218546eb031SRoss Zwisler 		!test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags);
12190b277961SDan Williams }
12200b277961SDan Williams EXPORT_SYMBOL_GPL(nvdimm_has_cache);
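
/*
 * For example, drivers/nvdimm/pmem.c keys dax write-cache maintenance
 * off of this helper:
 *
 *	dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
 */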
12210b277961SDan Williams 
is_nvdimm_sync(struct nd_region * nd_region)1222fefc1d97SPankaj Gupta bool is_nvdimm_sync(struct nd_region *nd_region)
1223fefc1d97SPankaj Gupta {
12244c806b89SAneesh Kumar K.V 	if (is_nd_volatile(&nd_region->dev))
12254c806b89SAneesh Kumar K.V 		return true;
12264c806b89SAneesh Kumar K.V 
1227fefc1d97SPankaj Gupta 	return is_nd_pmem(&nd_region->dev) &&
1228fefc1d97SPankaj Gupta 		!test_bit(ND_REGION_ASYNC, &nd_region->flags);
1229fefc1d97SPankaj Gupta }
1230fefc1d97SPankaj Gupta EXPORT_SYMBOL_GPL(is_nvdimm_sync);
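
/*
 * Consumers use this to decide whether a dax device may be marked
 * synchronous, i.e. safe for MAP_SYNC mappings, as in
 * drivers/nvdimm/pmem.c:
 *
 *	if (is_nvdimm_sync(nd_region))
 *		set_dax_synchronous(dax_dev);
 */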
1231fefc1d97SPankaj Gupta 
1232ae86cbfeSDan Williams struct conflict_context {
1233ae86cbfeSDan Williams 	struct nd_region *nd_region;
1234ae86cbfeSDan Williams 	resource_size_t start, size;
1235ae86cbfeSDan Williams };
1236ae86cbfeSDan Williams 
region_conflict(struct device * dev,void * data)1237ae86cbfeSDan Williams static int region_conflict(struct device *dev, void *data)
1238ae86cbfeSDan Williams {
1239ae86cbfeSDan Williams 	struct nd_region *nd_region;
1240ae86cbfeSDan Williams 	struct conflict_context *ctx = data;
1241ae86cbfeSDan Williams 	resource_size_t res_end, region_end, region_start;
1242ae86cbfeSDan Williams 
1243ae86cbfeSDan Williams 	if (!is_memory(dev))
1244ae86cbfeSDan Williams 		return 0;
1245ae86cbfeSDan Williams 
1246ae86cbfeSDan Williams 	nd_region = to_nd_region(dev);
1247ae86cbfeSDan Williams 	if (nd_region == ctx->nd_region)
1248ae86cbfeSDan Williams 		return 0;
1249ae86cbfeSDan Williams 
1250ae86cbfeSDan Williams 	res_end = ctx->start + ctx->size;
1251ae86cbfeSDan Williams 	region_start = nd_region->ndr_start;
1252ae86cbfeSDan Williams 	region_end = region_start + nd_region->ndr_size;
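	/*
	 * Conflict if either end point of the candidate range lands
	 * inside this sibling region.
	 */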
1253ae86cbfeSDan Williams 	if (ctx->start >= region_start && ctx->start < region_end)
1254ae86cbfeSDan Williams 		return -EBUSY;
1255ae86cbfeSDan Williams 	if (res_end > region_start && res_end <= region_end)
1256ae86cbfeSDan Williams 		return -EBUSY;
1257ae86cbfeSDan Williams 	return 0;
1258ae86cbfeSDan Williams }
1259ae86cbfeSDan Williams 
nd_region_conflict(struct nd_region * nd_region,resource_size_t start,resource_size_t size)1260ae86cbfeSDan Williams int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
1261ae86cbfeSDan Williams 		resource_size_t size)
1262ae86cbfeSDan Williams {
1263ae86cbfeSDan Williams 	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
1264ae86cbfeSDan Williams 	struct conflict_context ctx = {
1265ae86cbfeSDan Williams 		.nd_region = nd_region,
1266ae86cbfeSDan Williams 		.start = start,
1267ae86cbfeSDan Williams 		.size = size,
1268ae86cbfeSDan Williams 	};
1269ae86cbfeSDan Williams 
1270ae86cbfeSDan Williams 	return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
1271ae86cbfeSDan Williams }
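
/*
 * A minimal usage sketch (names illustrative): validate a candidate
 * range against all sibling regions on the bus before claiming it.
 *
 *	if (nd_region_conflict(nd_region, start, size))
 *		return -EBUSY;
 */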
1272dc370b28SDan Williams 
1273dc370b28SDan Williams MODULE_IMPORT_NS(DEVMEM);
1274