/* xref: /openbmc/linux/drivers/nvdimm/region_devs.c (revision cd03412a) */
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

static DEFINE_IDA(region_ida);

static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	ida_simple_remove(&region_ida, nd_region->id);
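	/*
	 * A BLK region is embedded in a struct nd_blk_region, so free the
	 * containing object rather than the bare nd_region.
	 */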
	if (is_nd_blk(dev))
		kfree(to_nd_blk_region(dev));
	else
		kfree(nd_region);
}

static struct device_type nd_blk_device_type = {
	.name = "nd_blk",
	.release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
	.name = "nd_volatile",
	.release = nd_region_release,
};

bool is_nd_pmem(struct device *dev)
{
	return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
	return dev ? dev->type == &nd_blk_device_type : false;
}

struct nd_region *to_nd_region(struct device *dev)
{
	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

	WARN_ON(dev->type->release != nd_region_release);
	return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	WARN_ON(!is_nd_blk(dev));
	return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);

void *nd_region_provider_data(struct nd_region *nd_region)
{
	return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
	return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
	ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);

/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is the 'nstype' attribute of a region as well as an input to the
 * MODALIAS for namespace devices, and the bit number for an nvdimm_bus to
 * match namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
	if (is_nd_pmem(&nd_region->dev)) {
		u16 i, alias;

		for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			if (nvdimm->flags & NDD_ALIASING)
				alias++;
		}
		if (alias)
			return ND_DEVICE_NAMESPACE_PMEM;
		else
			return ND_DEVICE_NAMESPACE_IO;
	} else if (is_nd_blk(&nd_region->dev)) {
		return ND_DEVICE_NAMESPACE_BLK;
	}

	return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long size = 0;

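	/*
	 * A PMEM region reports the size of the entire interleave set,
	 * while a BLK region reports the size of its single dimm mapping.
	 */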
	if (is_nd_pmem(dev)) {
		size = nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		size = nd_mapping->size;
	}

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (is_nd_pmem(dev) && nd_set)
		/* pass, should be precluded by region_visible */;
	else
		return -ENXIO;

	return sprintf(buf, "%#llx\n", nd_set->cookie);
}
static DEVICE_ATTR_RO(set_cookie);

resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
	resource_size_t blk_max_overlap = 0, available, overlap;
	int i;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

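	/*
	 * Sum the available capacity of each mapping.  If a PMEM mapping
	 * reports a larger BLK overlap than previously seen, restart the
	 * accounting so every mapping is charged against the worst-case
	 * overlap.
	 */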
 retry:
	available = 0;
	overlap = blk_max_overlap;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		/* if a dimm is disabled the available capacity is zero */
		if (!ndd)
			return 0;

		if (is_nd_pmem(&nd_region->dev)) {
			available += nd_pmem_available_dpa(nd_region,
					nd_mapping, &overlap);
			if (overlap > blk_max_overlap) {
				blk_max_overlap = overlap;
				goto retry;
			}
		} else if (is_nd_blk(&nd_region->dev)) {
			available += nd_blk_available_dpa(nd_mapping);
		}
	}

	return available;
}

static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size.  Of course, this value is potentially invalidated the
	 * moment the nvdimm_bus_lock() is dropped, but that's userspace's
	 * problem to not race itself.
	 */
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region_namespaces *num_ns = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (num_ns)
		rc = sprintf(buf, "%d/%d\n", num_ns->active, num_ns->count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->btt_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->pfn_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->dax_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(dax_seed);

static ssize_t read_only_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool ro;
	int rc = strtobool(buf, &ro);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;

	nd_region->ro = ro;
	return len;
}
static DEVICE_ATTR_RW(read_only);

static struct attribute *nd_region_attributes[] = {
	&dev_attr_size.attr,
	&dev_attr_nstype.attr,
	&dev_attr_mappings.attr,
	&dev_attr_btt_seed.attr,
	&dev_attr_pfn_seed.attr,
	&dev_attr_dax_seed.attr,
	&dev_attr_read_only.attr,
	&dev_attr_set_cookie.attr,
	&dev_attr_available_size.attr,
	&dev_attr_namespace_seed.attr,
	&dev_attr_init_namespaces.attr,
	NULL,
};

static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	int type = nd_region_to_nstype(nd_region);

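	/*
	 * pfn_seed and dax_seed only apply to PMEM regions; set_cookie and
	 * available_size are further restricted below based on namespace
	 * type and the presence of an interleave set.
	 */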
	if (!is_nd_pmem(dev) && a == &dev_attr_pfn_seed.attr)
		return 0;

	if (!is_nd_pmem(dev) && a == &dev_attr_dax_seed.attr)
		return 0;

	if (a != &dev_attr_set_cookie.attr
			&& a != &dev_attr_available_size.attr)
		return a->mode;

	if ((type == ND_DEVICE_NAMESPACE_PMEM
				|| type == ND_DEVICE_NAMESPACE_BLK)
			&& a == &dev_attr_available_size.attr)
		return a->mode;
	else if (is_nd_pmem(dev) && nd_set)
		return a->mode;

	return 0;
}

struct attribute_group nd_region_attribute_group = {
	.attrs = nd_region_attributes,
	.is_visible = region_visible,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);

u64 nd_region_interleave_set_cookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->cookie;
	return 0;
}

/*
 * Upon successful probe/remove, take/release a reference on the
 * associated interleave set (if present), and plant new btt + namespace
 * seeds.  Also, on the removal of a BLK region, notify the provider to
 * disable the region.
 */
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
		struct device *dev, bool probe)
{
	struct nd_region *nd_region;

	if (!probe && (is_nd_pmem(dev) || is_nd_blk(dev))) {
		int i;

		nd_region = to_nd_region(dev);
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm_drvdata *ndd = nd_mapping->ndd;
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			kfree(nd_mapping->labels);
			nd_mapping->labels = NULL;
			put_ndd(ndd);
			nd_mapping->ndd = NULL;
			if (ndd)
				atomic_dec(&nvdimm->busy);
		}

		if (is_nd_pmem(dev))
			return;

		to_nd_blk_region(dev)->disable(nvdimm_bus, dev);
	}
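	/*
	 * A seed device that successfully probes has been consumed by a
	 * configuration; plant a fresh seed of the same type so the region
	 * always advertises one.
	 */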
	if (dev->parent && is_nd_blk(dev->parent) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->ns_seed == dev)
			nd_region_create_blk_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_btt(dev) && probe) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->btt_seed == dev)
			nd_region_create_btt_seed(nd_region);
		if (nd_region->ns_seed == &nd_btt->ndns->dev &&
				is_nd_blk(dev->parent))
			nd_region_create_blk_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_pfn(dev) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->pfn_seed == dev)
			nd_region_create_pfn_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_dax(dev) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->dax_seed == dev)
			nd_region_create_dax_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
}

void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, true);
}

void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, false);
}

static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

	return sprintf(buf, "%s,%llu,%llu\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size);
}
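/*
 * Example mapping0 sysfs output, "<dimm>,<dpa-start>,<size>" (values are
 * illustrative): "nmem0,268435456,268435456"
 */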

#define REGION_MAPPING(idx) \
static ssize_t mapping##idx##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	return mappingN(dev, buf, idx);				\
}								\
static DEVICE_ATTR_RO(mapping##idx)

/*
 * 32 should be enough for a while; even in the presence of socket
 * interleave, a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);

static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;
	return 0;
}

static struct attribute *mapping_attributes[] = {
	&dev_attr_mapping0.attr,
	&dev_attr_mapping1.attr,
	&dev_attr_mapping2.attr,
	&dev_attr_mapping3.attr,
	&dev_attr_mapping4.attr,
	&dev_attr_mapping5.attr,
	&dev_attr_mapping6.attr,
	&dev_attr_mapping7.attr,
	&dev_attr_mapping8.attr,
	&dev_attr_mapping9.attr,
	&dev_attr_mapping10.attr,
	&dev_attr_mapping11.attr,
	&dev_attr_mapping12.attr,
	&dev_attr_mapping13.attr,
	&dev_attr_mapping14.attr,
	&dev_attr_mapping15.attr,
	&dev_attr_mapping16.attr,
	&dev_attr_mapping17.attr,
	&dev_attr_mapping18.attr,
	&dev_attr_mapping19.attr,
	&dev_attr_mapping20.attr,
	&dev_attr_mapping21.attr,
	&dev_attr_mapping22.attr,
	&dev_attr_mapping23.attr,
	&dev_attr_mapping24.attr,
	&dev_attr_mapping25.attr,
	&dev_attr_mapping26.attr,
	&dev_attr_mapping27.attr,
	&dev_attr_mapping28.attr,
	&dev_attr_mapping29.attr,
	&dev_attr_mapping30.attr,
	&dev_attr_mapping31.attr,
	NULL,
};

struct attribute_group nd_mapping_attribute_group = {
	.is_visible = mapping_visible,
	.attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);

int nd_blk_region_init(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!is_nd_blk(dev))
		return 0;

	if (nd_region->ndr_mappings < 1) {
		dev_err(dev, "invalid BLK region\n");
		return -ENXIO;
	}

	return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}

/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu.  For larger systems we need to lock to share lanes.  For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively.  We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
	unsigned int cpu, lane;

	cpu = get_cpu();
	if (nd_region->num_lanes < nr_cpu_ids) {
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		lane = cpu % nd_region->num_lanes;
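		/*
		 * Illustrative example: with 256 lanes and cpu 300 the
		 * static mapping yields lane 300 % 256 = 44.  The first
		 * acquirer on this cpu takes the lane's lock; recursive
		 * acquisitions on the same cpu only bump the count.
		 */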
		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (ndl_count->count++ == 0)
			spin_lock(&ndl_lock->lock);
	} else
		lane = cpu;

	return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);

void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
	if (nd_region->num_lanes < nr_cpu_ids) {
		unsigned int cpu = get_cpu();
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (--ndl_count->count == 0)
			spin_unlock(&ndl_lock->lock);
		put_cpu();
	}
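	/* balances the get_cpu() taken in nd_region_acquire_lane() */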
	put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);

static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc, struct device_type *dev_type,
		const char *caller)
{
	struct nd_region *nd_region;
	struct device *dev;
	void *region_buf;
	unsigned int i;
	int ro = 0;

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		if ((nd_mapping->start | nd_mapping->size) % SZ_4K) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
					caller, dev_name(&nvdimm->dev), i);

			return NULL;
		}

		if (nvdimm->flags & NDD_UNARMED)
			ro = 1;
	}

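	/*
	 * A BLK region is allocated as part of a larger nd_blk_region so
	 * the provider's enable/disable/do_io callbacks travel with it;
	 * PMEM and volatile regions only need a bare nd_region.  In both
	 * cases the mapping array is embedded in the same allocation.
	 */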
	if (dev_type == &nd_blk_device_type) {
		struct nd_blk_region_desc *ndbr_desc;
		struct nd_blk_region *ndbr;

		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		if (ndbr) {
			nd_region = &ndbr->nd_region;
			ndbr->enable = ndbr_desc->enable;
			ndbr->disable = ndbr_desc->disable;
			ndbr->do_io = ndbr_desc->do_io;
		}
		region_buf = ndbr;
	} else {
		nd_region = kzalloc(sizeof(struct nd_region)
				+ sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		region_buf = nd_region;
	}

	if (!region_buf)
		return NULL;
	nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
	if (nd_region->id < 0)
		goto err_id;

	nd_region->lane = alloc_percpu(struct nd_percpu_lane);
	if (!nd_region->lane)
		goto err_percpu;

	for (i = 0; i < nr_cpu_ids; i++) {
		struct nd_percpu_lane *ndl;

		ndl = per_cpu_ptr(nd_region->lane, i);
		spin_lock_init(&ndl->lock);
		ndl->count = 0;
	}

	memcpy(nd_region->mapping, ndr_desc->nd_mapping,
			sizeof(struct nd_mapping) * ndr_desc->num_mappings);
	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		get_device(&nvdimm->dev);
	}
	nd_region->ndr_mappings = ndr_desc->num_mappings;
	nd_region->provider_data = ndr_desc->provider_data;
	nd_region->nd_set = ndr_desc->nd_set;
	nd_region->num_lanes = ndr_desc->num_lanes;
	nd_region->flags = ndr_desc->flags;
	nd_region->ro = ro;
	nd_region->numa_node = ndr_desc->numa_node;
	ida_init(&nd_region->ns_ida);
	ida_init(&nd_region->btt_ida);
	ida_init(&nd_region->pfn_ida);
	ida_init(&nd_region->dax_ida);
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	nd_device_register(dev);

	return nd_region;

 err_percpu:
	ida_simple_remove(&region_ida, nd_region->id);
 err_id:
	kfree(region_buf);
	return NULL;
}

struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);

struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	if (ndr_desc->num_mappings > 1)
		return NULL;
	ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);
796