Lines Matching +full:uuid +full:- +full:dev
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
12 #include "nd-core.h"
17 static void namespace_io_release(struct device *dev) in namespace_io_release() argument
19 struct nd_namespace_io *nsio = to_nd_namespace_io(dev); in namespace_io_release()
24 static void namespace_pmem_release(struct device *dev) in namespace_pmem_release() argument
26 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); in namespace_pmem_release()
27 struct nd_region *nd_region = to_nd_region(dev->parent); in namespace_pmem_release()
29 if (nspm->id >= 0) in namespace_pmem_release()
30 ida_simple_remove(&nd_region->ns_ida, nspm->id); in namespace_pmem_release()
31 kfree(nspm->alt_name); in namespace_pmem_release()
32 kfree(nspm->uuid); in namespace_pmem_release()
36 static bool is_namespace_pmem(const struct device *dev);
37 static bool is_namespace_io(const struct device *dev);
39 static int is_uuid_busy(struct device *dev, void *data) in is_uuid_busy() argument
43 if (is_namespace_pmem(dev)) { in is_uuid_busy()
44 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); in is_uuid_busy()
46 uuid2 = nspm->uuid; in is_uuid_busy()
47 } else if (is_nd_btt(dev)) { in is_uuid_busy()
48 struct nd_btt *nd_btt = to_nd_btt(dev); in is_uuid_busy()
50 uuid2 = nd_btt->uuid; in is_uuid_busy()
51 } else if (is_nd_pfn(dev)) { in is_uuid_busy()
52 struct nd_pfn *nd_pfn = to_nd_pfn(dev); in is_uuid_busy()
54 uuid2 = nd_pfn->uuid; in is_uuid_busy()
58 return -EBUSY; in is_uuid_busy()
63 static int is_namespace_uuid_busy(struct device *dev, void *data) in is_namespace_uuid_busy() argument
65 if (is_nd_region(dev)) in is_namespace_uuid_busy()
66 return device_for_each_child(dev, data, is_uuid_busy); in is_namespace_uuid_busy()
71 * nd_is_uuid_unique - verify that no other namespace has @uuid
72 * @dev: any device on an nvdimm_bus
73 * @uuid: uuid to check
75 bool nd_is_uuid_unique(struct device *dev, uuid_t *uuid) in nd_is_uuid_unique() argument
77 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); in nd_is_uuid_unique()
81 WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev)); in nd_is_uuid_unique()
82 if (device_for_each_child(&nvdimm_bus->dev, uuid, in nd_is_uuid_unique()
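
The fragments above show the shape of the uniqueness check: is_namespace_uuid_busy() recurses from the bus into each region, and is_uuid_busy() compares the candidate against every namespace, btt, and pfn uuid it finds. A minimal user-space sketch of that two-level walk (illustrative types, not the kernel API):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

struct node {
	const char *uuid;       /* NULL for nodes without a uuid */
	struct node **children;
	size_t nchildren;
};

static bool uuid_busy(const struct node *bus, const char *uuid)
{
	for (size_t i = 0; i < bus->nchildren; i++) {
		const struct node *region = bus->children[i];

		for (size_t j = 0; j < region->nchildren; j++) {
			const struct node *child = region->children[j];

			if (child->uuid && strcmp(child->uuid, uuid) == 0)
				return true; /* -EBUSY in the listing above */
		}
	}
	return false;
}
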
88 bool pmem_should_map_pages(struct device *dev) in pmem_should_map_pages() argument
90 struct nd_region *nd_region = to_nd_region(dev->parent); in pmem_should_map_pages()
91 struct nd_namespace_common *ndns = to_ndns(dev); in pmem_should_map_pages()
97 if (!test_bit(ND_REGION_PAGEMAP, &nd_region->flags)) in pmem_should_map_pages()
100 if (is_nd_pfn(dev) || is_nd_btt(dev)) in pmem_should_map_pages()
103 if (ndns->force_raw) in pmem_should_map_pages()
106 nsio = to_nd_namespace_io(dev); in pmem_should_map_pages()
107 if (region_intersects(nsio->res.start, resource_size(&nsio->res), in pmem_should_map_pages()
118 if (is_namespace_pmem(&ndns->dev)) { in pmem_sector_size()
121 nspm = to_nd_namespace_pmem(&ndns->dev); in pmem_sector_size()
122 if (nspm->lbasize == 0 || nspm->lbasize == 512) in pmem_sector_size()
124 else if (nspm->lbasize == 4096) in pmem_sector_size()
127 dev_WARN(&ndns->dev, "unsupported sector size: %ld\n", in pmem_sector_size()
128 nspm->lbasize); in pmem_sector_size()
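
The policy in pmem_sector_size() reduces to a three-way mapping: an unset (0) or 512-byte label size selects 512, 4096 selects 4096, and anything else draws a warning. A hedged stand-alone rendering; the fall-through return is elided from the listing above, so the final 512 here is an assumption:

#include <stdio.h>

/* Illustrative stand-in for pmem_sector_size(). */
static unsigned int sector_size_for(unsigned long lbasize)
{
	if (lbasize == 0 || lbasize == 512)
		return 512;
	if (lbasize == 4096)
		return 4096;
	fprintf(stderr, "unsupported sector size: %lu\n", lbasize);
	return 512; /* assumed default, not visible in the listing */
}
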
142 struct nd_region *nd_region = to_nd_region(ndns->dev.parent); in nvdimm_namespace_disk_name()
145 if (ndns->claim && is_nd_btt(ndns->claim)) in nvdimm_namespace_disk_name()
148 if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) { in nvdimm_namespace_disk_name()
151 if (is_namespace_pmem(&ndns->dev)) { in nvdimm_namespace_disk_name()
154 nspm = to_nd_namespace_pmem(&ndns->dev); in nvdimm_namespace_disk_name()
155 nsidx = nspm->id; in nvdimm_namespace_disk_name()
159 sprintf(name, "pmem%d.%d%s", nd_region->id, nsidx, in nvdimm_namespace_disk_name()
162 sprintf(name, "pmem%d%s", nd_region->id, in nvdimm_namespace_disk_name()
172 const uuid_t *nd_dev_to_uuid(struct device *dev) in nd_dev_to_uuid() argument
174 if (dev && is_namespace_pmem(dev)) { in nd_dev_to_uuid()
175 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); in nd_dev_to_uuid()
177 return nspm->uuid; in nd_dev_to_uuid()
183 static ssize_t nstype_show(struct device *dev, in nstype_show() argument
186 struct nd_region *nd_region = to_nd_region(dev->parent); in nstype_show()
192 static ssize_t __alt_name_store(struct device *dev, const char *buf, in __alt_name_store() argument
198 if (is_namespace_pmem(dev)) { in __alt_name_store()
199 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); in __alt_name_store()
201 ns_altname = &nspm->alt_name; in __alt_name_store()
203 return -ENXIO; in __alt_name_store()
205 if (dev->driver || to_ndns(dev)->claim) in __alt_name_store()
206 return -EBUSY; in __alt_name_store()
210 return -ENOMEM; in __alt_name_store()
214 rc = -EINVAL; in __alt_name_store()
220 rc = -ENOMEM; in __alt_name_store()
234 struct device *dev) in nd_namespace_label_update() argument
236 dev_WARN_ONCE(dev, dev->driver || to_ndns(dev)->claim, in nd_namespace_label_update()
238 if (dev->driver || to_ndns(dev)->claim) in nd_namespace_label_update()
245 if (is_namespace_pmem(dev)) { in nd_namespace_label_update()
246 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); in nd_namespace_label_update()
247 resource_size_t size = resource_size(&nspm->nsio.res); in nd_namespace_label_update()
249 if (size == 0 && nspm->uuid) in nd_namespace_label_update()
251 else if (!nspm->uuid) in nd_namespace_label_update()
256 return -ENXIO; in nd_namespace_label_update()
259 static ssize_t alt_name_store(struct device *dev, in alt_name_store() argument
262 struct nd_region *nd_region = to_nd_region(dev->parent); in alt_name_store()
265 device_lock(dev); in alt_name_store()
266 nvdimm_bus_lock(dev); in alt_name_store()
267 wait_nvdimm_bus_probe_idle(dev); in alt_name_store()
268 rc = __alt_name_store(dev, buf, len); in alt_name_store()
270 rc = nd_namespace_label_update(nd_region, dev); in alt_name_store()
271 dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc); in alt_name_store()
272 nvdimm_bus_unlock(dev); in alt_name_store()
273 device_unlock(dev); in alt_name_store()
278 static ssize_t alt_name_show(struct device *dev, in alt_name_show() argument
283 if (is_namespace_pmem(dev)) { in alt_name_show()
284 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); in alt_name_show()
286 ns_altname = nspm->alt_name; in alt_name_show()
288 return -ENXIO; in alt_name_show()
306 if (strcmp(res->name, label_id->id) == 0) in scan_free()
313 n -= resource_size(res); in scan_free()
320 rc = adjust_resource(res, res->start, resource_size(res) - n); in scan_free()
322 res->flags |= DPA_RESOURCE_ADJUSTED; in scan_free()
331 * shrink_dpa_allocation - for each dimm in region free n bytes for label_id
334 * @n: number of bytes per-dimm to release
346 for (i = 0; i < nd_region->ndr_mappings; i++) { in shrink_dpa_allocation()
347 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in shrink_dpa_allocation()
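
scan_free(), called once per mapping by the loop above, trims an allocation by adjusting the resource in place: adjust_resource(res, res->start, resource_size(res) - n) keeps the start fixed and pulls the end in by n bytes. The same arithmetic spelled out with explicit inclusive bounds (illustrative types, not the kernel's struct resource):

#include <assert.h>

struct range { unsigned long long start, end; }; /* end is inclusive */

static void trim_tail(struct range *r, unsigned long long n)
{
	unsigned long long size = r->end - r->start + 1;

	assert(n < size); /* freeing the whole range removes it instead */
	r->end = r->start + (size - n) - 1;
}
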
366 /* first resource allocation for this label-id or dimm */ in init_dpa_allocation()
367 res = nvdimm_allocate_dpa(ndd, label_id, nd_mapping->start, n); in init_dpa_allocation()
369 rc = -EBUSY; in init_dpa_allocation()
377 * space_valid() - validate free dpa space against constraints
387 * BLK-space is valid as long as it does not precede a PMEM
388 * allocation in a given region. PMEM-space must be contiguous
397 bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0; in space_valid()
400 align = nd_region->align / nd_region->ndr_mappings; in space_valid()
401 valid->start = ALIGN(valid->start, align); in space_valid()
402 valid->end = ALIGN_DOWN(valid->end + 1, align) - 1; in space_valid()
404 if (valid->start >= valid->end) in space_valid()
419 if (valid->start == exist->end + 1 in space_valid()
420 || valid->end == exist->start - 1) in space_valid()
425 valid->end = valid->start - 1; in space_valid()
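
space_valid() shrinks a candidate free window inward to the per-mapping alignment: the start rounds up, the end rounds down via ALIGN_DOWN(end + 1) - 1, and a window that collapses (start >= end) is rejected by zeroing it out as on the last line above. Spelled out for power-of-two alignments (a sketch, assuming end + 1 >= align):

#include <stdbool.h>
#include <stdint.h>

#define ALIGN_UP(x, a)      (((x) + (a) - 1) & ~((uint64_t)(a) - 1))
#define ALIGN_DOWN_TO(x, a) ((x) & ~((uint64_t)(a) - 1))

struct window { uint64_t start, end; }; /* end is inclusive */

/* Returns false when the aligned window collapses, mirroring the
 * valid->start >= valid->end test above. */
static bool clamp_window(struct window *w, uint64_t align)
{
	w->start = ALIGN_UP(w->start, align);
	w->end = ALIGN_DOWN_TO(w->end + 1, align) - 1;
	return w->start < w->end;
}
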
436 resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1; in scan_allocate()
443 if (strcmp(label_id->id, res->name) == 0) in scan_allocate()
446 valid.start = nd_mapping->start; in scan_allocate()
452 struct resource *next = res->sibling, *new_res = NULL; in scan_allocate()
459 if (res->start > mapping_end) in scan_allocate()
461 if (res->end < nd_mapping->start) in scan_allocate()
465 if (!first++ && res->start > nd_mapping->start) { in scan_allocate()
466 valid.start = nd_mapping->start; in scan_allocate()
467 valid.end = res->start - 1; in scan_allocate()
477 valid.start = res->start + resource_size(res); in scan_allocate()
478 valid.end = min(mapping_end, next->start - 1); in scan_allocate()
488 valid.start = res->start + resource_size(res); in scan_allocate()
502 if (strcmp(res->name, label_id->id) == 0) { in scan_allocate()
504 rc = adjust_resource(res, res->start - allocate, in scan_allocate()
511 if (strcmp(next->name, label_id->id) == 0) { in scan_allocate()
513 rc = adjust_resource(next, next->start in scan_allocate()
514 - allocate, resource_size(next) in scan_allocate()
518 } else if (strcmp(res->name, label_id->id) == 0) { in scan_allocate()
524 if (strcmp(res->name, label_id->id) == 0) in scan_allocate()
537 rc = -EBUSY; in scan_allocate()
540 rc = adjust_resource(res, res->start, resource_size(res) in scan_allocate()
543 res->flags |= DPA_RESOURCE_ADJUSTED; in scan_allocate()
555 n -= allocate; in scan_allocate()
580 if (strncmp("pmem", label_id->id, 4) == 0) in merge_dpa()
585 struct resource *next = res->sibling; in merge_dpa()
586 resource_size_t end = res->start + resource_size(res); in merge_dpa()
588 if (!next || strcmp(res->name, label_id->id) != 0 in merge_dpa()
589 || strcmp(next->name, label_id->id) != 0 in merge_dpa()
590 || end != next->start) in merge_dpa()
594 rc = adjust_resource(res, res->start, end - res->start); in merge_dpa()
598 res->flags |= DPA_RESOURCE_ADJUSTED; in merge_dpa()
605 int __reserve_free_pmem(struct device *dev, void *data) in __reserve_free_pmem() argument
612 if (!is_memory(dev)) in __reserve_free_pmem()
615 nd_region = to_nd_region(dev); in __reserve_free_pmem()
616 if (nd_region->ndr_mappings == 0) in __reserve_free_pmem()
620 strcat(label_id.id, "pmem-reserve"); in __reserve_free_pmem()
621 for (i = 0; i < nd_region->ndr_mappings; i++) { in __reserve_free_pmem()
622 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in __reserve_free_pmem()
625 if (nd_mapping->nvdimm != nvdimm) in __reserve_free_pmem()
632 dev_WARN_ONCE(&nd_region->dev, rem, in __reserve_free_pmem()
634 (unsigned long long) n - rem, in __reserve_free_pmem()
636 return rem ? -ENXIO : 0; in __reserve_free_pmem()
649 if (strcmp(res->name, "pmem-reserve") == 0) in release_free_pmem()
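
The reservation protocol is string-keyed: __reserve_free_pmem() parks all free DPA under the fixed label id "pmem-reserve", and release_free_pmem() walks the dimm's resources freeing everything whose name matches. A sketch of the release pass (list and free helper are illustrative; note the saved next pointer, for the same reason the kernel iterates with a _safe variant):

#include <string.h>

struct dpa_res {
	const char *name;
	struct dpa_res *next;
};

static void release_reserved(struct dpa_res *head,
			     void (*free_res)(struct dpa_res *))
{
	struct dpa_res *res = head, *next;

	for (; res; res = next) {
		next = res->next; /* res may be freed below */
		if (strcmp(res->name, "pmem-reserve") == 0)
			free_res(res); /* nvdimm_free_dpa() in the kernel */
	}
}
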
654 * grow_dpa_allocation - for each dimm allocate n bytes for @label_id
657 * @n: number of bytes per-dimm to add to the existing allocation
660 * BLK-only available DPA free space, then consume PMEM-aliased DPA
671 for (i = 0; i < nd_region->ndr_mappings; i++) { in grow_dpa_allocation()
672 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in grow_dpa_allocation()
677 dev_WARN_ONCE(&nd_region->dev, rem, in grow_dpa_allocation()
679 (unsigned long long) n - rem, in grow_dpa_allocation()
682 return -ENXIO; in grow_dpa_allocation()
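
grow_dpa_allocation() asks every mapping to absorb the same n bytes; the allocator returns the remainder it could not place, and any shortfall fails the whole operation, as the dev_WARN_ONCE/-ENXIO tail above shows. The shape of that loop, with the alloc callback standing in for the kernel's scan path:

typedef unsigned long long u64;

/* Returns 0 on success, -1 (-ENXIO in the kernel) if any mapping
 * comes up short. alloc() returns the bytes it failed to place. */
static int grow_all(u64 (*alloc)(int mapping, u64 n), int nmappings, u64 n)
{
	for (int i = 0; i < nmappings; i++) {
		u64 rem = alloc(i, n);

		if (rem)
			return -1;
	}
	return 0;
}
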
695 struct resource *res = &nspm->nsio.res; in nd_namespace_pmem_set_resource()
698 if (size && !nspm->uuid) { in nd_namespace_pmem_set_resource()
703 if (size && nspm->uuid) { in nd_namespace_pmem_set_resource()
704 struct nd_mapping *nd_mapping = &nd_region->mapping[0]; in nd_namespace_pmem_set_resource()
714 nd_label_gen_id(&label_id, nspm->uuid, 0); in nd_namespace_pmem_set_resource()
718 if (strcmp(res->name, label_id.id) == 0) { in nd_namespace_pmem_set_resource()
719 offset = (res->start - nd_mapping->start) in nd_namespace_pmem_set_resource()
720 * nd_region->ndr_mappings; in nd_namespace_pmem_set_resource()
729 res->start = nd_region->ndr_start + offset; in nd_namespace_pmem_set_resource()
730 res->end = res->start + size - 1; in nd_namespace_pmem_set_resource()
733 static bool uuid_not_set(const uuid_t *uuid, struct device *dev, in uuid_not_set() argument
736 if (!uuid) { in uuid_not_set()
737 dev_dbg(dev, "%s: uuid not set\n", where); in uuid_not_set()
743 static ssize_t __size_store(struct device *dev, unsigned long long val) in __size_store() argument
746 struct nd_region *nd_region = to_nd_region(dev->parent); in __size_store()
747 struct nd_namespace_common *ndns = to_ndns(dev); in __size_store()
752 int rc, i, id = -1; in __size_store()
753 uuid_t *uuid = NULL; in __size_store() local
755 if (dev->driver || ndns->claim) in __size_store()
756 return -EBUSY; in __size_store()
758 if (is_namespace_pmem(dev)) { in __size_store()
759 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); in __size_store()
761 uuid = nspm->uuid; in __size_store()
762 id = nspm->id; in __size_store()
766 * We need a uuid for the allocation-label and dimm(s) on which in __size_store()
769 if (uuid_not_set(uuid, dev, __func__)) in __size_store()
770 return -ENXIO; in __size_store()
771 if (nd_region->ndr_mappings == 0) { in __size_store()
772 dev_dbg(dev, "not associated with dimm(s)\n"); in __size_store()
773 return -ENXIO; in __size_store()
776 div_u64_rem(val, nd_region->align, &remainder); in __size_store()
778 dev_dbg(dev, "%llu is not %ldK aligned\n", val, in __size_store()
779 nd_region->align / SZ_1K); in __size_store()
780 return -EINVAL; in __size_store()
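
The div_u64_rem() test above rejects any requested size that is not a whole multiple of the region alignment. A worked example with an assumed 2M alignment:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t align = 2ULL << 20;        /* 2M region align, assumed */
	uint64_t val = (4ULL << 20) + 4096; /* 4M + 4K: misaligned */

	if (val % align)
		printf("%llu is not %lluK aligned\n",
		       (unsigned long long)val,
		       (unsigned long long)(align >> 10));
	return 0; /* prints: 4198400 is not 2048K aligned */
}
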
783 nd_label_gen_id(&label_id, uuid, flags); in __size_store()
784 for (i = 0; i < nd_region->ndr_mappings; i++) { in __size_store()
785 nd_mapping = &nd_region->mapping[i]; in __size_store()
793 return -ENXIO; in __size_store()
800 return -ENOSPC; in __size_store()
805 val = div_u64(val, nd_region->ndr_mappings); in __size_store()
806 allocated = div_u64(allocated, nd_region->ndr_mappings); in __size_store()
809 allocated - val); in __size_store()
811 rc = grow_dpa_allocation(nd_region, &label_id, val - allocated); in __size_store()
816 if (is_namespace_pmem(dev)) { in __size_store()
817 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); in __size_store()
820 val * nd_region->ndr_mappings); in __size_store()
829 if (val == 0 && id != 0 && nd_region->ns_seed != dev && !ndns->claim) in __size_store()
830 nd_device_unregister(dev, ND_ASYNC); in __size_store()
835 static ssize_t size_store(struct device *dev, in size_store() argument
838 struct nd_region *nd_region = to_nd_region(dev->parent); in size_store()
846 device_lock(dev); in size_store()
847 nvdimm_bus_lock(dev); in size_store()
848 wait_nvdimm_bus_probe_idle(dev); in size_store()
849 rc = __size_store(dev, val); in size_store()
851 rc = nd_namespace_label_update(nd_region, dev); in size_store()
854 if (rc == 0 && val == 0 && is_namespace_pmem(dev)) { in size_store()
855 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); in size_store()
857 kfree(nspm->uuid); in size_store()
858 nspm->uuid = NULL; in size_store()
861 dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc); in size_store()
863 nvdimm_bus_unlock(dev); in size_store()
864 device_unlock(dev); in size_store()
871 struct device *dev = &ndns->dev; in __nvdimm_namespace_capacity() local
873 if (is_namespace_pmem(dev)) { in __nvdimm_namespace_capacity()
874 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); in __nvdimm_namespace_capacity()
876 return resource_size(&nspm->nsio.res); in __nvdimm_namespace_capacity()
877 } else if (is_namespace_io(dev)) { in __nvdimm_namespace_capacity()
878 struct nd_namespace_io *nsio = to_nd_namespace_io(dev); in __nvdimm_namespace_capacity()
880 return resource_size(&nsio->res); in __nvdimm_namespace_capacity()
890 nvdimm_bus_lock(&ndns->dev); in nvdimm_namespace_capacity()
892 nvdimm_bus_unlock(&ndns->dev); in nvdimm_namespace_capacity()
902 struct device *dev = &ndns->dev; in nvdimm_namespace_locked() local
903 struct nd_region *nd_region = to_nd_region(dev->parent); in nvdimm_namespace_locked()
905 for (i = 0; i < nd_region->ndr_mappings; i++) { in nvdimm_namespace_locked()
906 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in nvdimm_namespace_locked()
907 struct nvdimm *nvdimm = nd_mapping->nvdimm; in nvdimm_namespace_locked()
909 if (test_bit(NDD_LOCKED, &nvdimm->flags)) { in nvdimm_namespace_locked()
910 dev_dbg(dev, "%s locked\n", nvdimm_name(nvdimm)); in nvdimm_namespace_locked()
918 static ssize_t size_show(struct device *dev, in size_show() argument
922 nvdimm_namespace_capacity(to_ndns(dev))); in size_show()
926 static uuid_t *namespace_to_uuid(struct device *dev) in namespace_to_uuid() argument
928 if (is_namespace_pmem(dev)) { in namespace_to_uuid()
929 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); in namespace_to_uuid()
931 return nspm->uuid; in namespace_to_uuid()
933 return ERR_PTR(-ENXIO); in namespace_to_uuid()
936 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr, in uuid_show() argument
939 uuid_t *uuid = namespace_to_uuid(dev); in uuid_show() local
941 if (IS_ERR(uuid)) in uuid_show()
942 return PTR_ERR(uuid); in uuid_show()
943 if (uuid) in uuid_show()
944 return sprintf(buf, "%pUb\n", uuid); in uuid_show()
949 * namespace_update_uuid - check for a unique uuid and whether we're "renaming"
951 * @dev: namespace type for generating label_id
952 * @new_uuid: incoming uuid
953 * @old_uuid: reference to the uuid storage location in the namespace object
956 struct device *dev, uuid_t *new_uuid, in namespace_update_uuid() argument
963 if (!nd_is_uuid_unique(dev, new_uuid)) in namespace_update_uuid()
964 return -EINVAL; in namespace_update_uuid()
970 * If we've already written a label with this uuid, then it's in namespace_update_uuid()
971 * too late to rename because we can't reliably update the uuid in namespace_update_uuid()
973 * namespace to abandon the old uuid. in namespace_update_uuid()
975 for (i = 0; i < nd_region->ndr_mappings; i++) { in namespace_update_uuid()
976 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in namespace_update_uuid()
980 * would be NULL above if this uuid did not exist in the in namespace_update_uuid()
983 * FIXME: can we delete uuid with zero dpa allocated? in namespace_update_uuid()
985 if (list_empty(&nd_mapping->labels)) in namespace_update_uuid()
986 return -EBUSY; in namespace_update_uuid()
991 for (i = 0; i < nd_region->ndr_mappings; i++) { in namespace_update_uuid()
992 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in namespace_update_uuid()
998 if (strcmp(res->name, old_label_id.id) == 0) in namespace_update_uuid()
999 sprintf((void *) res->name, "%s", in namespace_update_uuid()
1002 mutex_lock(&nd_mapping->lock); in namespace_update_uuid()
1003 list_for_each_entry(label_ent, &nd_mapping->labels, list) { in namespace_update_uuid()
1004 struct nd_namespace_label *nd_label = label_ent->label; in namespace_update_uuid()
1006 uuid_t uuid; in namespace_update_uuid() local
1010 nsl_get_uuid(ndd, nd_label, &uuid); in namespace_update_uuid()
1011 nd_label_gen_id(&label_id, &uuid, in namespace_update_uuid()
1014 set_bit(ND_LABEL_REAP, &label_ent->flags); in namespace_update_uuid()
1016 mutex_unlock(&nd_mapping->lock); in namespace_update_uuid()
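
The rename loop above works because label ids are plain strings derived from the uuid (nd_label_gen_id() produces "pmem-<uuid>" for non-local labels): every tracked resource whose name carries the old id is rewritten in place to the new one. A sketch of the derivation and match; the string uuid form is illustrative, where the kernel formats the binary uuid with %pUb:

#include <stdio.h>
#include <string.h>

static void gen_label_id(char *buf, size_t len, const char *uuid_str)
{
	snprintf(buf, len, "pmem-%s", uuid_str);
}

/* Rewrite res_name in place when it matches the old id, as the
 * sprintf((void *)res->name, ...) line above does. */
static void rename_if_match(char *res_name, size_t len,
			    const char *old_id, const char *new_id)
{
	if (strcmp(res_name, old_id) == 0)
		snprintf(res_name, len, "%s", new_id);
}
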
1024 static ssize_t uuid_store(struct device *dev, in uuid_store() argument
1027 struct nd_region *nd_region = to_nd_region(dev->parent); in uuid_store()
1028 uuid_t *uuid = NULL; in uuid_store() local
1032 if (is_namespace_pmem(dev)) { in uuid_store()
1033 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); in uuid_store()
1035 ns_uuid = &nspm->uuid; in uuid_store()
1037 return -ENXIO; in uuid_store()
1039 device_lock(dev); in uuid_store()
1040 nvdimm_bus_lock(dev); in uuid_store()
1041 wait_nvdimm_bus_probe_idle(dev); in uuid_store()
1042 if (to_ndns(dev)->claim) in uuid_store()
1043 rc = -EBUSY; in uuid_store()
1045 rc = nd_uuid_store(dev, &uuid, buf, len); in uuid_store()
1047 rc = namespace_update_uuid(nd_region, dev, uuid, ns_uuid); in uuid_store()
1049 rc = nd_namespace_label_update(nd_region, dev); in uuid_store()
1051 kfree(uuid); in uuid_store()
1052 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, in uuid_store()
1053 buf[len - 1] == '\n' ? "" : "\n"); in uuid_store()
1054 nvdimm_bus_unlock(dev); in uuid_store()
1055 device_unlock(dev); in uuid_store()
1059 static DEVICE_ATTR_RW(uuid);
1061 static ssize_t resource_show(struct device *dev, in resource_show() argument
1066 if (is_namespace_pmem(dev)) { in resource_show()
1067 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); in resource_show()
1069 res = &nspm->nsio.res; in resource_show()
1070 } else if (is_namespace_io(dev)) { in resource_show()
1071 struct nd_namespace_io *nsio = to_nd_namespace_io(dev); in resource_show()
1073 res = &nsio->res; in resource_show()
1075 return -ENXIO; in resource_show()
1079 return -ENXIO; in resource_show()
1080 return sprintf(buf, "%#llx\n", (unsigned long long) res->start); in resource_show()
1086 static ssize_t sector_size_show(struct device *dev, in sector_size_show() argument
1089 if (is_namespace_pmem(dev)) { in sector_size_show()
1090 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); in sector_size_show()
1092 return nd_size_select_show(nspm->lbasize, in sector_size_show()
1095 return -ENXIO; in sector_size_show()
1098 static ssize_t sector_size_store(struct device *dev, in sector_size_store() argument
1101 struct nd_region *nd_region = to_nd_region(dev->parent); in sector_size_store()
1106 if (is_namespace_pmem(dev)) { in sector_size_store()
1107 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); in sector_size_store()
1109 lbasize = &nspm->lbasize; in sector_size_store()
1112 return -ENXIO; in sector_size_store()
1114 device_lock(dev); in sector_size_store()
1115 nvdimm_bus_lock(dev); in sector_size_store()
1116 if (to_ndns(dev)->claim) in sector_size_store()
1117 rc = -EBUSY; in sector_size_store()
1119 rc = nd_size_select_store(dev, buf, lbasize, supported); in sector_size_store()
1121 rc = nd_namespace_label_update(nd_region, dev); in sector_size_store()
1122 dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote", in sector_size_store()
1123 buf, buf[len - 1] == '\n' ? "" : "\n"); in sector_size_store()
1124 nvdimm_bus_unlock(dev); in sector_size_store()
1125 device_unlock(dev); in sector_size_store()
1131 static ssize_t dpa_extents_show(struct device *dev, in dpa_extents_show() argument
1134 struct nd_region *nd_region = to_nd_region(dev->parent); in dpa_extents_show()
1136 uuid_t *uuid = NULL; in dpa_extents_show() local
1140 nvdimm_bus_lock(dev); in dpa_extents_show()
1141 if (is_namespace_pmem(dev)) { in dpa_extents_show()
1142 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); in dpa_extents_show()
1144 uuid = nspm->uuid; in dpa_extents_show()
1148 if (!uuid) in dpa_extents_show()
1151 nd_label_gen_id(&label_id, uuid, flags); in dpa_extents_show()
1152 for (i = 0; i < nd_region->ndr_mappings; i++) { in dpa_extents_show()
1153 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in dpa_extents_show()
1158 if (strcmp(res->name, label_id.id) == 0) in dpa_extents_show()
1162 nvdimm_bus_unlock(dev); in dpa_extents_show()
1168 static int btt_claim_class(struct device *dev) in btt_claim_class() argument
1170 struct nd_region *nd_region = to_nd_region(dev->parent); in btt_claim_class()
1173 for (i = 0; i < nd_region->ndr_mappings; i++) { in btt_claim_class()
1174 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in btt_claim_class()
1187 nsindex = to_namespace_index(ndd, ndd->ns_current); in btt_claim_class()
1192 if (__le16_to_cpu(nsindex->major) == 1 in btt_claim_class()
1193 && __le16_to_cpu(nsindex->minor) == 1) in btt_claim_class()
1225 return -ENXIO; in btt_claim_class()
1229 static ssize_t holder_show(struct device *dev, in holder_show() argument
1232 struct nd_namespace_common *ndns = to_ndns(dev); in holder_show()
1235 device_lock(dev); in holder_show()
1236 rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : ""); in holder_show()
1237 device_unlock(dev); in holder_show()
1243 static int __holder_class_store(struct device *dev, const char *buf) in __holder_class_store() argument
1245 struct nd_namespace_common *ndns = to_ndns(dev); in __holder_class_store()
1247 if (dev->driver || ndns->claim) in __holder_class_store()
1248 return -EBUSY; in __holder_class_store()
1251 int rc = btt_claim_class(dev); in __holder_class_store()
1255 ndns->claim_class = rc; in __holder_class_store()
1257 ndns->claim_class = NVDIMM_CCLASS_PFN; in __holder_class_store()
1259 ndns->claim_class = NVDIMM_CCLASS_DAX; in __holder_class_store()
1261 ndns->claim_class = NVDIMM_CCLASS_NONE; in __holder_class_store()
1263 return -EINVAL; in __holder_class_store()
1268 static ssize_t holder_class_store(struct device *dev, in holder_class_store() argument
1271 struct nd_region *nd_region = to_nd_region(dev->parent); in holder_class_store()
1274 device_lock(dev); in holder_class_store()
1275 nvdimm_bus_lock(dev); in holder_class_store()
1276 wait_nvdimm_bus_probe_idle(dev); in holder_class_store()
1277 rc = __holder_class_store(dev, buf); in holder_class_store()
1279 rc = nd_namespace_label_update(nd_region, dev); in holder_class_store()
1280 dev_dbg(dev, "%s(%d)\n", rc < 0 ? "fail " : "", rc); in holder_class_store()
1281 nvdimm_bus_unlock(dev); in holder_class_store()
1282 device_unlock(dev); in holder_class_store()
1287 static ssize_t holder_class_show(struct device *dev, in holder_class_show() argument
1290 struct nd_namespace_common *ndns = to_ndns(dev); in holder_class_show()
1293 device_lock(dev); in holder_class_show()
1294 if (ndns->claim_class == NVDIMM_CCLASS_NONE) in holder_class_show()
1296 else if ((ndns->claim_class == NVDIMM_CCLASS_BTT) || in holder_class_show()
1297 (ndns->claim_class == NVDIMM_CCLASS_BTT2)) in holder_class_show()
1299 else if (ndns->claim_class == NVDIMM_CCLASS_PFN) in holder_class_show()
1301 else if (ndns->claim_class == NVDIMM_CCLASS_DAX) in holder_class_show()
1305 device_unlock(dev); in holder_class_show()
1311 static ssize_t mode_show(struct device *dev, in mode_show() argument
1314 struct nd_namespace_common *ndns = to_ndns(dev); in mode_show()
1319 device_lock(dev); in mode_show()
1320 claim = ndns->claim; in mode_show()
1327 else if (!claim && pmem_should_map_pages(dev)) in mode_show()
1332 device_unlock(dev); in mode_show()
1338 static ssize_t force_raw_store(struct device *dev, in force_raw_store() argument
1347 to_ndns(dev)->force_raw = force_raw; in force_raw_store()
1351 static ssize_t force_raw_show(struct device *dev, in force_raw_show() argument
1354 return sprintf(buf, "%d\n", to_ndns(dev)->force_raw); in force_raw_show()
1376 struct device *dev = container_of(kobj, struct device, kobj); in namespace_visible() local
1378 if (is_namespace_pmem(dev)) { in namespace_visible()
1382 return a->mode; in namespace_visible()
1390 return a->mode; in namespace_visible()
1419 static bool is_namespace_pmem(const struct device *dev) in is_namespace_pmem() argument
1421 return dev ? dev->type == &namespace_pmem_device_type : false; in is_namespace_pmem()
1424 static bool is_namespace_io(const struct device *dev) in is_namespace_io() argument
1426 return dev ? dev->type == &namespace_io_device_type : false; in is_namespace_io()
1429 struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev) in nvdimm_namespace_common_probe() argument
1431 struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL; in nvdimm_namespace_common_probe()
1432 struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL; in nvdimm_namespace_common_probe()
1433 struct nd_dax *nd_dax = is_nd_dax(dev) ? to_nd_dax(dev) : NULL; in nvdimm_namespace_common_probe()
1439 ndns = nd_btt->ndns; in nvdimm_namespace_common_probe()
1441 ndns = nd_pfn->ndns; in nvdimm_namespace_common_probe()
1443 ndns = nd_dax->nd_pfn.ndns; in nvdimm_namespace_common_probe()
1446 return ERR_PTR(-ENODEV); in nvdimm_namespace_common_probe()
1449 * Flush any in-progress probes / removals in the driver in nvdimm_namespace_common_probe()
1452 device_lock(&ndns->dev); in nvdimm_namespace_common_probe()
1453 device_unlock(&ndns->dev); in nvdimm_namespace_common_probe()
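
The back-to-back device_lock()/device_unlock() above is a barrier, not data protection: the acquire cannot succeed until any probe or removal holding the lock has finished, so the driver/claim checks that follow see settled state. The same idiom in pthread terms:

#include <pthread.h>

/* Once the mutex is acquired, any critical section that was in
 * flight has completed; we learn nothing else, so drop it again. */
static void flush_in_flight(pthread_mutex_t *lock)
{
	pthread_mutex_lock(lock);
	pthread_mutex_unlock(lock);
}
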
1454 if (ndns->dev.driver) { in nvdimm_namespace_common_probe()
1455 dev_dbg(&ndns->dev, "is active, can't bind %s\n", in nvdimm_namespace_common_probe()
1456 dev_name(dev)); in nvdimm_namespace_common_probe()
1457 return ERR_PTR(-EBUSY); in nvdimm_namespace_common_probe()
1459 if (dev_WARN_ONCE(&ndns->dev, ndns->claim != dev, in nvdimm_namespace_common_probe()
1461 dev_name(dev), in nvdimm_namespace_common_probe()
1462 dev_name(ndns->claim))) in nvdimm_namespace_common_probe()
1463 return ERR_PTR(-ENXIO); in nvdimm_namespace_common_probe()
1465 ndns = to_ndns(dev); in nvdimm_namespace_common_probe()
1466 if (ndns->claim) { in nvdimm_namespace_common_probe()
1467 dev_dbg(dev, "claimed by %s, failing probe\n", in nvdimm_namespace_common_probe()
1468 dev_name(ndns->claim)); in nvdimm_namespace_common_probe()
1470 return ERR_PTR(-ENXIO); in nvdimm_namespace_common_probe()
1475 return ERR_PTR(-EACCES); in nvdimm_namespace_common_probe()
1479 dev_dbg(&ndns->dev, "%pa, too small must be at least %#x\n", in nvdimm_namespace_common_probe()
1481 return ERR_PTR(-ENODEV); in nvdimm_namespace_common_probe()
1489 if (pmem_should_map_pages(dev)) { in nvdimm_namespace_common_probe()
1490 struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); in nvdimm_namespace_common_probe()
1491 struct resource *res = &nsio->res; in nvdimm_namespace_common_probe()
1493 if (!IS_ALIGNED(res->start | (res->end + 1), in nvdimm_namespace_common_probe()
1495 dev_err(&ndns->dev, "%pr misaligned, unable to map\n", res); in nvdimm_namespace_common_probe()
1496 return ERR_PTR(-EOPNOTSUPP); in nvdimm_namespace_common_probe()
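
The IS_ALIGNED(res->start | (res->end + 1), ...) test above folds two checks into one: a bit is set in a | b iff it is set in a or in b, so the OR is aligned exactly when both the start and the exclusive end are. Stand-alone form for a power-of-two align:

#include <stdbool.h>
#include <stdint.h>

static bool range_aligned(uint64_t start, uint64_t end_incl, uint64_t align)
{
	/* align must be a power of two */
	return ((start | (end_incl + 1)) & (align - 1)) == 0;
}
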
1500 if (is_namespace_pmem(&ndns->dev)) { in nvdimm_namespace_common_probe()
1503 nspm = to_nd_namespace_pmem(&ndns->dev); in nvdimm_namespace_common_probe()
1504 if (uuid_not_set(nspm->uuid, &ndns->dev, __func__)) in nvdimm_namespace_common_probe()
1505 return ERR_PTR(-ENODEV); in nvdimm_namespace_common_probe()
1512 int devm_namespace_enable(struct device *dev, struct nd_namespace_common *ndns, in devm_namespace_enable() argument
1515 return devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev), size); in devm_namespace_enable()
1519 void devm_namespace_disable(struct device *dev, struct nd_namespace_common *ndns) in devm_namespace_disable() argument
1521 devm_nsio_disable(dev, to_nd_namespace_io(&ndns->dev)); in devm_namespace_disable()
1528 struct device *dev, **devs; in create_namespace_io() local
1541 dev = &nsio->common.dev; in create_namespace_io()
1542 dev->type = &namespace_io_device_type; in create_namespace_io()
1543 dev->parent = &nd_region->dev; in create_namespace_io()
1544 res = &nsio->res; in create_namespace_io()
1545 res->name = dev_name(&nd_region->dev); in create_namespace_io()
1546 res->flags = IORESOURCE_MEM; in create_namespace_io()
1547 res->start = nd_region->ndr_start; in create_namespace_io()
1548 res->end = res->start + nd_region->ndr_size - 1; in create_namespace_io()
1550 devs[0] = dev; in create_namespace_io()
1554 static bool has_uuid_at_pos(struct nd_region *nd_region, const uuid_t *uuid, in has_uuid_at_pos() argument
1560 for (i = 0; i < nd_region->ndr_mappings; i++) { in has_uuid_at_pos()
1561 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in has_uuid_at_pos()
1562 struct nd_interleave_set *nd_set = nd_region->nd_set; in has_uuid_at_pos()
1567 list_for_each_entry(label_ent, &nd_mapping->labels, list) { in has_uuid_at_pos()
1568 struct nd_namespace_label *nd_label = label_ent->label; in has_uuid_at_pos()
1578 if (!nsl_uuid_equal(ndd, nd_label, uuid)) in has_uuid_at_pos()
1582 &nd_set->type_guid)) in has_uuid_at_pos()
1586 dev_dbg(ndd->dev, "duplicate entry for uuid\n"); in has_uuid_at_pos()
1608 return -ENODEV; in select_pmem_id()
1610 for (i = 0; i < nd_region->ndr_mappings; i++) { in select_pmem_id()
1611 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in select_pmem_id()
1617 lockdep_assert_held(&nd_mapping->lock); in select_pmem_id()
1618 list_for_each_entry(label_ent, &nd_mapping->labels, list) { in select_pmem_id()
1619 nd_label = label_ent->label; in select_pmem_id()
1629 return -EINVAL; in select_pmem_id()
1636 hw_start = nd_mapping->start; in select_pmem_id()
1637 hw_end = hw_start + nd_mapping->size; in select_pmem_id()
1644 dev_dbg(&nd_region->dev, "%s invalid label for %pUb\n", in select_pmem_id()
1645 dev_name(ndd->dev), in select_pmem_id()
1647 return -EINVAL; in select_pmem_id()
1651 list_move(&label_ent->list, &nd_mapping->labels); in select_pmem_id()
1657 * create_namespace_pmem - validate interleave set labelling, retrieve label0
1668 to_namespace_index(ndd, ndd->ns_current); in create_namespace_pmem()
1675 struct device *dev; in create_namespace_pmem() local
1676 uuid_t uuid; in create_namespace_pmem() local
1681 dev_dbg(&nd_region->dev, "invalid interleave-set-cookie\n"); in create_namespace_pmem()
1682 return ERR_PTR(-ENXIO); in create_namespace_pmem()
1686 dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n", in create_namespace_pmem()
1689 return ERR_PTR(-EAGAIN); in create_namespace_pmem()
1691 dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n", in create_namespace_pmem()
1697 return ERR_PTR(-ENOMEM); in create_namespace_pmem()
1699 nspm->id = -1; in create_namespace_pmem()
1700 dev = &nspm->nsio.common.dev; in create_namespace_pmem()
1701 dev->type = &namespace_pmem_device_type; in create_namespace_pmem()
1702 dev->parent = &nd_region->dev; in create_namespace_pmem()
1703 res = &nspm->nsio.res; in create_namespace_pmem()
1704 res->name = dev_name(&nd_region->dev); in create_namespace_pmem()
1705 res->flags = IORESOURCE_MEM; in create_namespace_pmem()
1707 for (i = 0; i < nd_region->ndr_mappings; i++) { in create_namespace_pmem()
1708 nsl_get_uuid(ndd, nd_label, &uuid); in create_namespace_pmem()
1709 if (has_uuid_at_pos(nd_region, &uuid, cookie, i)) in create_namespace_pmem()
1711 if (has_uuid_at_pos(nd_region, &uuid, altcookie, i)) in create_namespace_pmem()
1716 if (i < nd_region->ndr_mappings) { in create_namespace_pmem()
1717 struct nvdimm *nvdimm = nd_region->mapping[i].nvdimm; in create_namespace_pmem()
1720 * Give up if we don't find an instance of a uuid at each in create_namespace_pmem()
1721 * position (from 0 to nd_region->ndr_mappings - 1), or if we in create_namespace_pmem()
1722 * find a dimm with two instances of the same uuid. in create_namespace_pmem()
1724 dev_err(&nd_region->dev, "%s missing label for %pUb\n", in create_namespace_pmem()
1726 rc = -EINVAL; in create_namespace_pmem()
1733 * check that the namespace aligns with the interleave-set. in create_namespace_pmem()
1735 nsl_get_uuid(ndd, nd_label, &uuid); in create_namespace_pmem()
1736 rc = select_pmem_id(nd_region, &uuid); in create_namespace_pmem()
1741 for (i = 0; i < nd_region->ndr_mappings; i++) { in create_namespace_pmem()
1745 nd_mapping = &nd_region->mapping[i]; in create_namespace_pmem()
1746 label_ent = list_first_entry_or_null(&nd_mapping->labels, in create_namespace_pmem()
1748 label0 = label_ent ? label_ent->label : NULL; in create_namespace_pmem()
1759 WARN_ON(nspm->alt_name || nspm->uuid); in create_namespace_pmem()
1760 nspm->alt_name = kmemdup(nsl_ref_name(ndd, label0), in create_namespace_pmem()
1762 nsl_get_uuid(ndd, label0, &uuid); in create_namespace_pmem()
1763 nspm->uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL); in create_namespace_pmem()
1764 nspm->lbasize = nsl_get_lbasize(ndd, label0); in create_namespace_pmem()
1765 nspm->nsio.common.claim_class = in create_namespace_pmem()
1769 if (!nspm->alt_name || !nspm->uuid) { in create_namespace_pmem()
1770 rc = -ENOMEM; in create_namespace_pmem()
1776 return dev; in create_namespace_pmem()
1778 namespace_pmem_release(dev); in create_namespace_pmem()
1780 case -EINVAL: in create_namespace_pmem()
1781 dev_dbg(&nd_region->dev, "invalid label(s)\n"); in create_namespace_pmem()
1783 case -ENODEV: in create_namespace_pmem()
1784 dev_dbg(&nd_region->dev, "label not found\n"); in create_namespace_pmem()
1787 dev_dbg(&nd_region->dev, "unexpected err: %d\n", rc); in create_namespace_pmem()
1797 struct device *dev; in nd_namespace_pmem_create() local
1799 if (!is_memory(&nd_region->dev)) in nd_namespace_pmem_create()
1806 dev = &nspm->nsio.common.dev; in nd_namespace_pmem_create()
1807 dev->type = &namespace_pmem_device_type; in nd_namespace_pmem_create()
1808 dev->parent = &nd_region->dev; in nd_namespace_pmem_create()
1809 res = &nspm->nsio.res; in nd_namespace_pmem_create()
1810 res->name = dev_name(&nd_region->dev); in nd_namespace_pmem_create()
1811 res->flags = IORESOURCE_MEM; in nd_namespace_pmem_create()
1813 nspm->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL); in nd_namespace_pmem_create()
1814 if (nspm->id < 0) { in nd_namespace_pmem_create()
1818 dev_set_name(dev, "namespace%d.%d", nd_region->id, nspm->id); in nd_namespace_pmem_create()
1821 return dev; in nd_namespace_pmem_create()
1828 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev)); in nd_region_create_ns_seed()
1833 nd_region->ns_seed = nd_namespace_pmem_create(nd_region); in nd_region_create_ns_seed()
1839 if (!nd_region->ns_seed) in nd_region_create_ns_seed()
1840 dev_err(&nd_region->dev, "failed to create namespace\n"); in nd_region_create_ns_seed()
1842 device_initialize(nd_region->ns_seed); in nd_region_create_ns_seed()
1843 lockdep_set_class(&nd_region->ns_seed->mutex, in nd_region_create_ns_seed()
1845 nd_device_register(nd_region->ns_seed); in nd_region_create_ns_seed()
1851 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev)); in nd_region_create_dax_seed()
1852 nd_region->dax_seed = nd_dax_create(nd_region); in nd_region_create_dax_seed()
1857 if (!nd_region->dax_seed) in nd_region_create_dax_seed()
1858 dev_err(&nd_region->dev, "failed to create dax namespace\n"); in nd_region_create_dax_seed()
1863 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev)); in nd_region_create_pfn_seed()
1864 nd_region->pfn_seed = nd_pfn_create(nd_region); in nd_region_create_pfn_seed()
1869 if (!nd_region->pfn_seed) in nd_region_create_pfn_seed()
1870 dev_err(&nd_region->dev, "failed to create pfn namespace\n"); in nd_region_create_pfn_seed()
1875 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev)); in nd_region_create_btt_seed()
1876 nd_region->btt_seed = nd_btt_create(nd_region); in nd_region_create_btt_seed()
1881 if (!nd_region->btt_seed) in nd_region_create_btt_seed()
1882 dev_err(&nd_region->dev, "failed to create btt namespace\n"); in nd_region_create_btt_seed()
1889 struct nd_mapping *nd_mapping = &nd_region->mapping[0]; in add_namespace_resource()
1894 uuid_t *uuid = namespace_to_uuid(devs[i]); in add_namespace_resource() local
1896 if (IS_ERR(uuid)) { in add_namespace_resource()
1901 if (!nsl_uuid_equal(ndd, nd_label, uuid)) in add_namespace_resource()
1903 dev_err(&nd_region->dev, in add_namespace_resource()
1904 "error: conflicting extents for uuid: %pUb\n", uuid); in add_namespace_resource()
1905 return -ENXIO; in add_namespace_resource()
1923 return memcmp(&nspm_a->nsio.res.start, &nspm_b->nsio.res.start, in cmp_dpa()
1930 struct device *dev, **devs; in scan_labels() local
1932 struct nd_mapping *nd_mapping = &nd_region->mapping[0]; in scan_labels()
1934 resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1; in scan_labels()
1936 devs = kcalloc(2, sizeof(dev), GFP_KERNEL); in scan_labels()
1941 list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) { in scan_labels()
1942 struct nd_namespace_label *nd_label = label_ent->label; in scan_labels()
1949 if (nsl_get_dpa(ndd, nd_label) < nd_mapping->start || in scan_labels()
1959 __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL); in scan_labels()
1962 memcpy(__devs, devs, sizeof(dev) * count); in scan_labels()
1967 dev = create_namespace_pmem(nd_region, nd_mapping, nd_label); in scan_labels()
1968 if (IS_ERR(dev)) { in scan_labels()
1969 switch (PTR_ERR(dev)) { in scan_labels()
1970 case -EAGAIN: in scan_labels()
1973 case -ENODEV: in scan_labels()
1980 devs[count++] = dev; in scan_labels()
1984 dev_dbg(&nd_region->dev, "discovered %d namespace%s\n", count, in scan_labels()
1990 /* Publish a zero-sized namespace for userspace to configure. */ in scan_labels()
1995 dev = &nspm->nsio.common.dev; in scan_labels()
1996 dev->type = &namespace_pmem_device_type; in scan_labels()
1998 dev->parent = &nd_region->dev; in scan_labels()
1999 devs[count++] = dev; in scan_labels()
2000 } else if (is_memory(&nd_region->dev)) { in scan_labels()
2002 for (i = 0; i < nd_region->ndr_mappings; i++) { in scan_labels()
2007 nd_mapping = &nd_region->mapping[i]; in scan_labels()
2008 if (list_empty(&nd_mapping->labels)) { in scan_labels()
2014 list_for_each_safe(l, e, &nd_mapping->labels) { in scan_labels()
2015 if (!j--) in scan_labels()
2020 list_splice_init(&list, &nd_mapping->labels); in scan_labels()
2043 if (nd_region->ndr_mappings == 0) in create_namespaces()
2047 for (i = 0; i < nd_region->ndr_mappings; i++) { in create_namespaces()
2048 nd_mapping = &nd_region->mapping[i]; in create_namespaces()
2049 mutex_lock_nested(&nd_mapping->lock, i); in create_namespaces()
2054 for (i = 0; i < nd_region->ndr_mappings; i++) { in create_namespaces()
2055 int reverse = nd_region->ndr_mappings - 1 - i; in create_namespaces()
2057 nd_mapping = &nd_region->mapping[reverse]; in create_namespaces()
2058 mutex_unlock(&nd_mapping->lock); in create_namespaces()
2069 for (i = 0; i < nd_region->ndr_mappings; i++) { in deactivate_labels()
2070 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in deactivate_labels()
2071 struct nvdimm_drvdata *ndd = nd_mapping->ndd; in deactivate_labels()
2072 struct nvdimm *nvdimm = nd_mapping->nvdimm; in deactivate_labels()
2074 mutex_lock(&nd_mapping->lock); in deactivate_labels()
2076 mutex_unlock(&nd_mapping->lock); in deactivate_labels()
2079 nd_mapping->ndd = NULL; in deactivate_labels()
2081 atomic_dec(&nvdimm->busy); in deactivate_labels()
2089 for (i = 0; i < nd_region->ndr_mappings; i++) { in init_active_labels()
2090 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in init_active_labels()
2092 struct nvdimm *nvdimm = nd_mapping->nvdimm; in init_active_labels()
2101 if (test_bit(NDD_LOCKED, &nvdimm->flags)) in init_active_labels()
2103 else if (test_bit(NDD_LABELING, &nvdimm->flags)) in init_active_labels()
2108 dev_err(&nd_region->dev, "%s: is %s, failing probe\n", in init_active_labels()
2109 dev_name(&nd_mapping->nvdimm->dev), in init_active_labels()
2110 test_bit(NDD_LOCKED, &nvdimm->flags) in init_active_labels()
2112 rc = -ENXIO; in init_active_labels()
2115 nd_mapping->ndd = ndd; in init_active_labels()
2116 atomic_inc(&nvdimm->busy); in init_active_labels()
2120 dev_dbg(ndd->dev, "count: %d\n", count); in init_active_labels()
2130 label_ent->label = label; in init_active_labels()
2132 mutex_lock(&nd_mapping->lock); in init_active_labels()
2133 list_add_tail(&label_ent->list, &nd_mapping->labels); in init_active_labels()
2134 mutex_unlock(&nd_mapping->lock); in init_active_labels()
2141 if (i < nd_region->ndr_mappings) in init_active_labels()
2142 rc = -ENOMEM; in init_active_labels()
2150 return devm_add_action_or_reset(&nd_region->dev, deactivate_labels, in init_active_labels()
2160 nvdimm_bus_lock(&nd_region->dev); in nd_region_register_namespaces()
2163 nvdimm_bus_unlock(&nd_region->dev); in nd_region_register_namespaces()
2178 nvdimm_bus_unlock(&nd_region->dev); in nd_region_register_namespaces()
2181 return -ENODEV; in nd_region_register_namespaces()
2184 struct device *dev = devs[i]; in nd_region_register_namespaces() local
2190 nspm = to_nd_namespace_pmem(dev); in nd_region_register_namespaces()
2191 id = ida_simple_get(&nd_region->ns_ida, 0, 0, in nd_region_register_namespaces()
2193 nspm->id = id; in nd_region_register_namespaces()
2199 dev_set_name(dev, "namespace%d.%d", nd_region->id, id); in nd_region_register_namespaces()
2200 device_initialize(dev); in nd_region_register_namespaces()
2201 lockdep_set_class(&dev->mutex, &nvdimm_namespace_key); in nd_region_register_namespaces()
2202 nd_device_register(dev); in nd_region_register_namespaces()
2205 nd_region->ns_seed = devs[0]; in nd_region_register_namespaces()
2211 struct device *dev = devs[j]; in nd_region_register_namespaces() local
2213 device_initialize(dev); in nd_region_register_namespaces()
2214 put_device(dev); in nd_region_register_namespaces()
2216 *err = j - i; in nd_region_register_namespaces()
2222 rc = -ENODEV; in nd_region_register_namespaces()
2226 if (rc == -ENODEV) in nd_region_register_namespaces()