/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/device.h>
#include <linux/sizes.h>
#include <linux/pmem.h>
#include "nd-core.h"
#include "pfn.h"
#include "btt.h"
#include "nd.h"

/* caller is responsible for holding the nvdimm bus lock */
void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns)
{
	struct nd_namespace_common *ndns = *_ndns;
	struct nvdimm_bus *nvdimm_bus;

	if (!ndns)
		return;

	nvdimm_bus = walk_to_nvdimm_bus(&ndns->dev);
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
	dev_WARN_ONCE(dev, ndns->claim != dev, "%s: invalid claim\n", __func__);
	ndns->claim = NULL;
	*_ndns = NULL;
	put_device(&ndns->dev);
}

void nd_detach_ndns(struct device *dev,
		struct nd_namespace_common **_ndns)
{
	struct nd_namespace_common *ndns = *_ndns;

	if (!ndns)
		return;
	/* hold @ndns so it survives dropping the claim's reference */
	get_device(&ndns->dev);
	nvdimm_bus_lock(&ndns->dev);
	__nd_detach_ndns(dev, _ndns);
	nvdimm_bus_unlock(&ndns->dev);
	put_device(&ndns->dev);
}

/* caller must hold the nvdimm bus lock; fails if @attach is already claimed */
bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
		struct nd_namespace_common **_ndns)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&attach->dev);

	if (attach->claim)
		return false;
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
	dev_WARN_ONCE(dev, *_ndns, "%s: invalid claim\n", __func__);
	attach->claim = dev;
	*_ndns = attach;
	get_device(&attach->dev);
	return true;
}

bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
		struct nd_namespace_common **_ndns)
{
	bool claimed;

	nvdimm_bus_lock(&attach->dev);
	claimed = __nd_attach_ndns(dev, attach, _ndns);
	nvdimm_bus_unlock(&attach->dev);
	return claimed;
}
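/*
 * Illustrative only (not compiled here): a claiming personality such
 * as BTT pairs the attach / detach helpers around the lifetime of its
 * claim. The 'nd_btt' and 'ndns' locals below are hypothetical:
 *
 *	(creation path, bus lock already held)
 *	if (ndns && !__nd_attach_ndns(&nd_btt->dev, ndns, &nd_btt->ndns))
 *		return NULL;
 *	...
 *	(release path)
 *	nd_detach_ndns(&nd_btt->dev, &nd_btt->ndns);
 */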
static int namespace_match(struct device *dev, void *data)
{
	char *name = data;

	return strcmp(name, dev_name(dev)) == 0;
}

/*
 * A claiming device is idle when it is not the region's active seed
 * device, has no namespace attached, and has no driver bound.
 */
static bool is_idle(struct device *dev, struct nd_namespace_common *ndns)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct device *seed = NULL;

	if (is_nd_btt(dev))
		seed = nd_region->btt_seed;
	else if (is_nd_pfn(dev))
		seed = nd_region->pfn_seed;
	else if (is_nd_dax(dev))
		seed = nd_region->dax_seed;

	if (seed == dev || ndns || dev->driver)
		return false;
	return true;
}

struct nd_pfn *to_nd_pfn_safe(struct device *dev)
{
	/*
	 * pfn device attributes are re-used by dax device instances, so we
	 * need to be careful to perform the correct device-to-nd_pfn
	 * conversion.
	 */
	if (is_nd_pfn(dev))
		return to_nd_pfn(dev);

	if (is_nd_dax(dev)) {
		struct nd_dax *nd_dax = to_nd_dax(dev);

		return &nd_dax->nd_pfn;
	}

	WARN_ON(1);
	return NULL;
}

static void nd_detach_and_reset(struct device *dev,
		struct nd_namespace_common **_ndns)
{
	/* detach the namespace and destroy / reset the device */
	__nd_detach_ndns(dev, _ndns);
	if (is_idle(dev, *_ndns)) {
		nd_device_unregister(dev, ND_ASYNC);
	} else if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		nd_btt->lbasize = 0;
		kfree(nd_btt->uuid);
		nd_btt->uuid = NULL;
	} else if (is_nd_pfn(dev) || is_nd_dax(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

		kfree(nd_pfn->uuid);
		nd_pfn->uuid = NULL;
		nd_pfn->mode = PFN_MODE_NONE;
	}
}

/*
 * nd_namespace_store: attach / detach a namespace via a sysfs write
 *
 * An empty string detaches the current namespace and resets the
 * claiming device; otherwise @buf is expected to name a sibling
 * "namespaceX.Y" device to claim.
 */
ssize_t nd_namespace_store(struct device *dev,
		struct nd_namespace_common **_ndns, const char *buf,
		size_t len)
{
	struct nd_namespace_common *ndns;
	struct device *found;
	char *name;

	if (dev->driver) {
		dev_dbg(dev, "%s: -EBUSY\n", __func__);
		return -EBUSY;
	}

	name = kstrndup(buf, len, GFP_KERNEL);
	if (!name)
		return -ENOMEM;
	strim(name);

	if (strncmp(name, "namespace", 9) == 0 || strcmp(name, "") == 0)
		/* pass */;
	else {
		len = -EINVAL;
		goto out;
	}

	ndns = *_ndns;
	if (strcmp(name, "") == 0) {
		nd_detach_and_reset(dev, _ndns);
		goto out;
	} else if (ndns) {
		dev_dbg(dev, "namespace already set to: %s\n",
				dev_name(&ndns->dev));
		len = -EBUSY;
		goto out;
	}

	found = device_find_child(dev->parent, name, namespace_match);
	if (!found) {
		dev_dbg(dev, "'%s' not found under %s\n", name,
				dev_name(dev->parent));
		len = -ENODEV;
		goto out;
	}

	ndns = to_ndns(found);
	if (__nvdimm_namespace_capacity(ndns) < SZ_16M) {
		dev_dbg(dev, "%s too small to host\n", name);
		len = -ENXIO;
		goto out_attach;
	}

	WARN_ON_ONCE(!is_nvdimm_bus_locked(dev));
	if (!__nd_attach_ndns(dev, ndns, _ndns)) {
		dev_dbg(dev, "%s already claimed\n",
				dev_name(&ndns->dev));
		len = -EBUSY;
	}

 out_attach:
	put_device(&ndns->dev); /* from device_find_child */
 out:
	kfree(name);
	return len;
}
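/*
 * Illustrative sysfs usage of an attribute backed by nd_namespace_store()
 * (device and namespace names below are hypothetical):
 *
 *	# claim a namespace for a btt / pfn / dax device
 *	echo namespace0.0 > /sys/bus/nd/devices/btt0.0/namespace
 *
 *	# release the claim and reset the claiming device
 *	echo "" > /sys/bus/nd/devices/btt0.0/namespace
 */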
/*
 * nd_sb_checksum: compute checksum for a generic info block
 *
 * Returns a fletcher64 checksum of everything in the given info block
 * except the last field (since that's where the checksum lives).
 */
u64 nd_sb_checksum(struct nd_gen_sb *nd_gen_sb)
{
	u64 sum;
	__le64 sum_save;

	BUILD_BUG_ON(sizeof(struct btt_sb) != SZ_4K);
	BUILD_BUG_ON(sizeof(struct nd_pfn_sb) != SZ_4K);
	BUILD_BUG_ON(sizeof(struct nd_gen_sb) != SZ_4K);

	sum_save = nd_gen_sb->checksum;
	nd_gen_sb->checksum = 0;
	sum = nd_fletcher64(nd_gen_sb, sizeof(*nd_gen_sb), 1);
	nd_gen_sb->checksum = sum_save;
	return sum;
}
EXPORT_SYMBOL(nd_sb_checksum);

static int nsio_rw_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size, int rw,
		unsigned long flags)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	/* round the I/O up to full 512-byte sectors for the badblocks check */
	unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);
	sector_t sector = offset >> 9;
	int rc = 0;

	if (unlikely(!size))
		return 0;

	if (unlikely(offset + size > nsio->size)) {
		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
		return -EFAULT;
	}

	if (rw == READ) {
		if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align)))
			return -EIO;
		return memcpy_mcsafe(buf, nsio->addr + offset, size);
	}

	if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
		/*
		 * FIXME: nsio_rw_bytes() may be called from atomic
		 * context in the btt case and the ACPI DSM path for
		 * clearing the error takes sleeping locks and allocates
		 * memory. An explicit error clearing path, and support
		 * for tracking badblocks in BTT metadata is needed to
		 * work around this collision.
		 */
		if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)
				&& !(flags & NVDIMM_IO_ATOMIC)
				&& !ndns->claim) {
			long cleared;

			cleared = nvdimm_clear_poison(&ndns->dev,
					nsio->res.start + offset, size);
			if (cleared < size)
				rc = -EIO;
			if (cleared > 0 && cleared / 512) {
				cleared /= 512;
				badblocks_clear(&nsio->bb, sector, cleared);
			}
			invalidate_pmem(nsio->addr + offset, size);
		} else
			rc = -EIO;
	}

	/* commit the write and make it durable */
	memcpy_to_pmem(nsio->addr + offset, buf, size);
	nvdimm_flush(to_nd_region(ndns->dev.parent));

	return rc;
}

int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
{
	struct resource *res = &nsio->res;
	struct nd_namespace_common *ndns = &nsio->common;

	nsio->size = resource_size(res);
	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	ndns->rw_bytes = nsio_rw_bytes;
	if (devm_init_badblocks(dev, &nsio->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), &nsio->bb,
			&nsio->res);

	nsio->addr = devm_memremap(dev, res->start, resource_size(res),
			ARCH_MEMREMAP_PMEM);

	return PTR_ERR_OR_ZERO(nsio->addr);
}
EXPORT_SYMBOL_GPL(devm_nsio_enable);

void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio)
{
	struct resource *res = &nsio->res;

	devm_memunmap(dev, nsio->addr);
	devm_exit_badblocks(dev, &nsio->bb);
	devm_release_mem_region(dev, res->start, resource_size(res));
}
EXPORT_SYMBOL_GPL(devm_nsio_disable);
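/*
 * Illustrative only: a raw-access consumer such as the pmem driver is
 * expected to map the namespace from its probe path roughly as below
 * (hypothetical sketch, error handling elided). Teardown is devm-managed;
 * devm_nsio_disable() is available for explicit early release:
 *
 *	ndns = nvdimm_namespace_common_probe(dev);
 *	if (IS_ERR(ndns))
 *		return PTR_ERR(ndns);
 *	if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
 *		return -ENXIO;
 */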