/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/device.h>
#include <linux/sizes.h>
#include "nd-core.h"
#include "pmem.h"
#include "pfn.h"
#include "btt.h"
#include "nd.h"

/* caller must hold the nvdimm bus lock; drops @dev's claim on *@_ndns */
void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns)
{
	struct nd_namespace_common *ndns = *_ndns;
	struct nvdimm_bus *nvdimm_bus;

	if (!ndns)
		return;

	nvdimm_bus = walk_to_nvdimm_bus(&ndns->dev);
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
	dev_WARN_ONCE(dev, ndns->claim != dev, "%s: invalid claim\n", __func__);
	ndns->claim = NULL;
	*_ndns = NULL;
	put_device(&ndns->dev);
}

void nd_detach_ndns(struct device *dev,
		struct nd_namespace_common **_ndns)
{
	struct nd_namespace_common *ndns = *_ndns;

	if (!ndns)
		return;
	get_device(&ndns->dev);
	nvdimm_bus_lock(&ndns->dev);
	__nd_detach_ndns(dev, _ndns);
	nvdimm_bus_unlock(&ndns->dev);
	put_device(&ndns->dev);
}

/* caller must hold the nvdimm bus lock; records @dev as the claim holder */
bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
		struct nd_namespace_common **_ndns)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&attach->dev);

	if (attach->claim)
		return false;
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
	dev_WARN_ONCE(dev, *_ndns, "%s: invalid claim\n", __func__);
	attach->claim = dev;
	*_ndns = attach;
	get_device(&attach->dev);
	return true;
}

bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
		struct nd_namespace_common **_ndns)
{
	bool claimed;

	nvdimm_bus_lock(&attach->dev);
	claimed = __nd_attach_ndns(dev, attach, _ndns);
	nvdimm_bus_unlock(&attach->dev);
	return claimed;
}
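
/*
 * Illustrative sketch only, not part of the original file: how a
 * claiming device (a btt, pfn, or dax instance) would typically pair
 * the attach/detach helpers above. The function name and the empty
 * setup step are hypothetical stand-ins; see the real call sites in
 * btt_devs.c, pfn_devs.c, and dax_devs.c.
 */
static int __maybe_unused example_claim_ndns(struct device *claim_dev,
		struct nd_namespace_common *ndns,
		struct nd_namespace_common **slot)
{
	int rc = 0;

	/* takes a reference on @ndns and records @claim_dev as the owner */
	if (!nd_attach_ndns(claim_dev, ndns, slot))
		return -EBUSY; /* already claimed by some other device */

	/* ... device-specific setup would happen here, setting rc ... */

	if (rc)
		/* drops the claim and the reference taken above */
		nd_detach_ndns(claim_dev, slot);
	return rc;
}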

static int namespace_match(struct device *dev, void *data)
{
	char *name = data;

	return strcmp(name, dev_name(dev)) == 0;
}

static bool is_idle(struct device *dev, struct nd_namespace_common *ndns)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct device *seed = NULL;

	if (is_nd_btt(dev))
		seed = nd_region->btt_seed;
	else if (is_nd_pfn(dev))
		seed = nd_region->pfn_seed;
	else if (is_nd_dax(dev))
		seed = nd_region->dax_seed;

	if (seed == dev || ndns || dev->driver)
		return false;
	return true;
}

struct nd_pfn *to_nd_pfn_safe(struct device *dev)
{
	/*
	 * pfn device attributes are re-used by dax device instances, so we
	 * need to be careful to perform the correct device-to-nd_pfn
	 * conversion.
	 */
	if (is_nd_pfn(dev))
		return to_nd_pfn(dev);

	if (is_nd_dax(dev)) {
		struct nd_dax *nd_dax = to_nd_dax(dev);

		return &nd_dax->nd_pfn;
	}

	WARN_ON(1);
	return NULL;
}

static void nd_detach_and_reset(struct device *dev,
		struct nd_namespace_common **_ndns)
{
	/* detach the namespace and destroy / reset the device */
	__nd_detach_ndns(dev, _ndns);
	if (is_idle(dev, *_ndns)) {
		nd_device_unregister(dev, ND_ASYNC);
	} else if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		nd_btt->lbasize = 0;
		kfree(nd_btt->uuid);
		nd_btt->uuid = NULL;
	} else if (is_nd_pfn(dev) || is_nd_dax(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

		kfree(nd_pfn->uuid);
		nd_pfn->uuid = NULL;
		nd_pfn->mode = PFN_MODE_NONE;
	}
}

/**
 * nd_namespace_store - attach a namespace to a claiming device via sysfs
 * @dev: claiming btt, pfn, or dax device
 * @_ndns: location where the claimed namespace is recorded
 * @buf: namespace device name, or the empty string to detach and reset
 * @len: length of @buf
 */
ssize_t nd_namespace_store(struct device *dev,
		struct nd_namespace_common **_ndns, const char *buf,
		size_t len)
{
	struct nd_namespace_common *ndns;
	struct device *found;
	char *name;

	if (dev->driver) {
		dev_dbg(dev, "namespace already active\n");
		return -EBUSY;
	}

	name = kstrndup(buf, len, GFP_KERNEL);
	if (!name)
		return -ENOMEM;
	strim(name);

	if (strncmp(name, "namespace", 9) == 0 || strcmp(name, "") == 0)
		/* pass */;
	else {
		len = -EINVAL;
		goto out;
	}

	ndns = *_ndns;
	if (strcmp(name, "") == 0) {
		nd_detach_and_reset(dev, _ndns);
		goto out;
	} else if (ndns) {
		dev_dbg(dev, "namespace already set to: %s\n",
				dev_name(&ndns->dev));
		len = -EBUSY;
		goto out;
	}

	found = device_find_child(dev->parent, name, namespace_match);
	if (!found) {
		dev_dbg(dev, "'%s' not found under %s\n", name,
				dev_name(dev->parent));
		len = -ENODEV;
		goto out;
	}

	ndns = to_ndns(found);

	switch (ndns->claim_class) {
	case NVDIMM_CCLASS_NONE:
		break;
	case NVDIMM_CCLASS_BTT:
	case NVDIMM_CCLASS_BTT2:
		if (!is_nd_btt(dev)) {
			len = -EBUSY;
			goto out_attach;
		}
		break;
	case NVDIMM_CCLASS_PFN:
		if (!is_nd_pfn(dev)) {
			len = -EBUSY;
			goto out_attach;
		}
		break;
	case NVDIMM_CCLASS_DAX:
		if (!is_nd_dax(dev)) {
			len = -EBUSY;
			goto out_attach;
		}
		break;
	default:
		len = -EBUSY;
		goto out_attach;
	}

	if (__nvdimm_namespace_capacity(ndns) < SZ_16M) {
		dev_dbg(dev, "%s too small to host\n", name);
		len = -ENXIO;
		goto out_attach;
	}

	WARN_ON_ONCE(!is_nvdimm_bus_locked(dev));
	if (!__nd_attach_ndns(dev, ndns, _ndns)) {
		dev_dbg(dev, "%s already claimed\n",
				dev_name(&ndns->dev));
		len = -EBUSY;
	}

 out_attach:
	put_device(&ndns->dev); /* from device_find_child */
 out:
	kfree(name);
	return len;
}

/*
 * nd_sb_checksum: compute checksum for a generic info block
 *
 * Returns a fletcher64 checksum of everything in the given info block
 * except the last field (since that's where the checksum lives).
 */
u64 nd_sb_checksum(struct nd_gen_sb *nd_gen_sb)
{
	u64 sum;
	__le64 sum_save;

	BUILD_BUG_ON(sizeof(struct btt_sb) != SZ_4K);
	BUILD_BUG_ON(sizeof(struct nd_pfn_sb) != SZ_4K);
	BUILD_BUG_ON(sizeof(struct nd_gen_sb) != SZ_4K);

	sum_save = nd_gen_sb->checksum;
	nd_gen_sb->checksum = 0;
	sum = nd_fletcher64(nd_gen_sb, sizeof(*nd_gen_sb), 1);
	nd_gen_sb->checksum = sum_save;
	return sum;
}
EXPORT_SYMBOL(nd_sb_checksum);
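
/*
 * Illustrative sketch only, not part of the original file: the typical
 * validation pattern built on nd_sb_checksum(). Because the helper
 * zeroes the checksum field internally before summing and then restores
 * it, a caller can compare the stored value directly against the
 * recomputed one; this mirrors the check in nd_pfn_validate() and the
 * BTT arena validation code.
 */
static bool __maybe_unused example_sb_valid(struct nd_gen_sb *sb)
{
	/* the stored checksum is little-endian on media */
	return le64_to_cpu(sb->checksum) == nd_sb_checksum(sb);
}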

static int nsio_rw_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size, int rw,
		unsigned long flags)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);
	sector_t sector = offset >> 9;
	int rc = 0;

	if (unlikely(!size))
		return 0;

	if (unlikely(offset + size > nsio->size)) {
		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
		return -EFAULT;
	}

	if (rw == READ) {
		if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align)))
			return -EIO;
		if (memcpy_mcsafe(buf, nsio->addr + offset, size) != 0)
			return -EIO;
		return 0;
	}

	/*
	 * Write path: if the range overlaps known poison, try to clear it
	 * first. Clearing may sleep, so it is skipped for unaligned or
	 * NVDIMM_IO_ATOMIC requests, which fail with -EIO instead.
	 */
	if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
		if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)
				&& !(flags & NVDIMM_IO_ATOMIC)) {
			long cleared;

			might_sleep();
			cleared = nvdimm_clear_poison(&ndns->dev,
					nsio->res.start + offset, size);
			if (cleared < size)
				rc = -EIO;
			if (cleared > 0 && cleared / 512) {
				cleared /= 512;
				badblocks_clear(&nsio->bb, sector, cleared);
			}
			arch_invalidate_pmem(nsio->addr + offset, size);
		} else
			rc = -EIO;
	}

	memcpy_flushcache(nsio->addr + offset, buf, size);
	nvdimm_flush(to_nd_region(ndns->dev.parent));

	return rc;
}

int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
{
	struct resource *res = &nsio->res;
	struct nd_namespace_common *ndns = &nsio->common;

	nsio->size = resource_size(res);
	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	ndns->rw_bytes = nsio_rw_bytes;
	if (devm_init_badblocks(dev, &nsio->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), &nsio->bb,
			&nsio->res);

	nsio->addr = devm_memremap(dev, res->start, resource_size(res),
			ARCH_MEMREMAP_PMEM);

	return PTR_ERR_OR_ZERO(nsio->addr);
}
EXPORT_SYMBOL_GPL(devm_nsio_enable);

void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio)
{
	struct resource *res = &nsio->res;

	devm_memunmap(dev, nsio->addr);
	devm_exit_badblocks(dev, &nsio->bb);
	devm_release_mem_region(dev, res->start, resource_size(res));
}
EXPORT_SYMBOL_GPL(devm_nsio_disable);
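
/*
 * Illustrative sketch only, not part of the original file: once
 * devm_nsio_enable() has installed nsio_rw_bytes() as ndns->rw_bytes,
 * consumers go through the nvdimm_read_bytes() / nvdimm_write_bytes()
 * wrappers from <linux/nd.h> rather than calling it directly. The
 * helper below is a hypothetical example of reading a 4K info block
 * at a given namespace offset.
 */
static int __maybe_unused example_read_info_block(
		struct nd_namespace_common *ndns, resource_size_t offset,
		void *buf)
{
	/* may return -EIO on poisoned media, -EFAULT if out of range */
	return nvdimm_read_bytes(ndns, offset, buf, SZ_4K, 0);
}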