1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 /* 3 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. 4 */ 5 #ifndef __ND_H__ 6 #define __ND_H__ 7 #include <linux/libnvdimm.h> 8 #include <linux/badblocks.h> 9 #include <linux/blkdev.h> 10 #include <linux/device.h> 11 #include <linux/mutex.h> 12 #include <linux/ndctl.h> 13 #include <linux/types.h> 14 #include <linux/nd.h> 15 #include "label.h" 16 17 enum { 18 /* 19 * Limits the maximum number of block apertures a dimm can 20 * support and is an input to the geometry/on-disk-format of a 21 * BTT instance 22 */ 23 ND_MAX_LANES = 256, 24 INT_LBASIZE_ALIGNMENT = 64, 25 NVDIMM_IO_ATOMIC = 1, 26 }; 27 28 struct nvdimm_drvdata { 29 struct device *dev; 30 int nslabel_size; 31 struct nd_cmd_get_config_size nsarea; 32 void *data; 33 int ns_current, ns_next; 34 struct resource dpa; 35 struct kref kref; 36 }; 37 38 static inline const u8 *nsl_ref_name(struct nvdimm_drvdata *ndd, 39 struct nd_namespace_label *nd_label) 40 { 41 return nd_label->name; 42 } 43 44 static inline u8 *nsl_get_name(struct nvdimm_drvdata *ndd, 45 struct nd_namespace_label *nd_label, u8 *name) 46 { 47 return memcpy(name, nd_label->name, NSLABEL_NAME_LEN); 48 } 49 50 static inline u8 *nsl_set_name(struct nvdimm_drvdata *ndd, 51 struct nd_namespace_label *nd_label, u8 *name) 52 { 53 if (!name) 54 return NULL; 55 return memcpy(nd_label->name, name, NSLABEL_NAME_LEN); 56 } 57 58 static inline u32 nsl_get_slot(struct nvdimm_drvdata *ndd, 59 struct nd_namespace_label *nd_label) 60 { 61 return __le32_to_cpu(nd_label->slot); 62 } 63 64 static inline void nsl_set_slot(struct nvdimm_drvdata *ndd, 65 struct nd_namespace_label *nd_label, u32 slot) 66 { 67 nd_label->slot = __cpu_to_le32(slot); 68 } 69 70 static inline u64 nsl_get_checksum(struct nvdimm_drvdata *ndd, 71 struct nd_namespace_label *nd_label) 72 { 73 return __le64_to_cpu(nd_label->checksum); 74 } 75 76 static inline void nsl_set_checksum(struct nvdimm_drvdata *ndd, 77 struct nd_namespace_label *nd_label, 78 u64 checksum) 79 { 80 nd_label->checksum = __cpu_to_le64(checksum); 81 } 82 83 static inline u32 nsl_get_flags(struct nvdimm_drvdata *ndd, 84 struct nd_namespace_label *nd_label) 85 { 86 return __le32_to_cpu(nd_label->flags); 87 } 88 89 static inline void nsl_set_flags(struct nvdimm_drvdata *ndd, 90 struct nd_namespace_label *nd_label, u32 flags) 91 { 92 nd_label->flags = __cpu_to_le32(flags); 93 } 94 95 static inline u64 nsl_get_dpa(struct nvdimm_drvdata *ndd, 96 struct nd_namespace_label *nd_label) 97 { 98 return __le64_to_cpu(nd_label->dpa); 99 } 100 101 static inline void nsl_set_dpa(struct nvdimm_drvdata *ndd, 102 struct nd_namespace_label *nd_label, u64 dpa) 103 { 104 nd_label->dpa = __cpu_to_le64(dpa); 105 } 106 107 static inline u64 nsl_get_rawsize(struct nvdimm_drvdata *ndd, 108 struct nd_namespace_label *nd_label) 109 { 110 return __le64_to_cpu(nd_label->rawsize); 111 } 112 113 static inline void nsl_set_rawsize(struct nvdimm_drvdata *ndd, 114 struct nd_namespace_label *nd_label, 115 u64 rawsize) 116 { 117 nd_label->rawsize = __cpu_to_le64(rawsize); 118 } 119 120 static inline u64 nsl_get_isetcookie(struct nvdimm_drvdata *ndd, 121 struct nd_namespace_label *nd_label) 122 { 123 return __le64_to_cpu(nd_label->isetcookie); 124 } 125 126 static inline void nsl_set_isetcookie(struct nvdimm_drvdata *ndd, 127 struct nd_namespace_label *nd_label, 128 u64 isetcookie) 129 { 130 nd_label->isetcookie = __cpu_to_le64(isetcookie); 131 } 132 133 static inline bool nsl_validate_isetcookie(struct nvdimm_drvdata *ndd, 134 struct nd_namespace_label *nd_label, 135 u64 cookie) 136 { 137 return cookie == __le64_to_cpu(nd_label->isetcookie); 138 } 139 140 static inline u16 nsl_get_position(struct nvdimm_drvdata *ndd, 141 struct nd_namespace_label *nd_label) 142 { 143 return __le16_to_cpu(nd_label->position); 144 } 145 146 static inline void nsl_set_position(struct nvdimm_drvdata *ndd, 147 struct nd_namespace_label *nd_label, 148 u16 position) 149 { 150 nd_label->position = __cpu_to_le16(position); 151 } 152 153 154 static inline u16 nsl_get_nlabel(struct nvdimm_drvdata *ndd, 155 struct nd_namespace_label *nd_label) 156 { 157 return __le16_to_cpu(nd_label->nlabel); 158 } 159 160 static inline void nsl_set_nlabel(struct nvdimm_drvdata *ndd, 161 struct nd_namespace_label *nd_label, 162 u16 nlabel) 163 { 164 nd_label->nlabel = __cpu_to_le16(nlabel); 165 } 166 167 static inline u64 nsl_get_lbasize(struct nvdimm_drvdata *ndd, 168 struct nd_namespace_label *nd_label) 169 { 170 return __le64_to_cpu(nd_label->lbasize); 171 } 172 173 static inline void nsl_set_lbasize(struct nvdimm_drvdata *ndd, 174 struct nd_namespace_label *nd_label, 175 u64 lbasize) 176 { 177 nd_label->lbasize = __cpu_to_le64(lbasize); 178 } 179 180 bool nsl_validate_blk_isetcookie(struct nvdimm_drvdata *ndd, 181 struct nd_namespace_label *nd_label, 182 u64 isetcookie); 183 184 struct nd_region_data { 185 int ns_count; 186 int ns_active; 187 unsigned int hints_shift; 188 void __iomem *flush_wpq[]; 189 }; 190 191 static inline void __iomem *ndrd_get_flush_wpq(struct nd_region_data *ndrd, 192 int dimm, int hint) 193 { 194 unsigned int num = 1 << ndrd->hints_shift; 195 unsigned int mask = num - 1; 196 197 return ndrd->flush_wpq[dimm * num + (hint & mask)]; 198 } 199 200 static inline void ndrd_set_flush_wpq(struct nd_region_data *ndrd, int dimm, 201 int hint, void __iomem *flush) 202 { 203 unsigned int num = 1 << ndrd->hints_shift; 204 unsigned int mask = num - 1; 205 206 ndrd->flush_wpq[dimm * num + (hint & mask)] = flush; 207 } 208 209 static inline struct nd_namespace_index *to_namespace_index( 210 struct nvdimm_drvdata *ndd, int i) 211 { 212 if (i < 0) 213 return NULL; 214 215 return ndd->data + sizeof_namespace_index(ndd) * i; 216 } 217 218 static inline struct nd_namespace_index *to_current_namespace_index( 219 struct nvdimm_drvdata *ndd) 220 { 221 return to_namespace_index(ndd, ndd->ns_current); 222 } 223 224 static inline struct nd_namespace_index *to_next_namespace_index( 225 struct nvdimm_drvdata *ndd) 226 { 227 return to_namespace_index(ndd, ndd->ns_next); 228 } 229 230 unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd); 231 232 #define namespace_label_has(ndd, field) \ 233 (offsetof(struct nd_namespace_label, field) \ 234 < sizeof_namespace_label(ndd)) 235 236 #define nd_dbg_dpa(r, d, res, fmt, arg...) \ 237 dev_dbg((r) ? &(r)->dev : (d)->dev, "%s: %.13s: %#llx @ %#llx " fmt, \ 238 (r) ? dev_name((d)->dev) : "", res ? res->name : "null", \ 239 (unsigned long long) (res ? resource_size(res) : 0), \ 240 (unsigned long long) (res ? res->start : 0), ##arg) 241 242 #define for_each_dpa_resource(ndd, res) \ 243 for (res = (ndd)->dpa.child; res; res = res->sibling) 244 245 #define for_each_dpa_resource_safe(ndd, res, next) \ 246 for (res = (ndd)->dpa.child, next = res ? res->sibling : NULL; \ 247 res; res = next, next = next ? next->sibling : NULL) 248 249 struct nd_percpu_lane { 250 int count; 251 spinlock_t lock; 252 }; 253 254 enum nd_label_flags { 255 ND_LABEL_REAP, 256 }; 257 struct nd_label_ent { 258 struct list_head list; 259 unsigned long flags; 260 struct nd_namespace_label *label; 261 }; 262 263 enum nd_mapping_lock_class { 264 ND_MAPPING_CLASS0, 265 ND_MAPPING_UUID_SCAN, 266 }; 267 268 struct nd_mapping { 269 struct nvdimm *nvdimm; 270 u64 start; 271 u64 size; 272 int position; 273 struct list_head labels; 274 struct mutex lock; 275 /* 276 * @ndd is for private use at region enable / disable time for 277 * get_ndd() + put_ndd(), all other nd_mapping to ndd 278 * conversions use to_ndd() which respects enabled state of the 279 * nvdimm. 280 */ 281 struct nvdimm_drvdata *ndd; 282 }; 283 284 struct nd_region { 285 struct device dev; 286 struct ida ns_ida; 287 struct ida btt_ida; 288 struct ida pfn_ida; 289 struct ida dax_ida; 290 unsigned long flags; 291 struct device *ns_seed; 292 struct device *btt_seed; 293 struct device *pfn_seed; 294 struct device *dax_seed; 295 unsigned long align; 296 u16 ndr_mappings; 297 u64 ndr_size; 298 u64 ndr_start; 299 int id, num_lanes, ro, numa_node, target_node; 300 void *provider_data; 301 struct kernfs_node *bb_state; 302 struct badblocks bb; 303 struct nd_interleave_set *nd_set; 304 struct nd_percpu_lane __percpu *lane; 305 int (*flush)(struct nd_region *nd_region, struct bio *bio); 306 struct nd_mapping mapping[]; 307 }; 308 309 struct nd_blk_region { 310 int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev); 311 int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa, 312 void *iobuf, u64 len, int rw); 313 void *blk_provider_data; 314 struct nd_region nd_region; 315 }; 316 317 /* 318 * Lookup next in the repeating sequence of 01, 10, and 11. 319 */ 320 static inline unsigned nd_inc_seq(unsigned seq) 321 { 322 static const unsigned next[] = { 0, 2, 3, 1 }; 323 324 return next[seq & 3]; 325 } 326 327 struct btt; 328 struct nd_btt { 329 struct device dev; 330 struct nd_namespace_common *ndns; 331 struct btt *btt; 332 unsigned long lbasize; 333 u64 size; 334 u8 *uuid; 335 int id; 336 int initial_offset; 337 u16 version_major; 338 u16 version_minor; 339 }; 340 341 enum nd_pfn_mode { 342 PFN_MODE_NONE, 343 PFN_MODE_RAM, 344 PFN_MODE_PMEM, 345 }; 346 347 struct nd_pfn { 348 int id; 349 u8 *uuid; 350 struct device dev; 351 unsigned long align; 352 unsigned long npfns; 353 enum nd_pfn_mode mode; 354 struct nd_pfn_sb *pfn_sb; 355 struct nd_namespace_common *ndns; 356 }; 357 358 struct nd_dax { 359 struct nd_pfn nd_pfn; 360 }; 361 362 static inline u32 nd_info_block_reserve(void) 363 { 364 return ALIGN(SZ_8K, PAGE_SIZE); 365 } 366 367 enum nd_async_mode { 368 ND_SYNC, 369 ND_ASYNC, 370 }; 371 372 int nd_integrity_init(struct gendisk *disk, unsigned long meta_size); 373 void wait_nvdimm_bus_probe_idle(struct device *dev); 374 void nd_device_register(struct device *dev); 375 void nd_device_unregister(struct device *dev, enum nd_async_mode mode); 376 void nd_device_notify(struct device *dev, enum nvdimm_event event); 377 int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf, 378 size_t len); 379 ssize_t nd_size_select_show(unsigned long current_size, 380 const unsigned long *supported, char *buf); 381 ssize_t nd_size_select_store(struct device *dev, const char *buf, 382 unsigned long *current_size, const unsigned long *supported); 383 int __init nvdimm_init(void); 384 int __init nd_region_init(void); 385 int __init nd_label_init(void); 386 void nvdimm_exit(void); 387 void nd_region_exit(void); 388 struct nvdimm; 389 extern const struct attribute_group nd_device_attribute_group; 390 extern const struct attribute_group nd_numa_attribute_group; 391 extern const struct attribute_group *nvdimm_bus_attribute_groups[]; 392 struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping); 393 int nvdimm_check_config_data(struct device *dev); 394 int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd); 395 int nvdimm_init_config_data(struct nvdimm_drvdata *ndd); 396 int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf, 397 size_t offset, size_t len); 398 int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset, 399 void *buf, size_t len); 400 long nvdimm_clear_poison(struct device *dev, phys_addr_t phys, 401 unsigned int len); 402 void nvdimm_set_labeling(struct device *dev); 403 void nvdimm_set_locked(struct device *dev); 404 void nvdimm_clear_locked(struct device *dev); 405 int nvdimm_security_setup_events(struct device *dev); 406 #if IS_ENABLED(CONFIG_NVDIMM_KEYS) 407 int nvdimm_security_unlock(struct device *dev); 408 #else 409 static inline int nvdimm_security_unlock(struct device *dev) 410 { 411 return 0; 412 } 413 #endif 414 struct nd_btt *to_nd_btt(struct device *dev); 415 416 struct nd_gen_sb { 417 char reserved[SZ_4K - 8]; 418 __le64 checksum; 419 }; 420 421 u64 nd_sb_checksum(struct nd_gen_sb *sb); 422 #if IS_ENABLED(CONFIG_BTT) 423 int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns); 424 bool is_nd_btt(struct device *dev); 425 struct device *nd_btt_create(struct nd_region *nd_region); 426 #else 427 static inline int nd_btt_probe(struct device *dev, 428 struct nd_namespace_common *ndns) 429 { 430 return -ENODEV; 431 } 432 433 static inline bool is_nd_btt(struct device *dev) 434 { 435 return false; 436 } 437 438 static inline struct device *nd_btt_create(struct nd_region *nd_region) 439 { 440 return NULL; 441 } 442 #endif 443 444 struct nd_pfn *to_nd_pfn(struct device *dev); 445 #if IS_ENABLED(CONFIG_NVDIMM_PFN) 446 447 #define MAX_NVDIMM_ALIGN 4 448 449 int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns); 450 bool is_nd_pfn(struct device *dev); 451 struct device *nd_pfn_create(struct nd_region *nd_region); 452 struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn, 453 struct nd_namespace_common *ndns); 454 int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig); 455 extern const struct attribute_group *nd_pfn_attribute_groups[]; 456 #else 457 static inline int nd_pfn_probe(struct device *dev, 458 struct nd_namespace_common *ndns) 459 { 460 return -ENODEV; 461 } 462 463 static inline bool is_nd_pfn(struct device *dev) 464 { 465 return false; 466 } 467 468 static inline struct device *nd_pfn_create(struct nd_region *nd_region) 469 { 470 return NULL; 471 } 472 473 static inline int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig) 474 { 475 return -ENODEV; 476 } 477 #endif 478 479 struct nd_dax *to_nd_dax(struct device *dev); 480 #if IS_ENABLED(CONFIG_NVDIMM_DAX) 481 int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns); 482 bool is_nd_dax(struct device *dev); 483 struct device *nd_dax_create(struct nd_region *nd_region); 484 #else 485 static inline int nd_dax_probe(struct device *dev, 486 struct nd_namespace_common *ndns) 487 { 488 return -ENODEV; 489 } 490 491 static inline bool is_nd_dax(struct device *dev) 492 { 493 return false; 494 } 495 496 static inline struct device *nd_dax_create(struct nd_region *nd_region) 497 { 498 return NULL; 499 } 500 #endif 501 502 int nd_region_to_nstype(struct nd_region *nd_region); 503 int nd_region_register_namespaces(struct nd_region *nd_region, int *err); 504 u64 nd_region_interleave_set_cookie(struct nd_region *nd_region, 505 struct nd_namespace_index *nsindex); 506 u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region); 507 void nvdimm_bus_lock(struct device *dev); 508 void nvdimm_bus_unlock(struct device *dev); 509 bool is_nvdimm_bus_locked(struct device *dev); 510 void nvdimm_check_and_set_ro(struct gendisk *disk); 511 void nvdimm_drvdata_release(struct kref *kref); 512 void put_ndd(struct nvdimm_drvdata *ndd); 513 int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd); 514 void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res); 515 struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd, 516 struct nd_label_id *label_id, resource_size_t start, 517 resource_size_t n); 518 resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns); 519 bool nvdimm_namespace_locked(struct nd_namespace_common *ndns); 520 struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev); 521 int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns); 522 int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt); 523 const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns, 524 char *name); 525 unsigned int pmem_sector_size(struct nd_namespace_common *ndns); 526 struct range; 527 void nvdimm_badblocks_populate(struct nd_region *nd_region, 528 struct badblocks *bb, const struct range *range); 529 int devm_namespace_enable(struct device *dev, struct nd_namespace_common *ndns, 530 resource_size_t size); 531 void devm_namespace_disable(struct device *dev, 532 struct nd_namespace_common *ndns); 533 #if IS_ENABLED(CONFIG_ND_CLAIM) 534 /* max struct page size independent of kernel config */ 535 #define MAX_STRUCT_PAGE_SIZE 64 536 int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap); 537 #else 538 static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, 539 struct dev_pagemap *pgmap) 540 { 541 return -ENXIO; 542 } 543 #endif 544 int nd_blk_region_init(struct nd_region *nd_region); 545 int nd_region_activate(struct nd_region *nd_region); 546 static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector, 547 unsigned int len) 548 { 549 if (bb->count) { 550 sector_t first_bad; 551 int num_bad; 552 553 return !!badblocks_check(bb, sector, len / 512, &first_bad, 554 &num_bad); 555 } 556 557 return false; 558 } 559 resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk); 560 const u8 *nd_dev_to_uuid(struct device *dev); 561 bool pmem_should_map_pages(struct device *dev); 562 #endif /* __ND_H__ */ 563