// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. */
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <cxlmem.h>
#include <cxl.h>
#include "core.h"

/**
 * DOC: cxl pmem
 *
 * The core CXL PMEM infrastructure supports persistent memory
 * provisioning and serves as a bridge to the LIBNVDIMM subsystem. A CXL
 * 'bridge' device is added at the root of a CXL device topology if
 * platform firmware advertises at least one persistent memory capable
 * CXL window. That root-level bridge corresponds to a LIBNVDIMM 'bus'
 * device. Then for each cxl_memdev in the CXL device topology a bridge
 * device is added to host a LIBNVDIMM dimm object. When these bridges
 * are registered native LIBNVDIMM uapis are translated to CXL
 * operations, for example, namespace label access commands.
 */

static DEFINE_IDA(cxl_nvdimm_bridge_ida);

static void cxl_nvdimm_bridge_release(struct device *dev)
{
	struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);

	ida_free(&cxl_nvdimm_bridge_ida, cxl_nvb->id);
	kfree(cxl_nvb);
}

static const struct attribute_group *cxl_nvdimm_bridge_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};

const struct device_type cxl_nvdimm_bridge_type = {
	.name = "cxl_nvdimm_bridge",
	.release = cxl_nvdimm_bridge_release,
	.groups = cxl_nvdimm_bridge_attribute_groups,
};

struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev)
{
	if (dev_WARN_ONCE(dev, dev->type != &cxl_nvdimm_bridge_type,
			  "not a cxl_nvdimm_bridge device\n"))
		return NULL;
	return container_of(dev, struct cxl_nvdimm_bridge, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm_bridge, CXL);

bool is_cxl_nvdimm_bridge(struct device *dev)
{
	return dev->type == &cxl_nvdimm_bridge_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_nvdimm_bridge, CXL);

static int match_nvdimm_bridge(struct device *dev, void *data)
{
	return is_cxl_nvdimm_bridge(dev);
}

struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd)
{
	struct cxl_port *port = find_cxl_root(&cxl_nvd->dev);
	struct device *dev;

	if (!port)
		return NULL;

	dev = device_find_child(&port->dev, NULL, match_nvdimm_bridge);
	put_device(&port->dev);

	if (!dev)
		return NULL;

	return to_cxl_nvdimm_bridge(dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_find_nvdimm_bridge, CXL);
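
/*
 * Usage sketch (illustrative only, not wired into this file's build; the
 * function name is hypothetical): cxl_find_nvdimm_bridge() returns the
 * bridge with a reference held on its device, so a consumer such as the
 * cxl_nvdimm driver is expected to sample bridge state under the device
 * lock and drop the reference with put_device() when done.
 */
static int __maybe_unused example_bridge_lookup(struct cxl_nvdimm *cxl_nvd)
{
	struct cxl_nvdimm_bridge *cxl_nvb;
	int rc = 0;

	cxl_nvb = cxl_find_nvdimm_bridge(cxl_nvd);
	if (!cxl_nvb)
		return -ENXIO;

	/* serialize against unregister_nvb() marking the bridge dead */
	cxl_device_lock(&cxl_nvb->dev);
	if (cxl_nvb->state == CXL_NVB_DEAD)
		rc = -ENXIO;
	cxl_device_unlock(&cxl_nvb->dev);

	/* drop the reference taken by cxl_find_nvdimm_bridge() */
	put_device(&cxl_nvb->dev);
	return rc;
}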

static struct cxl_nvdimm_bridge *cxl_nvdimm_bridge_alloc(struct cxl_port *port)
{
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct device *dev;
	int rc;

	cxl_nvb = kzalloc(sizeof(*cxl_nvb), GFP_KERNEL);
	if (!cxl_nvb)
		return ERR_PTR(-ENOMEM);

	rc = ida_alloc(&cxl_nvdimm_bridge_ida, GFP_KERNEL);
	if (rc < 0)
		goto err;
	cxl_nvb->id = rc;

	dev = &cxl_nvb->dev;
	cxl_nvb->port = port;
	cxl_nvb->state = CXL_NVB_NEW;
	device_initialize(dev);
	device_set_pm_not_required(dev);
	dev->parent = &port->dev;
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_nvdimm_bridge_type;

	return cxl_nvb;

err:
	kfree(cxl_nvb);
	return ERR_PTR(rc);
}

static void unregister_nvb(void *_cxl_nvb)
{
	struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb;
	bool flush;

	/*
	 * If the bridge was ever activated then there might be in-flight state
	 * work to flush. Once the state has been changed to 'dead' then no new
	 * work can be queued by user-triggered bind.
	 */
	cxl_device_lock(&cxl_nvb->dev);
	flush = cxl_nvb->state != CXL_NVB_NEW;
	cxl_nvb->state = CXL_NVB_DEAD;
	cxl_device_unlock(&cxl_nvb->dev);

	/*
	 * Even though the device core will trigger device_release_driver()
	 * before the unregister, it does not know about the fact that
	 * cxl_nvdimm_bridge_driver defers ->remove() work. So, do the driver
	 * release now and flush it before tearing down the nvdimm device
	 * hierarchy.
	 */
	device_release_driver(&cxl_nvb->dev);
	if (flush)
		flush_work(&cxl_nvb->state_work);
	device_unregister(&cxl_nvb->dev);
}

/**
 * devm_cxl_add_nvdimm_bridge() - add the root of a LIBNVDIMM topology
 * @host: platform firmware root device
 * @port: CXL port at the root of a CXL topology
 *
 * Return: bridge device that can host cxl_nvdimm objects
 */
struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
						     struct cxl_port *port)
{
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct device *dev;
	int rc;

	if (!IS_ENABLED(CONFIG_CXL_PMEM))
		return ERR_PTR(-ENXIO);

	cxl_nvb = cxl_nvdimm_bridge_alloc(port);
	if (IS_ERR(cxl_nvb))
		return cxl_nvb;

	dev = &cxl_nvb->dev;
	rc = dev_set_name(dev, "nvdimm-bridge%d", cxl_nvb->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb);
	if (rc)
		return ERR_PTR(rc);

	return cxl_nvb;

err:
	put_device(dev);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm_bridge, CXL);

static void cxl_nvdimm_release(struct device *dev)
{
	struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);

	kfree(cxl_nvd);
}

static const struct attribute_group *cxl_nvdimm_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};

const struct device_type cxl_nvdimm_type = {
	.name = "cxl_nvdimm",
	.release = cxl_nvdimm_release,
	.groups = cxl_nvdimm_attribute_groups,
};

bool is_cxl_nvdimm(struct device *dev)
{
	return dev->type == &cxl_nvdimm_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_nvdimm, CXL);

struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_cxl_nvdimm(dev),
			  "not a cxl_nvdimm device\n"))
		return NULL;
	return container_of(dev, struct cxl_nvdimm, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm, CXL);

static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd)
{
	struct cxl_nvdimm *cxl_nvd;
	struct device *dev;

	cxl_nvd = kzalloc(sizeof(*cxl_nvd), GFP_KERNEL);
	if (!cxl_nvd)
		return ERR_PTR(-ENOMEM);

	dev = &cxl_nvd->dev;
	cxl_nvd->cxlmd = cxlmd;
	device_initialize(dev);
	device_set_pm_not_required(dev);
	dev->parent = &cxlmd->dev;
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_nvdimm_type;

	return cxl_nvd;
}

static void cxl_nvd_unregister(void *dev)
{
	device_unregister(dev);
}
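
/*
 * Usage sketch (illustrative only, hypothetical caller): the platform
 * root driver registers one bridge per persistent-memory-capable root
 * via devm_cxl_add_nvdimm_bridge(); teardown is devm-managed through
 * unregister_nvb() above, so no explicit cleanup is needed here.
 */
static int __maybe_unused example_add_bridge(struct device *host,
					     struct cxl_port *root_port)
{
	struct cxl_nvdimm_bridge *cxl_nvb;

	cxl_nvb = devm_cxl_add_nvdimm_bridge(host, root_port);
	if (IS_ERR(cxl_nvb))
		return PTR_ERR(cxl_nvb);

	/* the bridge device is now live; the cxl_pmem driver may bind */
	return 0;
}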

/**
 * devm_cxl_add_nvdimm() - add a bridge between a cxl_memdev and an nvdimm
 * @host: same host as @cxlmd
 * @cxlmd: cxl_memdev instance that will perform LIBNVDIMM operations
 *
 * Return: 0 on success, negative error code on failure.
 */
int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd)
{
	struct cxl_nvdimm *cxl_nvd;
	struct device *dev;
	int rc;

	cxl_nvd = cxl_nvdimm_alloc(cxlmd);
	if (IS_ERR(cxl_nvd))
		return PTR_ERR(cxl_nvd);

	dev = &cxl_nvd->dev;
	rc = dev_set_name(dev, "pmem%d", cxlmd->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	dev_dbg(host, "%s: register %s\n", dev_name(dev->parent),
		dev_name(dev));

	return devm_add_action_or_reset(host, cxl_nvd_unregister, dev);

err:
	put_device(dev);
	return rc;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm, CXL);
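
/*
 * Usage sketch (illustrative only, hypothetical caller): a memdev driver
 * that discovers persistent capacity registers a cxl_nvdimm so that the
 * cxl_pmem driver can attach a LIBNVDIMM dimm object to it; devm unwinds
 * the registration when @host is unbound.
 */
static int __maybe_unused example_add_nvdimm(struct device *host,
					     struct cxl_memdev *cxlmd)
{
	int rc;

	rc = devm_cxl_add_nvdimm(host, cxlmd);
	if (rc)
		dev_dbg(host, "failed to add cxl_nvdimm: %d\n", rc);

	return rc;
}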