// SPDX-License-Identifier: GPL-2.0

#define pr_fmt(fmt)	"papr-scm: " fmt

#include <linux/of.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/ndctl.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/libnvdimm.h>
#include <linux/platform_device.h>

#include <asm/plpar_wrappers.h>

#define BIND_ANY_ADDR (~0ul)

#define PAPR_SCM_DIMM_CMD_MASK \
	((1ul << ND_CMD_GET_CONFIG_SIZE) | \
	 (1ul << ND_CMD_GET_CONFIG_DATA) | \
	 (1ul << ND_CMD_SET_CONFIG_DATA))

struct papr_scm_priv {
	struct platform_device *pdev;
	struct device_node *dn;
	uint32_t drc_index;
	uint64_t blocks;
	uint64_t block_size;
	int metadata_size;

	uint64_t bound_addr;

	struct nvdimm_bus_descriptor bus_desc;
	struct nvdimm_bus *bus;
	struct nvdimm *nvdimm;
	struct resource res;
	struct nd_region *region;
	struct nd_interleave_set nd_set;
};

static int drc_pmem_bind(struct papr_scm_priv *p)
{
	unsigned long ret[PLPAR_HCALL_BUFSIZE];
	uint64_t token, saved = 0;
	int64_t rc;

	/*
	 * When the hypervisor cannot map all the requested memory in a single
	 * hcall it returns H_BUSY and we call again with the token until
	 * we get H_SUCCESS. Aborting the retry loop before getting H_SUCCESS
	 * leaves the system in an undefined state, so we wait.
	 */
	token = 0;

	do {
		rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0,
				p->blocks, BIND_ANY_ADDR, token);
		token = ret[0];
		/*
		 * Remember the bound address from the first response;
		 * continuation calls need not return it again.
		 */
		if (!saved)
			saved = ret[1];
		cond_resched();
	} while (rc == H_BUSY);

	if (rc) {
		dev_err(&p->pdev->dev, "bind err: %lld\n", rc);
		return -ENXIO;
	}

	p->bound_addr = saved;

	dev_dbg(&p->pdev->dev, "bound drc %x to %pR\n", p->drc_index, &p->res);

	return 0;
}

static int drc_pmem_unbind(struct papr_scm_priv *p)
{
	unsigned long ret[PLPAR_HCALL_BUFSIZE];
	uint64_t token = 0;
	int64_t rc;

	/* NB: unbind has the same retry requirements as drc_pmem_bind() */
	do {
		rc = plpar_hcall(H_SCM_UNBIND_MEM, ret, p->drc_index,
				p->bound_addr, p->blocks, token);
		token = ret[0];
		cond_resched();
	} while (rc == H_BUSY);

	if (rc)
		dev_err(&p->pdev->dev, "unbind error: %lld\n", rc);

	return !!rc;
}

static int papr_scm_meta_get(struct papr_scm_priv *p,
			     struct nd_cmd_get_config_data_hdr *hdr)
{
	unsigned long data[PLPAR_HCALL_BUFSIZE];
	int64_t ret;

	/* the hypervisor interface transfers a single byte per call */
	if (hdr->in_offset >= p->metadata_size || hdr->in_length != 1)
		return -EINVAL;

	ret = plpar_hcall(H_SCM_READ_METADATA, data, p->drc_index,
			hdr->in_offset, 1);

	if (ret == H_PARAMETER) /* bad DRC index */
		return -ENODEV;
	if (ret)
		return -EINVAL; /* other invalid parameter */

	hdr->out_buf[0] = data[0] & 0xff;

	return 0;
}

static int papr_scm_meta_set(struct papr_scm_priv *p,
			     struct nd_cmd_set_config_hdr *hdr)
{
	int64_t ret;

	if (hdr->in_offset >= p->metadata_size || hdr->in_length != 1)
		return -EINVAL;

	ret = plpar_hcall_norets(H_SCM_WRITE_METADATA,
			p->drc_index, hdr->in_offset, hdr->in_buf[0], 1);

	if (ret == H_PARAMETER) /* bad DRC index */
		return -ENODEV;
	if (ret)
		return -EINVAL; /* other invalid parameter */

	return 0;
}
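
/*
 * The helpers above transfer exactly one byte of config data per hcall,
 * which is why papr_scm_ndctl() below advertises max_xfer = 1: userspace
 * has to iterate over the config area itself. The sketch below is purely
 * illustrative (it is not part of this driver, and the /dev/nmem0 path is
 * hypothetical); it issues the standard libnvdimm ioctl against a DIMM's
 * character device, e.g. fd = open("/dev/nmem0", O_RDWR):
 *
 *	int read_config_byte(int fd, __u32 offset, __u8 *out)
 *	{
 *		// header plus a one-byte payload for the flexible out_buf[]
 *		unsigned char buf[sizeof(struct nd_cmd_get_config_data_hdr) + 1] = { 0 };
 *		struct nd_cmd_get_config_data_hdr *hdr = (void *)buf;
 *
 *		hdr->in_offset = offset;
 *		hdr->in_length = 1;	// must match the 1-byte max_xfer
 *		if (ioctl(fd, ND_IOCTL_GET_CONFIG_DATA, hdr) < 0)
 *			return -1;
 *		*out = hdr->out_buf[0];
 *		return 0;
 *	}
 */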

static int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len, int *cmd_rc)
{
	struct nd_cmd_get_config_size *get_size_hdr;
	struct papr_scm_priv *p;

	/* Only dimm-specific calls are supported atm */
	if (!nvdimm)
		return -EINVAL;

	p = nvdimm_provider_data(nvdimm);

	switch (cmd) {
	case ND_CMD_GET_CONFIG_SIZE:
		get_size_hdr = buf;

		get_size_hdr->status = 0;
		get_size_hdr->max_xfer = 1; /* metadata moves one byte per hcall */
		get_size_hdr->config_size = p->metadata_size;
		*cmd_rc = 0;
		break;

	case ND_CMD_GET_CONFIG_DATA:
		*cmd_rc = papr_scm_meta_get(p, buf);
		break;

	case ND_CMD_SET_CONFIG_DATA:
		*cmd_rc = papr_scm_meta_set(p, buf);
		break;

	default:
		return -EINVAL;
	}

	dev_dbg(&p->pdev->dev, "returned with cmd_rc = %d\n", *cmd_rc);

	return 0;
}

static const struct attribute_group *region_attr_groups[] = {
	&nd_region_attribute_group,
	&nd_device_attribute_group,
	&nd_mapping_attribute_group,
	&nd_numa_attribute_group,
	NULL,
};

static const struct attribute_group *bus_attr_groups[] = {
	&nvdimm_bus_attribute_group,
	NULL,
};

static const struct attribute_group *papr_scm_dimm_groups[] = {
	&nvdimm_attribute_group,
	&nd_device_attribute_group,
	NULL,
};

static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
{
	struct device *dev = &p->pdev->dev;
	struct nd_mapping_desc mapping;
	struct nd_region_desc ndr_desc;
	unsigned long dimm_flags;

	p->bus_desc.ndctl = papr_scm_ndctl;
	p->bus_desc.module = THIS_MODULE;
	p->bus_desc.of_node = p->pdev->dev.of_node;
	p->bus_desc.attr_groups = bus_attr_groups;
	p->bus_desc.provider_name = kstrdup(p->pdev->name, GFP_KERNEL);

	if (!p->bus_desc.provider_name)
		return -ENOMEM;

	p->bus = nvdimm_bus_register(NULL, &p->bus_desc);
	if (!p->bus) {
		dev_err(dev, "Error creating nvdimm bus %pOF\n", p->dn);
		kfree(p->bus_desc.provider_name);
		return -ENXIO;
	}

	dimm_flags = 0;
	set_bit(NDD_ALIASING, &dimm_flags);

	p->nvdimm = nvdimm_create(p->bus, p, papr_scm_dimm_groups,
				dimm_flags, PAPR_SCM_DIMM_CMD_MASK, 0, NULL);
	if (!p->nvdimm) {
		dev_err(dev, "Error creating DIMM object for %pOF\n", p->dn);
		goto err;
	}

	if (nvdimm_bus_check_dimm_count(p->bus, 1))
		goto err;

	/* now add the region */

	memset(&mapping, 0, sizeof(mapping));
	mapping.nvdimm = p->nvdimm;
	mapping.start = 0;
	mapping.size = p->blocks * p->block_size; /* XXX: potential overflow? */
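
	/*
	 * A sketch, not part of the original code: the multiplication
	 * flagged above could be made overflow-safe with the helpers from
	 * <linux/overflow.h>, along these lines:
	 *
	 *	u64 size;
	 *
	 *	if (check_mul_overflow(p->blocks, p->block_size, &size))
	 *		return -E2BIG;
	 *	mapping.size = size;
	 */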

	memset(&ndr_desc, 0, sizeof(ndr_desc));
	ndr_desc.attr_groups = region_attr_groups;
	ndr_desc.numa_node = dev_to_node(&p->pdev->dev);
	ndr_desc.target_node = ndr_desc.numa_node;
	ndr_desc.res = &p->res;
	ndr_desc.of_node = p->dn;
	ndr_desc.provider_data = p;
	ndr_desc.mapping = &mapping;
	ndr_desc.num_mappings = 1;
	ndr_desc.nd_set = &p->nd_set;
	set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);

	p->region = nvdimm_pmem_region_create(p->bus, &ndr_desc);
	if (!p->region) {
		dev_err(dev, "Error registering region %pR from %pOF\n",
				ndr_desc.res, p->dn);
		goto err;
	}

	return 0;

err:
	nvdimm_bus_unregister(p->bus);
	kfree(p->bus_desc.provider_name);
	return -ENXIO;
}

static int papr_scm_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	u32 drc_index, metadata_size = 0;
	u64 blocks, block_size;
	struct papr_scm_priv *p;
	const char *uuid_str;
	u64 uuid[2];
	int rc;

	/* check we have all the required DT properties */
	if (of_property_read_u32(dn, "ibm,my-drc-index", &drc_index)) {
		dev_err(&pdev->dev, "%pOF: missing drc-index!\n", dn);
		return -ENODEV;
	}

	if (of_property_read_u64(dn, "ibm,block-size", &block_size)) {
		dev_err(&pdev->dev, "%pOF: missing block-size!\n", dn);
		return -ENODEV;
	}

	if (of_property_read_u64(dn, "ibm,number-of-blocks", &blocks)) {
		dev_err(&pdev->dev, "%pOF: missing number-of-blocks!\n", dn);
		return -ENODEV;
	}

	if (of_property_read_string(dn, "ibm,unit-guid", &uuid_str)) {
		dev_err(&pdev->dev, "%pOF: missing unit-guid!\n", dn);
		return -ENODEV;
	}

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	/* optional DT property; metadata_size stays zero when it is absent */
	of_property_read_u32(dn, "ibm,metadata-size", &metadata_size);

	p->dn = dn;
	p->drc_index = drc_index;
	p->block_size = block_size;
	p->blocks = blocks;

	/*
	 * The interleave set cookies only need to be unique across SCM
	 * devices, so derive them from the unit's GUID.
	 */
	uuid_parse(uuid_str, (uuid_t *) uuid);
	p->nd_set.cookie1 = uuid[0];
	p->nd_set.cookie2 = uuid[1];

	/* might be zero */
	p->metadata_size = metadata_size;
	p->pdev = pdev;

	/* request the hypervisor to bind this region to somewhere in memory */
	rc = drc_pmem_bind(p);
	if (rc)
		goto err;

	/* setup the resource for the newly bound range */
	p->res.start = p->bound_addr;
	p->res.end = p->bound_addr + p->blocks * p->block_size - 1;
	p->res.name = pdev->name;
	p->res.flags = IORESOURCE_MEM;

	rc = papr_scm_nvdimm_init(p);
	if (rc)
		goto err2;

	platform_set_drvdata(pdev, p);

	return 0;

err2:
	drc_pmem_unbind(p);
err:
	kfree(p);
	return rc;
}

static int papr_scm_remove(struct platform_device *pdev)
{
	struct papr_scm_priv *p = platform_get_drvdata(pdev);

	nvdimm_bus_unregister(p->bus);
	drc_pmem_unbind(p);
	kfree(p->bus_desc.provider_name);
	kfree(p);

	return 0;
}

static const struct of_device_id papr_scm_match[] = {
	{ .compatible = "ibm,pmemory" },
	{ },
};
MODULE_DEVICE_TABLE(of, papr_scm_match);

static struct platform_driver papr_scm_driver = {
	.probe = papr_scm_probe,
	.remove = papr_scm_remove,
	.driver = {
		.name = "papr_scm",
		.of_match_table = papr_scm_match,
	},
};
module_platform_driver(papr_scm_driver);

MODULE_DESCRIPTION("PAPR Storage Class Memory (SCM) nvdimm driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");
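
/*
 * For reference, an illustrative device tree node this driver would bind
 * against. All values here are made up; only the compatible string and the
 * property names (those read in papr_scm_probe()) come from the driver
 * above, and the 64-bit properties are assumed to be encoded as two cells:
 *
 *	pmem@44100001 {
 *		compatible = "ibm,pmemory";
 *		ibm,my-drc-index = <0x44100001>;
 *		ibm,block-size = <0x0 0x1000000>;
 *		ibm,number-of-blocks = <0x0 0x400>;
 *		ibm,unit-guid = "f11b9f98-a64c-4c3f-8b37-1c2a5f43d1a0";
 *		ibm,metadata-size = <0x20000>;	// optional
 *	};
 */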