// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/memregion.h>
#include <linux/genalloc.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/idr.h>
#include <cxlmem.h>
#include <cxl.h>
#include "core.h"

/**
 * DOC: cxl core region
 *
 * CXL Regions represent mapped memory capacity in system physical address
 * space. Whereas the CXL Root Decoders identify the bounds of potential CXL
 * Memory ranges, Regions represent the active mapped capacity by the HDM
 * Decoder Capability structures throughout the Host Bridges, Switches, and
 * Endpoints in the topology.
 *
 * Region configuration has ordering constraints. UUID may be set at any time
 * but is only visible for persistent regions.
 * 1. Interleave granularity
 * 2. Interleave size
 */

/*
 * All changes to the interleave configuration occur with this lock held
 * for write.
 */
static DECLARE_RWSEM(cxl_region_rwsem);

static struct cxl_region *to_cxl_region(struct device *dev);

/*
 * Emit the region's uuid. Readers take cxl_region_rwsem so the value is a
 * consistent snapshot with respect to a concurrent uuid_store().
 */
static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	rc = sysfs_emit(buf, "%pUb\n", &p->uuid);
	up_read(&cxl_region_rwsem);

	return rc;
}

/*
 * bus_for_each_dev() callback: return -EBUSY if any other region on the CXL
 * bus already carries the candidate uuid passed in @data.
 */
static int is_dup(struct device *match, void *data)
{
	struct cxl_region_params *p;
	struct cxl_region *cxlr;
	uuid_t *uuid = data;

	if (!is_cxl_region(match))
		return 0;

	/* caller (uuid_store) holds the rwsem for write across the scan */
	lockdep_assert_held(&cxl_region_rwsem);
	cxlr = to_cxl_region(match);
	p = &cxlr->params;

	if (uuid_equal(&p->uuid, uuid)) {
		dev_dbg(match, "already has uuid: %pUb\n", uuid);
		return -EBUSY;
	}

	return 0;
}

/*
 * Set the region uuid. The value must be a full UUID string (with trailing
 * newline), non-null, unique across all regions, and can only be changed
 * before region provisioning has started.
 */
static ssize_t uuid_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	uuid_t temp;
	ssize_t rc;

	/* expect exactly "%pUb\n": UUID_STRING_LEN chars plus the newline */
	if (len != UUID_STRING_LEN + 1)
		return -EINVAL;

	rc = uuid_parse(buf, &temp);
	if (rc)
		return rc;

	/* the null uuid is reserved as the "not yet set" sentinel */
	if (uuid_is_null(&temp))
		return -EINVAL;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;

	/* re-writing the current value is a nop, not an error */
	if (uuid_equal(&p->uuid, &temp))
		goto out;

	/* the uuid is frozen once the region configuration is active */
	rc = -EBUSY;
	if (p->state >= CXL_CONFIG_ACTIVE)
		goto out;

	/* enforce uuid uniqueness across every region on the bus */
	rc = bus_for_each_dev(&cxl_bus_type, NULL, &temp, is_dup);
	if (rc < 0)
		goto out;

	uuid_copy(&p->uuid, &temp);
out:
	up_write(&cxl_region_rwsem);

	if (rc)
		return rc;
	return len;
}
static DEVICE_ATTR_RW(uuid);

/*
 * Attribute visibility filter: the uuid attribute only applies to
 * persistent (pmem) regions, hide it otherwise.
 */
static umode_t cxl_region_visible(struct kobject *kobj, struct attribute *a,
				  int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cxl_region *cxlr = to_cxl_region(dev);

	if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_DECODER_PMEM)
		return 0;
	return a->mode;
}

static ssize_t interleave_ways_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	rc = sysfs_emit(buf, "%d\n", p->interleave_ways);
	up_read(&cxl_region_rwsem);

	return rc;
}

/*
 * Set the region interleave ways. The value must be encodable per the CXL
 * specification and compatible with the parent root decoder's interleave,
 * and can only be changed before the interleave configuration is active.
 */
static ssize_t interleave_ways_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	int rc, val;
	u8 iw;

	rc = kstrtoint(buf, 0, &val);
	if (rc)
		return rc;

	/*
	 * ways_to_cxl() is used purely to validate that @val is a
	 * spec-encodable ways count; the encoded value @iw is not needed
	 * here.
	 */
	rc = ways_to_cxl(val, &iw);
	if (rc)
		return rc;

	/*
	 * Even for x3, x9, and x12 interleaves the region interleave must be a
	 * power of 2 multiple of the host bridge interleave.
	 */
	if (!is_power_of_2(val / cxld->interleave_ways) ||
	    (val % cxld->interleave_ways)) {
		dev_dbg(&cxlr->dev, "invalid interleave: %d\n", val);
		return -EINVAL;
	}

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	p->interleave_ways = val;
out:
	up_write(&cxl_region_rwsem);
	if (rc)
		return rc;
	return len;
}
static DEVICE_ATTR_RW(interleave_ways);

static ssize_t interleave_granularity_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	rc = sysfs_emit(buf, "%d\n", p->interleave_granularity);
	up_read(&cxl_region_rwsem);

	return rc;
}

/*
 * Set the region interleave granularity. The value must be spec-encodable,
 * no smaller than the root decoder's granularity, and can only be changed
 * before the interleave configuration is active.
 */
static ssize_t interleave_granularity_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	int rc, val;
	u16 ig;

	rc = kstrtoint(buf, 0, &val);
	if (rc)
		return rc;

	/* validation only, the encoded value @ig is unused here */
	rc = granularity_to_cxl(val, &ig);
	if (rc)
		return rc;

	/*
	 * Disallow region granularity less than root granularity to
	 * simplify the implementation. Otherwise, region's with a
	 * granularity less than the root interleave result in needing
	 * multiple endpoints to support a single slot in the
	 * interleave.
	 */
	if (val < cxld->interleave_granularity)
		return -EINVAL;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	p->interleave_granularity = val;
out:
	up_write(&cxl_region_rwsem);
	if (rc)
		return rc;
	return len;
}
static DEVICE_ATTR_RW(interleave_granularity);

static struct attribute *cxl_region_attrs[] = {
	&dev_attr_uuid.attr,
	&dev_attr_interleave_ways.attr,
	&dev_attr_interleave_granularity.attr,
	NULL,
};

static const struct attribute_group cxl_region_group = {
	.attrs = cxl_region_attrs,
	.is_visible = cxl_region_visible,
};

static const struct attribute_group *region_groups[] = {
	&cxl_base_attribute_group,
	&cxl_region_group,
	NULL,
};

/* device release: return the region id to the memregion allocator */
static void cxl_region_release(struct device *dev)
{
	struct cxl_region *cxlr = to_cxl_region(dev);

	memregion_free(cxlr->id);
	kfree(cxlr);
}

static const struct device_type cxl_region_type = {
	.name = "cxl_region",
	.release = cxl_region_release,
	.groups = region_groups
};

bool is_cxl_region(struct device *dev)
{
	return dev->type == &cxl_region_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_region, CXL);

static struct cxl_region *to_cxl_region(struct device *dev)
{
	if (dev_WARN_ONCE(dev, dev->type != &cxl_region_type,
			  "not a cxl_region device\n"))
		return NULL;

	return container_of(dev, struct cxl_region, dev);
}

static void unregister_region(void *dev)
{
	device_unregister(dev);
}

static struct lock_class_key cxl_region_key;

/*
 * Allocate and initialize (but do not register) a region device parented to
 * @cxlrd. Takes ownership of @id: on allocation failure the id is returned
 * to the memregion allocator here, otherwise it is released by
 * cxl_region_release() when the device is dropped.
 */
static struct cxl_region *cxl_region_alloc(struct cxl_root_decoder *cxlrd, int id)
{
	struct cxl_region *cxlr;
	struct device *dev;

	cxlr = kzalloc(sizeof(*cxlr), GFP_KERNEL);
	if (!cxlr) {
		memregion_free(id);
		return ERR_PTR(-ENOMEM);
	}

	dev = &cxlr->dev;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_region_key);
	dev->parent = &cxlrd->cxlsd.cxld.dev;
	device_set_pm_not_required(dev);
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_region_type;
	cxlr->id = id;

	return cxlr;
}

/**
 * devm_cxl_add_region - Adds a region to a decoder
 * @cxlrd: root decoder
 * @id: memregion id to create; ownership is transferred, the id is released
 *      via memregion_free() on any failure
 * @mode: mode for the endpoint decoders of this region
 * @type: select whether this is an expander or accelerator (type-2 or type-3)
 *
 * This is the second step of region initialization. Regions exist within an
 * address space which is mapped by a @cxlrd.
 *
 * Return: a new region on success, else an ERR_PTR(). The region will be
 * named "regionZ" where Z is the unique region number, and is unregistered
 * automatically when the root port is unbound.
 */
static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
					      int id,
					      enum cxl_decoder_mode mode,
					      enum cxl_decoder_type type)
{
	struct cxl_port *port = to_cxl_port(cxlrd->cxlsd.cxld.dev.parent);
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct cxl_region_params *p;
	struct cxl_region *cxlr;
	struct device *dev;
	int rc;

	cxlr = cxl_region_alloc(cxlrd, id);
	if (IS_ERR(cxlr))
		return cxlr;
	p = &cxlr->params;
	cxlr->mode = mode;
	cxlr->type = type;
	/* default the region granularity to the root decoder's granularity */
	p->interleave_granularity = cxld->interleave_granularity;

	dev = &cxlr->dev;
	rc = dev_set_name(dev, "region%d", id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	/* tear the region down automatically when the root port goes away */
	rc = devm_add_action_or_reset(port->uport, unregister_region, cxlr);
	if (rc)
		return ERR_PTR(rc);

	dev_dbg(port->uport, "%s: created %s\n",
		dev_name(&cxlrd->cxlsd.cxld.dev), dev_name(dev));
	return cxlr;

err:
	/* final put: cxl_region_release() frees the region and its id */
	put_device(dev);
	return ERR_PTR(rc);
}

/* advertise the next region name that create_pmem_region will accept */
static ssize_t create_pmem_region_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);

	return sysfs_emit(buf, "region%u\n", atomic_read(&cxlrd->region_id));
}

/*
 * Create a new pmem region under this root decoder. Only the currently
 * advertised id (see create_pmem_region_show()) is accepted; the cmpxchg
 * atomically swaps in a freshly allocated id for the next creation, and a
 * losing racer gets -EBUSY.
 */
static ssize_t create_pmem_region_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
	struct cxl_region *cxlr;
	int id, rc;

	rc = sscanf(buf, "region%d\n", &id);
	if (rc != 1)
		return -EINVAL;

	/* speculatively allocate the id that will be advertised next */
	rc = memregion_alloc(GFP_KERNEL);
	if (rc < 0)
		return rc;

	if (atomic_cmpxchg(&cxlrd->region_id, id, rc) != id) {
		/* lost the race (or stale id was written); undo allocation */
		memregion_free(rc);
		return -EBUSY;
	}

	cxlr = devm_cxl_add_region(cxlrd, id, CXL_DECODER_PMEM,
				   CXL_DECODER_EXPANDER);
	if (IS_ERR(cxlr))
		return PTR_ERR(cxlr);

	return len;
}
DEVICE_ATTR_RW(create_pmem_region);

/*
 * Lookup a region child of @cxlrd by name. On success a reference on the
 * region device is held (from device_find_child_by_name()) that the caller
 * must drop with put_device().
 */
static struct cxl_region *
cxl_find_region_by_name(struct cxl_root_decoder *cxlrd, const char *name)
{
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct device *region_dev;

	region_dev = device_find_child_by_name(&cxld->dev, name);
	if (!region_dev)
		return ERR_PTR(-ENODEV);

	return to_cxl_region(region_dev);
}

static ssize_t delete_region_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
	struct cxl_port *port = to_cxl_port(dev->parent);
	struct cxl_region *cxlr;

	cxlr = cxl_find_region_by_name(cxlrd, buf);
	if (IS_ERR(cxlr))
		return PTR_ERR(cxlr);

	/* run the devm teardown (device_unregister) for this region now */
	devm_release_action(port->uport, unregister_region, cxlr);
	/* drop the reference taken by cxl_find_region_by_name() */
	put_device(&cxlr->dev);

	return len;
}
DEVICE_ATTR_WO(delete_region);