// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/mfd/mfd-core.c
 *
 * core MFD support
 * Copyright (c) 2006 Ian Molton
 * Copyright (c) 2007,2008 Dmitry Baryshkov
 */

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/property.h>
#include <linux/mfd/core.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>

static struct device_type mfd_dev_type = {
        .name = "mfd_device",
};

int mfd_cell_enable(struct platform_device *pdev)
{
        const struct mfd_cell *cell = mfd_get_cell(pdev);
        int err = 0;

        /* only call enable hook if the cell wasn't previously enabled */
        if (atomic_inc_return(cell->usage_count) == 1)
                err = cell->enable(pdev);

        /* if the enable hook failed, decrement counter to allow retries */
        if (err)
                atomic_dec(cell->usage_count);

        return err;
}
EXPORT_SYMBOL(mfd_cell_enable);

int mfd_cell_disable(struct platform_device *pdev)
{
        const struct mfd_cell *cell = mfd_get_cell(pdev);
        int err = 0;

        /* only disable if no other clients are using it */
        if (atomic_dec_return(cell->usage_count) == 0)
                err = cell->disable(pdev);

        /* if the disable hook failed, increment to allow retries */
        if (err)
                atomic_inc(cell->usage_count);

        /* sanity check; did someone call disable too many times? */
        WARN_ON(atomic_read(cell->usage_count) < 0);

        return err;
}
EXPORT_SYMBOL(mfd_cell_disable);
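
/*
 * Illustrative sketch, not part of this file: a child driver built on a
 * shared cell typically pairs the two helpers above in its probe/remove
 * paths, so the parent's enable hook runs only for the first user and the
 * disable hook only when the last user goes away. Names such as
 * foo_child_probe() are hypothetical.
 *
 *	static int foo_child_probe(struct platform_device *pdev)
 *	{
 *		int ret = mfd_cell_enable(pdev);
 *
 *		if (ret)
 *			return ret;
 *		// ... child-specific setup ...
 *		return 0;
 *	}
 *
 *	static int foo_child_remove(struct platform_device *pdev)
 *	{
 *		// ... child-specific teardown ...
 *		return mfd_cell_disable(pdev);
 *	}
 */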

static int mfd_platform_add_cell(struct platform_device *pdev,
                                 const struct mfd_cell *cell,
                                 atomic_t *usage_count)
{
        if (!cell)
                return 0;

        pdev->mfd_cell = kmemdup(cell, sizeof(*cell), GFP_KERNEL);
        if (!pdev->mfd_cell)
                return -ENOMEM;

        pdev->mfd_cell->usage_count = usage_count;
        return 0;
}

#if IS_ENABLED(CONFIG_ACPI)
static void mfd_acpi_add_device(const struct mfd_cell *cell,
                                struct platform_device *pdev)
{
        const struct mfd_cell_acpi_match *match = cell->acpi_match;
        struct acpi_device *parent, *child;
        struct acpi_device *adev;

        parent = ACPI_COMPANION(pdev->dev.parent);
        if (!parent)
                return;

        /*
         * The MFD child device gets its ACPI handle either from the ACPI
         * device directly under the parent that matches the _HID/_CID or
         * the _ADR, or it falls back to the parent handle if no ID is
         * given.
         *
         * Note that use of _ADR is a grey area in the ACPI specification,
         * though Intel Galileo Gen2 uses it to distinguish its children
         * devices.
         */
        adev = parent;
        if (match) {
                if (match->pnpid) {
                        struct acpi_device_id ids[2] = {};

                        strlcpy(ids[0].id, match->pnpid, sizeof(ids[0].id));
                        list_for_each_entry(child, &parent->children, node) {
                                if (!acpi_match_device_ids(child, ids)) {
                                        adev = child;
                                        break;
                                }
                        }
                } else {
                        unsigned long long adr;
                        acpi_status status;

                        list_for_each_entry(child, &parent->children, node) {
                                status = acpi_evaluate_integer(child->handle,
                                                               "_ADR", NULL,
                                                               &adr);
                                if (ACPI_SUCCESS(status) && match->adr == adr) {
                                        adev = child;
                                        break;
                                }
                        }
                }
        }

        ACPI_COMPANION_SET(&pdev->dev, adev);
}
#else
static inline void mfd_acpi_add_device(const struct mfd_cell *cell,
                                       struct platform_device *pdev)
{
}
#endif
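
/*
 * Illustrative sketch, not part of this file: a parent driver that wants a
 * cell bound to a specific ACPI child node describes it with
 * struct mfd_cell_acpi_match, either by PNP/ACPI ID or by _ADR. All names
 * and the ID string below are hypothetical.
 *
 *	static const struct mfd_cell_acpi_match foo_acpi_match = {
 *		.pnpid = "ABCD1234",
 *	};
 *
 *	static const struct mfd_cell foo_cells[] = {
 *		{ .name = "foo-gpio", .acpi_match = &foo_acpi_match },
 *	};
 */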

static int mfd_add_device(struct device *parent, int id,
                          const struct mfd_cell *cell, atomic_t *usage_count,
                          struct resource *mem_base,
                          int irq_base, struct irq_domain *domain)
{
        struct resource *res;
        struct platform_device *pdev;
        struct device_node *np = NULL;
        int ret = -ENOMEM;
        int platform_id;
        int r;

        if (id == PLATFORM_DEVID_AUTO)
                platform_id = id;
        else
                platform_id = id + cell->id;

        pdev = platform_device_alloc(cell->name, platform_id);
        if (!pdev)
                goto fail_alloc;

        res = kcalloc(cell->num_resources, sizeof(*res), GFP_KERNEL);
        if (!res)
                goto fail_device;

        pdev->dev.parent = parent;
        pdev->dev.type = &mfd_dev_type;
        pdev->dev.dma_mask = parent->dma_mask;
        pdev->dev.dma_parms = parent->dma_parms;
        pdev->dev.coherent_dma_mask = parent->coherent_dma_mask;

        ret = regulator_bulk_register_supply_alias(
                        &pdev->dev, cell->parent_supplies,
                        parent, cell->parent_supplies,
                        cell->num_parent_supplies);
        if (ret < 0)
                goto fail_res;

        if (parent->of_node && cell->of_compatible) {
                for_each_child_of_node(parent->of_node, np) {
                        if (of_device_is_compatible(np, cell->of_compatible)) {
                                pdev->dev.of_node = np;
                                break;
                        }
                }
        }

        mfd_acpi_add_device(cell, pdev);

        if (cell->pdata_size) {
                ret = platform_device_add_data(pdev,
                                cell->platform_data, cell->pdata_size);
                if (ret)
                        goto fail_alias;
        }

        if (cell->properties) {
                ret = platform_device_add_properties(pdev, cell->properties);
                if (ret)
                        goto fail_alias;
        }

        ret = mfd_platform_add_cell(pdev, cell, usage_count);
        if (ret)
                goto fail_alias;

        for (r = 0; r < cell->num_resources; r++) {
                res[r].name = cell->resources[r].name;
                res[r].flags = cell->resources[r].flags;

                /* Find out base to use */
                if ((cell->resources[r].flags & IORESOURCE_MEM) && mem_base) {
                        res[r].parent = mem_base;
                        res[r].start = mem_base->start +
                                cell->resources[r].start;
                        res[r].end = mem_base->start +
                                cell->resources[r].end;
                } else if (cell->resources[r].flags & IORESOURCE_IRQ) {
                        if (domain) {
                                /* Unable to create mappings for IRQ ranges. */
                                WARN_ON(cell->resources[r].start !=
                                        cell->resources[r].end);
                                res[r].start = res[r].end = irq_create_mapping(
                                        domain, cell->resources[r].start);
                        } else {
                                res[r].start = irq_base +
                                        cell->resources[r].start;
                                res[r].end = irq_base +
                                        cell->resources[r].end;
                        }
                } else {
                        res[r].parent = cell->resources[r].parent;
                        res[r].start = cell->resources[r].start;
                        res[r].end = cell->resources[r].end;
                }

                if (!cell->ignore_resource_conflicts) {
                        if (has_acpi_companion(&pdev->dev)) {
                                ret = acpi_check_resource_conflict(&res[r]);
                                if (ret)
                                        goto fail_alias;
                        }
                }
        }

        ret = platform_device_add_resources(pdev, res, cell->num_resources);
        if (ret)
                goto fail_alias;

        ret = platform_device_add(pdev);
        if (ret)
                goto fail_alias;

        if (cell->pm_runtime_no_callbacks)
                pm_runtime_no_callbacks(&pdev->dev);

        kfree(res);

        return 0;

fail_alias:
        regulator_bulk_unregister_supply_alias(&pdev->dev,
                                               cell->parent_supplies,
                                               cell->num_parent_supplies);
fail_res:
        kfree(res);
fail_device:
        platform_device_put(pdev);
fail_alloc:
        return ret;
}

/**
 * mfd_add_devices - register child devices
 *
 * @parent:   Pointer to parent device.
 * @id:       Can be PLATFORM_DEVID_AUTO to let the Platform API take care
 *            of device numbering, or will be added to a device's cell_id.
 * @cells:    Array of (struct mfd_cell)s describing child devices.
 * @n_devs:   Number of child devices to register.
 * @mem_base: Parent register range resource for child devices.
 * @irq_base: Base of the range of virtual interrupt numbers allocated for
 *            this MFD device. Unused if @domain is specified.
 * @domain:   Interrupt domain to create mappings for hardware interrupts.
 */
int mfd_add_devices(struct device *parent, int id,
                    const struct mfd_cell *cells, int n_devs,
                    struct resource *mem_base,
                    int irq_base, struct irq_domain *domain)
{
        int i;
        int ret;
        atomic_t *cnts;

        /* initialize reference counting for all cells */
        cnts = kcalloc(n_devs, sizeof(*cnts), GFP_KERNEL);
        if (!cnts)
                return -ENOMEM;

        for (i = 0; i < n_devs; i++) {
                atomic_set(&cnts[i], 0);
                ret = mfd_add_device(parent, id, cells + i, cnts + i, mem_base,
                                     irq_base, domain);
                if (ret)
                        goto fail;
        }

        return 0;

fail:
        if (i)
                mfd_remove_devices(parent);
        else
                kfree(cnts);
        return ret;
}
EXPORT_SYMBOL(mfd_add_devices);
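
/*
 * Illustrative sketch, not part of this file: a typical parent driver
 * describes its functions as an array of cells and registers them all in
 * one call. All names below (foo_*, FOO_IRQ_ADC, foo_irq_domain) are
 * hypothetical.
 *
 *	static const struct resource foo_adc_resources[] = {
 *		DEFINE_RES_IRQ(FOO_IRQ_ADC),
 *	};
 *
 *	static const struct mfd_cell foo_cells[] = {
 *		{
 *			.name		= "foo-adc",
 *			.resources	= foo_adc_resources,
 *			.num_resources	= ARRAY_SIZE(foo_adc_resources),
 *		},
 *		{ .name = "foo-rtc" },
 *	};
 *
 *	ret = mfd_add_devices(dev, PLATFORM_DEVID_AUTO, foo_cells,
 *			      ARRAY_SIZE(foo_cells), NULL, 0, foo_irq_domain);
 */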

static int mfd_remove_devices_fn(struct device *dev, void *c)
{
        struct platform_device *pdev;
        const struct mfd_cell *cell;
        atomic_t **usage_count = c;

        if (dev->type != &mfd_dev_type)
                return 0;

        pdev = to_platform_device(dev);
        cell = mfd_get_cell(pdev);

        regulator_bulk_unregister_supply_alias(dev, cell->parent_supplies,
                                               cell->num_parent_supplies);

        /* find the base address of usage_count pointers (for freeing) */
        if (!*usage_count || (cell->usage_count < *usage_count))
                *usage_count = cell->usage_count;

        platform_device_unregister(pdev);
        return 0;
}

void mfd_remove_devices(struct device *parent)
{
        atomic_t *cnts = NULL;

        device_for_each_child_reverse(parent, &cnts, mfd_remove_devices_fn);
        kfree(cnts);
}
EXPORT_SYMBOL(mfd_remove_devices);

static void devm_mfd_dev_release(struct device *dev, void *res)
{
        mfd_remove_devices(dev);
}

/**
 * devm_mfd_add_devices - Resource managed version of mfd_add_devices()
 *
 * Returns 0 on success or an appropriate negative error number on failure.
 * All child devices of the MFD will automatically be removed when it is
 * unbound.
 *
 * @dev:      Pointer to parent device.
 * @id:       Can be PLATFORM_DEVID_AUTO to let the Platform API take care
 *            of device numbering, or will be added to a device's cell_id.
 * @cells:    Array of (struct mfd_cell)s describing child devices.
 * @n_devs:   Number of child devices to register.
 * @mem_base: Parent register range resource for child devices.
 * @irq_base: Base of the range of virtual interrupt numbers allocated for
 *            this MFD device. Unused if @domain is specified.
 * @domain:   Interrupt domain to create mappings for hardware interrupts.
 */
int devm_mfd_add_devices(struct device *dev, int id,
                         const struct mfd_cell *cells, int n_devs,
                         struct resource *mem_base,
                         int irq_base, struct irq_domain *domain)
{
        struct device **ptr;
        int ret;

        ptr = devres_alloc(devm_mfd_dev_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return -ENOMEM;

        ret = mfd_add_devices(dev, id, cells, n_devs, mem_base,
                              irq_base, domain);
        if (ret < 0) {
                devres_free(ptr);
                return ret;
        }

        *ptr = dev;
        devres_add(dev, ptr);

        return ret;
}
EXPORT_SYMBOL(devm_mfd_add_devices);
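
/*
 * Illustrative sketch, not part of this file: with the devres variant the
 * parent driver needs no explicit mfd_remove_devices() in its remove path.
 * Names such as foo_probe() and foo_cells[] are hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		// ... parent-specific setup ...
 *		return devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE,
 *					    foo_cells, ARRAY_SIZE(foo_cells),
 *					    NULL, 0, NULL);
 *	}
 */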

int mfd_clone_cell(const char *cell, const char **clones, size_t n_clones)
{
        struct mfd_cell cell_entry;
        struct device *dev;
        struct platform_device *pdev;
        int i;

        /* fetch the parent cell's device (should already be registered!) */
        dev = bus_find_device_by_name(&platform_bus_type, NULL, cell);
        if (!dev) {
                printk(KERN_ERR "failed to find device for cell %s\n", cell);
                return -ENODEV;
        }
        pdev = to_platform_device(dev);
        memcpy(&cell_entry, mfd_get_cell(pdev), sizeof(cell_entry));

        WARN_ON(!cell_entry.enable);

        for (i = 0; i < n_clones; i++) {
                cell_entry.name = clones[i];
                /* don't give up if a single call fails; just report error */
                if (mfd_add_device(pdev->dev.parent, -1, &cell_entry,
                                   cell_entry.usage_count, NULL, 0, NULL))
                        dev_err(dev, "failed to create platform device '%s'\n",
                                clones[i]);
        }

        put_device(dev);

        return 0;
}
EXPORT_SYMBOL(mfd_clone_cell);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov");