// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/mfd/mfd-core.c
 *
 * core MFD support
 * Copyright (c) 2006 Ian Molton
 * Copyright (c) 2007,2008 Dmitry Baryshkov
 */

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/property.h>
#include <linux/mfd/core.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>

static struct device_type mfd_dev_type = {
	.name	= "mfd_device",
};

int mfd_cell_enable(struct platform_device *pdev)
{
	const struct mfd_cell *cell = mfd_get_cell(pdev);

	if (!cell->enable) {
		dev_dbg(&pdev->dev, "No .enable() call-back registered\n");
		return 0;
	}

	return cell->enable(pdev);
}
EXPORT_SYMBOL(mfd_cell_enable);

int mfd_cell_disable(struct platform_device *pdev)
{
	const struct mfd_cell *cell = mfd_get_cell(pdev);

	if (!cell->disable) {
		dev_dbg(&pdev->dev, "No .disable() call-back registered\n");
		return 0;
	}

	return cell->disable(pdev);
}
EXPORT_SYMBOL(mfd_cell_disable);
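
/*
 * Illustrative sketch, not part of this file: one way a parent driver might
 * wire the .enable()/.disable() hooks of a cell, and how the corresponding
 * child driver would invoke them through mfd_cell_enable()/mfd_cell_disable().
 * All "foo" names below are hypothetical.
 *
 *	static int foo_codec_enable(struct platform_device *pdev)
 *	{
 *		// Power up the shared block before the child uses it;
 *		// pdev here is the child's platform device.
 *		return foo_power_on(dev_get_drvdata(pdev->dev.parent));
 *	}
 *
 *	static const struct mfd_cell foo_cells[] = {
 *		{
 *			.name    = "foo-codec",
 *			.enable  = foo_codec_enable,
 *			.disable = foo_codec_disable,
 *		},
 *	};
 *
 * and in the child driver's probe():
 *
 *	ret = mfd_cell_enable(pdev);
 *	if (ret)
 *		return ret;
 */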

#if IS_ENABLED(CONFIG_ACPI)
static void mfd_acpi_add_device(const struct mfd_cell *cell,
				struct platform_device *pdev)
{
	const struct mfd_cell_acpi_match *match = cell->acpi_match;
	struct acpi_device *parent, *child;
	struct acpi_device *adev;

	parent = ACPI_COMPANION(pdev->dev.parent);
	if (!parent)
		return;

	/*
	 * The MFD child device gets its ACPI handle either from the ACPI
	 * device directly under the parent that matches either the _HID or
	 * _CID, or the _ADR, or it will use the parent handle if no ID is
	 * given.
	 *
	 * Note that use of _ADR is a grey area in the ACPI specification,
	 * though Intel Galileo Gen2 uses it to distinguish its child
	 * devices.
	 */
	adev = parent;
	if (match) {
		if (match->pnpid) {
			struct acpi_device_id ids[2] = {};

			strlcpy(ids[0].id, match->pnpid, sizeof(ids[0].id));
			list_for_each_entry(child, &parent->children, node) {
				if (!acpi_match_device_ids(child, ids)) {
					adev = child;
					break;
				}
			}
		} else {
			unsigned long long adr;
			acpi_status status;

			list_for_each_entry(child, &parent->children, node) {
				status = acpi_evaluate_integer(child->handle,
							       "_ADR", NULL,
							       &adr);
				if (ACPI_SUCCESS(status) && match->adr == adr) {
					adev = child;
					break;
				}
			}
		}
	}

	ACPI_COMPANION_SET(&pdev->dev, adev);
}
#else
static inline void mfd_acpi_add_device(const struct mfd_cell *cell,
				       struct platform_device *pdev)
{
}
#endif

static int mfd_add_device(struct device *parent, int id,
			  const struct mfd_cell *cell,
			  struct resource *mem_base,
			  int irq_base, struct irq_domain *domain)
{
	struct resource *res;
	struct platform_device *pdev;
	struct device_node *np = NULL;
	int ret = -ENOMEM;
	int platform_id;
	int r;

	if (id == PLATFORM_DEVID_AUTO)
		platform_id = id;
	else
		platform_id = id + cell->id;

	pdev = platform_device_alloc(cell->name, platform_id);
	if (!pdev)
		goto fail_alloc;

	pdev->mfd_cell = kmemdup(cell, sizeof(*cell), GFP_KERNEL);
	if (!pdev->mfd_cell)
		goto fail_device;

	res = kcalloc(cell->num_resources, sizeof(*res), GFP_KERNEL);
	if (!res)
		goto fail_device;

	pdev->dev.parent = parent;
	pdev->dev.type = &mfd_dev_type;
	pdev->dev.dma_mask = parent->dma_mask;
	pdev->dev.dma_parms = parent->dma_parms;
	pdev->dev.coherent_dma_mask = parent->coherent_dma_mask;

	ret = regulator_bulk_register_supply_alias(
			&pdev->dev, cell->parent_supplies,
			parent, cell->parent_supplies,
			cell->num_parent_supplies);
	if (ret < 0)
		goto fail_res;

	if (parent->of_node && cell->of_compatible) {
		for_each_child_of_node(parent->of_node, np) {
			if (of_device_is_compatible(np, cell->of_compatible)) {
				if (!of_device_is_available(np)) {
					/* Skip disabled devices without raising an error */
					ret = 0;
					goto fail_alias;
				}
				pdev->dev.of_node = np;
				pdev->dev.fwnode = &np->fwnode;
				break;
			}
		}
	}

	mfd_acpi_add_device(cell, pdev);

	if (cell->pdata_size) {
		ret = platform_device_add_data(pdev,
					cell->platform_data, cell->pdata_size);
		if (ret)
			goto fail_alias;
	}

	if (cell->properties) {
		ret = platform_device_add_properties(pdev, cell->properties);
		if (ret)
			goto fail_alias;
	}

	for (r = 0; r < cell->num_resources; r++) {
		res[r].name = cell->resources[r].name;
		res[r].flags = cell->resources[r].flags;

		/* Find out which base to use */
		if ((cell->resources[r].flags & IORESOURCE_MEM) && mem_base) {
			res[r].parent = mem_base;
			res[r].start = mem_base->start +
				cell->resources[r].start;
			res[r].end = mem_base->start +
				cell->resources[r].end;
		} else if (cell->resources[r].flags & IORESOURCE_IRQ) {
			if (domain) {
				/* Unable to create mappings for IRQ ranges. */
				WARN_ON(cell->resources[r].start !=
					cell->resources[r].end);
				res[r].start = res[r].end = irq_create_mapping(
					domain, cell->resources[r].start);
			} else {
				res[r].start = irq_base +
					cell->resources[r].start;
				res[r].end = irq_base +
					cell->resources[r].end;
			}
		} else {
			res[r].parent = cell->resources[r].parent;
			res[r].start = cell->resources[r].start;
			res[r].end = cell->resources[r].end;
		}

		if (!cell->ignore_resource_conflicts) {
			if (has_acpi_companion(&pdev->dev)) {
				ret = acpi_check_resource_conflict(&res[r]);
				if (ret)
					goto fail_alias;
			}
		}
	}

	ret = platform_device_add_resources(pdev, res, cell->num_resources);
	if (ret)
		goto fail_alias;

	ret = platform_device_add(pdev);
	if (ret)
		goto fail_alias;

	if (cell->pm_runtime_no_callbacks)
		pm_runtime_no_callbacks(&pdev->dev);

	kfree(res);

	return 0;

fail_alias:
	regulator_bulk_unregister_supply_alias(&pdev->dev,
					       cell->parent_supplies,
					       cell->num_parent_supplies);
fail_res:
	kfree(res);
fail_device:
	platform_device_put(pdev);
fail_alloc:
	return ret;
}
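
/*
 * Illustrative sketch, not part of this file: a cell description as a parent
 * driver might pass it to mfd_add_devices(). MEM resources are offsets into
 * the parent's @mem_base range and IRQ resources are hardware numbers mapped
 * through @domain, matching the translation performed in the loop above.
 * The "foo" names, offsets and IDs are hypothetical.
 *
 *	static const struct mfd_cell_acpi_match foo_adc_acpi_match = {
 *		.pnpid = "ABCD0001",
 *	};
 *
 *	static const struct resource foo_adc_resources[] = {
 *		DEFINE_RES_MEM(0x100, 0x40),	// offset into parent range
 *		DEFINE_RES_IRQ(3),		// hwirq, mapped via the domain
 *	};
 *
 *	static const struct mfd_cell foo_adc_cell = {
 *		.name		= "foo-adc",
 *		.of_compatible	= "vendor,foo-adc",
 *		.acpi_match	= &foo_adc_acpi_match,
 *		.resources	= foo_adc_resources,
 *		.num_resources	= ARRAY_SIZE(foo_adc_resources),
 *	};
 */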

/**
 * mfd_add_devices - register child devices
 *
 * @parent:	Pointer to parent device.
 * @id:		Can be PLATFORM_DEVID_AUTO to let the Platform API take care
 *		of device numbering, or will be added to a device's cell_id.
 * @cells:	Array of (struct mfd_cell)s describing child devices.
 * @n_devs:	Number of child devices to register.
 * @mem_base:	Parent register range resource for child devices.
 * @irq_base:	Base of the range of virtual interrupt numbers allocated for
 *		this MFD device. Unused if @domain is specified.
 * @domain:	Interrupt domain to create mappings for hardware interrupts.
 */
int mfd_add_devices(struct device *parent, int id,
		    const struct mfd_cell *cells, int n_devs,
		    struct resource *mem_base,
		    int irq_base, struct irq_domain *domain)
{
	int i;
	int ret;

	for (i = 0; i < n_devs; i++) {
		ret = mfd_add_device(parent, id, cells + i, mem_base,
				     irq_base, domain);
		if (ret)
			goto fail;
	}

	return 0;

fail:
	if (i)
		mfd_remove_devices(parent);

	return ret;
}
EXPORT_SYMBOL(mfd_add_devices);

static int mfd_remove_devices_fn(struct device *dev, void *data)
{
	struct platform_device *pdev;
	const struct mfd_cell *cell;

	if (dev->type != &mfd_dev_type)
		return 0;

	pdev = to_platform_device(dev);
	cell = mfd_get_cell(pdev);

	regulator_bulk_unregister_supply_alias(dev, cell->parent_supplies,
					       cell->num_parent_supplies);

	platform_device_unregister(pdev);
	return 0;
}

void mfd_remove_devices(struct device *parent)
{
	device_for_each_child_reverse(parent, NULL, mfd_remove_devices_fn);
}
EXPORT_SYMBOL(mfd_remove_devices);
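
/*
 * Illustrative sketch, not part of this file: typical non-devm usage from a
 * parent driver, pairing mfd_add_devices() in probe() with
 * mfd_remove_devices() in remove(). "foo_cells" is a hypothetical array of
 * cells such as the one sketched above.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		return mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE,
 *				       foo_cells, ARRAY_SIZE(foo_cells),
 *				       NULL, 0, NULL);
 *	}
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		mfd_remove_devices(&pdev->dev);
 *		return 0;
 *	}
 */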

static void devm_mfd_dev_release(struct device *dev, void *res)
{
	mfd_remove_devices(dev);
}

/**
 * devm_mfd_add_devices - Resource managed version of mfd_add_devices()
 *
 * Returns 0 on success or an appropriate negative error number on failure.
 * All child devices of the MFD will automatically be removed when the parent
 * device is unbound.
 */
int devm_mfd_add_devices(struct device *dev, int id,
			 const struct mfd_cell *cells, int n_devs,
			 struct resource *mem_base,
			 int irq_base, struct irq_domain *domain)
{
	struct device **ptr;
	int ret;

	ptr = devres_alloc(devm_mfd_dev_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = mfd_add_devices(dev, id, cells, n_devs, mem_base,
			      irq_base, domain);
	if (ret < 0) {
		devres_free(ptr);
		return ret;
	}

	*ptr = dev;
	devres_add(dev, ptr);

	return ret;
}
EXPORT_SYMBOL(devm_mfd_add_devices);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov");
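
/*
 * Illustrative sketch, not part of this file: the resource-managed variant.
 * With devm_mfd_add_devices() the children are torn down automatically when
 * the parent is unbound, so no remove() counterpart is needed. "foo" names
 * are hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		return devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE,
 *					    foo_cells, ARRAY_SIZE(foo_cells),
 *					    NULL, 0, NULL);
 *	}
 */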