// SPDX-License-Identifier: GPL-2.0
/*
 * MSI framework for platform devices
 *
 * Copyright (C) 2015 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/device.h>
#include <linux/idr.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/slab.h>

/*
 * A 32-bit hwirq is split in two fields: the top bits hold a per-device
 * id (DEV_ID_SHIFT bits wide, allocated from platform_msi_devid_ida),
 * and the low (32 - DEV_ID_SHIFT) bits hold the MSI index within that
 * device.  MAX_DEV_MSIS is therefore the per-device vector limit
 * (2048 with DEV_ID_SHIFT == 21).
 */
#define DEV_ID_SHIFT	21
#define MAX_DEV_MSIS	(1 << (32 - DEV_ID_SHIFT))

/*
 * Internal data structure containing a (made up, but unique) devid
 * and the callback to write the MSI message.
 */
struct platform_msi_priv_data {
	/* Device owning the MSIs */
	struct device *dev;
	/* Opaque caller data, returned by platform_msi_get_host_data() */
	void *host_data;
	/* Allocation info, pre-cooked by msi_domain_prepare_irqs() */
	msi_alloc_info_t arg;
	/* Driver callback that actually writes an MSI message */
	irq_write_msi_msg_t write_msg;
	/* Unique id, allocated from platform_msi_devid_ida */
	int devid;
};

/* The devid allocator */
static DEFINE_IDA(platform_msi_devid_ida);

#ifdef GENERIC_MSI_DOMAIN_OPS
/*
 * Convert an msi_desc to a globally unique identifier (per-device
 * devid + msi_desc position in the msi_list).
 */
static irq_hw_number_t platform_msi_calc_hwirq(struct msi_desc *desc)
{
	u32 devid;

	devid = desc->platform.msi_priv_data->devid;

	/* devid in the top DEV_ID_SHIFT bits, msi_index in the rest */
	return (devid << (32 - DEV_ID_SHIFT)) | desc->platform.msi_index;
}

/* Default ->set_desc(): record the descriptor and its packed hwirq */
static void platform_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
{
	arg->desc = desc;
	arg->hwirq = platform_msi_calc_hwirq(desc);
}

/*
 * Default ->msi_init(): bind the hwirq to the irq_chip/chip_data
 * provided in the domain info.
 */
static int platform_msi_init(struct irq_domain *domain,
			     struct msi_domain_info *info,
			     unsigned int virq, irq_hw_number_t hwirq,
			     msi_alloc_info_t *arg)
{
	return irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
					     info->chip, info->chip_data);
}

/*
 * Mark the allocation as being performed on behalf of a proxy device
 * (used by the device-domain path, where the MSI controller itself is
 * the device the parent domain sees).
 */
static void platform_msi_set_proxy_dev(msi_alloc_info_t *arg)
{
	arg->flags |= MSI_ALLOC_FLAGS_PROXY_DEVICE;
}
#else
#define platform_msi_set_desc		NULL
#define platform_msi_init		NULL
#define platform_msi_set_proxy_dev(x) do {} while(0)
#endif

/*
 * Fill in the default domain ops for callers that requested
 * MSI_FLAG_USE_DEF_DOM_OPS; only hooks left NULL are populated.
 */
static void platform_msi_update_dom_ops(struct msi_domain_info *info)
{
	struct msi_domain_ops *ops = info->ops;

	BUG_ON(!ops);

	if (ops->msi_init == NULL)
		ops->msi_init = platform_msi_init;
	if (ops->set_desc == NULL)
		ops->set_desc = platform_msi_set_desc;
}

/*
 * Default ->irq_write_msi_msg(): forward the composed message to the
 * driver-supplied callback stashed in the descriptor's priv data.
 */
static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct msi_desc *desc = irq_data_get_msi_desc(data);
	struct platform_msi_priv_data *priv_data;

	priv_data = desc->platform.msi_priv_data;

	priv_data->write_msg(desc, msg);
}

/*
 * Fill in the default irq_chip callbacks for callers that requested
 * MSI_FLAG_USE_DEF_CHIP_OPS; only hooks left NULL are populated.
 * Level-triggered MSI support is stripped if the chip does not
 * advertise IRQCHIP_SUPPORTS_LEVEL_MSI.
 */
static void platform_msi_update_chip_ops(struct msi_domain_info *info)
{
	struct irq_chip *chip = info->chip;

	BUG_ON(!chip);
	if (!chip->irq_mask)
		chip->irq_mask = irq_chip_mask_parent;
	if (!chip->irq_unmask)
		chip->irq_unmask = irq_chip_unmask_parent;
	if (!chip->irq_eoi)
		chip->irq_eoi = irq_chip_eoi_parent;
	if (!chip->irq_set_affinity)
		chip->irq_set_affinity = msi_domain_set_affinity;
	if (!chip->irq_write_msi_msg)
		chip->irq_write_msi_msg = platform_msi_write_msg;
	if (WARN_ON((info->flags & MSI_FLAG_LEVEL_CAPABLE) &&
		    !(chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)))
		info->flags &= ~MSI_FLAG_LEVEL_CAPABLE;
}

/*
 * Remove and free every descriptor of @dev whose msi_index falls in
 * [base, base + nvec).  Used both for error unwinding and for the
 * final teardown (where nvec is MAX_DEV_MSIS, i.e. "everything").
 */
static void platform_msi_free_descs(struct device *dev, int base, int nvec)
{
	struct msi_desc *desc, *tmp;

	list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) {
		if (desc->platform.msi_index >= base &&
		    desc->platform.msi_index < (base + nvec)) {
			list_del(&desc->list);
			free_msi_entry(desc);
		}
	}
}

/*
 * Allocate @nvec descriptors for @dev, appending after any existing
 * ones (indices continue from the last entry's msi_index + 1).  If
 * @virq is non-zero the descriptors are bound to virq..virq+nvec-1,
 * otherwise desc->irq is left 0 for later population.  On allocation
 * failure, everything added here is freed and -ENOMEM is returned.
 */
static int platform_msi_alloc_descs_with_irq(struct device *dev, int virq,
					     int nvec,
					     struct platform_msi_priv_data *data)

{
	struct msi_desc *desc;
	int i, base = 0;

	if (!list_empty(dev_to_msi_list(dev))) {
		desc = list_last_entry(dev_to_msi_list(dev),
				       struct msi_desc, list);
		base = desc->platform.msi_index + 1;
	}

	for (i = 0; i < nvec; i++) {
		desc = alloc_msi_entry(dev, 1, NULL);
		if (!desc)
			break;

		desc->platform.msi_priv_data = data;
		desc->platform.msi_index = base + i;
		desc->irq = virq ? virq + i : 0;

		list_add_tail(&desc->list, dev_to_msi_list(dev));
	}

	if (i != nvec) {
		/* Clean up the mess */
		platform_msi_free_descs(dev, base, nvec);

		return -ENOMEM;
	}

	return 0;
}

/* Allocate @nvec descriptors with no Linux irq attached yet */
static int platform_msi_alloc_descs(struct device *dev, int nvec,
				    struct platform_msi_priv_data *data)

{
	return platform_msi_alloc_descs_with_irq(dev, 0, nvec, data);
}

/**
 * platform_msi_create_irq_domain - Create a platform MSI interrupt domain
 * @fwnode:		Optional fwnode of the interrupt controller
 * @info:		MSI domain info
 * @parent:		Parent irq domain
 *
 * Updates the domain and chip ops and creates a platform MSI
 * interrupt domain.
 *
 * Returns:
 * A domain pointer or NULL in case of failure.
 */
struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
						  struct msi_domain_info *info,
						  struct irq_domain *parent)
{
	struct irq_domain *domain;

	if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
		platform_msi_update_dom_ops(info);
	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
		platform_msi_update_chip_ops(info);

	domain = msi_create_irq_domain(fwnode, info, parent);
	if (domain)
		irq_domain_update_bus_token(domain, DOMAIN_BUS_PLATFORM_MSI);

	return domain;
}

/*
 * Validate the request, allocate the per-device priv data and reserve
 * a unique devid for it.  Returns an ERR_PTR on failure:
 * -EINVAL for a bad/missing msi_domain, callback or vector count,
 * -EBUSY if the device already has MSI descriptors,
 * -ENOMEM (or the IDA error) on allocation failure.
 */
static struct platform_msi_priv_data *
platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec,
			     irq_write_msi_msg_t write_msi_msg)
{
	struct platform_msi_priv_data *datap;
	/*
	 * Limit the number of interrupts to 2048 per device. Should we
	 * need to bump this up, DEV_ID_SHIFT should be adjusted
	 * accordingly (which would impact the max number of MSI
	 * capable devices).
	 */
	if (!dev->msi_domain || !write_msi_msg || !nvec || nvec > MAX_DEV_MSIS)
		return ERR_PTR(-EINVAL);

	if (dev->msi_domain->bus_token != DOMAIN_BUS_PLATFORM_MSI) {
		dev_err(dev, "Incompatible msi_domain, giving up\n");
		return ERR_PTR(-EINVAL);
	}

	/* Already had a helping of MSI? Greed... */
	if (!list_empty(dev_to_msi_list(dev)))
		return ERR_PTR(-EBUSY);

	datap = kzalloc(sizeof(*datap), GFP_KERNEL);
	if (!datap)
		return ERR_PTR(-ENOMEM);

	datap->devid = ida_simple_get(&platform_msi_devid_ida,
				      0, 1 << DEV_ID_SHIFT, GFP_KERNEL);
	if (datap->devid < 0) {
		int err = datap->devid;
		kfree(datap);
		return ERR_PTR(err);
	}

	datap->write_msg = write_msi_msg;
	datap->dev = dev;

	return datap;
}

/* Release the devid and free the priv data allocated above */
static void platform_msi_free_priv_data(struct platform_msi_priv_data *data)
{
	ida_simple_remove(&platform_msi_devid_ida, data->devid);
	kfree(data);
}

/**
 * platform_msi_domain_alloc_irqs - Allocate MSI interrupts for @dev
 * @dev:		The device for which to allocate interrupts
 * @nvec:		The number of interrupts to allocate
 * @write_msi_msg:	Callback to write an interrupt message for @dev
 *
 * Returns:
 * Zero for success, or an error code in case of failure
 */
int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
				   irq_write_msi_msg_t write_msi_msg)
{
	struct platform_msi_priv_data *priv_data;
	int err;

	priv_data = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
	if (IS_ERR(priv_data))
		return PTR_ERR(priv_data);

	err = platform_msi_alloc_descs(dev, nvec, priv_data);
	if (err)
		goto out_free_priv_data;

	err = msi_domain_alloc_irqs(dev->msi_domain, dev, nvec);
	if (err)
		goto out_free_desc;

	return 0;

out_free_desc:
	platform_msi_free_descs(dev, 0, nvec);
out_free_priv_data:
	platform_msi_free_priv_data(priv_data);

	return err;
}
EXPORT_SYMBOL_GPL(platform_msi_domain_alloc_irqs);

/**
 * platform_msi_domain_free_irqs - Free MSI interrupts for @dev
 * @dev:	The device for which to free interrupts
 */
void platform_msi_domain_free_irqs(struct device *dev)
{
	if (!list_empty(dev_to_msi_list(dev))) {
		struct msi_desc *desc;

		/*
		 * All descriptors share the same priv data; fetch it
		 * from the first entry and release it (devid + memory).
		 */
		desc = first_msi_entry(dev);
		platform_msi_free_priv_data(desc->platform.msi_priv_data);
	}

	msi_domain_free_irqs(dev->msi_domain, dev);
	/* MAX_DEV_MSIS covers every possible index, i.e. free them all */
	platform_msi_free_descs(dev, 0, MAX_DEV_MSIS);
}
EXPORT_SYMBOL_GPL(platform_msi_domain_free_irqs);

/**
 * platform_msi_get_host_data - Query the private data associated with
 *				a platform-msi domain
 * @domain:	The platform-msi domain
 *
 * Returns the private data provided when calling
 * platform_msi_create_device_domain.
 */
void *platform_msi_get_host_data(struct irq_domain *domain)
{
	struct platform_msi_priv_data *data = domain->host_data;

	return data->host_data;
}

/**
 * __platform_msi_create_device_domain - Create a platform-msi domain
 *
 * @dev:		The device generating the MSIs
 * @nvec:		The number of MSIs that need to be allocated
 * @is_tree:		flag to indicate tree hierarchy
 * @write_msi_msg:	Callback to write an interrupt message for @dev
 * @ops:		The hierarchy domain operations to use
 * @host_data:		Private data associated to this domain
 *
 * Returns an irqdomain for @nvec interrupts
 */
struct irq_domain *
__platform_msi_create_device_domain(struct device *dev,
				    unsigned int nvec,
				    bool is_tree,
				    irq_write_msi_msg_t write_msi_msg,
				    const struct irq_domain_ops *ops,
				    void *host_data)
{
	struct platform_msi_priv_data *data;
	struct irq_domain *domain;
	int err;

	data = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
	if (IS_ERR(data))
		return NULL;

	data->host_data = host_data;
	/* A tree domain has no fixed size; a linear one is @nvec wide */
	domain = irq_domain_create_hierarchy(dev->msi_domain, 0,
					     is_tree ? 0 : nvec,
					     dev->fwnode, ops, data);
	if (!domain)
		goto free_priv;

	platform_msi_set_proxy_dev(&data->arg);
	err = msi_domain_prepare_irqs(domain->parent, dev, nvec, &data->arg);
	if (err)
		goto free_domain;

	return domain;

free_domain:
	irq_domain_remove(domain);
free_priv:
	platform_msi_free_priv_data(data);
	return NULL;
}

/**
 * platform_msi_domain_free - Free interrupts associated with a platform-msi
 *			      domain
 *
 * @domain:	The platform-msi domain
 * @virq:	The base irq from which to perform the free operation
 * @nvec:	How many interrupts to free from @virq
 */
void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
			      unsigned int nvec)
{
	struct platform_msi_priv_data *data = domain->host_data;
	struct msi_desc *desc, *tmp;

	for_each_msi_entry_safe(desc, tmp, data->dev) {
		/* Only single-vector descriptors with a live irq expected */
		if (WARN_ON(!desc->irq || desc->nvec_used != 1))
			return;
		if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
			continue;

		irq_domain_free_irqs_common(domain, desc->irq, 1);
		list_del(&desc->list);
		free_msi_entry(desc);
	}
}

/**
 * platform_msi_domain_alloc - Allocate interrupts associated with
 *			       a platform-msi domain
 *
 * @domain:	The platform-msi domain
 * @virq:	The base irq from which to perform the allocate operation
 * @nr_irqs:	How many interrupts to free from @virq
 *
 * Return 0 on success, or an error code on failure. Must be called
 * with irq_domain_mutex held (which can only be done as part of a
 * top-level interrupt allocation).
 */
int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
			      unsigned int nr_irqs)
{
	struct platform_msi_priv_data *data = domain->host_data;
	int err;

	err = platform_msi_alloc_descs_with_irq(data->dev, virq, nr_irqs, data);
	if (err)
		return err;

	err = msi_domain_populate_irqs(domain->parent, data->dev,
				       virq, nr_irqs, &data->arg);
	if (err)
		/* Unwind the descriptors allocated just above */
		platform_msi_domain_free(domain, virq, nr_irqs);

	return err;
}