// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014 Intel Corp.
 * Author: Jiang Liu <jiang.liu@linux.intel.com>
 *
 * This file is licensed under GPLv2.
 *
 * This file contains common code to support Message Signaled Interrupts
 * for PCI-compatible and non-PCI-compatible devices.
 */
#include <linux/types.h>
#include <linux/device.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/slab.h>

#include "internals.h"

/**
 * alloc_msi_entry - Allocate and initialize an msi_desc
 * @dev: Pointer to the device for which this is allocated
 * @nvec: The number of vectors used in this entry
 * @affinity: Optional pointer to an array of @nvec affinity descriptors
 *
 * If @affinity is not NULL then an affinity array[@nvec] is allocated
 * and the affinity masks and flags from @affinity are copied.
 */
struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
				 const struct irq_affinity_desc *affinity)
{
	struct msi_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	INIT_LIST_HEAD(&desc->list);
	desc->dev = dev;
	desc->nvec_used = nvec;
	if (affinity) {
		desc->affinity = kmemdup(affinity,
					 nvec * sizeof(*desc->affinity),
					 GFP_KERNEL);
		if (!desc->affinity) {
			kfree(desc);
			return NULL;
		}
	}

	return desc;
}

void free_msi_entry(struct msi_desc *entry)
{
	kfree(entry->affinity);
	kfree(entry);
}
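/*
 * Illustrative sketch, not part of the original file: how a hypothetical
 * caller might pair the two helpers above. The function name and the
 * vector count are assumptions for the example only; the real users are
 * the PCI/MSI and platform-MSI layers.
 */
static int __maybe_unused example_msi_desc_usage(struct device *dev)
{
	struct msi_desc *desc;

	/* Two vectors, no affinity hints (NULL skips the kmemdup() copy) */
	desc = alloc_msi_entry(dev, 2, NULL);
	if (!desc)
		return -ENOMEM;

	/* ... fill in desc->msi_attrib etc. before use ... */

	free_msi_entry(desc);
	return 0;
}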
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	*msg = entry->msg;
}

void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	__get_cached_msi_msg(entry, msg);
}
EXPORT_SYMBOL_GPL(get_cached_msi_msg);

#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static inline void irq_chip_write_msi_msg(struct irq_data *data,
					  struct msi_msg *msg)
{
	data->chip->irq_write_msi_msg(data, msg);
}

static void msi_check_level(struct irq_domain *domain, struct msi_msg *msg)
{
	struct msi_domain_info *info = domain->host_data;

	/*
	 * If the MSI provider has messed with the second message and
	 * not advertised that it is level-capable, signal the breakage.
	 */
	WARN_ON(!((info->flags & MSI_FLAG_LEVEL_CAPABLE) &&
		  (info->chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)) &&
		(msg[1].address_lo || msg[1].address_hi || msg[1].data));
}

/**
 * msi_domain_set_affinity - Generic affinity setter function for MSI domains
 * @irq_data: The irq data associated with the interrupt
 * @mask: The affinity mask to set
 * @force: Flag to enforce setting (disable online checks)
 *
 * Intended to be used by MSI interrupt controllers which are
 * implemented with hierarchical domains.
 */
int msi_domain_set_affinity(struct irq_data *irq_data,
			    const struct cpumask *mask, bool force)
{
	struct irq_data *parent = irq_data->parent_data;
	struct msi_msg msg[2] = { [1] = { }, };
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) {
		BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
		msi_check_level(irq_data->domain, msg);
		irq_chip_write_msi_msg(irq_data, msg);
	}

	return ret;
}
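/*
 * Illustrative sketch, not part of the original file: an irq_chip for a
 * hypothetical MSI controller that plugs msi_domain_set_affinity() in as
 * its affinity callback. Only .irq_write_msi_msg is controller-specific;
 * the compose/check/write sequencing is handled by the helper above. The
 * "example" names are assumptions for the example only.
 */
static void example_write_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	/* A real driver would program msg->address_hi/lo and msg->data */
}

static struct irq_chip example_msi_chip __maybe_unused = {
	.name			= "example-msi",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_set_affinity	= msi_domain_set_affinity,
	.irq_write_msi_msg	= example_write_msi_msg,
};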
static int msi_domain_activate(struct irq_domain *domain,
			       struct irq_data *irq_data, bool early)
{
	struct msi_msg msg[2] = { [1] = { }, };

	BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
	msi_check_level(irq_data->domain, msg);
	irq_chip_write_msi_msg(irq_data, msg);
	return 0;
}

static void msi_domain_deactivate(struct irq_domain *domain,
				  struct irq_data *irq_data)
{
	struct msi_msg msg[2];

	memset(msg, 0, sizeof(msg));
	irq_chip_write_msi_msg(irq_data, msg);
}

static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs, void *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	irq_hw_number_t hwirq = ops->get_hwirq(info, arg);
	int i, ret;

	if (irq_find_mapping(domain, hwirq) > 0)
		return -EEXIST;

	if (domain->parent) {
		ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < nr_irqs; i++) {
		ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg);
		if (ret < 0) {
			/* Unwind every already initialized entry, including 0 */
			if (ops->msi_free) {
				for (i--; i >= 0; i--)
					ops->msi_free(domain, info, virq + i);
			}
			irq_domain_free_irqs_top(domain, virq, nr_irqs);
			return ret;
		}
	}

	return 0;
}

static void msi_domain_free(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs)
{
	struct msi_domain_info *info = domain->host_data;
	int i;

	if (info->ops->msi_free) {
		for (i = 0; i < nr_irqs; i++)
			info->ops->msi_free(domain, info, virq + i);
	}
	irq_domain_free_irqs_top(domain, virq, nr_irqs);
}

static const struct irq_domain_ops msi_domain_ops = {
	.alloc		= msi_domain_alloc,
	.free		= msi_domain_free,
	.activate	= msi_domain_activate,
	.deactivate	= msi_domain_deactivate,
};

#ifdef GENERIC_MSI_DOMAIN_OPS
static irq_hw_number_t msi_domain_ops_get_hwirq(struct msi_domain_info *info,
						msi_alloc_info_t *arg)
{
	return arg->hwirq;
}

static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev,
				  int nvec, msi_alloc_info_t *arg)
{
	memset(arg, 0, sizeof(*arg));
	return 0;
}

static void msi_domain_ops_set_desc(msi_alloc_info_t *arg,
				    struct msi_desc *desc)
{
	arg->desc = desc;
}
#else
#define msi_domain_ops_get_hwirq	NULL
#define msi_domain_ops_prepare		NULL
#define msi_domain_ops_set_desc		NULL
#endif /* !GENERIC_MSI_DOMAIN_OPS */

static int msi_domain_ops_init(struct irq_domain *domain,
			       struct msi_domain_info *info,
			       unsigned int virq, irq_hw_number_t hwirq,
			       msi_alloc_info_t *arg)
{
	irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip,
				      info->chip_data);
	if (info->handler && info->handler_name) {
		__irq_set_handler(virq, info->handler, 0, info->handler_name);
		if (info->handler_data)
			irq_set_handler_data(virq, info->handler_data);
	}
	return 0;
}

static int msi_domain_ops_check(struct irq_domain *domain,
				struct msi_domain_info *info,
				struct device *dev)
{
	return 0;
}

static struct msi_domain_ops msi_domain_ops_default = {
	.get_hwirq	= msi_domain_ops_get_hwirq,
	.msi_init	= msi_domain_ops_init,
	.msi_check	= msi_domain_ops_check,
	.msi_prepare	= msi_domain_ops_prepare,
	.set_desc	= msi_domain_ops_set_desc,
};
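#ifdef GENERIC_MSI_DOMAIN_OPS
/*
 * Illustrative sketch, not part of the original file: a provider can
 * supply only the callbacks it needs and leave the rest NULL; with
 * MSI_FLAG_USE_DEF_DOM_OPS, msi_domain_update_dom_ops() below fills the
 * gaps from msi_domain_ops_default. The "example" names are assumptions,
 * and the sketch assumes an architecture using the generic
 * msi_alloc_info layout (hence the GENERIC_MSI_DOMAIN_OPS guard).
 */
static void example_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
{
	/* Stash the descriptor for the controller's .msi_init/.get_hwirq */
	arg->desc = desc;
}

static struct msi_domain_ops example_partial_ops __maybe_unused = {
	.set_desc	= example_set_desc,
	/* .get_hwirq, .msi_init, .msi_check, .msi_prepare are defaulted */
};
#endif /* GENERIC_MSI_DOMAIN_OPS */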
static void msi_domain_update_dom_ops(struct msi_domain_info *info)
{
	struct msi_domain_ops *ops = info->ops;

	if (ops == NULL) {
		info->ops = &msi_domain_ops_default;
		return;
	}

	if (ops->get_hwirq == NULL)
		ops->get_hwirq = msi_domain_ops_default.get_hwirq;
	if (ops->msi_init == NULL)
		ops->msi_init = msi_domain_ops_default.msi_init;
	if (ops->msi_check == NULL)
		ops->msi_check = msi_domain_ops_default.msi_check;
	if (ops->msi_prepare == NULL)
		ops->msi_prepare = msi_domain_ops_default.msi_prepare;
	if (ops->set_desc == NULL)
		ops->set_desc = msi_domain_ops_default.set_desc;
}

static void msi_domain_update_chip_ops(struct msi_domain_info *info)
{
	struct irq_chip *chip = info->chip;

	BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask);
	if (!chip->irq_set_affinity)
		chip->irq_set_affinity = msi_domain_set_affinity;
}

/**
 * msi_create_irq_domain - Create an MSI interrupt domain
 * @fwnode: Optional fwnode of the interrupt controller
 * @info: MSI domain info
 * @parent: Parent irq domain
 */
struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
					 struct msi_domain_info *info,
					 struct irq_domain *parent)
{
	struct irq_domain *domain;

	if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
		msi_domain_update_dom_ops(info);
	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
		msi_domain_update_chip_ops(info);

	domain = irq_domain_create_hierarchy(parent, IRQ_DOMAIN_FLAG_MSI, 0,
					     fwnode, &msi_domain_ops, info);

	if (domain && !domain->name && info->chip)
		domain->name = info->chip->name;

	return domain;
}
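/*
 * Illustrative sketch, not part of the original file: minimal domain
 * creation for a hypothetical controller, reusing example_msi_chip from
 * the sketch further up. 'parent' would be the underlying vector/GIC
 * domain and 'fwnode' the controller's firmware node; both flags request
 * the default fill-ins performed inside msi_create_irq_domain().
 */
static __maybe_unused struct irq_domain *
example_create_msi_domain(struct fwnode_handle *fwnode,
			  struct irq_domain *parent)
{
	static struct msi_domain_info example_info = {
		.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
		.chip	= &example_msi_chip,
	};

	return msi_create_irq_domain(fwnode, &example_info, parent);
}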
int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec, msi_alloc_info_t *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	int ret;

	ret = ops->msi_check(domain, info, dev);
	if (ret == 0)
		ret = ops->msi_prepare(domain, dev, nvec, arg);

	return ret;
}

int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
			     int virq, int nvec, msi_alloc_info_t *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	struct msi_desc *desc;
	int ret = 0;

	for_each_msi_entry(desc, dev) {
		/* Don't even try the multi-MSI brain damage. */
		if (WARN_ON(!desc->irq || desc->nvec_used != 1)) {
			ret = -EINVAL;
			break;
		}

		if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
			continue;

		ops->set_desc(arg, desc);
		/* Assumes the domain mutex is held! */
		ret = irq_domain_alloc_irqs_hierarchy(domain, desc->irq, 1,
						      arg);
		if (ret)
			break;

		irq_set_msi_desc_off(desc->irq, 0, desc);
	}

	if (ret) {
		/* Mop up the damage */
		for_each_msi_entry(desc, dev) {
			if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
				continue;

			irq_domain_free_irqs_common(domain, desc->irq, 1);
		}
	}

	return ret;
}

/*
 * Carefully check whether the device can use reservation mode. If
 * reservation mode is enabled then the early activation will assign a
 * dummy vector to the device. If the PCI/MSI device does not support
 * masking of the entry then this can result in spurious interrupts when
 * the device driver is not absolutely careful. But even then a malfunction
 * of the hardware could result in a spurious interrupt on the dummy vector
 * and render the device unusable. If the entry can be masked then the core
 * logic will prevent the spurious interrupt and reservation mode can be
 * used. For now reservation mode is restricted to PCI/MSI.
 */
static bool msi_check_reservation_mode(struct irq_domain *domain,
				       struct msi_domain_info *info,
				       struct device *dev)
{
	struct msi_desc *desc;

	if (domain->bus_token != DOMAIN_BUS_PCI_MSI)
		return false;

	if (!(info->flags & MSI_FLAG_MUST_REACTIVATE))
		return false;

	if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_ignore_mask)
		return false;

	/*
	 * Checking the first MSI descriptor is sufficient. MSI-X supports
	 * masking unconditionally and MSI does so when the mask bit is set.
	 */
	desc = first_msi_entry(dev);
	return desc->msi_attrib.is_msix || desc->msi_attrib.maskbit;
}

/**
 * msi_domain_alloc_irqs - Allocate interrupts from an MSI interrupt domain
 * @domain: The domain to allocate from
 * @dev: Pointer to device struct of the device for which the interrupts
 *	 are allocated
 * @nvec: The number of interrupts to allocate
 *
 * Returns 0 on success or an error code.
 */
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			  int nvec)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	struct irq_data *irq_data;
	struct msi_desc *desc;
	msi_alloc_info_t arg;
	int i, ret, virq;
	bool can_reserve;

	ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg);
	if (ret)
		return ret;

	for_each_msi_entry(desc, dev) {
		ops->set_desc(&arg, desc);

		virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used,
					       dev_to_node(dev), &arg, false,
					       desc->affinity);
		if (virq < 0) {
			ret = -ENOSPC;
			if (ops->handle_error)
				ret = ops->handle_error(domain, desc, ret);
			if (ops->msi_finish)
				ops->msi_finish(&arg, ret);
			return ret;
		}

		for (i = 0; i < desc->nvec_used; i++) {
			irq_set_msi_desc_off(virq, i, desc);
			irq_debugfs_copy_devname(virq + i, dev);
		}
	}

	if (ops->msi_finish)
		ops->msi_finish(&arg, 0);

	can_reserve = msi_check_reservation_mode(domain, info, dev);

	for_each_msi_entry(desc, dev) {
		virq = desc->irq;
		if (desc->nvec_used == 1)
			dev_dbg(dev, "irq %d for MSI\n", virq);
		else
			dev_dbg(dev, "irq [%d-%d] for MSI\n",
				virq, virq + desc->nvec_used - 1);
		/*
		 * This flag is set by the PCI layer as we need to activate
		 * the MSI entries before the PCI layer enables MSI in the
		 * card. Otherwise the card latches a random MSI message.
		 */
		if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY))
			continue;

		irq_data = irq_domain_get_irq_data(domain, desc->irq);
		if (!can_reserve) {
			irqd_clr_can_reserve(irq_data);
			if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK)
				irqd_set_msi_nomask_quirk(irq_data);
		}
		ret = irq_domain_activate_irq(irq_data, can_reserve);
		if (ret)
			goto cleanup;
	}

	/*
	 * If these interrupts use reservation mode, clear the activated bit
	 * so request_irq() will assign the final vector.
	 */
	if (can_reserve) {
		for_each_msi_entry(desc, dev) {
			irq_data = irq_domain_get_irq_data(domain, desc->irq);
			irqd_clr_activated(irq_data);
		}
	}
	return 0;

cleanup:
	for_each_msi_entry(desc, dev) {
		struct irq_data *irqd;

		if (desc->irq == virq)
			break;

		irqd = irq_domain_get_irq_data(domain, desc->irq);
		if (irqd_is_activated(irqd))
			irq_domain_deactivate_irq(irqd);
	}
	msi_domain_free_irqs(domain, dev);
	return ret;
}

/**
 * msi_domain_free_irqs - Free interrupts from an MSI interrupt @domain associated to @dev
 * @domain: The domain managing the interrupts
 * @dev: Pointer to device struct of the device for which the interrupts
 *	 are freed
 */
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
{
	struct msi_desc *desc;

	for_each_msi_entry(desc, dev) {
		/*
		 * We might have failed to allocate an MSI early
		 * enough that there is no IRQ associated with this
		 * entry. If that's the case, don't do anything.
		 */
		if (desc->irq) {
			irq_domain_free_irqs(desc->irq, desc->nvec_used);
			desc->irq = 0;
		}
	}
}

/**
 * msi_get_domain_info - Get the MSI interrupt domain info for @domain
 * @domain: The interrupt domain to retrieve data from
 *
 * Returns the pointer to the msi_domain_info stored in
 * @domain->host_data.
 */
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain)
{
	return (struct msi_domain_info *)domain->host_data;
}

#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */
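#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
/*
 * Illustrative sketch, not part of the original file: the allocate/free
 * pairing a hypothetical bus layer might perform around the two helpers
 * above. The function name and the inline teardown are assumptions for
 * the example only; a real bus layer frees on a separate teardown path.
 */
static int __maybe_unused example_setup_device_irqs(struct irq_domain *domain,
						    struct device *dev,
						    int nvec)
{
	int ret;

	/* Allocates one Linux interrupt per msi_desc attached to @dev */
	ret = msi_domain_alloc_irqs(domain, dev, nvec);
	if (ret)
		return ret;

	/* ... request_irq() and enable MSI in the device here ... */

	/* Stand-in for the later teardown path */
	msi_domain_free_irqs(domain, dev);
	return 0;
}
#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */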