/*
 * linux/kernel/irq/msi.c
 *
 * Copyright (C) 2014 Intel Corp.
 * Author:	Jiang Liu <jiang.liu@linux.intel.com>
 *
 * This file is licensed under GPLv2.
 *
 * This file contains common code to support Message Signalled Interrupt for
 * PCI compatible and non PCI compatible devices.
 */
#include <linux/types.h>
#include <linux/device.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>

/* Temporary solution for building, will be removed later */
#include <linux/pci.h>

/**
 * alloc_msi_entry - Allocate and initialize an msi_entry
 * @dev:	Pointer to the device for which this is allocated
 * @nvec:	The number of vectors used in this entry
 * @affinity:	Optional pointer to an affinity mask array of size @nvec
 *
 * If @affinity is not NULL then an affinity array of size @nvec is allocated
 * and the affinity masks from @affinity are copied.
 */
struct msi_desc *
alloc_msi_entry(struct device *dev, int nvec, const struct cpumask *affinity)
{
	struct msi_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	INIT_LIST_HEAD(&desc->list);
	desc->dev = dev;
	desc->nvec_used = nvec;
	if (affinity) {
		desc->affinity = kmemdup(affinity,
					 nvec * sizeof(*desc->affinity),
					 GFP_KERNEL);
		if (!desc->affinity) {
			kfree(desc);
			return NULL;
		}
	}

	return desc;
}

void free_msi_entry(struct msi_desc *entry)
{
	kfree(entry->affinity);
	kfree(entry);
}

void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	*msg = entry->msg;
}

void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	__get_cached_msi_msg(entry, msg);
}
EXPORT_SYMBOL_GPL(get_cached_msi_msg);

#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static inline void irq_chip_write_msi_msg(struct irq_data *data,
					  struct msi_msg *msg)
{
	data->chip->irq_write_msi_msg(data, msg);
}

/**
 * msi_domain_set_affinity - Generic affinity setter function for MSI domains
 * @irq_data:	The irq data associated to the interrupt
 * @mask:	The affinity mask to set
 * @force:	Flag to enforce setting (disable online checks)
 *
 * Intended to be used by MSI interrupt controllers which are
 * implemented with hierarchical domains.
 */
int msi_domain_set_affinity(struct irq_data *irq_data,
			    const struct cpumask *mask, bool force)
{
	struct irq_data *parent = irq_data->parent_data;
	struct msi_msg msg;
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) {
		BUG_ON(irq_chip_compose_msi_msg(irq_data, &msg));
		irq_chip_write_msi_msg(irq_data, &msg);
	}

	return ret;
}
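/*
 * Usage sketch (illustrative only, not part of this file): an MSI
 * interrupt controller driver built on a hierarchical domain can plug
 * msi_domain_set_affinity() straight into its irq_chip, so affinity
 * changes are forwarded to the parent domain and the recomposed MSI
 * message is written back to the device. The foo_* names are
 * hypothetical; irq_chip_mask_parent() and irq_chip_unmask_parent()
 * are the stock hierarchy helpers.
 *
 *	static void foo_msi_write_msg(struct irq_data *data,
 *				      struct msi_msg *msg)
 *	{
 *		// program msg->address_hi/address_lo and msg->data
 *		// into the device's MSI registers
 *	}
 *
 *	static struct irq_chip foo_msi_chip = {
 *		.name			= "foo-MSI",
 *		.irq_mask		= irq_chip_mask_parent,
 *		.irq_unmask		= irq_chip_unmask_parent,
 *		.irq_write_msi_msg	= foo_msi_write_msg,
 *		.irq_set_affinity	= msi_domain_set_affinity,
 *	};
 *
 * Leaving .irq_set_affinity NULL and setting MSI_FLAG_USE_DEF_CHIP_OPS
 * has the same effect, see msi_domain_update_chip_ops() below.
 */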
static void msi_domain_activate(struct irq_domain *domain,
				struct irq_data *irq_data)
{
	struct msi_msg msg;

	BUG_ON(irq_chip_compose_msi_msg(irq_data, &msg));
	irq_chip_write_msi_msg(irq_data, &msg);
}

static void msi_domain_deactivate(struct irq_domain *domain,
				  struct irq_data *irq_data)
{
	struct msi_msg msg;

	memset(&msg, 0, sizeof(msg));
	irq_chip_write_msi_msg(irq_data, &msg);
}

static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs, void *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	irq_hw_number_t hwirq = ops->get_hwirq(info, arg);
	int i, ret;

	if (irq_find_mapping(domain, hwirq) > 0)
		return -EEXIST;

	if (domain->parent) {
		ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < nr_irqs; i++) {
		ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg);
		if (ret < 0) {
			if (ops->msi_free) {
				/* Unwind all initialized entries, including
				 * the first one at virq + 0. */
				for (i--; i >= 0; i--)
					ops->msi_free(domain, info, virq + i);
			}
			irq_domain_free_irqs_top(domain, virq, nr_irqs);
			return ret;
		}
	}

	return 0;
}

static void msi_domain_free(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs)
{
	struct msi_domain_info *info = domain->host_data;
	int i;

	if (info->ops->msi_free) {
		for (i = 0; i < nr_irqs; i++)
			info->ops->msi_free(domain, info, virq + i);
	}
	irq_domain_free_irqs_top(domain, virq, nr_irqs);
}

static const struct irq_domain_ops msi_domain_ops = {
	.alloc		= msi_domain_alloc,
	.free		= msi_domain_free,
	.activate	= msi_domain_activate,
	.deactivate	= msi_domain_deactivate,
};

#ifdef GENERIC_MSI_DOMAIN_OPS
static irq_hw_number_t msi_domain_ops_get_hwirq(struct msi_domain_info *info,
						msi_alloc_info_t *arg)
{
	return arg->hwirq;
}

static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev,
				  int nvec, msi_alloc_info_t *arg)
{
	memset(arg, 0, sizeof(*arg));
	return 0;
}

static void msi_domain_ops_set_desc(msi_alloc_info_t *arg,
				    struct msi_desc *desc)
{
	arg->desc = desc;
}
#else
#define msi_domain_ops_get_hwirq	NULL
#define msi_domain_ops_prepare		NULL
#define msi_domain_ops_set_desc		NULL
#endif /* !GENERIC_MSI_DOMAIN_OPS */

static int msi_domain_ops_init(struct irq_domain *domain,
			       struct msi_domain_info *info,
			       unsigned int virq, irq_hw_number_t hwirq,
			       msi_alloc_info_t *arg)
{
	irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip,
				      info->chip_data);
	if (info->handler && info->handler_name) {
		__irq_set_handler(virq, info->handler, 0, info->handler_name);
		if (info->handler_data)
			irq_set_handler_data(virq, info->handler_data);
	}
	return 0;
}

static int msi_domain_ops_check(struct irq_domain *domain,
				struct msi_domain_info *info,
				struct device *dev)
{
	return 0;
}

static struct msi_domain_ops msi_domain_ops_default = {
	.get_hwirq	= msi_domain_ops_get_hwirq,
	.msi_init	= msi_domain_ops_init,
	.msi_check	= msi_domain_ops_check,
	.msi_prepare	= msi_domain_ops_prepare,
	.set_desc	= msi_domain_ops_set_desc,
};

static void msi_domain_update_dom_ops(struct msi_domain_info *info)
{
	struct msi_domain_ops *ops = info->ops;

	if (ops == NULL) {
		info->ops = &msi_domain_ops_default;
		return;
	}

	if (ops->get_hwirq == NULL)
		ops->get_hwirq = msi_domain_ops_default.get_hwirq;
	if (ops->msi_init == NULL)
		ops->msi_init = msi_domain_ops_default.msi_init;
	if (ops->msi_check == NULL)
		ops->msi_check = msi_domain_ops_default.msi_check;
	if (ops->msi_prepare == NULL)
		ops->msi_prepare = msi_domain_ops_default.msi_prepare;
	if (ops->set_desc == NULL)
		ops->set_desc = msi_domain_ops_default.set_desc;
}

static void msi_domain_update_chip_ops(struct msi_domain_info *info)
{
	struct irq_chip *chip = info->chip;

	BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask);
	if (!chip->irq_set_affinity)
		chip->irq_set_affinity = msi_domain_set_affinity;
}

/**
 * msi_create_irq_domain - Create a MSI interrupt domain
 * @fwnode:	Optional fwnode of the interrupt controller
 * @info:	MSI domain info
 * @parent:	Parent irq domain
 */
struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
					 struct msi_domain_info *info,
					 struct irq_domain *parent)
{
	if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
		msi_domain_update_dom_ops(info);
	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
		msi_domain_update_chip_ops(info);

	return irq_domain_create_hierarchy(parent, 0, 0, fwnode,
					   &msi_domain_ops, info);
}
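/*
 * Usage sketch (illustrative only): a driver typically fills in a
 * struct msi_domain_info, lets the MSI_FLAG_USE_DEF_* flags pull in the
 * default domain and chip callbacks, and stacks the MSI domain on top
 * of its parent (e.g. the vector or GIC domain). foo_*, node and
 * parent_domain are hypothetical.
 *
 *	static struct msi_domain_ops foo_msi_domain_ops = {
 *		// unset callbacks are filled in from msi_domain_ops_default
 *	};
 *
 *	static struct msi_domain_info foo_msi_domain_info = {
 *		.flags	= MSI_FLAG_USE_DEF_DOM_OPS |
 *			  MSI_FLAG_USE_DEF_CHIP_OPS,
 *		.ops	= &foo_msi_domain_ops,
 *		.chip	= &foo_msi_chip,
 *	};
 *
 *	domain = msi_create_irq_domain(of_node_to_fwnode(node),
 *				       &foo_msi_domain_info, parent_domain);
 *	if (!domain)
 *		return -ENOMEM;
 */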
int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec, msi_alloc_info_t *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	int ret;

	ret = ops->msi_check(domain, info, dev);
	if (ret == 0)
		ret = ops->msi_prepare(domain, dev, nvec, arg);

	return ret;
}

int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
			     int virq, int nvec, msi_alloc_info_t *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	struct msi_desc *desc;
	int ret = 0;

	for_each_msi_entry(desc, dev) {
		/* Don't even try the multi-MSI brain damage. */
		if (WARN_ON(!desc->irq || desc->nvec_used != 1)) {
			ret = -EINVAL;
			break;
		}

		if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
			continue;

		ops->set_desc(arg, desc);
		/* Assumes the domain mutex is held! */
		ret = irq_domain_alloc_irqs_recursive(domain, virq, 1, arg);
		if (ret)
			break;

		irq_set_msi_desc_off(virq, 0, desc);
	}

	if (ret) {
		/* Mop up the damage */
		for_each_msi_entry(desc, dev) {
			if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
				continue;

			irq_domain_free_irqs_common(domain, desc->irq, 1);
		}
	}

	return ret;
}

/**
 * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain
 * @domain:	The domain to allocate from
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are allocated
 * @nvec:	The number of interrupts to allocate
 *
 * Returns 0 on success or an error code.
 */
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			  int nvec)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	msi_alloc_info_t arg;
	struct msi_desc *desc;
	int i, ret, virq;

	ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg);
	if (ret)
		return ret;

	for_each_msi_entry(desc, dev) {
		ops->set_desc(&arg, desc);

		virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used,
					       dev_to_node(dev), &arg, false,
					       desc->affinity);
		if (virq < 0) {
			ret = -ENOSPC;
			if (ops->handle_error)
				ret = ops->handle_error(domain, desc, ret);
			if (ops->msi_finish)
				ops->msi_finish(&arg, ret);
			return ret;
		}

		for (i = 0; i < desc->nvec_used; i++)
			irq_set_msi_desc_off(virq, i, desc);
	}

	if (ops->msi_finish)
		ops->msi_finish(&arg, 0);

	for_each_msi_entry(desc, dev) {
		virq = desc->irq;
		if (desc->nvec_used == 1)
			dev_dbg(dev, "irq %d for MSI\n", virq);
		else
			dev_dbg(dev, "irq [%d-%d] for MSI\n",
				virq, virq + desc->nvec_used - 1);
		/*
		 * This flag is set by the PCI layer as we need to activate
		 * the MSI entries before the PCI layer enables MSI in the
		 * card. Otherwise the card latches a random msi message.
		 */
		if (info->flags & MSI_FLAG_ACTIVATE_EARLY) {
			struct irq_data *irq_data;

			irq_data = irq_domain_get_irq_data(domain, desc->irq);
			irq_domain_activate_irq(irq_data);
		}
	}

	return 0;
}
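/*
 * Usage sketch (illustrative only): bus code first attaches msi_desc
 * entries to the device's MSI list (e.g. allocated with
 * alloc_msi_entry() above), then asks the domain for interrupts and
 * later tears them down with msi_domain_free_irqs(). dev, domain and
 * nvec are hypothetical.
 *
 *	struct msi_desc *desc;
 *	int ret;
 *
 *	ret = msi_domain_alloc_irqs(domain, dev, nvec);
 *	if (ret)
 *		return ret;
 *
 *	// desc->irq .. desc->irq + desc->nvec_used - 1 are now valid
 *	// Linux interrupt numbers for each descriptor
 *	for_each_msi_entry(desc, dev)
 *		dev_dbg(dev, "MSI irq %d\n", desc->irq);
 *
 *	msi_domain_free_irqs(domain, dev);
 */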
/**
 * msi_domain_free_irqs - Free interrupts from a MSI interrupt @domain associated to @dev
 * @domain:	The domain managing the interrupts
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are freed
 */
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
{
	struct msi_desc *desc;

	for_each_msi_entry(desc, dev) {
		/*
		 * We might have failed to allocate an MSI early
		 * enough that there is no IRQ associated to this
		 * entry. If that's the case, don't do anything.
		 */
		if (desc->irq) {
			irq_domain_free_irqs(desc->irq, desc->nvec_used);
			desc->irq = 0;
		}
	}
}

/**
 * msi_get_domain_info - Get the MSI interrupt domain info for @domain
 * @domain:	The interrupt domain to retrieve data from
 *
 * Returns the pointer to the msi_domain_info stored in
 * @domain->host_data.
 */
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain)
{
	return (struct msi_domain_info *)domain->host_data;
}

#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */