// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014 Intel Corp.
 * Author: Jiang Liu <jiang.liu@linux.intel.com>
 *
 * This file is licensed under GPLv2.
 *
 * This file contains common code to support Message Signaled Interrupts for
 * PCI compatible and non PCI compatible devices.
 */
#include <linux/types.h>
#include <linux/device.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/pci.h>

#include "internals.h"

/**
 * alloc_msi_entry - Allocate an initialized msi_desc
 * @dev:	Pointer to the device for which this is allocated
 * @nvec:	The number of vectors used in this entry
 * @affinity:	Optional pointer to an affinity mask array size of @nvec
 *
 * If @affinity is not %NULL then an affinity array[@nvec] is allocated
 * and the affinity masks and flags from @affinity are copied.
 *
 * Return: pointer to allocated &msi_desc on success or %NULL on failure
 */
struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
				 const struct irq_affinity_desc *affinity)
{
	struct msi_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	INIT_LIST_HEAD(&desc->list);
	desc->dev = dev;
	desc->nvec_used = nvec;
	if (affinity) {
		desc->affinity = kmemdup(affinity,
					 nvec * sizeof(*desc->affinity), GFP_KERNEL);
		if (!desc->affinity) {
			kfree(desc);
			return NULL;
		}
	}

	return desc;
}

void free_msi_entry(struct msi_desc *entry)
{
	kfree(entry->affinity);
	kfree(entry);
}
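/*
 * A minimal usage sketch for alloc_msi_entry() with an affinity array
 * (illustrative only; "dev" and the vector count are assumptions). Note
 * that alloc_msi_entry() copies the array, so the caller may free it:
 *
 *	struct irq_affinity affd = { };
 *	struct irq_affinity_desc *masks;
 *	struct msi_desc *desc;
 *
 *	masks = irq_create_affinity_masks(4, &affd);
 *	desc = alloc_msi_entry(dev, 4, masks);
 *	kfree(masks);
 *	if (!desc)
 *		return -ENOMEM;
 */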
/**
 * msi_add_msi_desc - Allocate and initialize a MSI descriptor
 * @dev:	Pointer to the device for which the descriptor is allocated
 * @init_desc:	Pointer to an MSI descriptor to initialize the new descriptor
 *
 * Return: 0 on success or an appropriate failure code.
 */
int msi_add_msi_desc(struct device *dev, struct msi_desc *init_desc)
{
	struct msi_desc *desc;

	lockdep_assert_held(&dev->msi.data->mutex);

	desc = alloc_msi_entry(dev, init_desc->nvec_used, init_desc->affinity);
	if (!desc)
		return -ENOMEM;

	/* Copy the MSI index and type specific data to the new descriptor. */
	desc->msi_index = init_desc->msi_index;
	desc->pci = init_desc->pci;

	list_add_tail(&desc->list, &dev->msi.data->list);
	return 0;
}

/**
 * msi_add_simple_msi_descs - Allocate and initialize MSI descriptors
 * @dev:	Pointer to the device for which the descriptors are allocated
 * @index:	Index for the first MSI descriptor
 * @ndesc:	Number of descriptors to allocate
 *
 * Return: 0 on success or an appropriate failure code.
 */
static int msi_add_simple_msi_descs(struct device *dev, unsigned int index, unsigned int ndesc)
{
	struct msi_desc *desc, *tmp;
	LIST_HEAD(list);
	unsigned int i;

	lockdep_assert_held(&dev->msi.data->mutex);

	for (i = 0; i < ndesc; i++) {
		desc = alloc_msi_entry(dev, 1, NULL);
		if (!desc)
			goto fail;
		desc->msi_index = index + i;
		list_add_tail(&desc->list, &list);
	}
	list_splice_tail(&list, &dev->msi.data->list);
	return 0;

fail:
	list_for_each_entry_safe(desc, tmp, &list, list) {
		list_del(&desc->list);
		free_msi_entry(desc);
	}
	return -ENOMEM;
}
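/*
 * Illustrative sketch of how a bus layer might feed msi_add_msi_desc()
 * ("dev" and the index value are assumptions): a template descriptor is
 * filled on the stack and the core allocates and queues the real one.
 * The descriptor mutex must be held, as the lockdep assertion enforces:
 *
 *	struct msi_desc desc = {
 *		.nvec_used = 1,
 *		.msi_index = 0,
 *	};
 *	int ret;
 *
 *	msi_lock_descs(dev);
 *	ret = msi_add_msi_desc(dev, &desc);
 *	msi_unlock_descs(dev);
 */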
/**
 * msi_free_msi_descs_range - Free MSI descriptors of a device
 * @dev:		Device to free the descriptors
 * @filter:		Descriptor state filter
 * @first_index:	Index to start freeing from
 * @last_index:		Last index to be freed
 */
void msi_free_msi_descs_range(struct device *dev, enum msi_desc_filter filter,
			      unsigned int first_index, unsigned int last_index)
{
	struct msi_desc *desc;

	lockdep_assert_held(&dev->msi.data->mutex);

	msi_for_each_desc(desc, dev, filter) {
		/*
		 * Stupid for now to handle MSI device domain until the
		 * storage is switched over to an xarray.
		 */
		if (desc->msi_index < first_index || desc->msi_index > last_index)
			continue;
		list_del(&desc->list);
		free_msi_entry(desc);
	}
}

void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	*msg = entry->msg;
}

void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	__get_cached_msi_msg(entry, msg);
}
EXPORT_SYMBOL_GPL(get_cached_msi_msg);

static void msi_device_data_release(struct device *dev, void *res)
{
	struct msi_device_data *md = res;

	WARN_ON_ONCE(!list_empty(&md->list));
	dev->msi.data = NULL;
}
/**
 * msi_setup_device_data - Setup MSI device data
 * @dev:	Device for which MSI device data should be set up
 *
 * Return: 0 on success, appropriate error code otherwise
 *
 * This can be called more than once for @dev. If the MSI device data is
 * already allocated the call succeeds. The allocated memory is
 * automatically released when the device is destroyed.
 */
int msi_setup_device_data(struct device *dev)
{
	struct msi_device_data *md;

	if (dev->msi.data)
		return 0;

	md = devres_alloc(msi_device_data_release, sizeof(*md), GFP_KERNEL);
	if (!md)
		return -ENOMEM;

	INIT_LIST_HEAD(&md->list);
	mutex_init(&md->mutex);
	dev->msi.data = md;
	devres_add(dev, md);
	return 0;
}

/**
 * msi_lock_descs - Lock the MSI descriptor storage of a device
 * @dev:	Device to operate on
 */
void msi_lock_descs(struct device *dev)
{
	mutex_lock(&dev->msi.data->mutex);
}
EXPORT_SYMBOL_GPL(msi_lock_descs);

/**
 * msi_unlock_descs - Unlock the MSI descriptor storage of a device
 * @dev:	Device to operate on
 */
void msi_unlock_descs(struct device *dev)
{
	/* Clear the next pointer which was cached by the iterator */
	dev->msi.data->__next = NULL;
	mutex_unlock(&dev->msi.data->mutex);
}
EXPORT_SYMBOL_GPL(msi_unlock_descs);
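/*
 * Sketch of the expected call order (illustrative; the surrounding bus
 * code is an assumption): the per device MSI data is set up once, and all
 * subsequent descriptor or iterator work happens inside the lock pair.
 *
 *	ret = msi_setup_device_data(dev);
 *	if (ret)
 *		return ret;
 *
 *	msi_lock_descs(dev);
 *	// ... descriptor allocation, iteration, interrupt setup ...
 *	msi_unlock_descs(dev);
 */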
static bool msi_desc_match(struct msi_desc *desc, enum msi_desc_filter filter)
{
	switch (filter) {
	case MSI_DESC_ALL:
		return true;
	case MSI_DESC_NOTASSOCIATED:
		return !desc->irq;
	case MSI_DESC_ASSOCIATED:
		return !!desc->irq;
	}
	WARN_ON_ONCE(1);
	return false;
}

static struct msi_desc *msi_find_first_desc(struct device *dev, enum msi_desc_filter filter)
{
	struct msi_desc *desc;

	list_for_each_entry(desc, dev_to_msi_list(dev), list) {
		if (msi_desc_match(desc, filter))
			return desc;
	}
	return NULL;
}

/**
 * msi_first_desc - Get the first MSI descriptor of a device
 * @dev:	Device to operate on
 * @filter:	Descriptor state filter
 *
 * Must be called with the MSI descriptor mutex held, i.e. msi_lock_descs()
 * must be invoked before the call.
 *
 * Return: Pointer to the first MSI descriptor matching the search
 *	   criteria, NULL if none found.
 */
struct msi_desc *msi_first_desc(struct device *dev, enum msi_desc_filter filter)
{
	struct msi_desc *desc;

	if (WARN_ON_ONCE(!dev->msi.data))
		return NULL;

	lockdep_assert_held(&dev->msi.data->mutex);

	desc = msi_find_first_desc(dev, filter);
	dev->msi.data->__next = desc ? list_next_entry(desc, list) : NULL;
	return desc;
}
EXPORT_SYMBOL_GPL(msi_first_desc);

static struct msi_desc *__msi_next_desc(struct device *dev, enum msi_desc_filter filter,
					struct msi_desc *from)
{
	struct msi_desc *desc = from;

	list_for_each_entry_from(desc, dev_to_msi_list(dev), list) {
		if (msi_desc_match(desc, filter))
			return desc;
	}
	return NULL;
}

/**
 * msi_next_desc - Get the next MSI descriptor of a device
 * @dev:	Device to operate on
 * @filter:	Descriptor state filter
 *
 * The first invocation of msi_next_desc() has to be preceded by a
 * successful invocation of msi_first_desc(). Consecutive invocations are
 * only valid if the previous one was successful. All these operations have
 * to be done within the same MSI mutex held region.
 *
 * Return: Pointer to the next MSI descriptor matching the search
 *	   criteria, NULL if none found.
 */
struct msi_desc *msi_next_desc(struct device *dev, enum msi_desc_filter filter)
{
	struct msi_device_data *data = dev->msi.data;
	struct msi_desc *desc;

	if (WARN_ON_ONCE(!data))
		return NULL;

	lockdep_assert_held(&data->mutex);

	if (!data->__next)
		return NULL;

	desc = __msi_next_desc(dev, filter, data->__next);
	data->__next = desc ? list_next_entry(desc, list) : NULL;
	return desc;
}
EXPORT_SYMBOL_GPL(msi_next_desc);
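/*
 * The two functions above back the msi_for_each_desc() iterator. A typical
 * walk over all descriptors which already have a Linux interrupt assigned
 * looks like this (illustrative sketch; "dev" is an assumption):
 *
 *	struct msi_desc *desc;
 *
 *	msi_lock_descs(dev);
 *	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED)
 *		pr_debug("index %u -> irq %u\n", desc->msi_index, desc->irq);
 *	msi_unlock_descs(dev);
 *
 * Note that msi_unlock_descs() clears the cached __next pointer, so an
 * iteration must never span a lock/unlock boundary.
 */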
/**
 * msi_get_virq - Return Linux interrupt number of a MSI interrupt
 * @dev:	Device to operate on
 * @index:	MSI interrupt index to look for (0-based)
 *
 * Return: The Linux interrupt number on success (> 0), 0 if not found
 */
unsigned int msi_get_virq(struct device *dev, unsigned int index)
{
	struct msi_desc *desc;
	bool pcimsi;

	if (!dev->msi.data)
		return 0;

	pcimsi = dev_is_pci(dev) ? to_pci_dev(dev)->msi_enabled : false;

	for_each_msi_entry(desc, dev) {
		/* PCI-MSI has only one descriptor for multiple interrupts. */
		if (pcimsi) {
			if (desc->irq && index < desc->nvec_used)
				return desc->irq + index;
			break;
		}

		/*
		 * PCI-MSIX and platform MSI use a descriptor per
		 * interrupt.
		 */
		if (desc->msi_index == index)
			return desc->irq;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(msi_get_virq);
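/*
 * Typical driver side use of msi_get_virq() (illustrative; the handler,
 * name and data pointer are assumptions): translate the device relative
 * MSI index into the Linux interrupt number before requesting it.
 *
 *	unsigned int virq = msi_get_virq(&pdev->dev, 0);
 *
 *	if (!virq)
 *		return -ENOENT;
 *	ret = request_irq(virq, my_handler, 0, "my-device", my_data);
 */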
"msix" : "msi"); 3572f170814SBarry Song } 3582f170814SBarry Song 3592f170814SBarry Song /** 3602f170814SBarry Song * msi_populate_sysfs - Populate msi_irqs sysfs entries for devices 3612f170814SBarry Song * @dev: The device(PCI, platform etc) who will get sysfs entries 3622f170814SBarry Song */ 36324cff375SThomas Gleixner static const struct attribute_group **msi_populate_sysfs(struct device *dev) 3642f170814SBarry Song { 3652f170814SBarry Song const struct attribute_group **msi_irq_groups; 3662f170814SBarry Song struct attribute **msi_attrs, *msi_attr; 3672f170814SBarry Song struct device_attribute *msi_dev_attr; 3682f170814SBarry Song struct attribute_group *msi_irq_group; 3692f170814SBarry Song struct msi_desc *entry; 3702f170814SBarry Song int ret = -ENOMEM; 3712f170814SBarry Song int num_msi = 0; 3722f170814SBarry Song int count = 0; 3732f170814SBarry Song int i; 3742f170814SBarry Song 3752f170814SBarry Song /* Determine how many msi entries we have */ 3762f170814SBarry Song for_each_msi_entry(entry, dev) 3772f170814SBarry Song num_msi += entry->nvec_used; 3782f170814SBarry Song if (!num_msi) 3792f170814SBarry Song return NULL; 3802f170814SBarry Song 3812f170814SBarry Song /* Dynamically create the MSI attributes for the device */ 3822f170814SBarry Song msi_attrs = kcalloc(num_msi + 1, sizeof(void *), GFP_KERNEL); 3832f170814SBarry Song if (!msi_attrs) 3842f170814SBarry Song return ERR_PTR(-ENOMEM); 3852f170814SBarry Song 3862f170814SBarry Song for_each_msi_entry(entry, dev) { 3872f170814SBarry Song for (i = 0; i < entry->nvec_used; i++) { 3882f170814SBarry Song msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL); 3892f170814SBarry Song if (!msi_dev_attr) 3902f170814SBarry Song goto error_attrs; 3912f170814SBarry Song msi_attrs[count] = &msi_dev_attr->attr; 3922f170814SBarry Song 3932f170814SBarry Song sysfs_attr_init(&msi_dev_attr->attr); 3942f170814SBarry Song msi_dev_attr->attr.name = kasprintf(GFP_KERNEL, "%d", 3952f170814SBarry Song entry->irq + i); 3962f170814SBarry Song if (!msi_dev_attr->attr.name) 3972f170814SBarry Song goto error_attrs; 3982f170814SBarry Song msi_dev_attr->attr.mode = 0444; 3992f170814SBarry Song msi_dev_attr->show = msi_mode_show; 4002f170814SBarry Song ++count; 4012f170814SBarry Song } 4022f170814SBarry Song } 4032f170814SBarry Song 4042f170814SBarry Song msi_irq_group = kzalloc(sizeof(*msi_irq_group), GFP_KERNEL); 4052f170814SBarry Song if (!msi_irq_group) 4062f170814SBarry Song goto error_attrs; 4072f170814SBarry Song msi_irq_group->name = "msi_irqs"; 4082f170814SBarry Song msi_irq_group->attrs = msi_attrs; 4092f170814SBarry Song 4102f170814SBarry Song msi_irq_groups = kcalloc(2, sizeof(void *), GFP_KERNEL); 4112f170814SBarry Song if (!msi_irq_groups) 4122f170814SBarry Song goto error_irq_group; 4132f170814SBarry Song msi_irq_groups[0] = msi_irq_group; 4142f170814SBarry Song 4152f170814SBarry Song ret = sysfs_create_groups(&dev->kobj, msi_irq_groups); 4162f170814SBarry Song if (ret) 4172f170814SBarry Song goto error_irq_groups; 4182f170814SBarry Song 4192f170814SBarry Song return msi_irq_groups; 4202f170814SBarry Song 4212f170814SBarry Song error_irq_groups: 4222f170814SBarry Song kfree(msi_irq_groups); 4232f170814SBarry Song error_irq_group: 4242f170814SBarry Song kfree(msi_irq_group); 4252f170814SBarry Song error_attrs: 4262f170814SBarry Song count = 0; 4272f170814SBarry Song msi_attr = msi_attrs[count]; 4282f170814SBarry Song while (msi_attr) { 4292f170814SBarry Song msi_dev_attr = container_of(msi_attr, struct device_attribute, attr); 
/**
 * msi_device_populate_sysfs - Populate msi_irqs sysfs entries for a device
 * @dev:	The device (PCI, platform etc.) which will get sysfs entries
 */
int msi_device_populate_sysfs(struct device *dev)
{
	const struct attribute_group **group = msi_populate_sysfs(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);
	dev->msi.data->attrs = group;
	return 0;
}

/**
 * msi_device_destroy_sysfs - Destroy msi_irqs sysfs entries for a device
 * @dev:	The device (PCI, platform etc.) for which to remove
 *		sysfs entries
 */
void msi_device_destroy_sysfs(struct device *dev)
{
	const struct attribute_group **msi_irq_groups = dev->msi.data->attrs;
	struct device_attribute *dev_attr;
	struct attribute **msi_attrs;
	int count = 0;

	dev->msi.data->attrs = NULL;
	if (!msi_irq_groups)
		return;

	sysfs_remove_groups(&dev->kobj, msi_irq_groups);
	msi_attrs = msi_irq_groups[0]->attrs;
	while (msi_attrs[count]) {
		dev_attr = container_of(msi_attrs[count], struct device_attribute, attr);
		kfree(dev_attr->attr.name);
		kfree(dev_attr);
		++count;
	}
	kfree(msi_attrs);
	kfree(msi_irq_groups[0]);
	kfree(msi_irq_groups);
}
#endif

#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static inline void irq_chip_write_msi_msg(struct irq_data *data,
					  struct msi_msg *msg)
{
	data->chip->irq_write_msi_msg(data, msg);
}
static void msi_check_level(struct irq_domain *domain, struct msi_msg *msg)
{
	struct msi_domain_info *info = domain->host_data;

	/*
	 * If the MSI provider has messed with the second message and
	 * not advertised that it is level-capable, signal the breakage.
	 */
	WARN_ON(!((info->flags & MSI_FLAG_LEVEL_CAPABLE) &&
		  (info->chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)) &&
		(msg[1].address_lo || msg[1].address_hi || msg[1].data));
}

/**
 * msi_domain_set_affinity - Generic affinity setter function for MSI domains
 * @irq_data:	The irq data associated to the interrupt
 * @mask:	The affinity mask to set
 * @force:	Flag to enforce setting (disable online checks)
 *
 * Intended to be used by MSI interrupt controllers which are
 * implemented with hierarchical domains.
 *
 * Return: IRQ_SET_MASK_* result code
 */
int msi_domain_set_affinity(struct irq_data *irq_data,
			    const struct cpumask *mask, bool force)
{
	struct irq_data *parent = irq_data->parent_data;
	struct msi_msg msg[2] = { [1] = { }, };
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) {
		BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
		msi_check_level(irq_data->domain, msg);
		irq_chip_write_msi_msg(irq_data, msg);
	}

	return ret;
}

static int msi_domain_activate(struct irq_domain *domain,
			       struct irq_data *irq_data, bool early)
{
	struct msi_msg msg[2] = { [1] = { }, };

	BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
	msi_check_level(irq_data->domain, msg);
	irq_chip_write_msi_msg(irq_data, msg);
	return 0;
}

static void msi_domain_deactivate(struct irq_domain *domain,
				  struct irq_data *irq_data)
{
	struct msi_msg msg[2];

	memset(msg, 0, sizeof(msg));
	irq_chip_write_msi_msg(irq_data, msg);
}
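/*
 * For the activate/deactivate and affinity paths above to work, the parent
 * domain must provide irq_compose_msi_msg() and the MSI irq_chip an
 * irq_write_msi_msg() callback. A minimal sketch of such a chip (all
 * "my_*" names and registers are assumptions, not an existing driver):
 *
 *	static void my_msi_write_msg(struct irq_data *d, struct msi_msg *msg)
 *	{
 *		struct my_dev *mydev = irq_data_get_irq_chip_data(d);
 *
 *		writel(msg->address_lo, mydev->base + MY_MSI_ADDR_LO);
 *		writel(msg->address_hi, mydev->base + MY_MSI_ADDR_HI);
 *		writel(msg->data, mydev->base + MY_MSI_DATA);
 *	}
 *
 *	static struct irq_chip my_msi_chip = {
 *		.name			= "my-msi",
 *		.irq_write_msi_msg	= my_msi_write_msg,
 *		.irq_set_affinity	= msi_domain_set_affinity,
 *	};
 */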
static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs, void *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	irq_hw_number_t hwirq = ops->get_hwirq(info, arg);
	int i, ret;

	if (irq_find_mapping(domain, hwirq) > 0)
		return -EEXIST;

	if (domain->parent) {
		ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < nr_irqs; i++) {
		ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg);
		if (ret < 0) {
			if (ops->msi_free) {
				/* Undo all successfully initialized entries, including index 0 */
				for (i--; i >= 0; i--)
					ops->msi_free(domain, info, virq + i);
			}
			irq_domain_free_irqs_top(domain, virq, nr_irqs);
			return ret;
		}
	}

	return 0;
}

static void msi_domain_free(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs)
{
	struct msi_domain_info *info = domain->host_data;
	int i;

	if (info->ops->msi_free) {
		for (i = 0; i < nr_irqs; i++)
			info->ops->msi_free(domain, info, virq + i);
	}
	irq_domain_free_irqs_top(domain, virq, nr_irqs);
}

static const struct irq_domain_ops msi_domain_ops = {
	.alloc		= msi_domain_alloc,
	.free		= msi_domain_free,
	.activate	= msi_domain_activate,
	.deactivate	= msi_domain_deactivate,
};

static irq_hw_number_t msi_domain_ops_get_hwirq(struct msi_domain_info *info,
						msi_alloc_info_t *arg)
{
	return arg->hwirq;
}

static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev,
				  int nvec, msi_alloc_info_t *arg)
{
	memset(arg, 0, sizeof(*arg));
	return 0;
}

static void msi_domain_ops_set_desc(msi_alloc_info_t *arg,
				    struct msi_desc *desc)
{
	arg->desc = desc;
}

static int msi_domain_ops_init(struct irq_domain *domain,
			       struct msi_domain_info *info,
			       unsigned int virq, irq_hw_number_t hwirq,
			       msi_alloc_info_t *arg)
{
	irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip,
				      info->chip_data);
	if (info->handler && info->handler_name) {
		__irq_set_handler(virq, info->handler, 0, info->handler_name);
		if (info->handler_data)
			irq_set_handler_data(virq, info->handler_data);
	}
	return 0;
}

static int msi_domain_ops_check(struct irq_domain *domain,
				struct msi_domain_info *info,
				struct device *dev)
{
	return 0;
}
static struct msi_domain_ops msi_domain_ops_default = {
	.get_hwirq		= msi_domain_ops_get_hwirq,
	.msi_init		= msi_domain_ops_init,
	.msi_check		= msi_domain_ops_check,
	.msi_prepare		= msi_domain_ops_prepare,
	.set_desc		= msi_domain_ops_set_desc,
	.domain_alloc_irqs	= __msi_domain_alloc_irqs,
	.domain_free_irqs	= __msi_domain_free_irqs,
};

static void msi_domain_update_dom_ops(struct msi_domain_info *info)
{
	struct msi_domain_ops *ops = info->ops;

	if (ops == NULL) {
		info->ops = &msi_domain_ops_default;
		return;
	}

	if (ops->domain_alloc_irqs == NULL)
		ops->domain_alloc_irqs = msi_domain_ops_default.domain_alloc_irqs;
	if (ops->domain_free_irqs == NULL)
		ops->domain_free_irqs = msi_domain_ops_default.domain_free_irqs;

	if (!(info->flags & MSI_FLAG_USE_DEF_DOM_OPS))
		return;

	if (ops->get_hwirq == NULL)
		ops->get_hwirq = msi_domain_ops_default.get_hwirq;
	if (ops->msi_init == NULL)
		ops->msi_init = msi_domain_ops_default.msi_init;
	if (ops->msi_check == NULL)
		ops->msi_check = msi_domain_ops_default.msi_check;
	if (ops->msi_prepare == NULL)
		ops->msi_prepare = msi_domain_ops_default.msi_prepare;
	if (ops->set_desc == NULL)
		ops->set_desc = msi_domain_ops_default.set_desc;
}

static void msi_domain_update_chip_ops(struct msi_domain_info *info)
{
	struct irq_chip *chip = info->chip;

	BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask);
	if (!chip->irq_set_affinity)
		chip->irq_set_affinity = msi_domain_set_affinity;
}

/**
 * msi_create_irq_domain - Create an MSI interrupt domain
 * @fwnode:	Optional fwnode of the interrupt controller
 * @info:	MSI domain info
 * @parent:	Parent irq domain
 *
 * Return: pointer to the created &struct irq_domain or %NULL on failure
 */
struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
					 struct msi_domain_info *info,
					 struct irq_domain *parent)
{
	struct irq_domain *domain;

	msi_domain_update_dom_ops(info);
	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
		msi_domain_update_chip_ops(info);

	domain = irq_domain_create_hierarchy(parent, IRQ_DOMAIN_FLAG_MSI, 0,
					     fwnode, &msi_domain_ops, info);

	if (domain && !domain->name && info->chip)
		domain->name = info->chip->name;

	return domain;
}
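/*
 * Putting it together (illustrative sketch; all "my_*" names are
 * assumptions): an interrupt controller driver typically provides a
 * partially filled ops/chip structure and lets the MSI_FLAG_USE_DEF_*
 * flags pull in the defaults installed above. Note that the chip must at
 * least provide irq_mask()/irq_unmask(), see the BUG_ON() in
 * msi_domain_update_chip_ops().
 *
 *	static struct msi_domain_ops my_msi_ops = {
 *		// get_hwirq, msi_init etc. are filled in from the defaults
 *	};
 *
 *	static struct msi_domain_info my_msi_info = {
 *		.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
 *		.ops	= &my_msi_ops,
 *		.chip	= &my_msi_chip,
 *	};
 *
 *	domain = msi_create_irq_domain(fwnode, &my_msi_info, parent_domain);
 *	if (!domain)
 *		return -ENOMEM;
 */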
int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec, msi_alloc_info_t *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	int ret;

	ret = ops->msi_check(domain, info, dev);
	if (ret == 0)
		ret = ops->msi_prepare(domain, dev, nvec, arg);

	return ret;
}

int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
			     int virq_base, int nvec, msi_alloc_info_t *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	struct msi_desc *desc;
	int ret, virq;

	msi_lock_descs(dev);
	for (virq = virq_base; virq < virq_base + nvec; virq++) {
		desc = alloc_msi_entry(dev, 1, NULL);
		if (!desc) {
			ret = -ENOMEM;
			goto fail;
		}

		desc->msi_index = virq;
		desc->irq = virq;
		list_add_tail(&desc->list, &dev->msi.data->list);

		ops->set_desc(arg, desc);
		ret = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
		if (ret)
			goto fail;

		irq_set_msi_desc(virq, desc);
	}
	msi_unlock_descs(dev);
	return 0;

fail:
	for (--virq; virq >= virq_base; virq--)
		irq_domain_free_irqs_common(domain, virq, 1);
	msi_free_msi_descs_range(dev, MSI_DESC_ALL, virq_base, virq_base + nvec - 1);
	msi_unlock_descs(dev);
	return ret;
}
/*
 * Carefully check whether the device can use reservation mode. If
 * reservation mode is enabled then the early activation will assign a
 * dummy vector to the device. If the PCI/MSI device does not support
 * masking of the entry then this can result in spurious interrupts when
 * the device driver is not absolutely careful. But even then a malfunction
 * of the hardware could result in a spurious interrupt on the dummy vector
 * and render the device unusable. If the entry can be masked then the core
 * logic will prevent the spurious interrupt and reservation mode can be
 * used. For now reservation mode is restricted to PCI/MSI.
 */
static bool msi_check_reservation_mode(struct irq_domain *domain,
				       struct msi_domain_info *info,
				       struct device *dev)
{
	struct msi_desc *desc;

	switch (domain->bus_token) {
	case DOMAIN_BUS_PCI_MSI:
	case DOMAIN_BUS_VMD_MSI:
		break;
	default:
		return false;
	}

	if (!(info->flags & MSI_FLAG_MUST_REACTIVATE))
		return false;

	if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_ignore_mask)
		return false;

	/*
	 * Checking the first MSI descriptor is sufficient. MSIX supports
	 * masking and MSI does so when the can_mask attribute is set.
	 */
	desc = first_msi_entry(dev);
	return desc->pci.msi_attrib.is_msix || desc->pci.msi_attrib.can_mask;
}

static int msi_handle_pci_fail(struct irq_domain *domain, struct msi_desc *desc,
			       int allocated)
{
	switch (domain->bus_token) {
	case DOMAIN_BUS_PCI_MSI:
	case DOMAIN_BUS_VMD_MSI:
		if (IS_ENABLED(CONFIG_PCI_MSI))
			break;
		fallthrough;
	default:
		return -ENOSPC;
	}

	/* Let a failed PCI multi MSI allocation retry */
	if (desc->nvec_used > 1)
		return 1;

	/* If there was a successful allocation let the caller know */
	return allocated ? allocated : -ENOSPC;
}
#define VIRQ_CAN_RESERVE	0x01
#define VIRQ_ACTIVATE		0x02
#define VIRQ_NOMASK_QUIRK	0x04

static int msi_init_virq(struct irq_domain *domain, int virq, unsigned int vflags)
{
	struct irq_data *irqd = irq_domain_get_irq_data(domain, virq);
	int ret;

	if (!(vflags & VIRQ_CAN_RESERVE)) {
		irqd_clr_can_reserve(irqd);
		if (vflags & VIRQ_NOMASK_QUIRK)
			irqd_set_msi_nomask_quirk(irqd);
	}

	if (!(vflags & VIRQ_ACTIVATE))
		return 0;

	ret = irq_domain_activate_irq(irqd, vflags & VIRQ_CAN_RESERVE);
	if (ret)
		return ret;
	/*
	 * If the interrupt uses reservation mode, clear the activated bit
	 * so request_irq() will assign the final vector.
	 */
	if (vflags & VIRQ_CAN_RESERVE)
		irqd_clr_activated(irqd);
	return 0;
}
int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	msi_alloc_info_t arg = { };
	unsigned int vflags = 0;
	struct msi_desc *desc;
	int allocated = 0;
	int i, ret, virq;

	ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg);
	if (ret)
		return ret;

	/*
	 * This flag is set by the PCI layer as we need to activate
	 * the MSI entries before the PCI layer enables MSI in the
	 * card. Otherwise the card latches a random msi message.
	 */
	if (info->flags & MSI_FLAG_ACTIVATE_EARLY)
		vflags |= VIRQ_ACTIVATE;

	/*
	 * Interrupt can use a reserved vector and will not occupy
	 * a real device vector until the interrupt is requested.
	 */
	if (msi_check_reservation_mode(domain, info, dev)) {
		vflags |= VIRQ_CAN_RESERVE;
		/*
		 * MSI affinity setting requires a special quirk (X86) when
		 * reservation mode is active.
		 */
		if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK)
			vflags |= VIRQ_NOMASK_QUIRK;
	}

	msi_for_each_desc(desc, dev, MSI_DESC_NOTASSOCIATED) {
		ops->set_desc(&arg, desc);

		virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used,
					       dev_to_node(dev), &arg, false,
					       desc->affinity);
		if (virq < 0)
			return msi_handle_pci_fail(domain, desc, allocated);

		for (i = 0; i < desc->nvec_used; i++) {
			irq_set_msi_desc_off(virq, i, desc);
			irq_debugfs_copy_devname(virq + i, dev);
			ret = msi_init_virq(domain, virq + i, vflags);
			if (ret)
				return ret;
		}
		allocated++;
	}
	return 0;
}

static int msi_domain_add_simple_msi_descs(struct msi_domain_info *info,
					   struct device *dev,
					   unsigned int num_descs)
{
	if (!(info->flags & MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS))
		return 0;

	return msi_add_simple_msi_descs(dev, 0, num_descs);
}
/**
 * msi_domain_alloc_irqs_descs_locked - Allocate interrupts from a MSI interrupt domain
 * @domain:	The domain to allocate from
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are allocated
 * @nvec:	The number of interrupts to allocate
 *
 * Must be invoked from within a msi_lock_descs() / msi_unlock_descs()
 * pair. Use this for MSI irqdomains which implement their own vector
 * allocation/free.
 *
 * Return: %0 on success or an error code.
 */
int msi_domain_alloc_irqs_descs_locked(struct irq_domain *domain, struct device *dev,
				       int nvec)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	int ret;

	lockdep_assert_held(&dev->msi.data->mutex);

	ret = msi_domain_add_simple_msi_descs(info, dev, nvec);
	if (ret)
		return ret;

	ret = ops->domain_alloc_irqs(domain, dev, nvec);
	if (ret)
		goto cleanup;

	if (!(info->flags & MSI_FLAG_DEV_SYSFS))
		return 0;

	ret = msi_device_populate_sysfs(dev);
	if (ret)
		goto cleanup;
	return 0;

cleanup:
	msi_domain_free_irqs_descs_locked(domain, dev);
	return ret;
}
/**
 * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain
 * @domain:	The domain to allocate from
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are allocated
 * @nvec:	The number of interrupts to allocate
 *
 * Return: %0 on success or an error code.
 */
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, int nvec)
{
	int ret;

	msi_lock_descs(dev);
	ret = msi_domain_alloc_irqs_descs_locked(domain, dev, nvec);
	msi_unlock_descs(dev);
	return ret;
}

void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
{
	struct irq_data *irqd;
	struct msi_desc *desc;
	int i;

	/* Only handle MSI entries which have an interrupt associated */
	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
		/* Make sure all interrupts are deactivated */
		for (i = 0; i < desc->nvec_used; i++) {
			irqd = irq_domain_get_irq_data(domain, desc->irq + i);
			if (irqd && irqd_is_activated(irqd))
				irq_domain_deactivate_irq(irqd);
		}

		irq_domain_free_irqs(desc->irq, desc->nvec_used);
		desc->irq = 0;
	}
}

static void msi_domain_free_msi_descs(struct msi_domain_info *info,
				      struct device *dev)
{
	if (info->flags & MSI_FLAG_FREE_MSI_DESCS)
		msi_free_msi_descs(dev);
}
/**
 * msi_domain_free_irqs_descs_locked - Free interrupts from a MSI interrupt @domain associated to @dev
 * @domain:	The domain managing the interrupts
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are freed
 *
 * Must be invoked from within a msi_lock_descs() / msi_unlock_descs()
 * pair. Use this for MSI irqdomains which implement their own vector
 * allocation.
 */
void msi_domain_free_irqs_descs_locked(struct irq_domain *domain, struct device *dev)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;

	lockdep_assert_held(&dev->msi.data->mutex);

	if (info->flags & MSI_FLAG_DEV_SYSFS)
		msi_device_destroy_sysfs(dev);
	ops->domain_free_irqs(domain, dev);
	msi_domain_free_msi_descs(info, dev);
}

/**
 * msi_domain_free_irqs - Free interrupts from a MSI interrupt @domain associated to @dev
 * @domain:	The domain managing the interrupts
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are freed
 */
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
{
	msi_lock_descs(dev);
	msi_domain_free_irqs_descs_locked(domain, dev);
	msi_unlock_descs(dev);
}

/**
 * msi_get_domain_info - Get the MSI interrupt domain info for @domain
 * @domain:	The interrupt domain to retrieve data from
 *
 * Return: the pointer to the msi_domain_info stored in @domain->host_data.
 */
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain)
{
	return (struct msi_domain_info *)domain->host_data;
}

#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */
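/*
 * End-to-end usage sketch (illustrative; "dev", "domain", the handler and
 * the data pointer are assumptions): allocate vectors from the domain,
 * look up the Linux interrupt numbers, and free everything on teardown.
 *
 *	ret = msi_domain_alloc_irqs(domain, dev, nvec);
 *	if (ret)
 *		return ret;
 *
 *	for (i = 0; i < nvec; i++) {
 *		virq = msi_get_virq(dev, i);
 *		ret = request_irq(virq, my_handler, 0, "my-dev", my_data);
 *		if (ret)
 *			goto err_free;
 *	}
 *	...
 *	// teardown
 *	msi_domain_free_irqs(domain, dev);
 */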