// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/mm_types.h>
#include <linux/mmu_context.h>
#include <asm/copro.h>
#include <asm/pnv-ocxl.h>
#include <misc/ocxl.h>
#include "ocxl_internal.h"
#include "trace.h"


#define SPA_PASID_BITS		15
#define SPA_PASID_MAX		((1 << SPA_PASID_BITS) - 1)
#define SPA_PE_MASK		SPA_PASID_MAX
#define SPA_SPA_SIZE_LOG	22 /* Each SPA is 4 Mb */

#define SPA_CFG_SF		(1ull << (63-0))
#define SPA_CFG_TA		(1ull << (63-1))
#define SPA_CFG_HV		(1ull << (63-3))
#define SPA_CFG_UV		(1ull << (63-4))
#define SPA_CFG_XLAT_hpt	(0ull << (63-6)) /* Hashed page table (HPT) mode */
#define SPA_CFG_XLAT_roh	(2ull << (63-6)) /* Radix on HPT mode */
#define SPA_CFG_XLAT_ror	(3ull << (63-6)) /* Radix on Radix mode */
#define SPA_CFG_PR		(1ull << (63-49))
#define SPA_CFG_TC		(1ull << (63-54))
#define SPA_CFG_DR		(1ull << (63-59))

#define SPA_XSL_TF		(1ull << (63-3))  /* Translation fault */
#define SPA_XSL_S		(1ull << (63-38)) /* Store operation */

#define SPA_PE_VALID		0x80000000


struct pe_data {
	struct mm_struct *mm;
	/* callback to trigger when a translation fault occurs */
	void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr);
	/* opaque pointer to be passed to the above callback */
	void *xsl_err_data;
	struct rcu_head rcu;
};

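/*
 * Per-link state for the Shared Process Area (SPA): the table of
 * process elements shared with the NPU, the translation fault
 * interrupt and its registers, and a radix tree mapping PE handles
 * to their pe_data.
 */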
struct spa {
	struct ocxl_process_element *spa_mem;
	int spa_order;
	struct mutex spa_lock;
	struct radix_tree_root pe_tree; /* Maps PE handles to pe_data */
	char *irq_name;
	int virq;
	void __iomem *reg_dsisr;
	void __iomem *reg_dar;
	void __iomem *reg_tfc;
	void __iomem *reg_pe_handle;
	/*
	 * The following fields are used by the memory fault
	 * interrupt handler. We can only have one interrupt at a
	 * time. The NPU won't raise another interrupt until the
	 * previous one has been ack'd by writing to the TFC register
	 */
	struct xsl_fault {
		struct work_struct fault_work;
		u64 pe;
		u64 dsisr;
		u64 dar;
		struct pe_data pe_data;
	} xsl_fault;
};

/*
 * An opencapi link can be used by several PCI functions. We have
 * one link per device slot.
 *
 * A linked list of opencapi links should suffice, as there's a
 * limited number of opencapi slots on a system and lookup is only
 * done when the device is probed
 */
struct link {
	struct list_head list;
	struct kref ref;
	int domain;
	int bus;
	int dev;
	atomic_t irq_available;
	struct spa *spa;
	void *platform_data;
};
static struct list_head links_list = LIST_HEAD_INIT(links_list);
static DEFINE_MUTEX(links_list_lock);

enum xsl_response {
	CONTINUE,
	ADDRESS_ERROR,
	RESTART,
};


static void read_irq(struct spa *spa, u64 *dsisr, u64 *dar, u64 *pe)
{
	u64 reg;

	*dsisr = in_be64(spa->reg_dsisr);
	*dar = in_be64(spa->reg_dar);
	reg = in_be64(spa->reg_pe_handle);
	*pe = reg & SPA_PE_MASK;
}

static void ack_irq(struct spa *spa, enum xsl_response r)
{
	u64 reg = 0;

	/* continue is not supported */
	if (r == RESTART)
		reg = PPC_BIT(31);
	else if (r == ADDRESS_ERROR)
		reg = PPC_BIT(30);
	else
		WARN(1, "Invalid irq response %d\n", r);

	if (reg) {
		trace_ocxl_fault_ack(spa->spa_mem, spa->xsl_fault.pe,
				spa->xsl_fault.dsisr, spa->xsl_fault.dar, reg);
		out_be64(spa->reg_tfc, reg);
	}
}

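/*
 * Bottom half of the translation fault interrupt. Resolves the fault
 * against the saved mm with copro_handle_mm_fault() (and preloads the
 * hash page table when not running radix), then acks the interrupt:
 * RESTART on success, ADDRESS_ERROR otherwise.
 */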
static void xsl_fault_handler_bh(struct work_struct *fault_work)
{
	vm_fault_t flt = 0;
	unsigned long access, flags, inv_flags = 0;
	enum xsl_response r;
	struct xsl_fault *fault = container_of(fault_work, struct xsl_fault,
					fault_work);
	struct spa *spa = container_of(fault, struct spa, xsl_fault);

	int rc;

	/*
	 * We must release a reference on mm_users whenever exiting this
	 * function (taken in the memory fault interrupt handler)
	 */
	rc = copro_handle_mm_fault(fault->pe_data.mm, fault->dar, fault->dsisr,
				&flt);
	if (rc) {
		pr_debug("copro_handle_mm_fault failed: %d\n", rc);
		if (fault->pe_data.xsl_err_cb) {
			fault->pe_data.xsl_err_cb(
				fault->pe_data.xsl_err_data,
				fault->dar, fault->dsisr);
		}
		r = ADDRESS_ERROR;
		goto ack;
	}

	if (!radix_enabled()) {
		/*
		 * update_mmu_cache() will not have loaded the hash
		 * since current->trap is not a 0x400 or 0x300, so
		 * just call hash_page_mm() here.
		 */
		access = _PAGE_PRESENT | _PAGE_READ;
		if (fault->dsisr & SPA_XSL_S)
			access |= _PAGE_WRITE;

		if (REGION_ID(fault->dar) != USER_REGION_ID)
			access |= _PAGE_PRIVILEGED;

		local_irq_save(flags);
		hash_page_mm(fault->pe_data.mm, fault->dar, access, 0x300,
			inv_flags);
		local_irq_restore(flags);
	}
	r = RESTART;
ack:
	mmput(fault->pe_data.mm);
	ack_irq(spa, r);
}

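/*
 * Top half of the translation fault interrupt. Reads the fault
 * registers, looks up the pe_data for the faulting PE handle and, if
 * the mm is still live, hands the fault to the bottom half. Unknown
 * or invalid contexts are acked with ADDRESS_ERROR.
 */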
static irqreturn_t xsl_fault_handler(int irq, void *data)
{
	struct link *link = (struct link *) data;
	struct spa *spa = link->spa;
	u64 dsisr, dar, pe_handle;
	struct pe_data *pe_data;
	struct ocxl_process_element *pe;
	int lpid, pid, tid;
	bool schedule = false;

	read_irq(spa, &dsisr, &dar, &pe_handle);
	trace_ocxl_fault(spa->spa_mem, pe_handle, dsisr, dar, -1);

	WARN_ON(pe_handle > SPA_PE_MASK);
	pe = spa->spa_mem + pe_handle;
	lpid = be32_to_cpu(pe->lpid);
	pid = be32_to_cpu(pe->pid);
	tid = be32_to_cpu(pe->tid);
	/* We could be reading all null values here if the PE is being
	 * removed while an interrupt kicks in. It's not supposed to
	 * happen if the driver notified the AFU to terminate the
	 * PASID, and the AFU waited for pending operations before
	 * acknowledging. But even if it happens, we won't find a
	 * memory context below and will fail silently, so it should be ok.
	 */
	if (!(dsisr & SPA_XSL_TF)) {
		WARN(1, "Invalid xsl interrupt fault register %#llx\n", dsisr);
		ack_irq(spa, ADDRESS_ERROR);
		return IRQ_HANDLED;
	}

	rcu_read_lock();
	pe_data = radix_tree_lookup(&spa->pe_tree, pe_handle);
	if (!pe_data) {
		/*
		 * Could only happen if the driver didn't notify the
		 * AFU about PASID termination before removing the PE,
		 * or the AFU didn't wait for all memory accesses to
		 * have completed.
		 *
		 * Either way, we fail early, but we shouldn't log an
		 * error message, as it is a valid (if unexpected)
		 * scenario
		 */
		rcu_read_unlock();
		pr_debug("Unknown mm context for xsl interrupt\n");
		ack_irq(spa, ADDRESS_ERROR);
		return IRQ_HANDLED;
	}
	WARN_ON(pe_data->mm->context.id != pid);

	if (mmget_not_zero(pe_data->mm)) {
		spa->xsl_fault.pe = pe_handle;
		spa->xsl_fault.dar = dar;
		spa->xsl_fault.dsisr = dsisr;
		spa->xsl_fault.pe_data = *pe_data;
		schedule = true;
		/* mm_users count released by bottom half */
	}
	rcu_read_unlock();
	if (schedule)
		schedule_work(&spa->xsl_fault.fault_work);
	else
		ack_irq(spa, ADDRESS_ERROR);
	return IRQ_HANDLED;
}

static void unmap_irq_registers(struct spa *spa)
{
	pnv_ocxl_unmap_xsl_regs(spa->reg_dsisr, spa->reg_dar, spa->reg_tfc,
				spa->reg_pe_handle);
}

static int map_irq_registers(struct pci_dev *dev, struct spa *spa)
{
	return pnv_ocxl_map_xsl_regs(dev, &spa->reg_dsisr, &spa->reg_dar,
				&spa->reg_tfc, &spa->reg_pe_handle);
}

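/*
 * Map the XSL fault registers and wire up the translation fault
 * interrupt for the link: map the hardware irq to a virtual one and
 * register xsl_fault_handler() on it.
 */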
static int setup_xsl_irq(struct pci_dev *dev, struct link *link)
{
	struct spa *spa = link->spa;
	int rc;
	int hwirq;

	rc = pnv_ocxl_get_xsl_irq(dev, &hwirq);
	if (rc)
		return rc;

	rc = map_irq_registers(dev, spa);
	if (rc)
		return rc;

	spa->irq_name = kasprintf(GFP_KERNEL, "ocxl-xsl-%x-%x-%x",
				link->domain, link->bus, link->dev);
	if (!spa->irq_name) {
		unmap_irq_registers(spa);
		dev_err(&dev->dev, "Can't allocate name for xsl interrupt\n");
		return -ENOMEM;
	}
	/*
	 * At some point, we'll need to look into allowing a higher
	 * number of interrupts. Could we have an IRQ domain per link?
	 */
	spa->virq = irq_create_mapping(NULL, hwirq);
	if (!spa->virq) {
		kfree(spa->irq_name);
		unmap_irq_registers(spa);
		dev_err(&dev->dev,
			"irq_create_mapping failed for translation interrupt\n");
		return -EINVAL;
	}

	dev_dbg(&dev->dev, "hwirq %d mapped to virq %d\n", hwirq, spa->virq);

	rc = request_irq(spa->virq, xsl_fault_handler, 0, spa->irq_name,
			link);
	if (rc) {
		irq_dispose_mapping(spa->virq);
		kfree(spa->irq_name);
		unmap_irq_registers(spa);
		dev_err(&dev->dev,
			"request_irq failed for translation interrupt: %d\n",
			rc);
		return -EINVAL;
	}
	return 0;
}

static void release_xsl_irq(struct link *link)
{
	struct spa *spa = link->spa;

	if (spa->virq) {
		free_irq(spa->virq, link);
		irq_dispose_mapping(spa->virq);
	}
	kfree(spa->irq_name);
	unmap_irq_registers(spa);
}

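/*
 * Allocate a zeroed, physically contiguous Shared Process Area of
 * 2^SPA_SPA_SIZE_LOG bytes (4 MB) straight from the page allocator,
 * and attach it to the link.
 */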
static int alloc_spa(struct pci_dev *dev, struct link *link)
{
	struct spa *spa;

	spa = kzalloc(sizeof(struct spa), GFP_KERNEL);
	if (!spa)
		return -ENOMEM;

	mutex_init(&spa->spa_lock);
	INIT_RADIX_TREE(&spa->pe_tree, GFP_KERNEL);
	INIT_WORK(&spa->xsl_fault.fault_work, xsl_fault_handler_bh);

	spa->spa_order = SPA_SPA_SIZE_LOG - PAGE_SHIFT;
	spa->spa_mem = (struct ocxl_process_element *)
		__get_free_pages(GFP_KERNEL | __GFP_ZERO, spa->spa_order);
	if (!spa->spa_mem) {
		dev_err(&dev->dev, "Can't allocate Shared Process Area\n");
		kfree(spa);
		return -ENOMEM;
	}
	pr_debug("Allocated SPA for %x:%x:%x at %p\n", link->domain, link->bus,
		link->dev, spa->spa_mem);

	link->spa = spa;
	return 0;
}

static void free_spa(struct link *link)
{
	struct spa *spa = link->spa;

	pr_debug("Freeing SPA for %x:%x:%x\n", link->domain, link->bus,
		link->dev);

	if (spa && spa->spa_mem) {
		free_pages((unsigned long) spa->spa_mem, spa->spa_order);
		kfree(spa);
		link->spa = NULL;
	}
}

static int alloc_link(struct pci_dev *dev, int PE_mask, struct link **out_link)
{
	struct link *link;
	int rc;

	link = kzalloc(sizeof(struct link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	kref_init(&link->ref);
	link->domain = pci_domain_nr(dev->bus);
	link->bus = dev->bus->number;
	link->dev = PCI_SLOT(dev->devfn);
	atomic_set(&link->irq_available, MAX_IRQ_PER_LINK);

	rc = alloc_spa(dev, link);
	if (rc)
		goto err_free;

	rc = setup_xsl_irq(dev, link);
	if (rc)
		goto err_spa;

	/* platform specific hook */
	rc = pnv_ocxl_spa_setup(dev, link->spa->spa_mem, PE_mask,
				&link->platform_data);
	if (rc)
		goto err_xsl_irq;

	*out_link = link;
	return 0;

err_xsl_irq:
	release_xsl_irq(link);
err_spa:
	free_spa(link);
err_free:
	kfree(link);
	return rc;
}

static void free_link(struct link *link)
{
	release_xsl_irq(link);
	free_spa(link);
	kfree(link);
}

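/*
 * Get a handle on the (refcounted) link for the slot the PCI function
 * sits in, creating it on first use. All the functions of a device
 * share the same link.
 */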
int ocxl_link_setup(struct pci_dev *dev, int PE_mask, void **link_handle)
{
	int rc = 0;
	struct link *link;

	mutex_lock(&links_list_lock);
	list_for_each_entry(link, &links_list, list) {
		/* The functions of a device all share the same link */
		if (link->domain == pci_domain_nr(dev->bus) &&
			link->bus == dev->bus->number &&
			link->dev == PCI_SLOT(dev->devfn)) {
			kref_get(&link->ref);
			*link_handle = link;
			goto unlock;
		}
	}
	rc = alloc_link(dev, PE_mask, &link);
	if (rc)
		goto unlock;

	list_add(&link->list, &links_list);
	*link_handle = link;
unlock:
	mutex_unlock(&links_list_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(ocxl_link_setup);

static void release_xsl(struct kref *ref)
{
	struct link *link = container_of(ref, struct link, ref);

	list_del(&link->list);
	/* call platform code before releasing data */
	pnv_ocxl_spa_release(link->platform_data);
	free_link(link);
}

void ocxl_link_release(struct pci_dev *dev, void *link_handle)
{
	struct link *link = (struct link *) link_handle;

	mutex_lock(&links_list_lock);
	kref_put(&link->ref, release_xsl);
	mutex_unlock(&links_list_lock);
}
EXPORT_SYMBOL_GPL(ocxl_link_release);

static u64 calculate_cfg_state(bool kernel)
{
	u64 state;

	state = SPA_CFG_DR;
	if (mfspr(SPRN_LPCR) & LPCR_TC)
		state |= SPA_CFG_TC;
	if (radix_enabled())
		state |= SPA_CFG_XLAT_ror;
	else
		state |= SPA_CFG_XLAT_hpt;
	state |= SPA_CFG_HV;
	if (kernel) {
		if (mfmsr() & MSR_SF)
			state |= SPA_CFG_SF;
	} else {
		state |= SPA_CFG_PR;
		if (!test_tsk_thread_flag(current, TIF_32BIT))
			state |= SPA_CFG_SF;
	}
	return state;
}

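/*
 * Add a process element to the SPA for the given PASID: fill in the
 * PE (a pidr of 0 means a kernel context), register the mm with the
 * copro MMU code and remember the translation fault callback. A rough
 * sketch of the expected call sequence from an AFU driver (error
 * handling omitted, err_cb/data are placeholders, details in
 * misc/ocxl.h):
 *
 *	ocxl_link_setup(dev, PE_mask, &link);
 *	ocxl_link_add_pe(link, pasid, pidr, tidr, amr, mm, err_cb, data);
 *	... AFU uses the PASID ...
 *	ocxl_link_remove_pe(link, pasid);
 *	ocxl_link_release(dev, link);
 */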
int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr,
		u64 amr, struct mm_struct *mm,
		void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr),
		void *xsl_err_data)
{
	struct link *link = (struct link *) link_handle;
	struct spa *spa = link->spa;
	struct ocxl_process_element *pe;
	int pe_handle, rc = 0;
	struct pe_data *pe_data;

	BUILD_BUG_ON(sizeof(struct ocxl_process_element) != 128);
	if (pasid > SPA_PASID_MAX)
		return -EINVAL;

	mutex_lock(&spa->spa_lock);
	pe_handle = pasid & SPA_PE_MASK;
	pe = spa->spa_mem + pe_handle;

	if (pe->software_state) {
		rc = -EBUSY;
		goto unlock;
	}

	pe_data = kmalloc(sizeof(*pe_data), GFP_KERNEL);
	if (!pe_data) {
		rc = -ENOMEM;
		goto unlock;
	}

	pe_data->mm = mm;
	pe_data->xsl_err_cb = xsl_err_cb;
	pe_data->xsl_err_data = xsl_err_data;

	memset(pe, 0, sizeof(struct ocxl_process_element));
	pe->config_state = cpu_to_be64(calculate_cfg_state(pidr == 0));
	pe->lpid = cpu_to_be32(mfspr(SPRN_LPID));
	pe->pid = cpu_to_be32(pidr);
	pe->tid = cpu_to_be32(tidr);
	pe->amr = cpu_to_be64(amr);
	pe->software_state = cpu_to_be32(SPA_PE_VALID);

	mm_context_add_copro(mm);
	/*
	 * Barrier is to make sure PE is visible in the SPA before it
	 * is used by the device. It also helps with the global TLBI
	 * invalidation
	 */
	mb();
	radix_tree_insert(&spa->pe_tree, pe_handle, pe_data);

	/*
	 * The mm must stay valid for as long as the device uses it. We
	 * lower the count when the context is removed from the SPA.
	 *
	 * We grab mm_count (and not mm_users), as we don't want to
	 * end up in a circular dependency if a process mmaps its
	 * mmio, therefore incrementing the file ref count when
	 * calling mmap(), and forgets to unmap before exiting. In
	 * that scenario, when the kernel handles the death of the
	 * process, the file is not cleaned because unmap was not
	 * called, and the mm wouldn't be freed because we would still
	 * have a reference on mm_users. Incrementing mm_count solves
	 * the problem.
	 */
	mmgrab(mm);
	trace_ocxl_context_add(current->pid, spa->spa_mem, pasid, pidr, tidr);
unlock:
	mutex_unlock(&spa->spa_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(ocxl_link_add_pe);

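/*
 * Update the thread id of an existing process element, then flush the
 * NPU's context cache so a stale copy of the PE cannot be reused.
 */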
int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid)
{
	struct link *link = (struct link *) link_handle;
	struct spa *spa = link->spa;
	struct ocxl_process_element *pe;
	int pe_handle, rc;

	if (pasid > SPA_PASID_MAX)
		return -EINVAL;

	pe_handle = pasid & SPA_PE_MASK;
	pe = spa->spa_mem + pe_handle;

	mutex_lock(&spa->spa_lock);

	pe->tid = cpu_to_be32(tid);

	/*
	 * The barrier makes sure the PE is updated before we clear
	 * the NPU context cache below, so that the old PE cannot be
	 * reloaded erroneously.
	 */
	mb();

	/*
	 * hook to platform code
	 * On powerpc, the entry needs to be cleared from the context
	 * cache of the NPU.
	 */
	rc = pnv_ocxl_spa_remove_pe_from_cache(link->platform_data, pe_handle);
	WARN_ON(rc);

	mutex_unlock(&spa->spa_lock);
	return rc;
}

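/*
 * Remove the process element for a PASID: clear the PE in the SPA,
 * flush the NPU's context cache and drop the references taken on the
 * mm when the PE was added.
 */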
int ocxl_link_remove_pe(void *link_handle, int pasid)
{
	struct link *link = (struct link *) link_handle;
	struct spa *spa = link->spa;
	struct ocxl_process_element *pe;
	struct pe_data *pe_data;
	int pe_handle, rc;

	if (pasid > SPA_PASID_MAX)
		return -EINVAL;

	/*
	 * About synchronization with our memory fault handler:
	 *
	 * Before removing the PE, the driver is supposed to have
	 * notified the AFU, which should have cleaned up and made
	 * sure the PASID is no longer in use, including pending
	 * interrupts. However, there's no way to be sure...
	 *
	 * We clear the PE and remove the context from our radix
	 * tree. From that point on, any new interrupt for that
	 * context will fail silently, which is ok. As mentioned
	 * above, that's not expected, but it could happen if the
	 * driver or AFU didn't do the right thing.
	 *
	 * There could still be a bottom half running, but we don't
	 * need to wait/flush, as it is managing a reference count on
	 * the mm it reads from the radix tree.
	 */
	pe_handle = pasid & SPA_PE_MASK;
	pe = spa->spa_mem + pe_handle;

	mutex_lock(&spa->spa_lock);

	if (!(be32_to_cpu(pe->software_state) & SPA_PE_VALID)) {
		rc = -EINVAL;
		goto unlock;
	}

	trace_ocxl_context_remove(current->pid, spa->spa_mem, pasid,
				be32_to_cpu(pe->pid), be32_to_cpu(pe->tid));

	memset(pe, 0, sizeof(struct ocxl_process_element));
	/*
	 * The barrier makes sure the PE is removed from the SPA
	 * before we clear the NPU context cache below, so that the
	 * old PE cannot be reloaded erroneously.
	 */
	mb();

	/*
	 * hook to platform code
	 * On powerpc, the entry needs to be cleared from the context
	 * cache of the NPU.
	 */
	rc = pnv_ocxl_spa_remove_pe_from_cache(link->platform_data, pe_handle);
	WARN_ON(rc);

	pe_data = radix_tree_delete(&spa->pe_tree, pe_handle);
	if (!pe_data) {
		WARN(1, "Couldn't find pe data when removing PE\n");
	} else {
		mm_context_remove_copro(pe_data->mm);
		mmdrop(pe_data->mm);
		kfree_rcu(pe_data, rcu);
	}
unlock:
	mutex_unlock(&spa->spa_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(ocxl_link_remove_pe);

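/*
 * Allocate a hardware interrupt for an AFU, within the per-link
 * budget tracked by irq_available (MAX_IRQ_PER_LINK). Returns the
 * hardware irq number and the address to write to in order to
 * trigger it.
 */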
int ocxl_link_irq_alloc(void *link_handle, int *hw_irq, u64 *trigger_addr)
{
	struct link *link = (struct link *) link_handle;
	int rc, irq;
	u64 addr;

	if (atomic_dec_if_positive(&link->irq_available) < 0)
		return -ENOSPC;

	rc = pnv_ocxl_alloc_xive_irq(&irq, &addr);
	if (rc) {
		atomic_inc(&link->irq_available);
		return rc;
	}

	*hw_irq = irq;
	*trigger_addr = addr;
	return 0;
}
EXPORT_SYMBOL_GPL(ocxl_link_irq_alloc);

void ocxl_link_free_irq(void *link_handle, int hw_irq)
{
	struct link *link = (struct link *) link_handle;

	pnv_ocxl_free_xive_irq(hw_irq);
	atomic_inc(&link->irq_available);
}
EXPORT_SYMBOL_GPL(ocxl_link_free_irq);