/*
 * Copyright 2021 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
21727fd72fSBen Skeggs */ 22727fd72fSBen Skeggs #include <core/intr.h> 23*3ebd64aaSBen Skeggs #include <core/device.h> 24*3ebd64aaSBen Skeggs #include <core/subdev.h> 25727fd72fSBen Skeggs #include <subdev/pci.h> 26*3ebd64aaSBen Skeggs #include <subdev/top.h> 27*3ebd64aaSBen Skeggs 28727fd72fSBen Skeggs #include <subdev/mc.h> 29727fd72fSBen Skeggs 30*3ebd64aaSBen Skeggs static int 31*3ebd64aaSBen Skeggs nvkm_intr_xlat(struct nvkm_subdev *subdev, struct nvkm_intr *intr, 32*3ebd64aaSBen Skeggs enum nvkm_intr_type type, int *leaf, u32 *mask) 33*3ebd64aaSBen Skeggs { 34*3ebd64aaSBen Skeggs struct nvkm_device *device = subdev->device; 35*3ebd64aaSBen Skeggs 36*3ebd64aaSBen Skeggs if (type < NVKM_INTR_VECTOR_0) { 37*3ebd64aaSBen Skeggs if (type == NVKM_INTR_SUBDEV) { 38*3ebd64aaSBen Skeggs const struct nvkm_intr_data *data = intr->data; 39*3ebd64aaSBen Skeggs struct nvkm_top_device *tdev; 40*3ebd64aaSBen Skeggs 41*3ebd64aaSBen Skeggs while (data && data->mask) { 42*3ebd64aaSBen Skeggs if (data->type == NVKM_SUBDEV_TOP) { 43*3ebd64aaSBen Skeggs list_for_each_entry(tdev, &device->top->device, head) { 44*3ebd64aaSBen Skeggs if (tdev->intr >= 0 && 45*3ebd64aaSBen Skeggs tdev->type == subdev->type && 46*3ebd64aaSBen Skeggs tdev->inst == subdev->inst) { 47*3ebd64aaSBen Skeggs if (data->mask & BIT(tdev->intr)) { 48*3ebd64aaSBen Skeggs *leaf = data->leaf; 49*3ebd64aaSBen Skeggs *mask = BIT(tdev->intr); 50*3ebd64aaSBen Skeggs return 0; 51*3ebd64aaSBen Skeggs } 52*3ebd64aaSBen Skeggs } 53*3ebd64aaSBen Skeggs } 54*3ebd64aaSBen Skeggs } else 55*3ebd64aaSBen Skeggs if (data->type == subdev->type && data->inst == subdev->inst) { 56*3ebd64aaSBen Skeggs *leaf = data->leaf; 57*3ebd64aaSBen Skeggs *mask = data->mask; 58*3ebd64aaSBen Skeggs return 0; 59*3ebd64aaSBen Skeggs } 60*3ebd64aaSBen Skeggs 61*3ebd64aaSBen Skeggs data++; 62*3ebd64aaSBen Skeggs } 63*3ebd64aaSBen Skeggs } else { 64*3ebd64aaSBen Skeggs return -ENOSYS; 65*3ebd64aaSBen Skeggs } 66*3ebd64aaSBen Skeggs } else { 
67*3ebd64aaSBen Skeggs if (type < intr->leaves * sizeof(*intr->stat) * 8) { 68*3ebd64aaSBen Skeggs *leaf = type / 32; 69*3ebd64aaSBen Skeggs *mask = BIT(type % 32); 70*3ebd64aaSBen Skeggs return 0; 71*3ebd64aaSBen Skeggs } 72*3ebd64aaSBen Skeggs } 73*3ebd64aaSBen Skeggs 74*3ebd64aaSBen Skeggs return -EINVAL; 75*3ebd64aaSBen Skeggs } 76*3ebd64aaSBen Skeggs 77*3ebd64aaSBen Skeggs static struct nvkm_intr * 78*3ebd64aaSBen Skeggs nvkm_intr_find(struct nvkm_subdev *subdev, enum nvkm_intr_type type, int *leaf, u32 *mask) 79*3ebd64aaSBen Skeggs { 80*3ebd64aaSBen Skeggs struct nvkm_intr *intr; 81*3ebd64aaSBen Skeggs int ret; 82*3ebd64aaSBen Skeggs 83*3ebd64aaSBen Skeggs list_for_each_entry(intr, &subdev->device->intr.intr, head) { 84*3ebd64aaSBen Skeggs ret = nvkm_intr_xlat(subdev, intr, type, leaf, mask); 85*3ebd64aaSBen Skeggs if (ret == 0) 86*3ebd64aaSBen Skeggs return intr; 87*3ebd64aaSBen Skeggs } 88*3ebd64aaSBen Skeggs 89*3ebd64aaSBen Skeggs return NULL; 90*3ebd64aaSBen Skeggs } 91*3ebd64aaSBen Skeggs 92*3ebd64aaSBen Skeggs static void 93*3ebd64aaSBen Skeggs nvkm_intr_allow_locked(struct nvkm_intr *intr, int leaf, u32 mask) 94*3ebd64aaSBen Skeggs { 95*3ebd64aaSBen Skeggs intr->mask[leaf] |= mask; 96*3ebd64aaSBen Skeggs if (intr->func->allow) { 97*3ebd64aaSBen Skeggs if (intr->func->reset) 98*3ebd64aaSBen Skeggs intr->func->reset(intr, leaf, mask); 99*3ebd64aaSBen Skeggs intr->func->allow(intr, leaf, mask); 100*3ebd64aaSBen Skeggs } 101*3ebd64aaSBen Skeggs } 102*3ebd64aaSBen Skeggs 103*3ebd64aaSBen Skeggs void 104*3ebd64aaSBen Skeggs nvkm_intr_allow(struct nvkm_subdev *subdev, enum nvkm_intr_type type) 105*3ebd64aaSBen Skeggs { 106*3ebd64aaSBen Skeggs struct nvkm_device *device = subdev->device; 107*3ebd64aaSBen Skeggs struct nvkm_intr *intr; 108*3ebd64aaSBen Skeggs unsigned long flags; 109*3ebd64aaSBen Skeggs int leaf; 110*3ebd64aaSBen Skeggs u32 mask; 111*3ebd64aaSBen Skeggs 112*3ebd64aaSBen Skeggs intr = nvkm_intr_find(subdev, type, &leaf, &mask); 113*3ebd64aaSBen 
Skeggs if (intr) { 114*3ebd64aaSBen Skeggs nvkm_debug(intr->subdev, "intr %d/%08x allowed by %s\n", leaf, mask, subdev->name); 115*3ebd64aaSBen Skeggs spin_lock_irqsave(&device->intr.lock, flags); 116*3ebd64aaSBen Skeggs nvkm_intr_allow_locked(intr, leaf, mask); 117*3ebd64aaSBen Skeggs spin_unlock_irqrestore(&device->intr.lock, flags); 118*3ebd64aaSBen Skeggs } 119*3ebd64aaSBen Skeggs } 120*3ebd64aaSBen Skeggs 121*3ebd64aaSBen Skeggs static void 122*3ebd64aaSBen Skeggs nvkm_intr_block_locked(struct nvkm_intr *intr, int leaf, u32 mask) 123*3ebd64aaSBen Skeggs { 124*3ebd64aaSBen Skeggs intr->mask[leaf] &= ~mask; 125*3ebd64aaSBen Skeggs if (intr->func->block) 126*3ebd64aaSBen Skeggs intr->func->block(intr, leaf, mask); 127*3ebd64aaSBen Skeggs } 128*3ebd64aaSBen Skeggs 129*3ebd64aaSBen Skeggs void 130*3ebd64aaSBen Skeggs nvkm_intr_block(struct nvkm_subdev *subdev, enum nvkm_intr_type type) 131*3ebd64aaSBen Skeggs { 132*3ebd64aaSBen Skeggs struct nvkm_device *device = subdev->device; 133*3ebd64aaSBen Skeggs struct nvkm_intr *intr; 134*3ebd64aaSBen Skeggs unsigned long flags; 135*3ebd64aaSBen Skeggs int leaf; 136*3ebd64aaSBen Skeggs u32 mask; 137*3ebd64aaSBen Skeggs 138*3ebd64aaSBen Skeggs intr = nvkm_intr_find(subdev, type, &leaf, &mask); 139*3ebd64aaSBen Skeggs if (intr) { 140*3ebd64aaSBen Skeggs nvkm_debug(intr->subdev, "intr %d/%08x blocked by %s\n", leaf, mask, subdev->name); 141*3ebd64aaSBen Skeggs spin_lock_irqsave(&device->intr.lock, flags); 142*3ebd64aaSBen Skeggs nvkm_intr_block_locked(intr, leaf, mask); 143*3ebd64aaSBen Skeggs spin_unlock_irqrestore(&device->intr.lock, flags); 144*3ebd64aaSBen Skeggs } 145*3ebd64aaSBen Skeggs } 146*3ebd64aaSBen Skeggs 147727fd72fSBen Skeggs static void 148727fd72fSBen Skeggs nvkm_intr_rearm_locked(struct nvkm_device *device) 149727fd72fSBen Skeggs { 150*3ebd64aaSBen Skeggs struct nvkm_intr *intr; 151*3ebd64aaSBen Skeggs 152*3ebd64aaSBen Skeggs list_for_each_entry(intr, &device->intr.intr, head) 153*3ebd64aaSBen Skeggs 
intr->func->rearm(intr); 154727fd72fSBen Skeggs nvkm_mc_intr_rearm(device); 155727fd72fSBen Skeggs } 156727fd72fSBen Skeggs 157727fd72fSBen Skeggs static void 158727fd72fSBen Skeggs nvkm_intr_unarm_locked(struct nvkm_device *device) 159727fd72fSBen Skeggs { 160*3ebd64aaSBen Skeggs struct nvkm_intr *intr; 161*3ebd64aaSBen Skeggs 162*3ebd64aaSBen Skeggs list_for_each_entry(intr, &device->intr.intr, head) 163*3ebd64aaSBen Skeggs intr->func->unarm(intr); 164727fd72fSBen Skeggs nvkm_mc_intr_unarm(device); 165727fd72fSBen Skeggs } 166727fd72fSBen Skeggs 167727fd72fSBen Skeggs static irqreturn_t 168727fd72fSBen Skeggs nvkm_intr(int irq, void *arg) 169727fd72fSBen Skeggs { 170727fd72fSBen Skeggs struct nvkm_device *device = arg; 171*3ebd64aaSBen Skeggs struct nvkm_intr *intr; 172*3ebd64aaSBen Skeggs struct nvkm_inth *inth; 173727fd72fSBen Skeggs irqreturn_t ret = IRQ_NONE; 174*3ebd64aaSBen Skeggs bool pending = false, handled; 175*3ebd64aaSBen Skeggs int prio, leaf; 176727fd72fSBen Skeggs 177*3ebd64aaSBen Skeggs /* Disable all top-level interrupt sources, and re-arm MSI interrupts. */ 178727fd72fSBen Skeggs spin_lock(&device->intr.lock); 179727fd72fSBen Skeggs if (!device->intr.armed) 180727fd72fSBen Skeggs goto done_unlock; 181727fd72fSBen Skeggs 182727fd72fSBen Skeggs nvkm_intr_unarm_locked(device); 183727fd72fSBen Skeggs nvkm_pci_msi_rearm(device); 184727fd72fSBen Skeggs 185*3ebd64aaSBen Skeggs /* Fetch pending interrupt masks. 
*/ 186*3ebd64aaSBen Skeggs list_for_each_entry(intr, &device->intr.intr, head) { 187*3ebd64aaSBen Skeggs if (intr->func->pending(intr)) 188*3ebd64aaSBen Skeggs pending = true; 189*3ebd64aaSBen Skeggs } 190*3ebd64aaSBen Skeggs 191727fd72fSBen Skeggs nvkm_mc_intr(device, &handled); 192727fd72fSBen Skeggs if (handled) 193727fd72fSBen Skeggs ret = IRQ_HANDLED; 194727fd72fSBen Skeggs 195*3ebd64aaSBen Skeggs if (!pending) 196*3ebd64aaSBen Skeggs goto done; 197*3ebd64aaSBen Skeggs 198*3ebd64aaSBen Skeggs /* Check that GPU is still on the bus by reading NV_PMC_BOOT_0. */ 199*3ebd64aaSBen Skeggs if (WARN_ON(nvkm_rd32(device, 0x000000) == 0xffffffff)) 200*3ebd64aaSBen Skeggs goto done; 201*3ebd64aaSBen Skeggs 202*3ebd64aaSBen Skeggs /* Execute handlers. */ 203*3ebd64aaSBen Skeggs for (prio = 0; prio < ARRAY_SIZE(device->intr.prio); prio++) { 204*3ebd64aaSBen Skeggs list_for_each_entry(inth, &device->intr.prio[prio], head) { 205*3ebd64aaSBen Skeggs struct nvkm_intr *intr = inth->intr; 206*3ebd64aaSBen Skeggs 207*3ebd64aaSBen Skeggs if (intr->stat[inth->leaf] & inth->mask) { 208*3ebd64aaSBen Skeggs if (atomic_read(&inth->allowed)) { 209*3ebd64aaSBen Skeggs if (intr->func->reset) 210*3ebd64aaSBen Skeggs intr->func->reset(intr, inth->leaf, inth->mask); 211*3ebd64aaSBen Skeggs if (inth->func(inth) == IRQ_HANDLED) 212*3ebd64aaSBen Skeggs ret = IRQ_HANDLED; 213*3ebd64aaSBen Skeggs } 214*3ebd64aaSBen Skeggs } 215*3ebd64aaSBen Skeggs } 216*3ebd64aaSBen Skeggs } 217*3ebd64aaSBen Skeggs 218*3ebd64aaSBen Skeggs /* Nothing handled? Some debugging/protection from IRQ storms is in order... 
*/ 219*3ebd64aaSBen Skeggs if (ret == IRQ_NONE) { 220*3ebd64aaSBen Skeggs list_for_each_entry(intr, &device->intr.intr, head) { 221*3ebd64aaSBen Skeggs for (leaf = 0; leaf < intr->leaves; leaf++) { 222*3ebd64aaSBen Skeggs if (intr->stat[leaf]) { 223*3ebd64aaSBen Skeggs nvkm_warn(intr->subdev, "intr%d: %08x\n", 224*3ebd64aaSBen Skeggs leaf, intr->stat[leaf]); 225*3ebd64aaSBen Skeggs nvkm_intr_block_locked(intr, leaf, intr->stat[leaf]); 226*3ebd64aaSBen Skeggs } 227*3ebd64aaSBen Skeggs } 228*3ebd64aaSBen Skeggs } 229*3ebd64aaSBen Skeggs } 230*3ebd64aaSBen Skeggs 231*3ebd64aaSBen Skeggs done: 232*3ebd64aaSBen Skeggs /* Re-enable all top-level interrupt sources. */ 233727fd72fSBen Skeggs nvkm_intr_rearm_locked(device); 234727fd72fSBen Skeggs done_unlock: 235727fd72fSBen Skeggs spin_unlock(&device->intr.lock); 236727fd72fSBen Skeggs return ret; 237727fd72fSBen Skeggs } 238727fd72fSBen Skeggs 239*3ebd64aaSBen Skeggs int 240*3ebd64aaSBen Skeggs nvkm_intr_add(const struct nvkm_intr_func *func, const struct nvkm_intr_data *data, 241*3ebd64aaSBen Skeggs struct nvkm_subdev *subdev, int leaves, struct nvkm_intr *intr) 242*3ebd64aaSBen Skeggs { 243*3ebd64aaSBen Skeggs struct nvkm_device *device = subdev->device; 244*3ebd64aaSBen Skeggs int i; 245*3ebd64aaSBen Skeggs 246*3ebd64aaSBen Skeggs intr->func = func; 247*3ebd64aaSBen Skeggs intr->data = data; 248*3ebd64aaSBen Skeggs intr->subdev = subdev; 249*3ebd64aaSBen Skeggs intr->leaves = leaves; 250*3ebd64aaSBen Skeggs intr->stat = kcalloc(leaves, sizeof(*intr->stat), GFP_KERNEL); 251*3ebd64aaSBen Skeggs intr->mask = kcalloc(leaves, sizeof(*intr->mask), GFP_KERNEL); 252*3ebd64aaSBen Skeggs if (!intr->stat || !intr->mask) { 253*3ebd64aaSBen Skeggs kfree(intr->stat); 254*3ebd64aaSBen Skeggs return -ENOMEM; 255*3ebd64aaSBen Skeggs } 256*3ebd64aaSBen Skeggs 257*3ebd64aaSBen Skeggs if (intr->subdev->debug >= NV_DBG_DEBUG) { 258*3ebd64aaSBen Skeggs for (i = 0; i < intr->leaves; i++) 259*3ebd64aaSBen Skeggs intr->mask[i] = ~0; 
260*3ebd64aaSBen Skeggs } 261*3ebd64aaSBen Skeggs 262*3ebd64aaSBen Skeggs spin_lock_irq(&device->intr.lock); 263*3ebd64aaSBen Skeggs list_add_tail(&intr->head, &device->intr.intr); 264*3ebd64aaSBen Skeggs spin_unlock_irq(&device->intr.lock); 265*3ebd64aaSBen Skeggs return 0; 266*3ebd64aaSBen Skeggs } 267*3ebd64aaSBen Skeggs 268727fd72fSBen Skeggs void 269727fd72fSBen Skeggs nvkm_intr_rearm(struct nvkm_device *device) 270727fd72fSBen Skeggs { 271*3ebd64aaSBen Skeggs struct nvkm_intr *intr; 272*3ebd64aaSBen Skeggs int i; 273*3ebd64aaSBen Skeggs 274727fd72fSBen Skeggs spin_lock_irq(&device->intr.lock); 275*3ebd64aaSBen Skeggs list_for_each_entry(intr, &device->intr.intr, head) { 276*3ebd64aaSBen Skeggs for (i = 0; intr->func->block && i < intr->leaves; i++) { 277*3ebd64aaSBen Skeggs intr->func->block(intr, i, ~0); 278*3ebd64aaSBen Skeggs intr->func->allow(intr, i, intr->mask[i]); 279*3ebd64aaSBen Skeggs } 280*3ebd64aaSBen Skeggs } 281*3ebd64aaSBen Skeggs 282727fd72fSBen Skeggs nvkm_intr_rearm_locked(device); 283727fd72fSBen Skeggs device->intr.armed = true; 284727fd72fSBen Skeggs spin_unlock_irq(&device->intr.lock); 285727fd72fSBen Skeggs } 286727fd72fSBen Skeggs 287727fd72fSBen Skeggs void 288727fd72fSBen Skeggs nvkm_intr_unarm(struct nvkm_device *device) 289727fd72fSBen Skeggs { 290727fd72fSBen Skeggs spin_lock_irq(&device->intr.lock); 291727fd72fSBen Skeggs nvkm_intr_unarm_locked(device); 292727fd72fSBen Skeggs device->intr.armed = false; 293727fd72fSBen Skeggs spin_unlock_irq(&device->intr.lock); 294727fd72fSBen Skeggs } 295727fd72fSBen Skeggs 296727fd72fSBen Skeggs int 297727fd72fSBen Skeggs nvkm_intr_install(struct nvkm_device *device) 298727fd72fSBen Skeggs { 299727fd72fSBen Skeggs int ret; 300727fd72fSBen Skeggs 301727fd72fSBen Skeggs device->intr.irq = device->func->irq(device); 302727fd72fSBen Skeggs if (device->intr.irq < 0) 303727fd72fSBen Skeggs return device->intr.irq; 304727fd72fSBen Skeggs 305727fd72fSBen Skeggs ret = request_irq(device->intr.irq, 
nvkm_intr, IRQF_SHARED, "nvkm", device); 306727fd72fSBen Skeggs if (ret) 307727fd72fSBen Skeggs return ret; 308727fd72fSBen Skeggs 309727fd72fSBen Skeggs device->intr.alloc = true; 310727fd72fSBen Skeggs return 0; 311727fd72fSBen Skeggs } 312727fd72fSBen Skeggs 313727fd72fSBen Skeggs void 314727fd72fSBen Skeggs nvkm_intr_dtor(struct nvkm_device *device) 315727fd72fSBen Skeggs { 316*3ebd64aaSBen Skeggs struct nvkm_intr *intr, *intt; 317*3ebd64aaSBen Skeggs 318*3ebd64aaSBen Skeggs list_for_each_entry_safe(intr, intt, &device->intr.intr, head) { 319*3ebd64aaSBen Skeggs list_del(&intr->head); 320*3ebd64aaSBen Skeggs kfree(intr->mask); 321*3ebd64aaSBen Skeggs kfree(intr->stat); 322*3ebd64aaSBen Skeggs } 323*3ebd64aaSBen Skeggs 324727fd72fSBen Skeggs if (device->intr.alloc) 325727fd72fSBen Skeggs free_irq(device->intr.irq, device); 326727fd72fSBen Skeggs } 327727fd72fSBen Skeggs 328727fd72fSBen Skeggs void 329727fd72fSBen Skeggs nvkm_intr_ctor(struct nvkm_device *device) 330727fd72fSBen Skeggs { 331*3ebd64aaSBen Skeggs int i; 332*3ebd64aaSBen Skeggs 333*3ebd64aaSBen Skeggs INIT_LIST_HEAD(&device->intr.intr); 334*3ebd64aaSBen Skeggs for (i = 0; i < ARRAY_SIZE(device->intr.prio); i++) 335*3ebd64aaSBen Skeggs INIT_LIST_HEAD(&device->intr.prio[i]); 336*3ebd64aaSBen Skeggs 337727fd72fSBen Skeggs spin_lock_init(&device->intr.lock); 338*3ebd64aaSBen Skeggs device->intr.armed = false; 339*3ebd64aaSBen Skeggs } 340*3ebd64aaSBen Skeggs 341*3ebd64aaSBen Skeggs void 342*3ebd64aaSBen Skeggs nvkm_inth_block(struct nvkm_inth *inth) 343*3ebd64aaSBen Skeggs { 344*3ebd64aaSBen Skeggs if (unlikely(!inth->intr)) 345*3ebd64aaSBen Skeggs return; 346*3ebd64aaSBen Skeggs 347*3ebd64aaSBen Skeggs atomic_set(&inth->allowed, 0); 348*3ebd64aaSBen Skeggs } 349*3ebd64aaSBen Skeggs 350*3ebd64aaSBen Skeggs void 351*3ebd64aaSBen Skeggs nvkm_inth_allow(struct nvkm_inth *inth) 352*3ebd64aaSBen Skeggs { 353*3ebd64aaSBen Skeggs struct nvkm_intr *intr = inth->intr; 354*3ebd64aaSBen Skeggs unsigned long flags; 
355*3ebd64aaSBen Skeggs 356*3ebd64aaSBen Skeggs if (unlikely(!inth->intr)) 357*3ebd64aaSBen Skeggs return; 358*3ebd64aaSBen Skeggs 359*3ebd64aaSBen Skeggs spin_lock_irqsave(&intr->subdev->device->intr.lock, flags); 360*3ebd64aaSBen Skeggs if (!atomic_xchg(&inth->allowed, 1)) { 361*3ebd64aaSBen Skeggs if ((intr->mask[inth->leaf] & inth->mask) != inth->mask) 362*3ebd64aaSBen Skeggs nvkm_intr_allow_locked(intr, inth->leaf, inth->mask); 363*3ebd64aaSBen Skeggs } 364*3ebd64aaSBen Skeggs spin_unlock_irqrestore(&intr->subdev->device->intr.lock, flags); 365*3ebd64aaSBen Skeggs } 366*3ebd64aaSBen Skeggs 367*3ebd64aaSBen Skeggs int 368*3ebd64aaSBen Skeggs nvkm_inth_add(struct nvkm_intr *intr, enum nvkm_intr_type type, enum nvkm_intr_prio prio, 369*3ebd64aaSBen Skeggs struct nvkm_subdev *subdev, nvkm_inth_func func, struct nvkm_inth *inth) 370*3ebd64aaSBen Skeggs { 371*3ebd64aaSBen Skeggs struct nvkm_device *device = subdev->device; 372*3ebd64aaSBen Skeggs int ret; 373*3ebd64aaSBen Skeggs 374*3ebd64aaSBen Skeggs if (WARN_ON(inth->mask)) 375*3ebd64aaSBen Skeggs return -EBUSY; 376*3ebd64aaSBen Skeggs 377*3ebd64aaSBen Skeggs ret = nvkm_intr_xlat(subdev, intr, type, &inth->leaf, &inth->mask); 378*3ebd64aaSBen Skeggs if (ret) 379*3ebd64aaSBen Skeggs return ret; 380*3ebd64aaSBen Skeggs 381*3ebd64aaSBen Skeggs nvkm_debug(intr->subdev, "intr %d/%08x requested by %s\n", 382*3ebd64aaSBen Skeggs inth->leaf, inth->mask, subdev->name); 383*3ebd64aaSBen Skeggs 384*3ebd64aaSBen Skeggs inth->intr = intr; 385*3ebd64aaSBen Skeggs inth->func = func; 386*3ebd64aaSBen Skeggs atomic_set(&inth->allowed, 0); 387*3ebd64aaSBen Skeggs list_add_tail(&inth->head, &device->intr.prio[prio]); 388*3ebd64aaSBen Skeggs return 0; 389727fd72fSBen Skeggs } 390