// SPDX-License-Identifier: GPL-2.0-only
/*
 * Thunderbolt driver - NHI driver
 *
 * The NHI (native host interface) is the PCI device that allows us to send
 * and receive frames from the Thunderbolt bus.
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/property.h>
#include <linux/platform_data/x86/apple.h>

#include "nhi.h"
#include "nhi_regs.h"
#include "tb.h"

#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")

#define RING_FIRST_USABLE_HOPID	1

/*
 * Minimum number of vectors when we use MSI-X. Two for control channel
 * Rx/Tx and the remaining four are for cross-domain DMA paths.
 */
#define MSIX_MIN_VECS		6
#define MSIX_MAX_VECS		16

#define NHI_MAILBOX_TIMEOUT	500 /* ms */

static int ring_interrupt_index(struct tb_ring *ring)
{
	int bit = ring->hop;
	if (!ring->is_tx)
		bit += ring->nhi->hop_count;
	return bit;
}
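/*
 * Worked example (illustration only, not part of the driver): interrupt
 * status bits are laid out TX rings first, then RX rings. With a
 * hypothetical hop_count of 12, TX ring 3 maps to bit 3 while RX ring 3
 * maps to bit 12 + 3 = 15, so both still land in the first 32-bit
 * interrupt register (bit / 32 == 0).
 */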
/**
 * ring_interrupt_active() - activate/deactivate interrupts for a single ring
 *
 * ring->nhi->lock must be held.
 */
static void ring_interrupt_active(struct tb_ring *ring, bool active)
{
	int reg = REG_RING_INTERRUPT_BASE +
		  ring_interrupt_index(ring) / 32 * 4;
	int bit = ring_interrupt_index(ring) & 31;
	int mask = 1 << bit;
	u32 old, new;

	if (ring->irq > 0) {
		u32 step, shift, ivr, misc;
		void __iomem *ivr_base;
		int index;

		if (ring->is_tx)
			index = ring->hop;
		else
			index = ring->hop + ring->nhi->hop_count;

		/*
		 * Ask the hardware to clear interrupt status bits automatically
		 * since we already know which interrupt was triggered.
		 */
		misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
		if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) {
			misc |= REG_DMA_MISC_INT_AUTO_CLEAR;
			iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
		}

		ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
		step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
		shift = index % REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
		ivr = ioread32(ivr_base + step);
		ivr &= ~(REG_INT_VEC_ALLOC_MASK << shift);
		if (active)
			ivr |= ring->vector << shift;
		iowrite32(ivr, ivr_base + step);
	}

	old = ioread32(ring->nhi->iobase + reg);
	if (active)
		new = old | mask;
	else
		new = old & ~mask;

	dev_dbg(&ring->nhi->pdev->dev,
		"%s interrupt at register %#x bit %d (%#x -> %#x)\n",
		active ? "enabling" : "disabling", reg, bit, old, new);

	if (new == old)
		dev_WARN(&ring->nhi->pdev->dev,
			 "interrupt for %s %d is already %s\n",
			 RING_TYPE(ring), ring->hop,
			 active ? "enabled" : "disabled");
	iowrite32(new, ring->nhi->iobase + reg);
}
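/*
 * Worked example (illustration only): REG_INT_VEC_ALLOC_BITS gives each
 * ring a 4-bit vector field, so REG_INT_VEC_ALLOC_REGS packs eight fields
 * into one 32-bit register. A hypothetical index of 10 therefore lands in
 * the second register (step = 10 / 8 * 4 = 4 bytes) at bit offset
 * shift = 10 % 8 * 4 = 8.
 */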
/**
 * nhi_disable_interrupts() - disable interrupts for all rings
 *
 * Use only during init and shutdown.
 */
static void nhi_disable_interrupts(struct tb_nhi *nhi)
{
	int i = 0;
	/* disable interrupts */
	for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
		iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);

	/* clear interrupt status bits */
	for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
		ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
}

/* ring helper methods */

static void __iomem *ring_desc_base(struct tb_ring *ring)
{
	void __iomem *io = ring->nhi->iobase;
	io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE;
	io += ring->hop * 16;
	return io;
}

static void __iomem *ring_options_base(struct tb_ring *ring)
{
	void __iomem *io = ring->nhi->iobase;
	io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE;
	io += ring->hop * 32;
	return io;
}

static void ring_iowrite_cons(struct tb_ring *ring, u16 cons)
{
	/*
	 * The other 16 bits in the register are read-only and writes to them
	 * are ignored by the hardware, so we can save one ioread32() by
	 * filling the read-only bits with zeroes.
	 */
	iowrite32(cons, ring_desc_base(ring) + 8);
}

static void ring_iowrite_prod(struct tb_ring *ring, u16 prod)
{
	/* See ring_iowrite_cons() above for explanation */
	iowrite32(prod << 16, ring_desc_base(ring) + 8);
}

static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite32(value, ring_desc_base(ring) + offset);
}

static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset)
{
	iowrite32(value, ring_desc_base(ring) + offset);
	iowrite32(value >> 32, ring_desc_base(ring) + offset + 4);
}

static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite32(value, ring_options_base(ring) + offset);
}

static bool ring_full(struct tb_ring *ring)
{
	return ((ring->head + 1) % ring->size) == ring->tail;
}

static bool ring_empty(struct tb_ring *ring)
{
	return ring->head == ring->tail;
}
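/*
 * Worked example (illustration only): with a hypothetical ring->size of 4,
 * head == 2 and tail == 3 means the ring is full since (2 + 1) % 4 == 3.
 * One descriptor slot is always sacrificed so that a full ring can be
 * distinguished from an empty one (head == tail).
 */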
/**
 * ring_write_descriptors() - post frames from ring->queue to the controller
 *
 * ring->lock is held.
 */
static void ring_write_descriptors(struct tb_ring *ring)
{
	struct ring_frame *frame, *n;
	struct ring_desc *descriptor;
	list_for_each_entry_safe(frame, n, &ring->queue, list) {
		if (ring_full(ring))
			break;
		list_move_tail(&frame->list, &ring->in_flight);
		descriptor = &ring->descriptors[ring->head];
		descriptor->phys = frame->buffer_phy;
		descriptor->time = 0;
		descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT;
		if (ring->is_tx) {
			descriptor->length = frame->size;
			descriptor->eof = frame->eof;
			descriptor->sof = frame->sof;
		}
		ring->head = (ring->head + 1) % ring->size;
		if (ring->is_tx)
			ring_iowrite_prod(ring, ring->head);
		else
			ring_iowrite_cons(ring, ring->head);
	}
}

/**
 * ring_work() - progress completed frames
 *
 * If the ring is shutting down then all frames are marked as canceled and
 * their callbacks are invoked.
 *
 * Otherwise we collect all completed frames from the ring buffer, write new
 * frames to the ring buffer and invoke the callbacks for the completed frames.
 */
static void ring_work(struct work_struct *work)
{
	struct tb_ring *ring = container_of(work, typeof(*ring), work);
	struct ring_frame *frame;
	bool canceled = false;
	unsigned long flags;
	LIST_HEAD(done);

	spin_lock_irqsave(&ring->lock, flags);

	if (!ring->running) {
		/* Move all frames to done and mark them as canceled. */
		list_splice_tail_init(&ring->in_flight, &done);
		list_splice_tail_init(&ring->queue, &done);
		canceled = true;
		goto invoke_callback;
	}

	while (!ring_empty(ring)) {
		if (!(ring->descriptors[ring->tail].flags
				& RING_DESC_COMPLETED))
			break;
		frame = list_first_entry(&ring->in_flight, typeof(*frame),
					 list);
		list_move_tail(&frame->list, &done);
		if (!ring->is_tx) {
			frame->size = ring->descriptors[ring->tail].length;
			frame->eof = ring->descriptors[ring->tail].eof;
			frame->sof = ring->descriptors[ring->tail].sof;
			frame->flags = ring->descriptors[ring->tail].flags;
		}
		ring->tail = (ring->tail + 1) % ring->size;
	}
	ring_write_descriptors(ring);

invoke_callback:
	/* allow callbacks to schedule new work */
	spin_unlock_irqrestore(&ring->lock, flags);
	while (!list_empty(&done)) {
		frame = list_first_entry(&done, typeof(*frame), list);
		/*
		 * The callback may re-enqueue or delete the frame.
		 * Do not hold on to it.
		 */
		list_del_init(&frame->list);
		if (frame->callback)
			frame->callback(ring, frame, canceled);
	}
}

int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ring->lock, flags);
	if (ring->running) {
		list_add_tail(&frame->list, &ring->queue);
		ring_write_descriptors(ring);
	} else {
		ret = -ESHUTDOWN;
	}
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(__tb_ring_enqueue);
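/*
 * Usage sketch (illustration only, assuming the tb_ring_tx()/tb_ring_rx()
 * inline wrappers from include/linux/thunderbolt.h that forward to
 * __tb_ring_enqueue()): a client fills in a struct ring_frame, posts it,
 * and gets the completion through the callback:
 *
 *	static void my_tx_callback(struct tb_ring *ring,
 *				   struct ring_frame *frame, bool canceled)
 *	{
 *		// unmap frame->buffer_phy and free the frame here
 *	}
 *
 *	frame->buffer_phy = dma_map_single(...);
 *	frame->callback = my_tx_callback;	// hypothetical callback
 *	frame->sof = TB_CFG_PKG_READ;		// example PDF values
 *	frame->eof = TB_CFG_PKG_READ;
 *	if (tb_ring_tx(tx_ring, frame))		// -ESHUTDOWN if stopped
 *		// handle the error
 */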
/**
 * tb_ring_poll() - Poll one completed frame from the ring
 * @ring: Ring to poll
 *
 * This function can be called when @start_poll callback of the @ring
 * has been called. It will read one completed frame from the ring and
 * return it to the caller. Returns %NULL if there are no more completed
 * frames.
 */
struct ring_frame *tb_ring_poll(struct tb_ring *ring)
{
	struct ring_frame *frame = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	if (!ring->running)
		goto unlock;
	if (ring_empty(ring))
		goto unlock;

	if (ring->descriptors[ring->tail].flags & RING_DESC_COMPLETED) {
		frame = list_first_entry(&ring->in_flight, typeof(*frame),
					 list);
		list_del_init(&frame->list);

		if (!ring->is_tx) {
			frame->size = ring->descriptors[ring->tail].length;
			frame->eof = ring->descriptors[ring->tail].eof;
			frame->sof = ring->descriptors[ring->tail].sof;
			frame->flags = ring->descriptors[ring->tail].flags;
		}

		ring->tail = (ring->tail + 1) % ring->size;
	}

unlock:
	spin_unlock_irqrestore(&ring->lock, flags);
	return frame;
}
EXPORT_SYMBOL_GPL(tb_ring_poll);

static void __ring_interrupt_mask(struct tb_ring *ring, bool mask)
{
	int idx = ring_interrupt_index(ring);
	int reg = REG_RING_INTERRUPT_BASE + idx / 32 * 4;
	int bit = idx % 32;
	u32 val;

	val = ioread32(ring->nhi->iobase + reg);
	if (mask)
		val &= ~BIT(bit);
	else
		val |= BIT(bit);
	iowrite32(val, ring->nhi->iobase + reg);
}

/* Both @nhi->lock and @ring->lock should be held */
static void __ring_interrupt(struct tb_ring *ring)
{
	if (!ring->running)
		return;

	if (ring->start_poll) {
		__ring_interrupt_mask(ring, true);
		ring->start_poll(ring->poll_data);
	} else {
		schedule_work(&ring->work);
	}
}

/**
 * tb_ring_poll_complete() - Re-start interrupt for the ring
 * @ring: Ring to re-start the interrupt
 *
 * This will re-start (unmask) the ring interrupt once the user is done
 * with polling.
 */
void tb_ring_poll_complete(struct tb_ring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->nhi->lock, flags);
	spin_lock(&ring->lock);
	if (ring->start_poll)
		__ring_interrupt_mask(ring, false);
	spin_unlock(&ring->lock);
	spin_unlock_irqrestore(&ring->nhi->lock, flags);
}
EXPORT_SYMBOL_GPL(tb_ring_poll_complete);
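/*
 * Polling flow sketch (illustration only): a client that passed a
 * @start_poll callback to tb_ring_alloc_rx() typically drains the ring
 * from its own context, NAPI-style, and then re-enables the interrupt:
 *
 *	static void my_start_poll(void *data)	// hypothetical callback
 *	{
 *		// interrupt is now masked; defer to a context that does:
 *		struct ring_frame *frame;
 *
 *		while ((frame = tb_ring_poll(ring)))
 *			consume(frame);		// hypothetical consumer
 *		tb_ring_poll_complete(ring);	// unmask the interrupt
 *	}
 */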
static irqreturn_t ring_msix(int irq, void *data)
{
	struct tb_ring *ring = data;

	spin_lock(&ring->nhi->lock);
	spin_lock(&ring->lock);
	__ring_interrupt(ring);
	spin_unlock(&ring->lock);
	spin_unlock(&ring->nhi->lock);

	return IRQ_HANDLED;
}

static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
{
	struct tb_nhi *nhi = ring->nhi;
	unsigned long irqflags;
	int ret;

	if (!nhi->pdev->msix_enabled)
		return 0;

	ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
	if (ret < 0)
		return ret;

	ring->vector = ret;

	ret = pci_irq_vector(ring->nhi->pdev, ring->vector);
	if (ret < 0)
		goto err_ida_remove;

	ring->irq = ret;

	irqflags = no_suspend ? IRQF_NO_SUSPEND : 0;
	ret = request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
	if (ret)
		goto err_ida_remove;

	return 0;

err_ida_remove:
	ida_simple_remove(&nhi->msix_ida, ring->vector);

	return ret;
}

static void ring_release_msix(struct tb_ring *ring)
{
	if (ring->irq <= 0)
		return;

	free_irq(ring->irq, ring);
	ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
	ring->vector = 0;
	ring->irq = 0;
}

static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
{
	int ret = 0;

	spin_lock_irq(&nhi->lock);

	if (ring->hop < 0) {
		unsigned int i;

		/*
		 * Automatically allocate HopID from the non-reserved
		 * range 1 .. hop_count - 1.
		 */
		for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) {
			if (ring->is_tx) {
				if (!nhi->tx_rings[i]) {
					ring->hop = i;
					break;
				}
			} else {
				if (!nhi->rx_rings[i]) {
					ring->hop = i;
					break;
				}
			}
		}
	}

	if (ring->hop < 0 || ring->hop >= nhi->hop_count) {
		dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
		ret = -EINVAL;
		goto err_unlock;
	}
	if (ring->is_tx && nhi->tx_rings[ring->hop]) {
		dev_warn(&nhi->pdev->dev, "TX hop %d already allocated\n",
			 ring->hop);
		ret = -EBUSY;
		goto err_unlock;
	} else if (!ring->is_tx && nhi->rx_rings[ring->hop]) {
		dev_warn(&nhi->pdev->dev, "RX hop %d already allocated\n",
			 ring->hop);
		ret = -EBUSY;
		goto err_unlock;
	}

	if (ring->is_tx)
		nhi->tx_rings[ring->hop] = ring;
	else
		nhi->rx_rings[ring->hop] = ring;

err_unlock:
	spin_unlock_irq(&nhi->lock);

	return ret;
}

static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
				     bool transmit, unsigned int flags,
				     u16 sof_mask, u16 eof_mask,
				     void (*start_poll)(void *),
				     void *poll_data)
{
	struct tb_ring *ring = NULL;

	dev_dbg(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
		transmit ? "TX" : "RX", hop, size);

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return NULL;

	spin_lock_init(&ring->lock);
	INIT_LIST_HEAD(&ring->queue);
	INIT_LIST_HEAD(&ring->in_flight);
	INIT_WORK(&ring->work, ring_work);

	ring->nhi = nhi;
	ring->hop = hop;
	ring->is_tx = transmit;
	ring->size = size;
	ring->flags = flags;
	ring->sof_mask = sof_mask;
	ring->eof_mask = eof_mask;
	ring->head = 0;
	ring->tail = 0;
	ring->running = false;
	ring->start_poll = start_poll;
	ring->poll_data = poll_data;

	ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
			size * sizeof(*ring->descriptors),
			&ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
	if (!ring->descriptors)
		goto err_free_ring;

	if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
		goto err_free_descs;

	if (nhi_alloc_hop(nhi, ring))
		goto err_release_msix;

	return ring;

err_release_msix:
	ring_release_msix(ring);
err_free_descs:
	dma_free_coherent(&ring->nhi->pdev->dev,
			  ring->size * sizeof(*ring->descriptors),
			  ring->descriptors, ring->descriptors_dma);
err_free_ring:
	kfree(ring);

	return NULL;
}

/**
 * tb_ring_alloc_tx() - Allocate DMA ring for transmit
 * @nhi: Pointer to the NHI the ring is to be allocated
 * @hop: HopID (ring) to allocate
 * @size: Number of entries in the ring
 * @flags: Flags for the ring
 */
struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags)
{
	return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(tb_ring_alloc_tx);

/**
 * tb_ring_alloc_rx() - Allocate DMA ring for receive
 * @nhi: Pointer to the NHI the ring is to be allocated
 * @hop: HopID (ring) to allocate. Pass %-1 for automatic allocation.
 * @size: Number of entries in the ring
 * @flags: Flags for the ring
 * @sof_mask: Mask of PDF values that start a frame
 * @eof_mask: Mask of PDF values that end a frame
 * @start_poll: If not %NULL the ring will call this function when an
 *		interrupt is triggered and masked, instead of invoking
 *		the callback for each Rx frame.
 * @poll_data: Optional data passed to @start_poll
 */
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags, u16 sof_mask, u16 eof_mask,
				 void (*start_poll)(void *), void *poll_data)
{
	return tb_ring_alloc(nhi, hop, size, false, flags, sof_mask, eof_mask,
			     start_poll, poll_data);
}
EXPORT_SYMBOL_GPL(tb_ring_alloc_rx);
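/*
 * Allocation sketch (illustration only): the control channel, for example,
 * could set up its rings roughly like this, where the sof/eof masks select
 * which PDF values delimit a frame. Ring 0 is reserved for the control
 * channel; other clients would pass -1 and let nhi_alloc_hop() pick a
 * HopID:
 *
 *	tx = tb_ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
 *	rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND,
 *			      0xffff, 0xffff, NULL, NULL);
 *	if (!tx || !rx)
 *		// bail out, freeing whichever allocation succeeded
 */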
/**
 * tb_ring_start() - enable a ring
 *
 * Must not be invoked in parallel with tb_ring_stop().
 */
void tb_ring_start(struct tb_ring *ring)
{
	u16 frame_size;
	u32 flags;

	spin_lock_irq(&ring->nhi->lock);
	spin_lock(&ring->lock);
	if (ring->nhi->going_away)
		goto err;
	if (ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
		goto err;
	}
	dev_dbg(&ring->nhi->pdev->dev, "starting %s %d\n",
		RING_TYPE(ring), ring->hop);

	if (ring->flags & RING_FLAG_FRAME) {
		/* Means 4096 */
		frame_size = 0;
		flags = RING_FLAG_ENABLE;
	} else {
		frame_size = TB_FRAME_SIZE;
		flags = RING_FLAG_ENABLE | RING_FLAG_RAW;
	}

	ring_iowrite64desc(ring, ring->descriptors_dma, 0);
	if (ring->is_tx) {
		ring_iowrite32desc(ring, ring->size, 12);
		ring_iowrite32options(ring, 0, 4); /* time related? */
		ring_iowrite32options(ring, flags, 0);
	} else {
		u32 sof_eof_mask = ring->sof_mask << 16 | ring->eof_mask;

		ring_iowrite32desc(ring, (frame_size << 16) | ring->size, 12);
		ring_iowrite32options(ring, sof_eof_mask, 4);
		ring_iowrite32options(ring, flags, 0);
	}
	ring_interrupt_active(ring, true);
	ring->running = true;
err:
	spin_unlock(&ring->lock);
	spin_unlock_irq(&ring->nhi->lock);
}
EXPORT_SYMBOL_GPL(tb_ring_start);

/**
 * tb_ring_stop() - shutdown a ring
 *
 * Must not be invoked from a callback.
 *
 * This method will disable the ring. Once stopped, further calls to
 * tb_ring_tx/tb_ring_rx will return -ESHUTDOWN until the ring is
 * started again.
 *
 * All enqueued frames will be canceled and their callbacks will be executed
 * with frame->canceled set to true (on the callback thread). This method
 * returns only after all callback invocations have finished.
 */
void tb_ring_stop(struct tb_ring *ring)
{
	spin_lock_irq(&ring->nhi->lock);
	spin_lock(&ring->lock);
	dev_dbg(&ring->nhi->pdev->dev, "stopping %s %d\n",
		RING_TYPE(ring), ring->hop);
	if (ring->nhi->going_away)
		goto err;
	if (!ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
			 RING_TYPE(ring), ring->hop);
		goto err;
	}
	ring_interrupt_active(ring, false);

	ring_iowrite32options(ring, 0, 0);
	ring_iowrite64desc(ring, 0, 0);
	ring_iowrite32desc(ring, 0, 8);
	ring_iowrite32desc(ring, 0, 12);
	ring->head = 0;
	ring->tail = 0;
	ring->running = false;

err:
	spin_unlock(&ring->lock);
	spin_unlock_irq(&ring->nhi->lock);

	/*
	 * schedule ring->work to invoke callbacks on all remaining frames.
	 */
	schedule_work(&ring->work);
	flush_work(&ring->work);
}
EXPORT_SYMBOL_GPL(tb_ring_stop);

/*
 * tb_ring_free() - free ring
 *
 * When this method returns all invocations of ring->callback will have
 * finished.
 *
 * Ring must be stopped.
 *
 * Must NOT be called from ring_frame->callback!
 */
void tb_ring_free(struct tb_ring *ring)
{
	spin_lock_irq(&ring->nhi->lock);
	/*
	 * Dissociate the ring from the NHI. This also ensures that
	 * nhi_interrupt_work cannot reschedule ring->work.
	 */
	if (ring->is_tx)
		ring->nhi->tx_rings[ring->hop] = NULL;
	else
		ring->nhi->rx_rings[ring->hop] = NULL;

	if (ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
			 RING_TYPE(ring), ring->hop);
	}
	spin_unlock_irq(&ring->nhi->lock);

	ring_release_msix(ring);

	dma_free_coherent(&ring->nhi->pdev->dev,
			  ring->size * sizeof(*ring->descriptors),
			  ring->descriptors, ring->descriptors_dma);

	ring->descriptors = NULL;
	ring->descriptors_dma = 0;

	dev_dbg(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring),
		ring->hop);

	/*
	 * ring->work can no longer be scheduled (it is scheduled only
	 * by nhi_interrupt_work, ring_stop and ring_msix). Wait for it
	 * to finish before freeing the ring.
	 */
	flush_work(&ring->work);
	kfree(ring);
}
EXPORT_SYMBOL_GPL(tb_ring_free);
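/*
 * Lifecycle sketch (illustration only): a typical client pairs the calls
 * above as follows; tb_ring_stop() must complete before tb_ring_free(),
 * and neither may be called from a frame callback. sof_mask/eof_mask are
 * hypothetical values here:
 *
 *	ring = tb_ring_alloc_rx(nhi, -1, 256, 0, sof_mask, eof_mask,
 *				NULL, NULL);
 *	tb_ring_start(ring);
 *	// ... enqueue frames, handle completion callbacks ...
 *	tb_ring_stop(ring);	// cancels anything still queued
 *	tb_ring_free(ring);
 */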
/**
 * nhi_mailbox_cmd() - Send a command through NHI mailbox
 * @nhi: Pointer to the NHI structure
 * @cmd: Command to send
 * @data: Data to be sent with the command
 *
 * Sends mailbox command to the firmware running on NHI. Returns %0 in
 * case of success and negative errno in case of failure.
 */
int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data)
{
	ktime_t timeout;
	u32 val;

	iowrite32(data, nhi->iobase + REG_INMAIL_DATA);

	val = ioread32(nhi->iobase + REG_INMAIL_CMD);
	val &= ~(REG_INMAIL_CMD_MASK | REG_INMAIL_ERROR);
	val |= REG_INMAIL_OP_REQUEST | cmd;
	iowrite32(val, nhi->iobase + REG_INMAIL_CMD);

	timeout = ktime_add_ms(ktime_get(), NHI_MAILBOX_TIMEOUT);
	do {
		val = ioread32(nhi->iobase + REG_INMAIL_CMD);
		if (!(val & REG_INMAIL_OP_REQUEST))
			break;
		usleep_range(10, 20);
	} while (ktime_before(ktime_get(), timeout));

	if (val & REG_INMAIL_OP_REQUEST)
		return -ETIMEDOUT;
	if (val & REG_INMAIL_ERROR)
		return -EIO;

	return 0;
}

/**
 * nhi_mailbox_mode() - Return current firmware operation mode
 * @nhi: Pointer to the NHI structure
 *
 * The function reads current firmware operation mode using NHI mailbox
 * registers and returns it to the caller.
 */
enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi)
{
	u32 val;

	val = ioread32(nhi->iobase + REG_OUTMAIL_CMD);
	val &= REG_OUTMAIL_CMD_OPMODE_MASK;
	val >>= REG_OUTMAIL_CMD_OPMODE_SHIFT;

	return (enum nhi_fw_mode)val;
}
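/*
 * Usage sketch (illustration only, enum values assumed from nhi.h): the
 * ICM support code drives the firmware roughly like this, first checking
 * which mode the firmware is in and then issuing a mailbox command:
 *
 *	if (nhi_mailbox_mode(nhi) == NHI_FW_CM_MODE) {
 *		ret = nhi_mailbox_cmd(nhi, NHI_MAILBOX_SAVE_DEVS, 0);
 *		if (ret)
 *			// -ETIMEDOUT or -EIO from the sequence above
 *	}
 */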

static void nhi_interrupt_work(struct work_struct *work)
{
	struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
	int value = 0; /* Suppress uninitialized usage warning. */
	int bit;
	int hop = -1;
	int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */
	struct tb_ring *ring;

	spin_lock_irq(&nhi->lock);

	/*
	 * Starting at REG_RING_NOTIFY_BASE there are three status bitfields
	 * (TX, RX, RX overflow). We iterate over the bits and read new
	 * dwords as required. The registers are cleared on read.
	 */
	for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
		if (bit % 32 == 0)
			value = ioread32(nhi->iobase
					 + REG_RING_NOTIFY_BASE
					 + 4 * (bit / 32));
		if (++hop == nhi->hop_count) {
			hop = 0;
			type++;
		}
		if ((value & (1 << (bit % 32))) == 0)
			continue;
		if (type == 2) {
			dev_warn(&nhi->pdev->dev,
				 "RX overflow for ring %d\n",
				 hop);
			continue;
		}
		if (type == 0)
			ring = nhi->tx_rings[hop];
		else
			ring = nhi->rx_rings[hop];
		if (ring == NULL) {
			dev_warn(&nhi->pdev->dev,
				 "got interrupt for inactive %s ring %d\n",
				 type ? "RX" : "TX",
				 hop);
			continue;
		}

		spin_lock(&ring->lock);
		__ring_interrupt(ring);
		spin_unlock(&ring->lock);
	}
	spin_unlock_irq(&nhi->lock);
}
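/*
 * Worked example (illustration only): with a hypothetical hop_count of 12
 * the loop above scans 36 bits. Bit 14 falls in the first dword, and since
 * 14 = 12 + 2 it is the RX (type 1) status bit for ring 2; bit 26 would be
 * the RX overflow (type 2) indication for the same ring.
 */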

static irqreturn_t nhi_msi(int irq, void *data)
{
	struct tb_nhi *nhi = data;
	schedule_work(&nhi->interrupt_work);
	return IRQ_HANDLED;
}

static int __nhi_suspend_noirq(struct device *dev, bool wakeup)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);
	struct tb_nhi *nhi = tb->nhi;
	int ret;

	ret = tb_domain_suspend_noirq(tb);
	if (ret)
		return ret;

	if (nhi->ops && nhi->ops->suspend_noirq) {
		ret = nhi->ops->suspend_noirq(tb->nhi, wakeup);
		if (ret)
			return ret;
	}

	return 0;
}

static int nhi_suspend_noirq(struct device *dev)
{
	return __nhi_suspend_noirq(dev, device_may_wakeup(dev));
}

static int nhi_freeze_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	return tb_domain_freeze_noirq(tb);
}

static int nhi_thaw_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	return tb_domain_thaw_noirq(tb);
}

static bool nhi_wake_supported(struct pci_dev *pdev)
{
	u8 val;

	/*
	 * If power rails are sustainable for wakeup from S4 this
	 * property is set by the BIOS.
	 */
	if (!device_property_read_u8(&pdev->dev, "WAKE_SUPPORTED", &val))
		return !!val;

	return true;
}

static int nhi_poweroff_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	bool wakeup;

	wakeup = device_may_wakeup(dev) && nhi_wake_supported(pdev);
	return __nhi_suspend_noirq(dev, wakeup);
}

static void nhi_enable_int_throttling(struct tb_nhi *nhi)
{
	/* Throttling is specified in 256ns increments */
	u32 throttle = DIV_ROUND_UP(128 * NSEC_PER_USEC, 256);
	unsigned int i;

	/*
	 * Configure interrupt throttling for all vectors even if we
	 * only use few.
	 */
	for (i = 0; i < MSIX_MAX_VECS; i++) {
		u32 reg = REG_INT_THROTTLING_RATE + i * 4;
		iowrite32(throttle, nhi->iobase + reg);
	}
}

static int nhi_resume_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);
	struct tb_nhi *nhi = tb->nhi;
	int ret;

	/*
	 * Check that the device is still there. It may be that the user
	 * unplugged the last device which causes the host controller to go
	 * away on PCs.
	 */
	if (!pci_device_is_present(pdev)) {
		nhi->going_away = true;
	} else {
		if (nhi->ops && nhi->ops->resume_noirq) {
			ret = nhi->ops->resume_noirq(nhi);
			if (ret)
				return ret;
		}
		nhi_enable_int_throttling(tb->nhi);
	}

	return tb_domain_resume_noirq(tb);
}

static int nhi_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	return tb_domain_suspend(tb);
}

static void nhi_complete(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	/*
	 * If we were runtime suspended when system suspend started,
	 * schedule runtime resume now. It should bring the domain back
	 * to functional state.
	 */
	if (pm_runtime_suspended(&pdev->dev))
		pm_runtime_resume(&pdev->dev);
	else
		tb_domain_complete(tb);
}

static int nhi_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);
	struct tb_nhi *nhi = tb->nhi;
	int ret;

	ret = tb_domain_runtime_suspend(tb);
	if (ret)
		return ret;

	if (nhi->ops && nhi->ops->runtime_suspend) {
		ret = nhi->ops->runtime_suspend(tb->nhi);
		if (ret)
			return ret;
	}
	return 0;
}

static int nhi_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);
	struct tb_nhi *nhi = tb->nhi;
	int ret;

	if (nhi->ops && nhi->ops->runtime_resume) {
		ret = nhi->ops->runtime_resume(nhi);
		if (ret)
			return ret;
	}

	nhi_enable_int_throttling(nhi);
	return tb_domain_runtime_resume(tb);
}

static void nhi_shutdown(struct tb_nhi *nhi)
{
	int i;

	dev_dbg(&nhi->pdev->dev, "shutdown\n");

	for (i = 0; i < nhi->hop_count; i++) {
		if (nhi->tx_rings[i])
			dev_WARN(&nhi->pdev->dev,
				 "TX ring %d is still active\n", i);
		if (nhi->rx_rings[i])
			dev_WARN(&nhi->pdev->dev,
				 "RX ring %d is still active\n", i);
	}
	nhi_disable_interrupts(nhi);
	/*
	 * We have to release the irq before calling flush_work. Otherwise an
	 * already executing IRQ handler could call schedule_work again.
	 */
	if (!nhi->pdev->msix_enabled) {
		devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
		flush_work(&nhi->interrupt_work);
	}
	ida_destroy(&nhi->msix_ida);

	if (nhi->ops && nhi->ops->shutdown)
		nhi->ops->shutdown(nhi);
}

static int nhi_init_msi(struct tb_nhi *nhi)
{
	struct pci_dev *pdev = nhi->pdev;
	int res, irq, nvec;

	/* In case someone left them on. */
	nhi_disable_interrupts(nhi);

	nhi_enable_int_throttling(nhi);

	ida_init(&nhi->msix_ida);

	/*
	 * The NHI has 16 MSI-X vectors or a single MSI. We first try to
	 * get all MSI-X vectors and if we succeed, each ring will have
	 * one MSI-X. If for some reason that does not work out, we
	 * fall back to a single MSI.
	 */
	nvec = pci_alloc_irq_vectors(pdev, MSIX_MIN_VECS, MSIX_MAX_VECS,
				     PCI_IRQ_MSIX);
	if (nvec < 0) {
		nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
		if (nvec < 0)
			return nvec;

		INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);

		irq = pci_irq_vector(nhi->pdev, 0);
		if (irq < 0)
			return irq;

		res = devm_request_irq(&pdev->dev, irq, nhi_msi,
				       IRQF_NO_SUSPEND, "thunderbolt", nhi);
		if (res) {
			dev_err(&pdev->dev, "request_irq failed, aborting\n");
			return res;
		}
	}

	return 0;
}

static bool nhi_imr_valid(struct pci_dev *pdev)
{
	u8 val;

	if (!device_property_read_u8(&pdev->dev, "IMR_VALID", &val))
		return !!val;

	return true;
}

/*
 * During suspend the Thunderbolt controller is reset and all PCIe
 * tunnels are lost. The NHI driver will try to reestablish all tunnels
 * during resume. This adds device links between the tunneled PCIe
 * downstream ports and the NHI so that the device core will make sure
 * NHI is resumed before the rest.
 */
static void tb_apple_add_links(struct tb_nhi *nhi)
{
	struct pci_dev *upstream, *pdev;

	if (!x86_apple_machine)
		return;

	switch (nhi->pdev->device) {
	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
		break;
	default:
		return;
	}

	upstream = pci_upstream_bridge(nhi->pdev);
	while (upstream) {
		if (!pci_is_pcie(upstream))
			return;
		if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
			break;
		upstream = pci_upstream_bridge(upstream);
	}

	if (!upstream)
		return;

	/*
	 * For each hotplug downstream port, create a device link back to
	 * the NHI so that PCIe tunnels can be re-established after sleep.
	 */
	for_each_pci_bridge(pdev, upstream->subordinate) {
		const struct device_link *link;

		if (!pci_is_pcie(pdev))
			continue;
		if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
		    !pdev->is_hotplug_bridge)
			continue;

		link = device_link_add(&pdev->dev, &nhi->pdev->dev,
				       DL_FLAG_AUTOREMOVE_SUPPLIER |
				       DL_FLAG_PM_RUNTIME);
		if (link) {
			dev_dbg(&nhi->pdev->dev, "created link from %s\n",
				dev_name(&pdev->dev));
		} else {
			dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
				 dev_name(&pdev->dev));
		}
	}
}

static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct tb_nhi *nhi;
	struct tb *tb;
	int res;

	if (!nhi_imr_valid(pdev)) {
		dev_warn(&pdev->dev, "firmware image not valid, aborting\n");
		return -ENODEV;
	}

	res = pcim_enable_device(pdev);
	if (res) {
		dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
"cannot enable PCI device, aborting\n"); 117716603153SAndreas Noever return res; 117816603153SAndreas Noever } 117916603153SAndreas Noever 118016603153SAndreas Noever res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt"); 118116603153SAndreas Noever if (res) { 118216603153SAndreas Noever dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n"); 118316603153SAndreas Noever return res; 118416603153SAndreas Noever } 118516603153SAndreas Noever 118616603153SAndreas Noever nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL); 118716603153SAndreas Noever if (!nhi) 118816603153SAndreas Noever return -ENOMEM; 118916603153SAndreas Noever 119016603153SAndreas Noever nhi->pdev = pdev; 11913cdb9446SMika Westerberg nhi->ops = (const struct tb_nhi_ops *)id->driver_data; 119216603153SAndreas Noever /* cannot fail - table is allocated bin pcim_iomap_regions */ 119316603153SAndreas Noever nhi->iobase = pcim_iomap_table(pdev)[0]; 119416603153SAndreas Noever nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff; 1195177aa362SMika Westerberg dev_dbg(&pdev->dev, "total paths: %d\n", nhi->hop_count); 119616603153SAndreas Noever 11972a211f32SHimangi Saraogi nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count, 11982a211f32SHimangi Saraogi sizeof(*nhi->tx_rings), GFP_KERNEL); 11992a211f32SHimangi Saraogi nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count, 12002a211f32SHimangi Saraogi sizeof(*nhi->rx_rings), GFP_KERNEL); 120116603153SAndreas Noever if (!nhi->tx_rings || !nhi->rx_rings) 120216603153SAndreas Noever return -ENOMEM; 120316603153SAndreas Noever 1204046bee1fSMika Westerberg res = nhi_init_msi(nhi); 120516603153SAndreas Noever if (res) { 1206046bee1fSMika Westerberg dev_err(&pdev->dev, "cannot enable MSI, aborting\n"); 120716603153SAndreas Noever return res; 120816603153SAndreas Noever } 120916603153SAndreas Noever 121059120e06SMika Westerberg spin_lock_init(&nhi->lock); 121116603153SAndreas Noever 1212dba3caf6SMika Westerberg res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 1213dba3caf6SMika Westerberg if (res) 1214dba3caf6SMika Westerberg res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 1215dba3caf6SMika Westerberg if (res) { 1216dba3caf6SMika Westerberg dev_err(&pdev->dev, "failed to set DMA mask\n"); 1217dba3caf6SMika Westerberg return res; 1218dba3caf6SMika Westerberg } 1219dba3caf6SMika Westerberg 122016603153SAndreas Noever pci_set_master(pdev); 122116603153SAndreas Noever 12223cdb9446SMika Westerberg if (nhi->ops && nhi->ops->init) { 12233cdb9446SMika Westerberg res = nhi->ops->init(nhi); 12243cdb9446SMika Westerberg if (res) 12253cdb9446SMika Westerberg return res; 12263cdb9446SMika Westerberg } 12273cdb9446SMika Westerberg 12282b9941e0SMika Westerberg tb_apple_add_links(nhi); 1229b2be2b05SMika Westerberg tb_acpi_add_links(nhi); 12302b9941e0SMika Westerberg 1231f67cf491SMika Westerberg tb = icm_probe(nhi); 12329d3cce0bSMika Westerberg if (!tb) 1233f67cf491SMika Westerberg tb = tb_probe(nhi); 1234f67cf491SMika Westerberg if (!tb) { 1235f67cf491SMika Westerberg dev_err(&nhi->pdev->dev, 1236f67cf491SMika Westerberg "failed to determine connection manager, aborting\n"); 12379d3cce0bSMika Westerberg return -ENODEV; 1238f67cf491SMika Westerberg } 1239f67cf491SMika Westerberg 1240daa5140fSMika Westerberg dev_dbg(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n"); 12419d3cce0bSMika Westerberg 12429d3cce0bSMika Westerberg res = tb_domain_add(tb); 12439d3cce0bSMika Westerberg if (res) { 1244d6cc51cdSAndreas Noever /* 1245d6cc51cdSAndreas 

static void nhi_remove(struct pci_dev *pdev)
{
        struct tb *tb = pci_get_drvdata(pdev);
        struct tb_nhi *nhi = tb->nhi;

        pm_runtime_get_sync(&pdev->dev);
        pm_runtime_dont_use_autosuspend(&pdev->dev);
        pm_runtime_forbid(&pdev->dev);

        tb_domain_remove(tb);
        nhi_shutdown(nhi);
}

/*
 * The tunneled pci bridges are siblings of us. Use resume_noirq to reenable
 * the tunnels asap. A corresponding pci quirk blocks the downstream bridges
 * resume_noirq until we are done.
 */
static const struct dev_pm_ops nhi_pm_ops = {
        .suspend_noirq = nhi_suspend_noirq,
        .resume_noirq = nhi_resume_noirq,
        .freeze_noirq = nhi_freeze_noirq,  /*
                                            * we just disable hotplug, the
                                            * pci-tunnels stay alive.
                                            */
        .thaw_noirq = nhi_thaw_noirq,
        .restore_noirq = nhi_resume_noirq,
        .suspend = nhi_suspend,
        .poweroff_noirq = nhi_poweroff_noirq,
        .poweroff = nhi_suspend,
        .complete = nhi_complete,
        .runtime_suspend = nhi_runtime_suspend,
        .runtime_resume = nhi_runtime_resume,
};
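
/*
 * Summary of the mapping above: ordinary suspend and poweroff both
 * funnel into nhi_suspend(), whereas hibernation's freeze/thaw use
 * dedicated noirq callbacks that only disable hotplug, so established
 * PCIe tunnels survive while the image is written; restore_noirq then
 * reuses the resume path to re-establish the tunnels.
 */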

static struct pci_device_id nhi_ids[] = {
        /*
         * We have to specify class, the TB bridges use the same device and
         * vendor (sub)id on gen 1 and gen 2 controllers.
         */
        {
                .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
                .vendor = PCI_VENDOR_ID_INTEL,
                .device = PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
                .subvendor = 0x2222, .subdevice = 0x1111,
        },
        {
                .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
                .vendor = PCI_VENDOR_ID_INTEL,
                .device = PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
                .subvendor = 0x2222, .subdevice = 0x1111,
        },
        {
                .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
                .vendor = PCI_VENDOR_ID_INTEL,
                .device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI,
                .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
        },
        {
                .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
                .vendor = PCI_VENDOR_ID_INTEL,
                .device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI,
                .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
        },

        /* Thunderbolt 3 */
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI0),
          .driver_data = (kernel_ulong_t)&icl_nhi_ops },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1),
          .driver_data = (kernel_ulong_t)&icl_nhi_ops },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI0),
          .driver_data = (kernel_ulong_t)&icl_nhi_ops },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI1),
          .driver_data = (kernel_ulong_t)&icl_nhi_ops },

        /* Any USB4 compliant host */
        { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) },

        { 0,}
};

MODULE_DEVICE_TABLE(pci, nhi_ids);
MODULE_LICENSE("GPL");
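
/*
 * Note on the table above: PCI_VDEVICE(INTEL, id) expands to a plain
 * vendor/device match with the subsystem IDs wildcarded, roughly:
 *
 *	.vendor = PCI_VENDOR_ID_INTEL, .device = (id),
 *	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
 *
 * The gen 1/2 entries instead match on class plus the 0x2222/0x1111
 * subsystem IDs because the NHI and the PCIe bridges on those
 * controllers share vendor/device IDs, and the final PCI_DEVICE_CLASS()
 * entry catches any USB4 compliant host by class code alone.
 */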
static struct pci_driver nhi_driver = {
        .name = "thunderbolt",
        .id_table = nhi_ids,
        .probe = nhi_probe,
        .remove = nhi_remove,
        .shutdown = nhi_remove,
        .driver.pm = &nhi_pm_ops,
};

static int __init nhi_init(void)
{
        int ret;

        ret = tb_domain_init();
        if (ret)
                return ret;
        ret = pci_register_driver(&nhi_driver);
        if (ret)
                tb_domain_exit();
        return ret;
}

static void __exit nhi_unload(void)
{
        pci_unregister_driver(&nhi_driver);
        tb_domain_exit();
}

rootfs_initcall(nhi_init);
module_exit(nhi_unload);
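
/*
 * Note: registration uses rootfs_initcall() rather than module_init(),
 * which places nhi_init() at a fixed, earlier slot in the built-in
 * initcall ordering; the likely intent is that prerequisites such as the
 * IOMMU are set up before the NHI starts issuing DMA. Module builds are
 * unaffected, since all module initcalls run at load time, and
 * module_exit() still provides the normal unload path.
 */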