109c434b8SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 216603153SAndreas Noever /* 315c6784cSMika Westerberg * Thunderbolt driver - NHI driver 416603153SAndreas Noever * 516603153SAndreas Noever * The NHI (native host interface) is the pci device that allows us to send and 616603153SAndreas Noever * receive frames from the thunderbolt bus. 716603153SAndreas Noever * 816603153SAndreas Noever * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> 915c6784cSMika Westerberg * Copyright (C) 2018, Intel Corporation 1016603153SAndreas Noever */ 1116603153SAndreas Noever 1223dd5bb4SAndreas Noever #include <linux/pm_runtime.h> 1316603153SAndreas Noever #include <linux/slab.h> 1416603153SAndreas Noever #include <linux/errno.h> 1516603153SAndreas Noever #include <linux/pci.h> 1616603153SAndreas Noever #include <linux/interrupt.h> 1716603153SAndreas Noever #include <linux/module.h> 18cd446ee2SMika Westerberg #include <linux/delay.h> 193cdb9446SMika Westerberg #include <linux/property.h> 202b9941e0SMika Westerberg #include <linux/platform_data/x86/apple.h> 2116603153SAndreas Noever 2216603153SAndreas Noever #include "nhi.h" 2316603153SAndreas Noever #include "nhi_regs.h" 24d6cc51cdSAndreas Noever #include "tb.h" 2516603153SAndreas Noever 2616603153SAndreas Noever #define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring") 2716603153SAndreas Noever 2853f13319SMika Westerberg #define RING_FIRST_USABLE_HOPID 1 299fb1e654SMika Westerberg 309fb1e654SMika Westerberg /* 31046bee1fSMika Westerberg * Minimal number of vectors when we use MSI-X. Two for control channel 32046bee1fSMika Westerberg * Rx/Tx and the rest four are for cross domain DMA paths. 33046bee1fSMika Westerberg */ 34046bee1fSMika Westerberg #define MSIX_MIN_VECS 6 35046bee1fSMika Westerberg #define MSIX_MAX_VECS 16 3616603153SAndreas Noever 37cd446ee2SMika Westerberg #define NHI_MAILBOX_TIMEOUT 500 /* ms */ 38cd446ee2SMika Westerberg 3916603153SAndreas Noever static int ring_interrupt_index(struct tb_ring *ring) 4016603153SAndreas Noever { 4116603153SAndreas Noever int bit = ring->hop; 4216603153SAndreas Noever if (!ring->is_tx) 4316603153SAndreas Noever bit += ring->nhi->hop_count; 4416603153SAndreas Noever return bit; 4516603153SAndreas Noever } 4616603153SAndreas Noever 4716603153SAndreas Noever /** 4816603153SAndreas Noever * ring_interrupt_active() - activate/deactivate interrupts for a single ring 4916603153SAndreas Noever * 5016603153SAndreas Noever * ring->nhi->lock must be held. 5116603153SAndreas Noever */ 5216603153SAndreas Noever static void ring_interrupt_active(struct tb_ring *ring, bool active) 5316603153SAndreas Noever { 5419bf4d4fSLukas Wunner int reg = REG_RING_INTERRUPT_BASE + 5519bf4d4fSLukas Wunner ring_interrupt_index(ring) / 32 * 4; 5616603153SAndreas Noever int bit = ring_interrupt_index(ring) & 31; 5716603153SAndreas Noever int mask = 1 << bit; 5816603153SAndreas Noever u32 old, new; 59046bee1fSMika Westerberg 60046bee1fSMika Westerberg if (ring->irq > 0) { 61046bee1fSMika Westerberg u32 step, shift, ivr, misc; 62046bee1fSMika Westerberg void __iomem *ivr_base; 63046bee1fSMika Westerberg int index; 64046bee1fSMika Westerberg 65046bee1fSMika Westerberg if (ring->is_tx) 66046bee1fSMika Westerberg index = ring->hop; 67046bee1fSMika Westerberg else 68046bee1fSMika Westerberg index = ring->hop + ring->nhi->hop_count; 69046bee1fSMika Westerberg 70046bee1fSMika Westerberg /* 71046bee1fSMika Westerberg * Ask the hardware to clear interrupt status bits automatically 72046bee1fSMika Westerberg * since we already know which interrupt was triggered. 73046bee1fSMika Westerberg */ 74046bee1fSMika Westerberg misc = ioread32(ring->nhi->iobase + REG_DMA_MISC); 75046bee1fSMika Westerberg if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) { 76046bee1fSMika Westerberg misc |= REG_DMA_MISC_INT_AUTO_CLEAR; 77046bee1fSMika Westerberg iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC); 78046bee1fSMika Westerberg } 79046bee1fSMika Westerberg 80046bee1fSMika Westerberg ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE; 81046bee1fSMika Westerberg step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS; 82046bee1fSMika Westerberg shift = index % REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS; 83046bee1fSMika Westerberg ivr = ioread32(ivr_base + step); 84046bee1fSMika Westerberg ivr &= ~(REG_INT_VEC_ALLOC_MASK << shift); 85046bee1fSMika Westerberg if (active) 86046bee1fSMika Westerberg ivr |= ring->vector << shift; 87046bee1fSMika Westerberg iowrite32(ivr, ivr_base + step); 88046bee1fSMika Westerberg } 89046bee1fSMika Westerberg 9016603153SAndreas Noever old = ioread32(ring->nhi->iobase + reg); 9116603153SAndreas Noever if (active) 9216603153SAndreas Noever new = old | mask; 9316603153SAndreas Noever else 9416603153SAndreas Noever new = old & ~mask; 9516603153SAndreas Noever 96daa5140fSMika Westerberg dev_dbg(&ring->nhi->pdev->dev, 9716603153SAndreas Noever "%s interrupt at register %#x bit %d (%#x -> %#x)\n", 9816603153SAndreas Noever active ? "enabling" : "disabling", reg, bit, old, new); 9916603153SAndreas Noever 10016603153SAndreas Noever if (new == old) 10116603153SAndreas Noever dev_WARN(&ring->nhi->pdev->dev, 10216603153SAndreas Noever "interrupt for %s %d is already %s\n", 10316603153SAndreas Noever RING_TYPE(ring), ring->hop, 10416603153SAndreas Noever active ? "enabled" : "disabled"); 10516603153SAndreas Noever iowrite32(new, ring->nhi->iobase + reg); 10616603153SAndreas Noever } 10716603153SAndreas Noever 10816603153SAndreas Noever /** 10916603153SAndreas Noever * nhi_disable_interrupts() - disable interrupts for all rings 11016603153SAndreas Noever * 11116603153SAndreas Noever * Use only during init and shutdown. 11216603153SAndreas Noever */ 11316603153SAndreas Noever static void nhi_disable_interrupts(struct tb_nhi *nhi) 11416603153SAndreas Noever { 11516603153SAndreas Noever int i = 0; 11616603153SAndreas Noever /* disable interrupts */ 11716603153SAndreas Noever for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++) 11816603153SAndreas Noever iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i); 11916603153SAndreas Noever 12016603153SAndreas Noever /* clear interrupt status bits */ 12116603153SAndreas Noever for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++) 12216603153SAndreas Noever ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i); 12316603153SAndreas Noever } 12416603153SAndreas Noever 12516603153SAndreas Noever /* ring helper methods */ 12616603153SAndreas Noever 12716603153SAndreas Noever static void __iomem *ring_desc_base(struct tb_ring *ring) 12816603153SAndreas Noever { 12916603153SAndreas Noever void __iomem *io = ring->nhi->iobase; 13016603153SAndreas Noever io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE; 13116603153SAndreas Noever io += ring->hop * 16; 13216603153SAndreas Noever return io; 13316603153SAndreas Noever } 13416603153SAndreas Noever 13516603153SAndreas Noever static void __iomem *ring_options_base(struct tb_ring *ring) 13616603153SAndreas Noever { 13716603153SAndreas Noever void __iomem *io = ring->nhi->iobase; 13816603153SAndreas Noever io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE; 13916603153SAndreas Noever io += ring->hop * 32; 14016603153SAndreas Noever return io; 14116603153SAndreas Noever } 14216603153SAndreas Noever 14394379521SMika Westerberg static void ring_iowrite_cons(struct tb_ring *ring, u16 cons) 14416603153SAndreas Noever { 14594379521SMika Westerberg /* 14694379521SMika Westerberg * The other 16-bits in the register is read-only and writes to it 14794379521SMika Westerberg * are ignored by the hardware so we can save one ioread32() by 14894379521SMika Westerberg * filling the read-only bits with zeroes. 14994379521SMika Westerberg */ 15094379521SMika Westerberg iowrite32(cons, ring_desc_base(ring) + 8); 15194379521SMika Westerberg } 15294379521SMika Westerberg 15394379521SMika Westerberg static void ring_iowrite_prod(struct tb_ring *ring, u16 prod) 15494379521SMika Westerberg { 15594379521SMika Westerberg /* See ring_iowrite_cons() above for explanation */ 15694379521SMika Westerberg iowrite32(prod << 16, ring_desc_base(ring) + 8); 15716603153SAndreas Noever } 15816603153SAndreas Noever 15916603153SAndreas Noever static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset) 16016603153SAndreas Noever { 16116603153SAndreas Noever iowrite32(value, ring_desc_base(ring) + offset); 16216603153SAndreas Noever } 16316603153SAndreas Noever 16416603153SAndreas Noever static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset) 16516603153SAndreas Noever { 16616603153SAndreas Noever iowrite32(value, ring_desc_base(ring) + offset); 16716603153SAndreas Noever iowrite32(value >> 32, ring_desc_base(ring) + offset + 4); 16816603153SAndreas Noever } 16916603153SAndreas Noever 17016603153SAndreas Noever static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset) 17116603153SAndreas Noever { 17216603153SAndreas Noever iowrite32(value, ring_options_base(ring) + offset); 17316603153SAndreas Noever } 17416603153SAndreas Noever 17516603153SAndreas Noever static bool ring_full(struct tb_ring *ring) 17616603153SAndreas Noever { 17716603153SAndreas Noever return ((ring->head + 1) % ring->size) == ring->tail; 17816603153SAndreas Noever } 17916603153SAndreas Noever 18016603153SAndreas Noever static bool ring_empty(struct tb_ring *ring) 18116603153SAndreas Noever { 18216603153SAndreas Noever return ring->head == ring->tail; 18316603153SAndreas Noever } 18416603153SAndreas Noever 18516603153SAndreas Noever /** 18616603153SAndreas Noever * ring_write_descriptors() - post frames from ring->queue to the controller 18716603153SAndreas Noever * 18816603153SAndreas Noever * ring->lock is held. 18916603153SAndreas Noever */ 19016603153SAndreas Noever static void ring_write_descriptors(struct tb_ring *ring) 19116603153SAndreas Noever { 19216603153SAndreas Noever struct ring_frame *frame, *n; 19316603153SAndreas Noever struct ring_desc *descriptor; 19416603153SAndreas Noever list_for_each_entry_safe(frame, n, &ring->queue, list) { 19516603153SAndreas Noever if (ring_full(ring)) 19616603153SAndreas Noever break; 19716603153SAndreas Noever list_move_tail(&frame->list, &ring->in_flight); 19816603153SAndreas Noever descriptor = &ring->descriptors[ring->head]; 19916603153SAndreas Noever descriptor->phys = frame->buffer_phy; 20016603153SAndreas Noever descriptor->time = 0; 20116603153SAndreas Noever descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT; 20216603153SAndreas Noever if (ring->is_tx) { 20316603153SAndreas Noever descriptor->length = frame->size; 20416603153SAndreas Noever descriptor->eof = frame->eof; 20516603153SAndreas Noever descriptor->sof = frame->sof; 20616603153SAndreas Noever } 20716603153SAndreas Noever ring->head = (ring->head + 1) % ring->size; 20894379521SMika Westerberg if (ring->is_tx) 20994379521SMika Westerberg ring_iowrite_prod(ring, ring->head); 21094379521SMika Westerberg else 21194379521SMika Westerberg ring_iowrite_cons(ring, ring->head); 21216603153SAndreas Noever } 21316603153SAndreas Noever } 21416603153SAndreas Noever 21516603153SAndreas Noever /** 21616603153SAndreas Noever * ring_work() - progress completed frames 21716603153SAndreas Noever * 21816603153SAndreas Noever * If the ring is shutting down then all frames are marked as canceled and 21916603153SAndreas Noever * their callbacks are invoked. 22016603153SAndreas Noever * 22116603153SAndreas Noever * Otherwise we collect all completed frame from the ring buffer, write new 22216603153SAndreas Noever * frame to the ring buffer and invoke the callbacks for the completed frames. 22316603153SAndreas Noever */ 22416603153SAndreas Noever static void ring_work(struct work_struct *work) 22516603153SAndreas Noever { 22616603153SAndreas Noever struct tb_ring *ring = container_of(work, typeof(*ring), work); 22716603153SAndreas Noever struct ring_frame *frame; 22816603153SAndreas Noever bool canceled = false; 22922b7de10SMika Westerberg unsigned long flags; 23016603153SAndreas Noever LIST_HEAD(done); 23122b7de10SMika Westerberg 23222b7de10SMika Westerberg spin_lock_irqsave(&ring->lock, flags); 23316603153SAndreas Noever 23416603153SAndreas Noever if (!ring->running) { 23516603153SAndreas Noever /* Move all frames to done and mark them as canceled. */ 23616603153SAndreas Noever list_splice_tail_init(&ring->in_flight, &done); 23716603153SAndreas Noever list_splice_tail_init(&ring->queue, &done); 23816603153SAndreas Noever canceled = true; 23916603153SAndreas Noever goto invoke_callback; 24016603153SAndreas Noever } 24116603153SAndreas Noever 24216603153SAndreas Noever while (!ring_empty(ring)) { 24316603153SAndreas Noever if (!(ring->descriptors[ring->tail].flags 24416603153SAndreas Noever & RING_DESC_COMPLETED)) 24516603153SAndreas Noever break; 24616603153SAndreas Noever frame = list_first_entry(&ring->in_flight, typeof(*frame), 24716603153SAndreas Noever list); 24816603153SAndreas Noever list_move_tail(&frame->list, &done); 24916603153SAndreas Noever if (!ring->is_tx) { 25016603153SAndreas Noever frame->size = ring->descriptors[ring->tail].length; 25116603153SAndreas Noever frame->eof = ring->descriptors[ring->tail].eof; 25216603153SAndreas Noever frame->sof = ring->descriptors[ring->tail].sof; 25316603153SAndreas Noever frame->flags = ring->descriptors[ring->tail].flags; 25416603153SAndreas Noever } 25516603153SAndreas Noever ring->tail = (ring->tail + 1) % ring->size; 25616603153SAndreas Noever } 25716603153SAndreas Noever ring_write_descriptors(ring); 25816603153SAndreas Noever 25916603153SAndreas Noever invoke_callback: 26022b7de10SMika Westerberg /* allow callbacks to schedule new work */ 26122b7de10SMika Westerberg spin_unlock_irqrestore(&ring->lock, flags); 26216603153SAndreas Noever while (!list_empty(&done)) { 26316603153SAndreas Noever frame = list_first_entry(&done, typeof(*frame), list); 26416603153SAndreas Noever /* 26516603153SAndreas Noever * The callback may reenqueue or delete frame. 26616603153SAndreas Noever * Do not hold on to it. 26716603153SAndreas Noever */ 26816603153SAndreas Noever list_del_init(&frame->list); 2694ffe722eSMika Westerberg if (frame->callback) 27016603153SAndreas Noever frame->callback(ring, frame, canceled); 27116603153SAndreas Noever } 27216603153SAndreas Noever } 27316603153SAndreas Noever 2743b3d9f4dSMika Westerberg int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame) 27516603153SAndreas Noever { 27622b7de10SMika Westerberg unsigned long flags; 27716603153SAndreas Noever int ret = 0; 27822b7de10SMika Westerberg 27922b7de10SMika Westerberg spin_lock_irqsave(&ring->lock, flags); 28016603153SAndreas Noever if (ring->running) { 28116603153SAndreas Noever list_add_tail(&frame->list, &ring->queue); 28216603153SAndreas Noever ring_write_descriptors(ring); 28316603153SAndreas Noever } else { 28416603153SAndreas Noever ret = -ESHUTDOWN; 28516603153SAndreas Noever } 28622b7de10SMika Westerberg spin_unlock_irqrestore(&ring->lock, flags); 28716603153SAndreas Noever return ret; 28816603153SAndreas Noever } 2893b3d9f4dSMika Westerberg EXPORT_SYMBOL_GPL(__tb_ring_enqueue); 29016603153SAndreas Noever 2914ffe722eSMika Westerberg /** 2924ffe722eSMika Westerberg * tb_ring_poll() - Poll one completed frame from the ring 2934ffe722eSMika Westerberg * @ring: Ring to poll 2944ffe722eSMika Westerberg * 2954ffe722eSMika Westerberg * This function can be called when @start_poll callback of the @ring 2964ffe722eSMika Westerberg * has been called. It will read one completed frame from the ring and 2974ffe722eSMika Westerberg * return it to the caller. Returns %NULL if there is no more completed 2984ffe722eSMika Westerberg * frames. 2994ffe722eSMika Westerberg */ 3004ffe722eSMika Westerberg struct ring_frame *tb_ring_poll(struct tb_ring *ring) 3014ffe722eSMika Westerberg { 3024ffe722eSMika Westerberg struct ring_frame *frame = NULL; 3034ffe722eSMika Westerberg unsigned long flags; 3044ffe722eSMika Westerberg 3054ffe722eSMika Westerberg spin_lock_irqsave(&ring->lock, flags); 3064ffe722eSMika Westerberg if (!ring->running) 3074ffe722eSMika Westerberg goto unlock; 3084ffe722eSMika Westerberg if (ring_empty(ring)) 3094ffe722eSMika Westerberg goto unlock; 3104ffe722eSMika Westerberg 3114ffe722eSMika Westerberg if (ring->descriptors[ring->tail].flags & RING_DESC_COMPLETED) { 3124ffe722eSMika Westerberg frame = list_first_entry(&ring->in_flight, typeof(*frame), 3134ffe722eSMika Westerberg list); 3144ffe722eSMika Westerberg list_del_init(&frame->list); 3154ffe722eSMika Westerberg 3164ffe722eSMika Westerberg if (!ring->is_tx) { 3174ffe722eSMika Westerberg frame->size = ring->descriptors[ring->tail].length; 3184ffe722eSMika Westerberg frame->eof = ring->descriptors[ring->tail].eof; 3194ffe722eSMika Westerberg frame->sof = ring->descriptors[ring->tail].sof; 3204ffe722eSMika Westerberg frame->flags = ring->descriptors[ring->tail].flags; 3214ffe722eSMika Westerberg } 3224ffe722eSMika Westerberg 3234ffe722eSMika Westerberg ring->tail = (ring->tail + 1) % ring->size; 3244ffe722eSMika Westerberg } 3254ffe722eSMika Westerberg 3264ffe722eSMika Westerberg unlock: 3274ffe722eSMika Westerberg spin_unlock_irqrestore(&ring->lock, flags); 3284ffe722eSMika Westerberg return frame; 3294ffe722eSMika Westerberg } 3304ffe722eSMika Westerberg EXPORT_SYMBOL_GPL(tb_ring_poll); 3314ffe722eSMika Westerberg 3324ffe722eSMika Westerberg static void __ring_interrupt_mask(struct tb_ring *ring, bool mask) 3334ffe722eSMika Westerberg { 3344ffe722eSMika Westerberg int idx = ring_interrupt_index(ring); 3354ffe722eSMika Westerberg int reg = REG_RING_INTERRUPT_BASE + idx / 32 * 4; 3364ffe722eSMika Westerberg int bit = idx % 32; 3374ffe722eSMika Westerberg u32 val; 3384ffe722eSMika Westerberg 3394ffe722eSMika Westerberg val = ioread32(ring->nhi->iobase + reg); 3404ffe722eSMika Westerberg if (mask) 3414ffe722eSMika Westerberg val &= ~BIT(bit); 3424ffe722eSMika Westerberg else 3434ffe722eSMika Westerberg val |= BIT(bit); 3444ffe722eSMika Westerberg iowrite32(val, ring->nhi->iobase + reg); 3454ffe722eSMika Westerberg } 3464ffe722eSMika Westerberg 3474ffe722eSMika Westerberg /* Both @nhi->lock and @ring->lock should be held */ 3484ffe722eSMika Westerberg static void __ring_interrupt(struct tb_ring *ring) 3494ffe722eSMika Westerberg { 3504ffe722eSMika Westerberg if (!ring->running) 3514ffe722eSMika Westerberg return; 3524ffe722eSMika Westerberg 3534ffe722eSMika Westerberg if (ring->start_poll) { 35474657181SMika Westerberg __ring_interrupt_mask(ring, true); 3554ffe722eSMika Westerberg ring->start_poll(ring->poll_data); 3564ffe722eSMika Westerberg } else { 3574ffe722eSMika Westerberg schedule_work(&ring->work); 3584ffe722eSMika Westerberg } 3594ffe722eSMika Westerberg } 3604ffe722eSMika Westerberg 3614ffe722eSMika Westerberg /** 3624ffe722eSMika Westerberg * tb_ring_poll_complete() - Re-start interrupt for the ring 3634ffe722eSMika Westerberg * @ring: Ring to re-start the interrupt 3644ffe722eSMika Westerberg * 3654ffe722eSMika Westerberg * This will re-start (unmask) the ring interrupt once the user is done 3664ffe722eSMika Westerberg * with polling. 3674ffe722eSMika Westerberg */ 3684ffe722eSMika Westerberg void tb_ring_poll_complete(struct tb_ring *ring) 3694ffe722eSMika Westerberg { 3704ffe722eSMika Westerberg unsigned long flags; 3714ffe722eSMika Westerberg 3724ffe722eSMika Westerberg spin_lock_irqsave(&ring->nhi->lock, flags); 3734ffe722eSMika Westerberg spin_lock(&ring->lock); 3744ffe722eSMika Westerberg if (ring->start_poll) 3754ffe722eSMika Westerberg __ring_interrupt_mask(ring, false); 3764ffe722eSMika Westerberg spin_unlock(&ring->lock); 3774ffe722eSMika Westerberg spin_unlock_irqrestore(&ring->nhi->lock, flags); 3784ffe722eSMika Westerberg } 3794ffe722eSMika Westerberg EXPORT_SYMBOL_GPL(tb_ring_poll_complete); 3804ffe722eSMika Westerberg 381046bee1fSMika Westerberg static irqreturn_t ring_msix(int irq, void *data) 382046bee1fSMika Westerberg { 383046bee1fSMika Westerberg struct tb_ring *ring = data; 384046bee1fSMika Westerberg 3854ffe722eSMika Westerberg spin_lock(&ring->nhi->lock); 3864ffe722eSMika Westerberg spin_lock(&ring->lock); 3874ffe722eSMika Westerberg __ring_interrupt(ring); 3884ffe722eSMika Westerberg spin_unlock(&ring->lock); 3894ffe722eSMika Westerberg spin_unlock(&ring->nhi->lock); 3904ffe722eSMika Westerberg 391046bee1fSMika Westerberg return IRQ_HANDLED; 392046bee1fSMika Westerberg } 393046bee1fSMika Westerberg 394046bee1fSMika Westerberg static int ring_request_msix(struct tb_ring *ring, bool no_suspend) 395046bee1fSMika Westerberg { 396046bee1fSMika Westerberg struct tb_nhi *nhi = ring->nhi; 397046bee1fSMika Westerberg unsigned long irqflags; 398046bee1fSMika Westerberg int ret; 399046bee1fSMika Westerberg 400046bee1fSMika Westerberg if (!nhi->pdev->msix_enabled) 401046bee1fSMika Westerberg return 0; 402046bee1fSMika Westerberg 403046bee1fSMika Westerberg ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL); 404046bee1fSMika Westerberg if (ret < 0) 405046bee1fSMika Westerberg return ret; 406046bee1fSMika Westerberg 407046bee1fSMika Westerberg ring->vector = ret; 408046bee1fSMika Westerberg 409046bee1fSMika Westerberg ring->irq = pci_irq_vector(ring->nhi->pdev, ring->vector); 410046bee1fSMika Westerberg if (ring->irq < 0) 411046bee1fSMika Westerberg return ring->irq; 412046bee1fSMika Westerberg 413046bee1fSMika Westerberg irqflags = no_suspend ? IRQF_NO_SUSPEND : 0; 414046bee1fSMika Westerberg return request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring); 415046bee1fSMika Westerberg } 416046bee1fSMika Westerberg 417046bee1fSMika Westerberg static void ring_release_msix(struct tb_ring *ring) 418046bee1fSMika Westerberg { 419046bee1fSMika Westerberg if (ring->irq <= 0) 420046bee1fSMika Westerberg return; 421046bee1fSMika Westerberg 422046bee1fSMika Westerberg free_irq(ring->irq, ring); 423046bee1fSMika Westerberg ida_simple_remove(&ring->nhi->msix_ida, ring->vector); 424046bee1fSMika Westerberg ring->vector = 0; 425046bee1fSMika Westerberg ring->irq = 0; 426046bee1fSMika Westerberg } 427046bee1fSMika Westerberg 4289a01c7c2SMika Westerberg static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring) 4299a01c7c2SMika Westerberg { 4309a01c7c2SMika Westerberg int ret = 0; 4319a01c7c2SMika Westerberg 4329a01c7c2SMika Westerberg spin_lock_irq(&nhi->lock); 4339a01c7c2SMika Westerberg 4349a01c7c2SMika Westerberg if (ring->hop < 0) { 4359a01c7c2SMika Westerberg unsigned int i; 4369a01c7c2SMika Westerberg 4379a01c7c2SMika Westerberg /* 4389a01c7c2SMika Westerberg * Automatically allocate HopID from the non-reserved 43953f13319SMika Westerberg * range 1 .. hop_count - 1. 4409a01c7c2SMika Westerberg */ 4419a01c7c2SMika Westerberg for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) { 4429a01c7c2SMika Westerberg if (ring->is_tx) { 4439a01c7c2SMika Westerberg if (!nhi->tx_rings[i]) { 4449a01c7c2SMika Westerberg ring->hop = i; 4459a01c7c2SMika Westerberg break; 4469a01c7c2SMika Westerberg } 4479a01c7c2SMika Westerberg } else { 4489a01c7c2SMika Westerberg if (!nhi->rx_rings[i]) { 4499a01c7c2SMika Westerberg ring->hop = i; 4509a01c7c2SMika Westerberg break; 4519a01c7c2SMika Westerberg } 4529a01c7c2SMika Westerberg } 4539a01c7c2SMika Westerberg } 4549a01c7c2SMika Westerberg } 4559a01c7c2SMika Westerberg 4569a01c7c2SMika Westerberg if (ring->hop < 0 || ring->hop >= nhi->hop_count) { 4579a01c7c2SMika Westerberg dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop); 4589a01c7c2SMika Westerberg ret = -EINVAL; 4599a01c7c2SMika Westerberg goto err_unlock; 4609a01c7c2SMika Westerberg } 4619a01c7c2SMika Westerberg if (ring->is_tx && nhi->tx_rings[ring->hop]) { 4629a01c7c2SMika Westerberg dev_warn(&nhi->pdev->dev, "TX hop %d already allocated\n", 4639a01c7c2SMika Westerberg ring->hop); 4649a01c7c2SMika Westerberg ret = -EBUSY; 4659a01c7c2SMika Westerberg goto err_unlock; 4669a01c7c2SMika Westerberg } else if (!ring->is_tx && nhi->rx_rings[ring->hop]) { 4679a01c7c2SMika Westerberg dev_warn(&nhi->pdev->dev, "RX hop %d already allocated\n", 4689a01c7c2SMika Westerberg ring->hop); 4699a01c7c2SMika Westerberg ret = -EBUSY; 4709a01c7c2SMika Westerberg goto err_unlock; 4719a01c7c2SMika Westerberg } 4729a01c7c2SMika Westerberg 4739a01c7c2SMika Westerberg if (ring->is_tx) 4749a01c7c2SMika Westerberg nhi->tx_rings[ring->hop] = ring; 4759a01c7c2SMika Westerberg else 4769a01c7c2SMika Westerberg nhi->rx_rings[ring->hop] = ring; 4779a01c7c2SMika Westerberg 4789a01c7c2SMika Westerberg err_unlock: 4799a01c7c2SMika Westerberg spin_unlock_irq(&nhi->lock); 4809a01c7c2SMika Westerberg 4819a01c7c2SMika Westerberg return ret; 4829a01c7c2SMika Westerberg } 4839a01c7c2SMika Westerberg 4843b3d9f4dSMika Westerberg static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size, 4859fb1e654SMika Westerberg bool transmit, unsigned int flags, 486*afe704a2SMika Westerberg int e2e_tx_hop, u16 sof_mask, u16 eof_mask, 4874ffe722eSMika Westerberg void (*start_poll)(void *), 4884ffe722eSMika Westerberg void *poll_data) 48916603153SAndreas Noever { 49016603153SAndreas Noever struct tb_ring *ring = NULL; 491daa5140fSMika Westerberg 492daa5140fSMika Westerberg dev_dbg(&nhi->pdev->dev, "allocating %s ring %d of size %d\n", 49316603153SAndreas Noever transmit ? "TX" : "RX", hop, size); 49416603153SAndreas Noever 49516603153SAndreas Noever ring = kzalloc(sizeof(*ring), GFP_KERNEL); 49616603153SAndreas Noever if (!ring) 49759120e06SMika Westerberg return NULL; 49816603153SAndreas Noever 49922b7de10SMika Westerberg spin_lock_init(&ring->lock); 50016603153SAndreas Noever INIT_LIST_HEAD(&ring->queue); 50116603153SAndreas Noever INIT_LIST_HEAD(&ring->in_flight); 50216603153SAndreas Noever INIT_WORK(&ring->work, ring_work); 50316603153SAndreas Noever 50416603153SAndreas Noever ring->nhi = nhi; 50516603153SAndreas Noever ring->hop = hop; 50616603153SAndreas Noever ring->is_tx = transmit; 50716603153SAndreas Noever ring->size = size; 508046bee1fSMika Westerberg ring->flags = flags; 509*afe704a2SMika Westerberg ring->e2e_tx_hop = e2e_tx_hop; 5109fb1e654SMika Westerberg ring->sof_mask = sof_mask; 5119fb1e654SMika Westerberg ring->eof_mask = eof_mask; 51216603153SAndreas Noever ring->head = 0; 51316603153SAndreas Noever ring->tail = 0; 51416603153SAndreas Noever ring->running = false; 5154ffe722eSMika Westerberg ring->start_poll = start_poll; 5164ffe722eSMika Westerberg ring->poll_data = poll_data; 517046bee1fSMika Westerberg 51816603153SAndreas Noever ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev, 51916603153SAndreas Noever size * sizeof(*ring->descriptors), 52016603153SAndreas Noever &ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO); 52116603153SAndreas Noever if (!ring->descriptors) 52259120e06SMika Westerberg goto err_free_ring; 52316603153SAndreas Noever 52459120e06SMika Westerberg if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND)) 52559120e06SMika Westerberg goto err_free_descs; 52659120e06SMika Westerberg 5279a01c7c2SMika Westerberg if (nhi_alloc_hop(nhi, ring)) 52859120e06SMika Westerberg goto err_release_msix; 52959120e06SMika Westerberg 53016603153SAndreas Noever return ring; 53116603153SAndreas Noever 53259120e06SMika Westerberg err_release_msix: 53359120e06SMika Westerberg ring_release_msix(ring); 53459120e06SMika Westerberg err_free_descs: 53559120e06SMika Westerberg dma_free_coherent(&ring->nhi->pdev->dev, 53659120e06SMika Westerberg ring->size * sizeof(*ring->descriptors), 53759120e06SMika Westerberg ring->descriptors, ring->descriptors_dma); 53859120e06SMika Westerberg err_free_ring: 53916603153SAndreas Noever kfree(ring); 54059120e06SMika Westerberg 54116603153SAndreas Noever return NULL; 54216603153SAndreas Noever } 54316603153SAndreas Noever 5443b3d9f4dSMika Westerberg /** 5453b3d9f4dSMika Westerberg * tb_ring_alloc_tx() - Allocate DMA ring for transmit 5463b3d9f4dSMika Westerberg * @nhi: Pointer to the NHI the ring is to be allocated 5473b3d9f4dSMika Westerberg * @hop: HopID (ring) to allocate 5483b3d9f4dSMika Westerberg * @size: Number of entries in the ring 5493b3d9f4dSMika Westerberg * @flags: Flags for the ring 5503b3d9f4dSMika Westerberg */ 5513b3d9f4dSMika Westerberg struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size, 552046bee1fSMika Westerberg unsigned int flags) 55316603153SAndreas Noever { 554*afe704a2SMika Westerberg return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0, 0, NULL, NULL); 55516603153SAndreas Noever } 5563b3d9f4dSMika Westerberg EXPORT_SYMBOL_GPL(tb_ring_alloc_tx); 55716603153SAndreas Noever 55816603153SAndreas Noever /** 5593b3d9f4dSMika Westerberg * tb_ring_alloc_rx() - Allocate DMA ring for receive 5603b3d9f4dSMika Westerberg * @nhi: Pointer to the NHI the ring is to be allocated 5619a01c7c2SMika Westerberg * @hop: HopID (ring) to allocate. Pass %-1 for automatic allocation. 5623b3d9f4dSMika Westerberg * @size: Number of entries in the ring 5633b3d9f4dSMika Westerberg * @flags: Flags for the ring 564*afe704a2SMika Westerberg * @e2e_tx_hop: Transmit HopID when E2E is enabled in @flags 5653b3d9f4dSMika Westerberg * @sof_mask: Mask of PDF values that start a frame 5663b3d9f4dSMika Westerberg * @eof_mask: Mask of PDF values that end a frame 5674ffe722eSMika Westerberg * @start_poll: If not %NULL the ring will call this function when an 5684ffe722eSMika Westerberg * interrupt is triggered and masked, instead of callback 5694ffe722eSMika Westerberg * in each Rx frame. 5704ffe722eSMika Westerberg * @poll_data: Optional data passed to @start_poll 57116603153SAndreas Noever */ 5723b3d9f4dSMika Westerberg struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size, 573*afe704a2SMika Westerberg unsigned int flags, int e2e_tx_hop, 574*afe704a2SMika Westerberg u16 sof_mask, u16 eof_mask, 5754ffe722eSMika Westerberg void (*start_poll)(void *), void *poll_data) 5763b3d9f4dSMika Westerberg { 577*afe704a2SMika Westerberg return tb_ring_alloc(nhi, hop, size, false, flags, e2e_tx_hop, sof_mask, eof_mask, 5784ffe722eSMika Westerberg start_poll, poll_data); 5793b3d9f4dSMika Westerberg } 5803b3d9f4dSMika Westerberg EXPORT_SYMBOL_GPL(tb_ring_alloc_rx); 5813b3d9f4dSMika Westerberg 5823b3d9f4dSMika Westerberg /** 5833b3d9f4dSMika Westerberg * tb_ring_start() - enable a ring 5843b3d9f4dSMika Westerberg * 5853b3d9f4dSMika Westerberg * Must not be invoked in parallel with tb_ring_stop(). 5863b3d9f4dSMika Westerberg */ 5873b3d9f4dSMika Westerberg void tb_ring_start(struct tb_ring *ring) 58816603153SAndreas Noever { 5899fb1e654SMika Westerberg u16 frame_size; 5909fb1e654SMika Westerberg u32 flags; 5919fb1e654SMika Westerberg 59259120e06SMika Westerberg spin_lock_irq(&ring->nhi->lock); 59359120e06SMika Westerberg spin_lock(&ring->lock); 594bdccf295SMika Westerberg if (ring->nhi->going_away) 595bdccf295SMika Westerberg goto err; 59616603153SAndreas Noever if (ring->running) { 59716603153SAndreas Noever dev_WARN(&ring->nhi->pdev->dev, "ring already started\n"); 59816603153SAndreas Noever goto err; 59916603153SAndreas Noever } 600daa5140fSMika Westerberg dev_dbg(&ring->nhi->pdev->dev, "starting %s %d\n", 60116603153SAndreas Noever RING_TYPE(ring), ring->hop); 60216603153SAndreas Noever 6039fb1e654SMika Westerberg if (ring->flags & RING_FLAG_FRAME) { 6049fb1e654SMika Westerberg /* Means 4096 */ 6059fb1e654SMika Westerberg frame_size = 0; 6069fb1e654SMika Westerberg flags = RING_FLAG_ENABLE; 6079fb1e654SMika Westerberg } else { 6089fb1e654SMika Westerberg frame_size = TB_FRAME_SIZE; 6099fb1e654SMika Westerberg flags = RING_FLAG_ENABLE | RING_FLAG_RAW; 6109fb1e654SMika Westerberg } 6119fb1e654SMika Westerberg 61216603153SAndreas Noever ring_iowrite64desc(ring, ring->descriptors_dma, 0); 61316603153SAndreas Noever if (ring->is_tx) { 61416603153SAndreas Noever ring_iowrite32desc(ring, ring->size, 12); 61516603153SAndreas Noever ring_iowrite32options(ring, 0, 4); /* time releated ? */ 6169fb1e654SMika Westerberg ring_iowrite32options(ring, flags, 0); 61716603153SAndreas Noever } else { 6189fb1e654SMika Westerberg u32 sof_eof_mask = ring->sof_mask << 16 | ring->eof_mask; 6199fb1e654SMika Westerberg 6209fb1e654SMika Westerberg ring_iowrite32desc(ring, (frame_size << 16) | ring->size, 12); 6219fb1e654SMika Westerberg ring_iowrite32options(ring, sof_eof_mask, 4); 6229fb1e654SMika Westerberg ring_iowrite32options(ring, flags, 0); 62316603153SAndreas Noever } 624*afe704a2SMika Westerberg 625*afe704a2SMika Westerberg /* 626*afe704a2SMika Westerberg * Now that the ring valid bit is set we can configure E2E if 627*afe704a2SMika Westerberg * enabled for the ring. 628*afe704a2SMika Westerberg */ 629*afe704a2SMika Westerberg if (ring->flags & RING_FLAG_E2E) { 630*afe704a2SMika Westerberg if (!ring->is_tx) { 631*afe704a2SMika Westerberg u32 hop; 632*afe704a2SMika Westerberg 633*afe704a2SMika Westerberg hop = ring->e2e_tx_hop << REG_RX_OPTIONS_E2E_HOP_SHIFT; 634*afe704a2SMika Westerberg hop &= REG_RX_OPTIONS_E2E_HOP_MASK; 635*afe704a2SMika Westerberg flags |= hop; 636*afe704a2SMika Westerberg 637*afe704a2SMika Westerberg dev_dbg(&ring->nhi->pdev->dev, 638*afe704a2SMika Westerberg "enabling E2E for %s %d with TX HopID %d\n", 639*afe704a2SMika Westerberg RING_TYPE(ring), ring->hop, ring->e2e_tx_hop); 640*afe704a2SMika Westerberg } else { 641*afe704a2SMika Westerberg dev_dbg(&ring->nhi->pdev->dev, "enabling E2E for %s %d\n", 642*afe704a2SMika Westerberg RING_TYPE(ring), ring->hop); 643*afe704a2SMika Westerberg } 644*afe704a2SMika Westerberg 645*afe704a2SMika Westerberg flags |= RING_FLAG_E2E_FLOW_CONTROL; 646*afe704a2SMika Westerberg ring_iowrite32options(ring, flags, 0); 647*afe704a2SMika Westerberg } 648*afe704a2SMika Westerberg 64916603153SAndreas Noever ring_interrupt_active(ring, true); 65016603153SAndreas Noever ring->running = true; 65116603153SAndreas Noever err: 65259120e06SMika Westerberg spin_unlock(&ring->lock); 65359120e06SMika Westerberg spin_unlock_irq(&ring->nhi->lock); 65416603153SAndreas Noever } 6553b3d9f4dSMika Westerberg EXPORT_SYMBOL_GPL(tb_ring_start); 65616603153SAndreas Noever 65716603153SAndreas Noever /** 6583b3d9f4dSMika Westerberg * tb_ring_stop() - shutdown a ring 65916603153SAndreas Noever * 66016603153SAndreas Noever * Must not be invoked from a callback. 66116603153SAndreas Noever * 6623b3d9f4dSMika Westerberg * This method will disable the ring. Further calls to 6633b3d9f4dSMika Westerberg * tb_ring_tx/tb_ring_rx will return -ESHUTDOWN until ring_stop has been 6643b3d9f4dSMika Westerberg * called. 66516603153SAndreas Noever * 66616603153SAndreas Noever * All enqueued frames will be canceled and their callbacks will be executed 66716603153SAndreas Noever * with frame->canceled set to true (on the callback thread). This method 66816603153SAndreas Noever * returns only after all callback invocations have finished. 66916603153SAndreas Noever */ 6703b3d9f4dSMika Westerberg void tb_ring_stop(struct tb_ring *ring) 67116603153SAndreas Noever { 67259120e06SMika Westerberg spin_lock_irq(&ring->nhi->lock); 67359120e06SMika Westerberg spin_lock(&ring->lock); 674daa5140fSMika Westerberg dev_dbg(&ring->nhi->pdev->dev, "stopping %s %d\n", 67516603153SAndreas Noever RING_TYPE(ring), ring->hop); 676bdccf295SMika Westerberg if (ring->nhi->going_away) 677bdccf295SMika Westerberg goto err; 67816603153SAndreas Noever if (!ring->running) { 67916603153SAndreas Noever dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n", 68016603153SAndreas Noever RING_TYPE(ring), ring->hop); 68116603153SAndreas Noever goto err; 68216603153SAndreas Noever } 68316603153SAndreas Noever ring_interrupt_active(ring, false); 68416603153SAndreas Noever 68516603153SAndreas Noever ring_iowrite32options(ring, 0, 0); 68616603153SAndreas Noever ring_iowrite64desc(ring, 0, 0); 68794379521SMika Westerberg ring_iowrite32desc(ring, 0, 8); 68816603153SAndreas Noever ring_iowrite32desc(ring, 0, 12); 68916603153SAndreas Noever ring->head = 0; 69016603153SAndreas Noever ring->tail = 0; 69116603153SAndreas Noever ring->running = false; 69216603153SAndreas Noever 69316603153SAndreas Noever err: 69459120e06SMika Westerberg spin_unlock(&ring->lock); 69559120e06SMika Westerberg spin_unlock_irq(&ring->nhi->lock); 69616603153SAndreas Noever 69716603153SAndreas Noever /* 69816603153SAndreas Noever * schedule ring->work to invoke callbacks on all remaining frames. 69916603153SAndreas Noever */ 70016603153SAndreas Noever schedule_work(&ring->work); 70116603153SAndreas Noever flush_work(&ring->work); 70216603153SAndreas Noever } 7033b3d9f4dSMika Westerberg EXPORT_SYMBOL_GPL(tb_ring_stop); 70416603153SAndreas Noever 70516603153SAndreas Noever /* 7063b3d9f4dSMika Westerberg * tb_ring_free() - free ring 70716603153SAndreas Noever * 70816603153SAndreas Noever * When this method returns all invocations of ring->callback will have 70916603153SAndreas Noever * finished. 71016603153SAndreas Noever * 71116603153SAndreas Noever * Ring must be stopped. 71216603153SAndreas Noever * 71316603153SAndreas Noever * Must NOT be called from ring_frame->callback! 71416603153SAndreas Noever */ 7153b3d9f4dSMika Westerberg void tb_ring_free(struct tb_ring *ring) 71616603153SAndreas Noever { 71759120e06SMika Westerberg spin_lock_irq(&ring->nhi->lock); 71816603153SAndreas Noever /* 71916603153SAndreas Noever * Dissociate the ring from the NHI. This also ensures that 72016603153SAndreas Noever * nhi_interrupt_work cannot reschedule ring->work. 72116603153SAndreas Noever */ 72216603153SAndreas Noever if (ring->is_tx) 72316603153SAndreas Noever ring->nhi->tx_rings[ring->hop] = NULL; 72416603153SAndreas Noever else 72516603153SAndreas Noever ring->nhi->rx_rings[ring->hop] = NULL; 72616603153SAndreas Noever 72716603153SAndreas Noever if (ring->running) { 72816603153SAndreas Noever dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n", 72916603153SAndreas Noever RING_TYPE(ring), ring->hop); 73016603153SAndreas Noever } 7314ffe722eSMika Westerberg spin_unlock_irq(&ring->nhi->lock); 73216603153SAndreas Noever 733046bee1fSMika Westerberg ring_release_msix(ring); 734046bee1fSMika Westerberg 73516603153SAndreas Noever dma_free_coherent(&ring->nhi->pdev->dev, 73616603153SAndreas Noever ring->size * sizeof(*ring->descriptors), 73716603153SAndreas Noever ring->descriptors, ring->descriptors_dma); 73816603153SAndreas Noever 739f19b72c6SSachin Kamat ring->descriptors = NULL; 74016603153SAndreas Noever ring->descriptors_dma = 0; 74116603153SAndreas Noever 74216603153SAndreas Noever 743daa5140fSMika Westerberg dev_dbg(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring), 74416603153SAndreas Noever ring->hop); 74516603153SAndreas Noever 74616603153SAndreas Noever /** 747046bee1fSMika Westerberg * ring->work can no longer be scheduled (it is scheduled only 748046bee1fSMika Westerberg * by nhi_interrupt_work, ring_stop and ring_msix). Wait for it 749046bee1fSMika Westerberg * to finish before freeing the ring. 75016603153SAndreas Noever */ 75116603153SAndreas Noever flush_work(&ring->work); 75216603153SAndreas Noever kfree(ring); 75316603153SAndreas Noever } 7543b3d9f4dSMika Westerberg EXPORT_SYMBOL_GPL(tb_ring_free); 75516603153SAndreas Noever 756cd446ee2SMika Westerberg /** 757cd446ee2SMika Westerberg * nhi_mailbox_cmd() - Send a command through NHI mailbox 758cd446ee2SMika Westerberg * @nhi: Pointer to the NHI structure 759cd446ee2SMika Westerberg * @cmd: Command to send 760cd446ee2SMika Westerberg * @data: Data to be send with the command 761cd446ee2SMika Westerberg * 762cd446ee2SMika Westerberg * Sends mailbox command to the firmware running on NHI. Returns %0 in 763cd446ee2SMika Westerberg * case of success and negative errno in case of failure. 764cd446ee2SMika Westerberg */ 765cd446ee2SMika Westerberg int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data) 766cd446ee2SMika Westerberg { 767cd446ee2SMika Westerberg ktime_t timeout; 768cd446ee2SMika Westerberg u32 val; 769cd446ee2SMika Westerberg 770cd446ee2SMika Westerberg iowrite32(data, nhi->iobase + REG_INMAIL_DATA); 771cd446ee2SMika Westerberg 772cd446ee2SMika Westerberg val = ioread32(nhi->iobase + REG_INMAIL_CMD); 773cd446ee2SMika Westerberg val &= ~(REG_INMAIL_CMD_MASK | REG_INMAIL_ERROR); 774cd446ee2SMika Westerberg val |= REG_INMAIL_OP_REQUEST | cmd; 775cd446ee2SMika Westerberg iowrite32(val, nhi->iobase + REG_INMAIL_CMD); 776cd446ee2SMika Westerberg 777cd446ee2SMika Westerberg timeout = ktime_add_ms(ktime_get(), NHI_MAILBOX_TIMEOUT); 778cd446ee2SMika Westerberg do { 779cd446ee2SMika Westerberg val = ioread32(nhi->iobase + REG_INMAIL_CMD); 780cd446ee2SMika Westerberg if (!(val & REG_INMAIL_OP_REQUEST)) 781cd446ee2SMika Westerberg break; 782cd446ee2SMika Westerberg usleep_range(10, 20); 783cd446ee2SMika Westerberg } while (ktime_before(ktime_get(), timeout)); 784cd446ee2SMika Westerberg 785cd446ee2SMika Westerberg if (val & REG_INMAIL_OP_REQUEST) 786cd446ee2SMika Westerberg return -ETIMEDOUT; 787cd446ee2SMika Westerberg if (val & REG_INMAIL_ERROR) 788cd446ee2SMika Westerberg return -EIO; 789cd446ee2SMika Westerberg 790cd446ee2SMika Westerberg return 0; 791cd446ee2SMika Westerberg } 792cd446ee2SMika Westerberg 793cd446ee2SMika Westerberg /** 794cd446ee2SMika Westerberg * nhi_mailbox_mode() - Return current firmware operation mode 795cd446ee2SMika Westerberg * @nhi: Pointer to the NHI structure 796cd446ee2SMika Westerberg * 797cd446ee2SMika Westerberg * The function reads current firmware operation mode using NHI mailbox 798cd446ee2SMika Westerberg * registers and returns it to the caller. 799cd446ee2SMika Westerberg */ 800cd446ee2SMika Westerberg enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi) 801cd446ee2SMika Westerberg { 802cd446ee2SMika Westerberg u32 val; 803cd446ee2SMika Westerberg 804cd446ee2SMika Westerberg val = ioread32(nhi->iobase + REG_OUTMAIL_CMD); 805cd446ee2SMika Westerberg val &= REG_OUTMAIL_CMD_OPMODE_MASK; 806cd446ee2SMika Westerberg val >>= REG_OUTMAIL_CMD_OPMODE_SHIFT; 807cd446ee2SMika Westerberg 808cd446ee2SMika Westerberg return (enum nhi_fw_mode)val; 809cd446ee2SMika Westerberg } 810cd446ee2SMika Westerberg 81116603153SAndreas Noever static void nhi_interrupt_work(struct work_struct *work) 81216603153SAndreas Noever { 81316603153SAndreas Noever struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work); 81416603153SAndreas Noever int value = 0; /* Suppress uninitialized usage warning. */ 81516603153SAndreas Noever int bit; 81616603153SAndreas Noever int hop = -1; 81716603153SAndreas Noever int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */ 81816603153SAndreas Noever struct tb_ring *ring; 81916603153SAndreas Noever 82059120e06SMika Westerberg spin_lock_irq(&nhi->lock); 82116603153SAndreas Noever 82216603153SAndreas Noever /* 82316603153SAndreas Noever * Starting at REG_RING_NOTIFY_BASE there are three status bitfields 82416603153SAndreas Noever * (TX, RX, RX overflow). We iterate over the bits and read a new 82516603153SAndreas Noever * dwords as required. The registers are cleared on read. 82616603153SAndreas Noever */ 82716603153SAndreas Noever for (bit = 0; bit < 3 * nhi->hop_count; bit++) { 82816603153SAndreas Noever if (bit % 32 == 0) 82916603153SAndreas Noever value = ioread32(nhi->iobase 83016603153SAndreas Noever + REG_RING_NOTIFY_BASE 83116603153SAndreas Noever + 4 * (bit / 32)); 83216603153SAndreas Noever if (++hop == nhi->hop_count) { 83316603153SAndreas Noever hop = 0; 83416603153SAndreas Noever type++; 83516603153SAndreas Noever } 83616603153SAndreas Noever if ((value & (1 << (bit % 32))) == 0) 83716603153SAndreas Noever continue; 83816603153SAndreas Noever if (type == 2) { 83916603153SAndreas Noever dev_warn(&nhi->pdev->dev, 84016603153SAndreas Noever "RX overflow for ring %d\n", 84116603153SAndreas Noever hop); 84216603153SAndreas Noever continue; 84316603153SAndreas Noever } 84416603153SAndreas Noever if (type == 0) 84516603153SAndreas Noever ring = nhi->tx_rings[hop]; 84616603153SAndreas Noever else 84716603153SAndreas Noever ring = nhi->rx_rings[hop]; 84816603153SAndreas Noever if (ring == NULL) { 84916603153SAndreas Noever dev_warn(&nhi->pdev->dev, 85016603153SAndreas Noever "got interrupt for inactive %s ring %d\n", 85116603153SAndreas Noever type ? "RX" : "TX", 85216603153SAndreas Noever hop); 85316603153SAndreas Noever continue; 85416603153SAndreas Noever } 8554ffe722eSMika Westerberg 8564ffe722eSMika Westerberg spin_lock(&ring->lock); 8574ffe722eSMika Westerberg __ring_interrupt(ring); 8584ffe722eSMika Westerberg spin_unlock(&ring->lock); 85916603153SAndreas Noever } 86059120e06SMika Westerberg spin_unlock_irq(&nhi->lock); 86116603153SAndreas Noever } 86216603153SAndreas Noever 86316603153SAndreas Noever static irqreturn_t nhi_msi(int irq, void *data) 86416603153SAndreas Noever { 86516603153SAndreas Noever struct tb_nhi *nhi = data; 86616603153SAndreas Noever schedule_work(&nhi->interrupt_work); 86716603153SAndreas Noever return IRQ_HANDLED; 86816603153SAndreas Noever } 86916603153SAndreas Noever 8703cdb9446SMika Westerberg static int __nhi_suspend_noirq(struct device *dev, bool wakeup) 87123dd5bb4SAndreas Noever { 87223dd5bb4SAndreas Noever struct pci_dev *pdev = to_pci_dev(dev); 87323dd5bb4SAndreas Noever struct tb *tb = pci_get_drvdata(pdev); 8743cdb9446SMika Westerberg struct tb_nhi *nhi = tb->nhi; 8753cdb9446SMika Westerberg int ret; 8769d3cce0bSMika Westerberg 8773cdb9446SMika Westerberg ret = tb_domain_suspend_noirq(tb); 8783cdb9446SMika Westerberg if (ret) 8793cdb9446SMika Westerberg return ret; 8803cdb9446SMika Westerberg 8813cdb9446SMika Westerberg if (nhi->ops && nhi->ops->suspend_noirq) { 8823cdb9446SMika Westerberg ret = nhi->ops->suspend_noirq(tb->nhi, wakeup); 8833cdb9446SMika Westerberg if (ret) 8843cdb9446SMika Westerberg return ret; 8853cdb9446SMika Westerberg } 8863cdb9446SMika Westerberg 8873cdb9446SMika Westerberg return 0; 8883cdb9446SMika Westerberg } 8893cdb9446SMika Westerberg 8903cdb9446SMika Westerberg static int nhi_suspend_noirq(struct device *dev) 8913cdb9446SMika Westerberg { 8923cdb9446SMika Westerberg return __nhi_suspend_noirq(dev, device_may_wakeup(dev)); 8933cdb9446SMika Westerberg } 8943cdb9446SMika Westerberg 895884e4d57SMika Westerberg static int nhi_freeze_noirq(struct device *dev) 896884e4d57SMika Westerberg { 897884e4d57SMika Westerberg struct pci_dev *pdev = to_pci_dev(dev); 898884e4d57SMika Westerberg struct tb *tb = pci_get_drvdata(pdev); 899884e4d57SMika Westerberg 900884e4d57SMika Westerberg return tb_domain_freeze_noirq(tb); 901884e4d57SMika Westerberg } 902884e4d57SMika Westerberg 903884e4d57SMika Westerberg static int nhi_thaw_noirq(struct device *dev) 904884e4d57SMika Westerberg { 905884e4d57SMika Westerberg struct pci_dev *pdev = to_pci_dev(dev); 906884e4d57SMika Westerberg struct tb *tb = pci_get_drvdata(pdev); 907884e4d57SMika Westerberg 908884e4d57SMika Westerberg return tb_domain_thaw_noirq(tb); 909884e4d57SMika Westerberg } 910884e4d57SMika Westerberg 9113cdb9446SMika Westerberg static bool nhi_wake_supported(struct pci_dev *pdev) 9123cdb9446SMika Westerberg { 9133cdb9446SMika Westerberg u8 val; 9143cdb9446SMika Westerberg 9153cdb9446SMika Westerberg /* 9163cdb9446SMika Westerberg * If power rails are sustainable for wakeup from S4 this 9173cdb9446SMika Westerberg * property is set by the BIOS. 9183cdb9446SMika Westerberg */ 9193cdb9446SMika Westerberg if (device_property_read_u8(&pdev->dev, "WAKE_SUPPORTED", &val)) 9203cdb9446SMika Westerberg return !!val; 9213cdb9446SMika Westerberg 9223cdb9446SMika Westerberg return true; 9233cdb9446SMika Westerberg } 9243cdb9446SMika Westerberg 9253cdb9446SMika Westerberg static int nhi_poweroff_noirq(struct device *dev) 9263cdb9446SMika Westerberg { 9273cdb9446SMika Westerberg struct pci_dev *pdev = to_pci_dev(dev); 9283cdb9446SMika Westerberg bool wakeup; 9293cdb9446SMika Westerberg 9303cdb9446SMika Westerberg wakeup = device_may_wakeup(dev) && nhi_wake_supported(pdev); 9313cdb9446SMika Westerberg return __nhi_suspend_noirq(dev, wakeup); 93223dd5bb4SAndreas Noever } 93323dd5bb4SAndreas Noever 9348c6bba10SMika Westerberg static void nhi_enable_int_throttling(struct tb_nhi *nhi) 9358c6bba10SMika Westerberg { 9368c6bba10SMika Westerberg /* Throttling is specified in 256ns increments */ 9378c6bba10SMika Westerberg u32 throttle = DIV_ROUND_UP(128 * NSEC_PER_USEC, 256); 9388c6bba10SMika Westerberg unsigned int i; 9398c6bba10SMika Westerberg 9408c6bba10SMika Westerberg /* 9418c6bba10SMika Westerberg * Configure interrupt throttling for all vectors even if we 9428c6bba10SMika Westerberg * only use few. 9438c6bba10SMika Westerberg */ 9448c6bba10SMika Westerberg for (i = 0; i < MSIX_MAX_VECS; i++) { 9458c6bba10SMika Westerberg u32 reg = REG_INT_THROTTLING_RATE + i * 4; 9468c6bba10SMika Westerberg iowrite32(throttle, nhi->iobase + reg); 9478c6bba10SMika Westerberg } 9488c6bba10SMika Westerberg } 9498c6bba10SMika Westerberg 95023dd5bb4SAndreas Noever static int nhi_resume_noirq(struct device *dev) 95123dd5bb4SAndreas Noever { 95223dd5bb4SAndreas Noever struct pci_dev *pdev = to_pci_dev(dev); 95323dd5bb4SAndreas Noever struct tb *tb = pci_get_drvdata(pdev); 9543cdb9446SMika Westerberg struct tb_nhi *nhi = tb->nhi; 9553cdb9446SMika Westerberg int ret; 9569d3cce0bSMika Westerberg 957bdccf295SMika Westerberg /* 958bdccf295SMika Westerberg * Check that the device is still there. It may be that the user 959bdccf295SMika Westerberg * unplugged last device which causes the host controller to go 960bdccf295SMika Westerberg * away on PCs. 961bdccf295SMika Westerberg */ 9623cdb9446SMika Westerberg if (!pci_device_is_present(pdev)) { 9633cdb9446SMika Westerberg nhi->going_away = true; 9643cdb9446SMika Westerberg } else { 9653cdb9446SMika Westerberg if (nhi->ops && nhi->ops->resume_noirq) { 9663cdb9446SMika Westerberg ret = nhi->ops->resume_noirq(nhi); 9673cdb9446SMika Westerberg if (ret) 9683cdb9446SMika Westerberg return ret; 9693cdb9446SMika Westerberg } 9708c6bba10SMika Westerberg nhi_enable_int_throttling(tb->nhi); 9713cdb9446SMika Westerberg } 972bdccf295SMika Westerberg 9739d3cce0bSMika Westerberg return tb_domain_resume_noirq(tb); 97423dd5bb4SAndreas Noever } 97523dd5bb4SAndreas Noever 976f67cf491SMika Westerberg static int nhi_suspend(struct device *dev) 977f67cf491SMika Westerberg { 978f67cf491SMika Westerberg struct pci_dev *pdev = to_pci_dev(dev); 979f67cf491SMika Westerberg struct tb *tb = pci_get_drvdata(pdev); 980f67cf491SMika Westerberg 981f67cf491SMika Westerberg return tb_domain_suspend(tb); 982f67cf491SMika Westerberg } 983f67cf491SMika Westerberg 984f67cf491SMika Westerberg static void nhi_complete(struct device *dev) 985f67cf491SMika Westerberg { 986f67cf491SMika Westerberg struct pci_dev *pdev = to_pci_dev(dev); 987f67cf491SMika Westerberg struct tb *tb = pci_get_drvdata(pdev); 988f67cf491SMika Westerberg 9892d8ff0b5SMika Westerberg /* 9902d8ff0b5SMika Westerberg * If we were runtime suspended when system suspend started, 9912d8ff0b5SMika Westerberg * schedule runtime resume now. It should bring the domain back 9922d8ff0b5SMika Westerberg * to functional state. 9932d8ff0b5SMika Westerberg */ 9942d8ff0b5SMika Westerberg if (pm_runtime_suspended(&pdev->dev)) 9952d8ff0b5SMika Westerberg pm_runtime_resume(&pdev->dev); 9962d8ff0b5SMika Westerberg else 997f67cf491SMika Westerberg tb_domain_complete(tb); 998f67cf491SMika Westerberg } 999f67cf491SMika Westerberg 10002d8ff0b5SMika Westerberg static int nhi_runtime_suspend(struct device *dev) 10012d8ff0b5SMika Westerberg { 10022d8ff0b5SMika Westerberg struct pci_dev *pdev = to_pci_dev(dev); 10032d8ff0b5SMika Westerberg struct tb *tb = pci_get_drvdata(pdev); 10043cdb9446SMika Westerberg struct tb_nhi *nhi = tb->nhi; 10053cdb9446SMika Westerberg int ret; 10062d8ff0b5SMika Westerberg 10073cdb9446SMika Westerberg ret = tb_domain_runtime_suspend(tb); 10083cdb9446SMika Westerberg if (ret) 10093cdb9446SMika Westerberg return ret; 10103cdb9446SMika Westerberg 10113cdb9446SMika Westerberg if (nhi->ops && nhi->ops->runtime_suspend) { 10123cdb9446SMika Westerberg ret = nhi->ops->runtime_suspend(tb->nhi); 10133cdb9446SMika Westerberg if (ret) 10143cdb9446SMika Westerberg return ret; 10153cdb9446SMika Westerberg } 10163cdb9446SMika Westerberg return 0; 10172d8ff0b5SMika Westerberg } 10182d8ff0b5SMika Westerberg 10192d8ff0b5SMika Westerberg static int nhi_runtime_resume(struct device *dev) 10202d8ff0b5SMika Westerberg { 10212d8ff0b5SMika Westerberg struct pci_dev *pdev = to_pci_dev(dev); 10222d8ff0b5SMika Westerberg struct tb *tb = pci_get_drvdata(pdev); 10233cdb9446SMika Westerberg struct tb_nhi *nhi = tb->nhi; 10243cdb9446SMika Westerberg int ret; 10252d8ff0b5SMika Westerberg 10263cdb9446SMika Westerberg if (nhi->ops && nhi->ops->runtime_resume) { 10273cdb9446SMika Westerberg ret = nhi->ops->runtime_resume(nhi); 10283cdb9446SMika Westerberg if (ret) 10293cdb9446SMika Westerberg return ret; 10303cdb9446SMika Westerberg } 10313cdb9446SMika Westerberg 10323cdb9446SMika Westerberg nhi_enable_int_throttling(nhi); 10332d8ff0b5SMika Westerberg return tb_domain_runtime_resume(tb); 10342d8ff0b5SMika Westerberg } 10352d8ff0b5SMika Westerberg 103616603153SAndreas Noever static void nhi_shutdown(struct tb_nhi *nhi) 103716603153SAndreas Noever { 103816603153SAndreas Noever int i; 1039daa5140fSMika Westerberg 1040daa5140fSMika Westerberg dev_dbg(&nhi->pdev->dev, "shutdown\n"); 104116603153SAndreas Noever 104216603153SAndreas Noever for (i = 0; i < nhi->hop_count; i++) { 104316603153SAndreas Noever if (nhi->tx_rings[i]) 104416603153SAndreas Noever dev_WARN(&nhi->pdev->dev, 104516603153SAndreas Noever "TX ring %d is still active\n", i); 104616603153SAndreas Noever if (nhi->rx_rings[i]) 104716603153SAndreas Noever dev_WARN(&nhi->pdev->dev, 104816603153SAndreas Noever "RX ring %d is still active\n", i); 104916603153SAndreas Noever } 105016603153SAndreas Noever nhi_disable_interrupts(nhi); 105116603153SAndreas Noever /* 105216603153SAndreas Noever * We have to release the irq before calling flush_work. Otherwise an 105316603153SAndreas Noever * already executing IRQ handler could call schedule_work again. 105416603153SAndreas Noever */ 1055046bee1fSMika Westerberg if (!nhi->pdev->msix_enabled) { 105616603153SAndreas Noever devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi); 105716603153SAndreas Noever flush_work(&nhi->interrupt_work); 1058046bee1fSMika Westerberg } 1059046bee1fSMika Westerberg ida_destroy(&nhi->msix_ida); 10603cdb9446SMika Westerberg 10613cdb9446SMika Westerberg if (nhi->ops && nhi->ops->shutdown) 10623cdb9446SMika Westerberg nhi->ops->shutdown(nhi); 1063046bee1fSMika Westerberg } 1064046bee1fSMika Westerberg 1065046bee1fSMika Westerberg static int nhi_init_msi(struct tb_nhi *nhi) 1066046bee1fSMika Westerberg { 1067046bee1fSMika Westerberg struct pci_dev *pdev = nhi->pdev; 1068046bee1fSMika Westerberg int res, irq, nvec; 1069046bee1fSMika Westerberg 1070046bee1fSMika Westerberg /* In case someone left them on. */ 1071046bee1fSMika Westerberg nhi_disable_interrupts(nhi); 1072046bee1fSMika Westerberg 10738c6bba10SMika Westerberg nhi_enable_int_throttling(nhi); 10748c6bba10SMika Westerberg 1075046bee1fSMika Westerberg ida_init(&nhi->msix_ida); 1076046bee1fSMika Westerberg 1077046bee1fSMika Westerberg /* 1078046bee1fSMika Westerberg * The NHI has 16 MSI-X vectors or a single MSI. We first try to 1079046bee1fSMika Westerberg * get all MSI-X vectors and if we succeed, each ring will have 1080046bee1fSMika Westerberg * one MSI-X. If for some reason that does not work out, we 1081046bee1fSMika Westerberg * fallback to a single MSI. 1082046bee1fSMika Westerberg */ 1083046bee1fSMika Westerberg nvec = pci_alloc_irq_vectors(pdev, MSIX_MIN_VECS, MSIX_MAX_VECS, 1084046bee1fSMika Westerberg PCI_IRQ_MSIX); 1085046bee1fSMika Westerberg if (nvec < 0) { 1086046bee1fSMika Westerberg nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI); 1087046bee1fSMika Westerberg if (nvec < 0) 1088046bee1fSMika Westerberg return nvec; 1089046bee1fSMika Westerberg 1090046bee1fSMika Westerberg INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work); 1091046bee1fSMika Westerberg 1092046bee1fSMika Westerberg irq = pci_irq_vector(nhi->pdev, 0); 1093046bee1fSMika Westerberg if (irq < 0) 1094046bee1fSMika Westerberg return irq; 1095046bee1fSMika Westerberg 1096046bee1fSMika Westerberg res = devm_request_irq(&pdev->dev, irq, nhi_msi, 1097046bee1fSMika Westerberg IRQF_NO_SUSPEND, "thunderbolt", nhi); 1098046bee1fSMika Westerberg if (res) { 1099046bee1fSMika Westerberg dev_err(&pdev->dev, "request_irq failed, aborting\n"); 1100046bee1fSMika Westerberg return res; 1101046bee1fSMika Westerberg } 1102046bee1fSMika Westerberg } 1103046bee1fSMika Westerberg 1104046bee1fSMika Westerberg return 0; 110516603153SAndreas Noever } 110616603153SAndreas Noever 11073cdb9446SMika Westerberg static bool nhi_imr_valid(struct pci_dev *pdev) 11083cdb9446SMika Westerberg { 11093cdb9446SMika Westerberg u8 val; 11103cdb9446SMika Westerberg 11113cdb9446SMika Westerberg if (!device_property_read_u8(&pdev->dev, "IMR_VALID", &val)) 11123cdb9446SMika Westerberg return !!val; 11133cdb9446SMika Westerberg 11143cdb9446SMika Westerberg return true; 11153cdb9446SMika Westerberg } 11163cdb9446SMika Westerberg 11172b9941e0SMika Westerberg /* 11182b9941e0SMika Westerberg * During suspend the Thunderbolt controller is reset and all PCIe 11192b9941e0SMika Westerberg * tunnels are lost. The NHI driver will try to reestablish all tunnels 11202b9941e0SMika Westerberg * during resume. This adds device links between the tunneled PCIe 11212b9941e0SMika Westerberg * downstream ports and the NHI so that the device core will make sure 11222b9941e0SMika Westerberg * NHI is resumed first before the rest. 11232b9941e0SMika Westerberg */ 11242b9941e0SMika Westerberg static void tb_apple_add_links(struct tb_nhi *nhi) 11252b9941e0SMika Westerberg { 11262b9941e0SMika Westerberg struct pci_dev *upstream, *pdev; 11272b9941e0SMika Westerberg 11282b9941e0SMika Westerberg if (!x86_apple_machine) 11292b9941e0SMika Westerberg return; 11302b9941e0SMika Westerberg 11312b9941e0SMika Westerberg switch (nhi->pdev->device) { 11322b9941e0SMika Westerberg case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE: 11332b9941e0SMika Westerberg case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C: 11342b9941e0SMika Westerberg case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI: 11352b9941e0SMika Westerberg case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI: 11362b9941e0SMika Westerberg break; 11372b9941e0SMika Westerberg default: 11382b9941e0SMika Westerberg return; 11392b9941e0SMika Westerberg } 11402b9941e0SMika Westerberg 11412b9941e0SMika Westerberg upstream = pci_upstream_bridge(nhi->pdev); 11422b9941e0SMika Westerberg while (upstream) { 11432b9941e0SMika Westerberg if (!pci_is_pcie(upstream)) 11442b9941e0SMika Westerberg return; 11452b9941e0SMika Westerberg if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM) 11462b9941e0SMika Westerberg break; 11472b9941e0SMika Westerberg upstream = pci_upstream_bridge(upstream); 11482b9941e0SMika Westerberg } 11492b9941e0SMika Westerberg 11502b9941e0SMika Westerberg if (!upstream) 11512b9941e0SMika Westerberg return; 11522b9941e0SMika Westerberg 11532b9941e0SMika Westerberg /* 11542b9941e0SMika Westerberg * For each hotplug downstream port, create add device link 11552b9941e0SMika Westerberg * back to NHI so that PCIe tunnels can be re-established after 11562b9941e0SMika Westerberg * sleep. 11572b9941e0SMika Westerberg */ 11582b9941e0SMika Westerberg for_each_pci_bridge(pdev, upstream->subordinate) { 11592b9941e0SMika Westerberg const struct device_link *link; 11602b9941e0SMika Westerberg 11612b9941e0SMika Westerberg if (!pci_is_pcie(pdev)) 11622b9941e0SMika Westerberg continue; 11632b9941e0SMika Westerberg if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM || 11642b9941e0SMika Westerberg !pdev->is_hotplug_bridge) 11652b9941e0SMika Westerberg continue; 11662b9941e0SMika Westerberg 11672b9941e0SMika Westerberg link = device_link_add(&pdev->dev, &nhi->pdev->dev, 11682b9941e0SMika Westerberg DL_FLAG_AUTOREMOVE_SUPPLIER | 11692b9941e0SMika Westerberg DL_FLAG_PM_RUNTIME); 11702b9941e0SMika Westerberg if (link) { 11712b9941e0SMika Westerberg dev_dbg(&nhi->pdev->dev, "created link from %s\n", 11722b9941e0SMika Westerberg dev_name(&pdev->dev)); 11732b9941e0SMika Westerberg } else { 11742b9941e0SMika Westerberg dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n", 11752b9941e0SMika Westerberg dev_name(&pdev->dev)); 11762b9941e0SMika Westerberg } 11772b9941e0SMika Westerberg } 11782b9941e0SMika Westerberg } 11792b9941e0SMika Westerberg 118016603153SAndreas Noever static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id) 118116603153SAndreas Noever { 118216603153SAndreas Noever struct tb_nhi *nhi; 1183d6cc51cdSAndreas Noever struct tb *tb; 118416603153SAndreas Noever int res; 118516603153SAndreas Noever 11863cdb9446SMika Westerberg if (!nhi_imr_valid(pdev)) { 11873cdb9446SMika Westerberg dev_warn(&pdev->dev, "firmware image not valid, aborting\n"); 11883cdb9446SMika Westerberg return -ENODEV; 11893cdb9446SMika Westerberg } 11903cdb9446SMika Westerberg 119116603153SAndreas Noever res = pcim_enable_device(pdev); 119216603153SAndreas Noever if (res) { 119316603153SAndreas Noever dev_err(&pdev->dev, "cannot enable PCI device, aborting\n"); 119416603153SAndreas Noever return res; 119516603153SAndreas Noever } 119616603153SAndreas Noever 119716603153SAndreas Noever res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt"); 119816603153SAndreas Noever if (res) { 119916603153SAndreas Noever dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n"); 120016603153SAndreas Noever return res; 120116603153SAndreas Noever } 120216603153SAndreas Noever 120316603153SAndreas Noever nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL); 120416603153SAndreas Noever if (!nhi) 120516603153SAndreas Noever return -ENOMEM; 120616603153SAndreas Noever 120716603153SAndreas Noever nhi->pdev = pdev; 12083cdb9446SMika Westerberg nhi->ops = (const struct tb_nhi_ops *)id->driver_data; 120916603153SAndreas Noever /* cannot fail - table is allocated bin pcim_iomap_regions */ 121016603153SAndreas Noever nhi->iobase = pcim_iomap_table(pdev)[0]; 121116603153SAndreas Noever nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff; 1212177aa362SMika Westerberg dev_dbg(&pdev->dev, "total paths: %d\n", nhi->hop_count); 121316603153SAndreas Noever 12142a211f32SHimangi Saraogi nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count, 12152a211f32SHimangi Saraogi sizeof(*nhi->tx_rings), GFP_KERNEL); 12162a211f32SHimangi Saraogi nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count, 12172a211f32SHimangi Saraogi sizeof(*nhi->rx_rings), GFP_KERNEL); 121816603153SAndreas Noever if (!nhi->tx_rings || !nhi->rx_rings) 121916603153SAndreas Noever return -ENOMEM; 122016603153SAndreas Noever 1221046bee1fSMika Westerberg res = nhi_init_msi(nhi); 122216603153SAndreas Noever if (res) { 1223046bee1fSMika Westerberg dev_err(&pdev->dev, "cannot enable MSI, aborting\n"); 122416603153SAndreas Noever return res; 122516603153SAndreas Noever } 122616603153SAndreas Noever 122759120e06SMika Westerberg spin_lock_init(&nhi->lock); 122816603153SAndreas Noever 1229dba3caf6SMika Westerberg res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 1230dba3caf6SMika Westerberg if (res) 1231dba3caf6SMika Westerberg res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 1232dba3caf6SMika Westerberg if (res) { 1233dba3caf6SMika Westerberg dev_err(&pdev->dev, "failed to set DMA mask\n"); 1234dba3caf6SMika Westerberg return res; 1235dba3caf6SMika Westerberg } 1236dba3caf6SMika Westerberg 123716603153SAndreas Noever pci_set_master(pdev); 123816603153SAndreas Noever 12393cdb9446SMika Westerberg if (nhi->ops && nhi->ops->init) { 12403cdb9446SMika Westerberg res = nhi->ops->init(nhi); 12413cdb9446SMika Westerberg if (res) 12423cdb9446SMika Westerberg return res; 12433cdb9446SMika Westerberg } 12443cdb9446SMika Westerberg 12452b9941e0SMika Westerberg tb_apple_add_links(nhi); 1246b2be2b05SMika Westerberg tb_acpi_add_links(nhi); 12472b9941e0SMika Westerberg 1248f67cf491SMika Westerberg tb = icm_probe(nhi); 12499d3cce0bSMika Westerberg if (!tb) 1250f67cf491SMika Westerberg tb = tb_probe(nhi); 1251f67cf491SMika Westerberg if (!tb) { 1252f67cf491SMika Westerberg dev_err(&nhi->pdev->dev, 1253f67cf491SMika Westerberg "failed to determine connection manager, aborting\n"); 12549d3cce0bSMika Westerberg return -ENODEV; 1255f67cf491SMika Westerberg } 1256f67cf491SMika Westerberg 1257daa5140fSMika Westerberg dev_dbg(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n"); 12589d3cce0bSMika Westerberg 12599d3cce0bSMika Westerberg res = tb_domain_add(tb); 12609d3cce0bSMika Westerberg if (res) { 1261d6cc51cdSAndreas Noever /* 1262d6cc51cdSAndreas Noever * At this point the RX/TX rings might already have been 1263d6cc51cdSAndreas Noever * activated. Do a proper shutdown. 1264d6cc51cdSAndreas Noever */ 12659d3cce0bSMika Westerberg tb_domain_put(tb); 1266d6cc51cdSAndreas Noever nhi_shutdown(nhi); 126768a7a2acSMika Westerberg return res; 1268d6cc51cdSAndreas Noever } 1269d6cc51cdSAndreas Noever pci_set_drvdata(pdev, tb); 127016603153SAndreas Noever 1271b2911a59SMika Westerberg device_wakeup_enable(&pdev->dev); 1272b2911a59SMika Westerberg 12732d8ff0b5SMika Westerberg pm_runtime_allow(&pdev->dev); 12742d8ff0b5SMika Westerberg pm_runtime_set_autosuspend_delay(&pdev->dev, TB_AUTOSUSPEND_DELAY); 12752d8ff0b5SMika Westerberg pm_runtime_use_autosuspend(&pdev->dev); 12762d8ff0b5SMika Westerberg pm_runtime_put_autosuspend(&pdev->dev); 12772d8ff0b5SMika Westerberg 127816603153SAndreas Noever return 0; 127916603153SAndreas Noever } 128016603153SAndreas Noever 128116603153SAndreas Noever static void nhi_remove(struct pci_dev *pdev) 128216603153SAndreas Noever { 1283d6cc51cdSAndreas Noever struct tb *tb = pci_get_drvdata(pdev); 1284d6cc51cdSAndreas Noever struct tb_nhi *nhi = tb->nhi; 12859d3cce0bSMika Westerberg 12862d8ff0b5SMika Westerberg pm_runtime_get_sync(&pdev->dev); 12872d8ff0b5SMika Westerberg pm_runtime_dont_use_autosuspend(&pdev->dev); 12882d8ff0b5SMika Westerberg pm_runtime_forbid(&pdev->dev); 12892d8ff0b5SMika Westerberg 12909d3cce0bSMika Westerberg tb_domain_remove(tb); 129116603153SAndreas Noever nhi_shutdown(nhi); 129216603153SAndreas Noever } 129316603153SAndreas Noever 129423dd5bb4SAndreas Noever /* 129523dd5bb4SAndreas Noever * The tunneled pci bridges are siblings of us. Use resume_noirq to reenable 129623dd5bb4SAndreas Noever * the tunnels asap. A corresponding pci quirk blocks the downstream bridges 129723dd5bb4SAndreas Noever * resume_noirq until we are done. 129823dd5bb4SAndreas Noever */ 129923dd5bb4SAndreas Noever static const struct dev_pm_ops nhi_pm_ops = { 130023dd5bb4SAndreas Noever .suspend_noirq = nhi_suspend_noirq, 130123dd5bb4SAndreas Noever .resume_noirq = nhi_resume_noirq, 1302884e4d57SMika Westerberg .freeze_noirq = nhi_freeze_noirq, /* 130323dd5bb4SAndreas Noever * we just disable hotplug, the 130423dd5bb4SAndreas Noever * pci-tunnels stay alive. 130523dd5bb4SAndreas Noever */ 1306884e4d57SMika Westerberg .thaw_noirq = nhi_thaw_noirq, 130723dd5bb4SAndreas Noever .restore_noirq = nhi_resume_noirq, 1308f67cf491SMika Westerberg .suspend = nhi_suspend, 13093cdb9446SMika Westerberg .poweroff_noirq = nhi_poweroff_noirq, 1310f67cf491SMika Westerberg .poweroff = nhi_suspend, 1311f67cf491SMika Westerberg .complete = nhi_complete, 13122d8ff0b5SMika Westerberg .runtime_suspend = nhi_runtime_suspend, 13132d8ff0b5SMika Westerberg .runtime_resume = nhi_runtime_resume, 131423dd5bb4SAndreas Noever }; 131523dd5bb4SAndreas Noever 1316620863f7SSachin Kamat static struct pci_device_id nhi_ids[] = { 131716603153SAndreas Noever /* 131816603153SAndreas Noever * We have to specify class, the TB bridges use the same device and 13191d111406SLukas Wunner * vendor (sub)id on gen 1 and gen 2 controllers. 132016603153SAndreas Noever */ 132116603153SAndreas Noever { 132216603153SAndreas Noever .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0, 13231d111406SLukas Wunner .vendor = PCI_VENDOR_ID_INTEL, 132419bf4d4fSLukas Wunner .device = PCI_DEVICE_ID_INTEL_LIGHT_RIDGE, 132519bf4d4fSLukas Wunner .subvendor = 0x2222, .subdevice = 0x1111, 132619bf4d4fSLukas Wunner }, 132719bf4d4fSLukas Wunner { 132819bf4d4fSLukas Wunner .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0, 132919bf4d4fSLukas Wunner .vendor = PCI_VENDOR_ID_INTEL, 13301d111406SLukas Wunner .device = PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C, 133116603153SAndreas Noever .subvendor = 0x2222, .subdevice = 0x1111, 133216603153SAndreas Noever }, 133316603153SAndreas Noever { 133416603153SAndreas Noever .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0, 13351d111406SLukas Wunner .vendor = PCI_VENDOR_ID_INTEL, 133682a6a81cSXavier Gnata .device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI, 133782a6a81cSXavier Gnata .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 133882a6a81cSXavier Gnata }, 133982a6a81cSXavier Gnata { 134082a6a81cSXavier Gnata .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0, 134182a6a81cSXavier Gnata .vendor = PCI_VENDOR_ID_INTEL, 13421d111406SLukas Wunner .device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI, 1343a42fb351SKnuth Posern .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 134416603153SAndreas Noever }, 13455e2781bcSMika Westerberg 13465e2781bcSMika Westerberg /* Thunderbolt 3 */ 13475e2781bcSMika Westerberg { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI) }, 13485e2781bcSMika Westerberg { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI) }, 13495e2781bcSMika Westerberg { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI) }, 13505e2781bcSMika Westerberg { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI) }, 13515e2781bcSMika Westerberg { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI) }, 13525e2781bcSMika Westerberg { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI) }, 13535e2781bcSMika Westerberg { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI) }, 13545e2781bcSMika Westerberg { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) }, 13554bac471dSRadion Mirchevsky { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI) }, 13564bac471dSRadion Mirchevsky { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI) }, 13573cdb9446SMika Westerberg { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI0), 13583cdb9446SMika Westerberg .driver_data = (kernel_ulong_t)&icl_nhi_ops }, 13593cdb9446SMika Westerberg { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1), 13603cdb9446SMika Westerberg .driver_data = (kernel_ulong_t)&icl_nhi_ops }, 136157d8df68SMika Westerberg { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI0), 136257d8df68SMika Westerberg .driver_data = (kernel_ulong_t)&icl_nhi_ops }, 136357d8df68SMika Westerberg { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI1), 136457d8df68SMika Westerberg .driver_data = (kernel_ulong_t)&icl_nhi_ops }, 13655e2781bcSMika Westerberg 1366b0407983SMika Westerberg /* Any USB4 compliant host */ 1367b0407983SMika Westerberg { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) }, 1368b0407983SMika Westerberg 136916603153SAndreas Noever { 0,} 137016603153SAndreas Noever }; 137116603153SAndreas Noever 137216603153SAndreas Noever MODULE_DEVICE_TABLE(pci, nhi_ids); 137316603153SAndreas Noever MODULE_LICENSE("GPL"); 137416603153SAndreas Noever 137516603153SAndreas Noever static struct pci_driver nhi_driver = { 137616603153SAndreas Noever .name = "thunderbolt", 137716603153SAndreas Noever .id_table = nhi_ids, 137816603153SAndreas Noever .probe = nhi_probe, 137916603153SAndreas Noever .remove = nhi_remove, 13804caf2511SMaxim Levitsky .shutdown = nhi_remove, 138123dd5bb4SAndreas Noever .driver.pm = &nhi_pm_ops, 138216603153SAndreas Noever }; 138316603153SAndreas Noever 138416603153SAndreas Noever static int __init nhi_init(void) 138516603153SAndreas Noever { 13869d3cce0bSMika Westerberg int ret; 13879d3cce0bSMika Westerberg 13889d3cce0bSMika Westerberg ret = tb_domain_init(); 13899d3cce0bSMika Westerberg if (ret) 13909d3cce0bSMika Westerberg return ret; 13919d3cce0bSMika Westerberg ret = pci_register_driver(&nhi_driver); 13929d3cce0bSMika Westerberg if (ret) 13939d3cce0bSMika Westerberg tb_domain_exit(); 13949d3cce0bSMika Westerberg return ret; 139516603153SAndreas Noever } 139616603153SAndreas Noever 139716603153SAndreas Noever static void __exit nhi_unload(void) 139816603153SAndreas Noever { 139916603153SAndreas Noever pci_unregister_driver(&nhi_driver); 14009d3cce0bSMika Westerberg tb_domain_exit(); 140116603153SAndreas Noever } 140216603153SAndreas Noever 1403eafa717bSMika Westerberg rootfs_initcall(nhi_init); 140416603153SAndreas Noever module_exit(nhi_unload); 1405