/*
 * Thunderbolt Cactus Ridge driver - NHI driver
 *
 * The NHI (native host interface) is the PCI device that allows us to send
 * and receive frames from the Thunderbolt bus.
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/delay.h>

#include "nhi.h"
#include "nhi_regs.h"
#include "tb.h"

#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")

/*
 * Used to enable the end-to-end workaround for missing RX packets. Do
 * not use this ring for anything else.
 */
#define RING_E2E_UNUSED_HOPID	2

/*
 * Minimal number of vectors when we use MSI-X. Two are for the control
 * channel Rx/Tx and the remaining four are for cross domain DMA paths.
 */
#define MSIX_MIN_VECS		6
#define MSIX_MAX_VECS		16

#define NHI_MAILBOX_TIMEOUT	500 /* ms */

static int ring_interrupt_index(struct tb_ring *ring)
{
	int bit = ring->hop;
	if (!ring->is_tx)
		bit += ring->nhi->hop_count;
	return bit;
}

/**
 * ring_interrupt_active() - activate/deactivate interrupts for a single ring
 *
 * ring->nhi->lock must be held.
 */
static void ring_interrupt_active(struct tb_ring *ring, bool active)
{
	int reg = REG_RING_INTERRUPT_BASE +
		  ring_interrupt_index(ring) / 32 * 4;
	int bit = ring_interrupt_index(ring) & 31;
	int mask = 1 << bit;
	u32 old, new;

	if (ring->irq > 0) {
		u32 step, shift, ivr, misc;
		void __iomem *ivr_base;
		int index;

		if (ring->is_tx)
			index = ring->hop;
		else
			index = ring->hop + ring->nhi->hop_count;

		/*
		 * Ask the hardware to clear interrupt status bits automatically
		 * since we already know which interrupt was triggered.
		 */
		misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
		if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) {
			misc |= REG_DMA_MISC_INT_AUTO_CLEAR;
			iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
		}

		ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
		step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
		shift = index % REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
		ivr = ioread32(ivr_base + step);
		ivr &= ~(REG_INT_VEC_ALLOC_MASK << shift);
		if (active)
			ivr |= ring->vector << shift;
		iowrite32(ivr, ivr_base + step);
	}

	old = ioread32(ring->nhi->iobase + reg);
	if (active)
		new = old | mask;
	else
		new = old & ~mask;

	dev_info(&ring->nhi->pdev->dev,
		 "%s interrupt at register %#x bit %d (%#x -> %#x)\n",
		 active ? "enabling" : "disabling", reg, bit, old, new);

	if (new == old)
		dev_WARN(&ring->nhi->pdev->dev,
			 "interrupt for %s %d is already %s\n",
			 RING_TYPE(ring), ring->hop,
			 active ? "enabled" : "disabled");
	iowrite32(new, ring->nhi->iobase + reg);
}
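/*
 * Worked example for the index arithmetic above (an illustration,
 * assuming a 12-hop controller): TX rings occupy interrupt indices
 * 0..hop_count - 1, so the RX ring for hop 5 has index 5 + 12 = 17 and
 * is controlled by bit 17 of the first 32-bit register at
 * REG_RING_INTERRUPT_BASE (17 / 32 * 4 == 0, 17 & 31 == 17).
 */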
/**
 * nhi_disable_interrupts() - disable interrupts for all rings
 *
 * Use only during init and shutdown.
 */
static void nhi_disable_interrupts(struct tb_nhi *nhi)
{
	int i = 0;
	/* disable interrupts */
	for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
		iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);

	/* clear interrupt status bits */
	for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
		ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
}

/* ring helper methods */

static void __iomem *ring_desc_base(struct tb_ring *ring)
{
	void __iomem *io = ring->nhi->iobase;
	io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE;
	io += ring->hop * 16;
	return io;
}

static void __iomem *ring_options_base(struct tb_ring *ring)
{
	void __iomem *io = ring->nhi->iobase;
	io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE;
	io += ring->hop * 32;
	return io;
}

static void ring_iowrite16desc(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite16(value, ring_desc_base(ring) + offset);
}

static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite32(value, ring_desc_base(ring) + offset);
}

static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset)
{
	iowrite32(value, ring_desc_base(ring) + offset);
	iowrite32(value >> 32, ring_desc_base(ring) + offset + 4);
}

static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite32(value, ring_options_base(ring) + offset);
}

static bool ring_full(struct tb_ring *ring)
{
	return ((ring->head + 1) % ring->size) == ring->tail;
}

static bool ring_empty(struct tb_ring *ring)
{
	return ring->head == ring->tail;
}
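/*
 * Note on ring_full()/ring_empty(): one descriptor slot is deliberately
 * sacrificed so that head == tail unambiguously means "empty"; a ring
 * allocated with N descriptors can therefore have at most N - 1 frames
 * posted at a time.
 */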
/**
 * ring_write_descriptors() - post frames from ring->queue to the controller
 *
 * ring->lock is held.
 */
static void ring_write_descriptors(struct tb_ring *ring)
{
	struct ring_frame *frame, *n;
	struct ring_desc *descriptor;
	list_for_each_entry_safe(frame, n, &ring->queue, list) {
		if (ring_full(ring))
			break;
		list_move_tail(&frame->list, &ring->in_flight);
		descriptor = &ring->descriptors[ring->head];
		descriptor->phys = frame->buffer_phy;
		descriptor->time = 0;
		descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT;
		if (ring->is_tx) {
			descriptor->length = frame->size;
			descriptor->eof = frame->eof;
			descriptor->sof = frame->sof;
		}
		ring->head = (ring->head + 1) % ring->size;
		ring_iowrite16desc(ring, ring->head, ring->is_tx ? 10 : 8);
	}
}

/**
 * ring_work() - progress completed frames
 *
 * If the ring is shutting down then all frames are marked as canceled and
 * their callbacks are invoked.
 *
 * Otherwise we collect all completed frames from the ring buffer, write
 * new frames to the ring buffer and invoke the callbacks for the
 * completed frames.
 */
static void ring_work(struct work_struct *work)
{
	struct tb_ring *ring = container_of(work, typeof(*ring), work);
	struct ring_frame *frame;
	bool canceled = false;
	unsigned long flags;
	LIST_HEAD(done);

	spin_lock_irqsave(&ring->lock, flags);

	if (!ring->running) {
		/* Move all frames to done and mark them as canceled. */
		list_splice_tail_init(&ring->in_flight, &done);
		list_splice_tail_init(&ring->queue, &done);
		canceled = true;
		goto invoke_callback;
	}

	while (!ring_empty(ring)) {
		if (!(ring->descriptors[ring->tail].flags
				& RING_DESC_COMPLETED))
			break;
		frame = list_first_entry(&ring->in_flight, typeof(*frame),
					 list);
		list_move_tail(&frame->list, &done);
		if (!ring->is_tx) {
			frame->size = ring->descriptors[ring->tail].length;
			frame->eof = ring->descriptors[ring->tail].eof;
			frame->sof = ring->descriptors[ring->tail].sof;
			frame->flags = ring->descriptors[ring->tail].flags;
		}
		ring->tail = (ring->tail + 1) % ring->size;
	}
	ring_write_descriptors(ring);

invoke_callback:
	/* allow callbacks to schedule new work */
	spin_unlock_irqrestore(&ring->lock, flags);
	while (!list_empty(&done)) {
		frame = list_first_entry(&done, typeof(*frame), list);
		/*
		 * The callback may re-enqueue or delete the frame.
		 * Do not hold on to it.
		 */
		list_del_init(&frame->list);
		frame->callback(ring, frame, canceled);
	}
}

int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ring->lock, flags);
	if (ring->running) {
		list_add_tail(&frame->list, &ring->queue);
		ring_write_descriptors(ring);
	} else {
		ret = -ESHUTDOWN;
	}
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(__tb_ring_enqueue);
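/*
 * A minimal producer sketch for the enqueue path above (illustration
 * only; "my_frame", "my_callback", "dma_addr" and "len" are hypothetical
 * names). The frame belongs to the ring until its callback has run; the
 * callback may re-enqueue or free it:
 *
 *	static void my_callback(struct tb_ring *ring,
 *				struct ring_frame *frame, bool canceled)
 *	{
 *		if (canceled)
 *			return;	// ring is being shut down
 *		// for RX rings frame->size/sof/eof were filled in by ring_work
 *	}
 *
 *	my_frame->buffer_phy = dma_addr;	// DMA-mapped buffer
 *	my_frame->callback = my_callback;
 *	my_frame->size = len;			// TX only; sof/eof likewise
 *	ret = __tb_ring_enqueue(ring, my_frame);	// -ESHUTDOWN if stopped
 */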
static irqreturn_t ring_msix(int irq, void *data)
{
	struct tb_ring *ring = data;

	schedule_work(&ring->work);
	return IRQ_HANDLED;
}

static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
{
	struct tb_nhi *nhi = ring->nhi;
	unsigned long irqflags;
	int ret;

	if (!nhi->pdev->msix_enabled)
		return 0;

	ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
	if (ret < 0)
		return ret;

	ring->vector = ret;

	ring->irq = pci_irq_vector(ring->nhi->pdev, ring->vector);
	if (ring->irq < 0)
		return ring->irq;

	irqflags = no_suspend ? IRQF_NO_SUSPEND : 0;
	return request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
}

static void ring_release_msix(struct tb_ring *ring)
{
	if (ring->irq <= 0)
		return;

	free_irq(ring->irq, ring);
	ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
	ring->vector = 0;
	ring->irq = 0;
}

static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
				     bool transmit, unsigned int flags,
				     u16 sof_mask, u16 eof_mask)
{
	struct tb_ring *ring = NULL;
	dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
		 transmit ? "TX" : "RX", hop, size);

	/* Tx Ring 2 is reserved for E2E workaround */
	if (transmit && hop == RING_E2E_UNUSED_HOPID)
		return NULL;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return NULL;

	spin_lock_init(&ring->lock);
	INIT_LIST_HEAD(&ring->queue);
	INIT_LIST_HEAD(&ring->in_flight);
	INIT_WORK(&ring->work, ring_work);

	ring->nhi = nhi;
	ring->hop = hop;
	ring->is_tx = transmit;
	ring->size = size;
	ring->flags = flags;
	ring->sof_mask = sof_mask;
	ring->eof_mask = eof_mask;
	ring->head = 0;
	ring->tail = 0;
	ring->running = false;

	ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
			size * sizeof(*ring->descriptors),
			&ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
	if (!ring->descriptors)
		goto err_free_ring;

	if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
		goto err_free_descs;

	spin_lock_irq(&nhi->lock);
	if (hop >= nhi->hop_count) {
		dev_WARN(&nhi->pdev->dev, "invalid hop: %d\n", hop);
		goto err_release_msix;
	}
	if (transmit && nhi->tx_rings[hop]) {
		dev_WARN(&nhi->pdev->dev, "TX hop %d already allocated\n", hop);
		goto err_release_msix;
	} else if (!transmit && nhi->rx_rings[hop]) {
		dev_WARN(&nhi->pdev->dev, "RX hop %d already allocated\n", hop);
		goto err_release_msix;
	}
	if (transmit)
		nhi->tx_rings[hop] = ring;
	else
		nhi->rx_rings[hop] = ring;
	spin_unlock_irq(&nhi->lock);

	return ring;

err_release_msix:
	spin_unlock_irq(&nhi->lock);
	ring_release_msix(ring);
err_free_descs:
	dma_free_coherent(&ring->nhi->pdev->dev,
			  ring->size * sizeof(*ring->descriptors),
			  ring->descriptors, ring->descriptors_dma);
err_free_ring:
	kfree(ring);

	return NULL;
}

/**
 * tb_ring_alloc_tx() - Allocate DMA ring for transmit
 * @nhi: Pointer to the NHI the ring is to be allocated from
 * @hop: HopID (ring) to allocate
 * @size: Number of entries in the ring
 * @flags: Flags for the ring
 */
struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags)
{
	return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0);
}
EXPORT_SYMBOL_GPL(tb_ring_alloc_tx);

/**
 * tb_ring_alloc_rx() - Allocate DMA ring for receive
 * @nhi: Pointer to the NHI the ring is to be allocated from
 * @hop: HopID (ring) to allocate
 * @size: Number of entries in the ring
 * @flags: Flags for the ring
 * @sof_mask: Mask of PDF values that start a frame
 * @eof_mask: Mask of PDF values that end a frame
 */
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags, u16 sof_mask,
				 u16 eof_mask)
{
	return tb_ring_alloc(nhi, hop, size, false, flags, sof_mask, eof_mask);
}
EXPORT_SYMBOL_GPL(tb_ring_alloc_rx);
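/*
 * Lifecycle sketch for the ring API exported above (illustration only;
 * error handling is elided and the hop/size values are arbitrary):
 *
 *	ring = tb_ring_alloc_tx(nhi, hop, 16, RING_FLAG_NO_SUSPEND);
 *	if (!ring)
 *		return -ENOMEM;
 *	tb_ring_start(ring);
 *	// ... enqueue frames and handle their callbacks ...
 *	tb_ring_stop(ring);	// cancels pending frames, runs callbacks
 *	tb_ring_free(ring);	// must not be called from a callback
 */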
/**
 * tb_ring_start() - enable a ring
 *
 * Must not be invoked in parallel with tb_ring_stop().
 */
void tb_ring_start(struct tb_ring *ring)
{
	u16 frame_size;
	u32 flags;

	spin_lock_irq(&ring->nhi->lock);
	spin_lock(&ring->lock);
	if (ring->nhi->going_away)
		goto err;
	if (ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
		goto err;
	}
	dev_info(&ring->nhi->pdev->dev, "starting %s %d\n",
		 RING_TYPE(ring), ring->hop);

	if (ring->flags & RING_FLAG_FRAME) {
		/* Means 4096 */
		frame_size = 0;
		flags = RING_FLAG_ENABLE;
	} else {
		frame_size = TB_FRAME_SIZE;
		flags = RING_FLAG_ENABLE | RING_FLAG_RAW;
	}

	if (ring->flags & RING_FLAG_E2E && !ring->is_tx) {
		u32 hop;

		/*
		 * In order not to lose Rx packets we enable the end-to-end
		 * workaround which transfers Rx credits to an unused Tx
		 * HopID.
		 */
		hop = RING_E2E_UNUSED_HOPID << REG_RX_OPTIONS_E2E_HOP_SHIFT;
		hop &= REG_RX_OPTIONS_E2E_HOP_MASK;
		flags |= hop | RING_FLAG_E2E_FLOW_CONTROL;
	}

	ring_iowrite64desc(ring, ring->descriptors_dma, 0);
	if (ring->is_tx) {
		ring_iowrite32desc(ring, ring->size, 12);
		ring_iowrite32options(ring, 0, 4); /* time related? */
		ring_iowrite32options(ring, flags, 0);
	} else {
		u32 sof_eof_mask = ring->sof_mask << 16 | ring->eof_mask;

		ring_iowrite32desc(ring, (frame_size << 16) | ring->size, 12);
		ring_iowrite32options(ring, sof_eof_mask, 4);
		ring_iowrite32options(ring, flags, 0);
	}
	ring_interrupt_active(ring, true);
	ring->running = true;
err:
	spin_unlock(&ring->lock);
	spin_unlock_irq(&ring->nhi->lock);
}
EXPORT_SYMBOL_GPL(tb_ring_start);

/**
 * tb_ring_stop() - shutdown a ring
 *
 * Must not be invoked from a callback.
 *
 * This method will disable the ring. Further calls to
 * tb_ring_tx/tb_ring_rx will return -ESHUTDOWN until the ring has been
 * started again.
 *
 * All enqueued frames will be canceled and their callbacks will be executed
 * with frame->canceled set to true (on the callback thread). This method
 * returns only after all callback invocations have finished.
 */
void tb_ring_stop(struct tb_ring *ring)
{
	spin_lock_irq(&ring->nhi->lock);
	spin_lock(&ring->lock);
	dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n",
		 RING_TYPE(ring), ring->hop);
	if (ring->nhi->going_away)
		goto err;
	if (!ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
			 RING_TYPE(ring), ring->hop);
		goto err;
	}
	ring_interrupt_active(ring, false);

	ring_iowrite32options(ring, 0, 0);
	ring_iowrite64desc(ring, 0, 0);
	ring_iowrite16desc(ring, 0, ring->is_tx ? 10 : 8);
	ring_iowrite32desc(ring, 0, 12);
	ring->head = 0;
	ring->tail = 0;
	ring->running = false;

err:
	spin_unlock(&ring->lock);
	spin_unlock_irq(&ring->nhi->lock);

	/*
	 * Schedule ring->work to invoke callbacks on all remaining frames.
	 */
	schedule_work(&ring->work);
	flush_work(&ring->work);
}
EXPORT_SYMBOL_GPL(tb_ring_stop);
/*
 * tb_ring_free() - free ring
 *
 * When this method returns all invocations of ring->callback will have
 * finished.
 *
 * Ring must be stopped.
 *
 * Must NOT be called from ring_frame->callback!
 */
void tb_ring_free(struct tb_ring *ring)
{
	spin_lock_irq(&ring->nhi->lock);
	/*
	 * Dissociate the ring from the NHI. This also ensures that
	 * nhi_interrupt_work cannot reschedule ring->work.
	 */
	if (ring->is_tx)
		ring->nhi->tx_rings[ring->hop] = NULL;
	else
		ring->nhi->rx_rings[ring->hop] = NULL;

	if (ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
			 RING_TYPE(ring), ring->hop);
	}

	ring_release_msix(ring);

	dma_free_coherent(&ring->nhi->pdev->dev,
			  ring->size * sizeof(*ring->descriptors),
			  ring->descriptors, ring->descriptors_dma);

	ring->descriptors = NULL;
	ring->descriptors_dma = 0;

	dev_info(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring),
		 ring->hop);

	spin_unlock_irq(&ring->nhi->lock);
	/*
	 * ring->work can no longer be scheduled (it is scheduled only
	 * by nhi_interrupt_work, ring_stop and ring_msix). Wait for it
	 * to finish before freeing the ring.
	 */
	flush_work(&ring->work);
	kfree(ring);
}
EXPORT_SYMBOL_GPL(tb_ring_free);

/**
 * nhi_mailbox_cmd() - Send a command through NHI mailbox
 * @nhi: Pointer to the NHI structure
 * @cmd: Command to send
 * @data: Data to be sent with the command
 *
 * Sends mailbox command to the firmware running on NHI. Returns %0 in
 * case of success and negative errno in case of failure.
 */
int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data)
{
	ktime_t timeout;
	u32 val;

	iowrite32(data, nhi->iobase + REG_INMAIL_DATA);

	val = ioread32(nhi->iobase + REG_INMAIL_CMD);
	val &= ~(REG_INMAIL_CMD_MASK | REG_INMAIL_ERROR);
	val |= REG_INMAIL_OP_REQUEST | cmd;
	iowrite32(val, nhi->iobase + REG_INMAIL_CMD);

	timeout = ktime_add_ms(ktime_get(), NHI_MAILBOX_TIMEOUT);
	do {
		val = ioread32(nhi->iobase + REG_INMAIL_CMD);
		if (!(val & REG_INMAIL_OP_REQUEST))
			break;
		usleep_range(10, 20);
	} while (ktime_before(ktime_get(), timeout));

	if (val & REG_INMAIL_OP_REQUEST)
		return -ETIMEDOUT;
	if (val & REG_INMAIL_ERROR)
		return -EIO;

	return 0;
}
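/*
 * Typical call site sketch (illustration; NHI_MAILBOX_DRV_UNLOADS is
 * assumed to be one of the opcodes from enum nhi_mailbox_cmd in nhi.h):
 *
 *	ret = nhi_mailbox_cmd(nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
 *	if (ret)
 *		dev_warn(&nhi->pdev->dev, "mailbox command failed: %d\n", ret);
 */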
/**
 * nhi_mailbox_mode() - Return current firmware operation mode
 * @nhi: Pointer to the NHI structure
 *
 * The function reads the current firmware operation mode using NHI
 * mailbox registers and returns it to the caller.
 */
enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi)
{
	u32 val;

	val = ioread32(nhi->iobase + REG_OUTMAIL_CMD);
	val &= REG_OUTMAIL_CMD_OPMODE_MASK;
	val >>= REG_OUTMAIL_CMD_OPMODE_SHIFT;

	return (enum nhi_fw_mode)val;
}
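/*
 * Layout of the status bits consumed by nhi_interrupt_work() below: the
 * registers starting at REG_RING_NOTIFY_BASE hold 3 * hop_count bits,
 * grouped as TX complete (bits 0..hop_count - 1), then RX complete, then
 * RX overflow. With hop_count == 12, for example, bit 17 means "RX ring 5
 * has completed descriptors".
 */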
static void nhi_interrupt_work(struct work_struct *work)
{
	struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
	int value = 0; /* Suppress uninitialized usage warning. */
	int bit;
	int hop = -1;
	int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */
	struct tb_ring *ring;

	spin_lock_irq(&nhi->lock);

	/*
	 * Starting at REG_RING_NOTIFY_BASE there are three status bitfields
	 * (TX, RX, RX overflow). We iterate over the bits and read new
	 * dwords as required. The registers are cleared on read.
	 */
	for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
		if (bit % 32 == 0)
			value = ioread32(nhi->iobase
					 + REG_RING_NOTIFY_BASE
					 + 4 * (bit / 32));
		if (++hop == nhi->hop_count) {
			hop = 0;
			type++;
		}
		if ((value & (1 << (bit % 32))) == 0)
			continue;
		if (type == 2) {
			dev_warn(&nhi->pdev->dev,
				 "RX overflow for ring %d\n",
				 hop);
			continue;
		}
		if (type == 0)
			ring = nhi->tx_rings[hop];
		else
			ring = nhi->rx_rings[hop];
		if (ring == NULL) {
			dev_warn(&nhi->pdev->dev,
				 "got interrupt for inactive %s ring %d\n",
				 type ? "RX" : "TX",
				 hop);
			continue;
		}
		/* we do not check ring->running, this is done in ring->work */
		schedule_work(&ring->work);
	}
	spin_unlock_irq(&nhi->lock);
}

static irqreturn_t nhi_msi(int irq, void *data)
{
	struct tb_nhi *nhi = data;
	schedule_work(&nhi->interrupt_work);
	return IRQ_HANDLED;
}

static int nhi_suspend_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	return tb_domain_suspend_noirq(tb);
}

static void nhi_enable_int_throttling(struct tb_nhi *nhi)
{
	/* Throttling is specified in 256ns increments */
	u32 throttle = DIV_ROUND_UP(128 * NSEC_PER_USEC, 256);
	unsigned int i;

	/*
	 * Configure interrupt throttling for all vectors even if we
	 * only use a few.
	 */
	for (i = 0; i < MSIX_MAX_VECS; i++) {
		u32 reg = REG_INT_THROTTLING_RATE + i * 4;
		iowrite32(throttle, nhi->iobase + reg);
	}
}
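/*
 * Worked value for the throttling setup above: 128 us expressed in
 * 256 ns increments is DIV_ROUND_UP(128 * 1000, 256) == 500, i.e. each
 * vector fires at most roughly once per 128 us.
 */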
static int nhi_resume_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	/*
	 * Check that the device is still there. It may be that the user
	 * unplugged the last device, which causes the host controller to
	 * go away on PCs.
	 */
	if (!pci_device_is_present(pdev))
		tb->nhi->going_away = true;
	else
		nhi_enable_int_throttling(tb->nhi);

	return tb_domain_resume_noirq(tb);
}

static int nhi_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	return tb_domain_suspend(tb);
}

static void nhi_complete(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	tb_domain_complete(tb);
}

static void nhi_shutdown(struct tb_nhi *nhi)
{
	int i;
	dev_info(&nhi->pdev->dev, "shutdown\n");

	for (i = 0; i < nhi->hop_count; i++) {
		if (nhi->tx_rings[i])
			dev_WARN(&nhi->pdev->dev,
				 "TX ring %d is still active\n", i);
		if (nhi->rx_rings[i])
			dev_WARN(&nhi->pdev->dev,
				 "RX ring %d is still active\n", i);
	}
	nhi_disable_interrupts(nhi);
	/*
	 * We have to release the irq before calling flush_work. Otherwise an
	 * already executing IRQ handler could call schedule_work again.
	 */
	if (!nhi->pdev->msix_enabled) {
		devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
		flush_work(&nhi->interrupt_work);
	}
	ida_destroy(&nhi->msix_ida);
}

static int nhi_init_msi(struct tb_nhi *nhi)
{
	struct pci_dev *pdev = nhi->pdev;
	int res, irq, nvec;

	/* In case someone left them on. */
	nhi_disable_interrupts(nhi);

	nhi_enable_int_throttling(nhi);

	ida_init(&nhi->msix_ida);

	/*
	 * The NHI has 16 MSI-X vectors or a single MSI. We first try to
	 * get all MSI-X vectors and if we succeed, each ring will have
	 * one MSI-X. If for some reason that does not work out, we
	 * fall back to a single MSI.
	 */
	nvec = pci_alloc_irq_vectors(pdev, MSIX_MIN_VECS, MSIX_MAX_VECS,
				     PCI_IRQ_MSIX);
	if (nvec < 0) {
		nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
		if (nvec < 0)
			return nvec;

		INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);

		irq = pci_irq_vector(nhi->pdev, 0);
		if (irq < 0)
			return irq;

		res = devm_request_irq(&pdev->dev, irq, nhi_msi,
				       IRQF_NO_SUSPEND, "thunderbolt", nhi);
		if (res) {
			dev_err(&pdev->dev, "request_irq failed, aborting\n");
			return res;
		}
	}

	return 0;
}

static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct tb_nhi *nhi;
	struct tb *tb;
	int res;

	res = pcim_enable_device(pdev);
	if (res) {
		dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
		return res;
	}

	res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
	if (res) {
		dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
		return res;
	}

	nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
	if (!nhi)
		return -ENOMEM;

	nhi->pdev = pdev;
	/* cannot fail - table is allocated by pcim_iomap_regions */
	nhi->iobase = pcim_iomap_table(pdev)[0];
	nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
	if (nhi->hop_count != 12 && nhi->hop_count != 32)
		dev_warn(&pdev->dev, "unexpected hop count: %d\n",
			 nhi->hop_count);

	nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
				     sizeof(*nhi->tx_rings), GFP_KERNEL);
	nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
				     sizeof(*nhi->rx_rings), GFP_KERNEL);
	if (!nhi->tx_rings || !nhi->rx_rings)
		return -ENOMEM;

	res = nhi_init_msi(nhi);
	if (res) {
		dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
		return res;
	}

	spin_lock_init(&nhi->lock);

	pci_set_master(pdev);

	tb = icm_probe(nhi);
	if (!tb)
		tb = tb_probe(nhi);
	if (!tb) {
		dev_err(&nhi->pdev->dev,
			"failed to determine connection manager, aborting\n");
		return -ENODEV;
	}

	dev_info(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");

	res = tb_domain_add(tb);
	if (res) {
		/*
		 * At this point the RX/TX rings might already have been
		 * activated. Do a proper shutdown.
		 */
		tb_domain_put(tb);
		nhi_shutdown(nhi);
		return -EIO;
	}
	pci_set_drvdata(pdev, tb);

	return 0;
}

static void nhi_remove(struct pci_dev *pdev)
{
	struct tb *tb = pci_get_drvdata(pdev);
	struct tb_nhi *nhi = tb->nhi;

	tb_domain_remove(tb);
	nhi_shutdown(nhi);
}

/*
 * The tunneled PCI bridges are siblings of ours. Use resume_noirq to
 * re-enable the tunnels as early as possible. A corresponding PCI quirk
 * blocks the downstream bridges' resume_noirq until we are done.
 */
static const struct dev_pm_ops nhi_pm_ops = {
	.suspend_noirq = nhi_suspend_noirq,
	.resume_noirq = nhi_resume_noirq,
	.freeze_noirq = nhi_suspend_noirq, /*
					    * we just disable hotplug, the
					    * pci-tunnels stay alive.
					    */
	.restore_noirq = nhi_resume_noirq,
	.suspend = nhi_suspend,
	.freeze = nhi_suspend,
	.poweroff = nhi_suspend,
	.complete = nhi_complete,
};

static struct pci_device_id nhi_ids[] = {
	/*
	 * We have to specify class, the TB bridges use the same device and
	 * vendor (sub)id on gen 1 and gen 2 controllers.
	 */
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
		.subvendor = 0x2222, .subdevice = 0x1111,
	},
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
		.subvendor = 0x2222, .subdevice = 0x1111,
	},
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI,
		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	},
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI,
		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	},

	/* Thunderbolt 3 */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) },

	{ 0,}
};

MODULE_DEVICE_TABLE(pci, nhi_ids);
MODULE_LICENSE("GPL");

static struct pci_driver nhi_driver = {
	.name = "thunderbolt",
	.id_table = nhi_ids,
	.probe = nhi_probe,
	.remove = nhi_remove,
	.driver.pm = &nhi_pm_ops,
};

static int __init nhi_init(void)
{
	int ret;

	ret = tb_domain_init();
	if (ret)
		return ret;
	ret = pci_register_driver(&nhi_driver);
	if (ret)
		tb_domain_exit();
	return ret;
}

static void __exit nhi_unload(void)
{
	pci_unregister_driver(&nhi_driver);
	tb_domain_exit();
}

module_init(nhi_init);
module_exit(nhi_unload);