/*
 * Thunderbolt Cactus Ridge driver - NHI driver
 *
 * The NHI (native host interface) is the pci device that allows us to send and
 * receive frames from the thunderbolt bus.
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/delay.h>

#include "nhi.h"
#include "nhi_regs.h"
#include "tb.h"

#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")

/*
 * Used to enable end-to-end workaround for missing RX packets. Do not
 * use this ring for anything else.
 */
#define RING_E2E_UNUSED_HOPID	2

/*
 * Minimal number of vectors when we use MSI-X. Two for control channel
 * Rx/Tx and the remaining four are for cross-domain DMA paths.
 */
#define MSIX_MIN_VECS		6
#define MSIX_MAX_VECS		16

#define NHI_MAILBOX_TIMEOUT	500 /* ms */

static int ring_interrupt_index(struct tb_ring *ring)
{
	int bit = ring->hop;
	if (!ring->is_tx)
		bit += ring->nhi->hop_count;
	return bit;
}

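/*
 * Example of the mapping above: with hop_count == 12, TX ring 3 uses
 * interrupt bit 3 while RX ring 3 uses bit 15 (3 + 12). The bits are
 * spread over consecutive 32-bit registers starting at
 * REG_RING_INTERRUPT_BASE.
 */
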
/**
 * ring_interrupt_active() - activate/deactivate interrupts for a single ring
 *
 * ring->nhi->lock must be held.
 */
static void ring_interrupt_active(struct tb_ring *ring, bool active)
{
	int reg = REG_RING_INTERRUPT_BASE +
		  ring_interrupt_index(ring) / 32 * 4;
	int bit = ring_interrupt_index(ring) & 31;
	int mask = 1 << bit;
	u32 old, new;

	if (ring->irq > 0) {
		u32 step, shift, ivr, misc;
		void __iomem *ivr_base;
		int index;

		if (ring->is_tx)
			index = ring->hop;
		else
			index = ring->hop + ring->nhi->hop_count;

		/*
		 * Ask the hardware to clear interrupt status bits automatically
		 * since we already know which interrupt was triggered.
		 */
		misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
		if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) {
			misc |= REG_DMA_MISC_INT_AUTO_CLEAR;
			iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
		}

		ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
		step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
		shift = index % REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
		ivr = ioread32(ivr_base + step);
		ivr &= ~(REG_INT_VEC_ALLOC_MASK << shift);
		if (active)
			ivr |= ring->vector << shift;
		iowrite32(ivr, ivr_base + step);
	}

	old = ioread32(ring->nhi->iobase + reg);
	if (active)
		new = old | mask;
	else
		new = old & ~mask;

	dev_info(&ring->nhi->pdev->dev,
		 "%s interrupt at register %#x bit %d (%#x -> %#x)\n",
		 active ? "enabling" : "disabling", reg, bit, old, new);

	if (new == old)
		dev_WARN(&ring->nhi->pdev->dev,
			 "interrupt for %s %d is already %s\n",
			 RING_TYPE(ring), ring->hop,
			 active ? "enabled" : "disabled");
	iowrite32(new, ring->nhi->iobase + reg);
}

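/*
 * A worked example of the vector allocation math above, assuming the
 * usual nhi_regs.h definitions of 4 bits per entry
 * (REG_INT_VEC_ALLOC_BITS) and 32 / 4 = 8 entries per register
 * (REG_INT_VEC_ALLOC_REGS): ring index 10 yields step = 10 / 8 * 4 = 4,
 * i.e. the second 32-bit register, and shift = (10 % 8) * 4 = 8, i.e.
 * bit offset 8 within that register.
 */
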
/**
 * nhi_disable_interrupts() - disable interrupts for all rings
 *
 * Use only during init and shutdown.
 */
static void nhi_disable_interrupts(struct tb_nhi *nhi)
{
	int i = 0;
	/* disable interrupts */
	for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
		iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);

	/* clear interrupt status bits */
	for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
		ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
}

/* ring helper methods */

static void __iomem *ring_desc_base(struct tb_ring *ring)
{
	void __iomem *io = ring->nhi->iobase;
	io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE;
	io += ring->hop * 16;
	return io;
}

static void __iomem *ring_options_base(struct tb_ring *ring)
{
	void __iomem *io = ring->nhi->iobase;
	io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE;
	io += ring->hop * 32;
	return io;
}

static void ring_iowrite16desc(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite16(value, ring_desc_base(ring) + offset);
}

static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite32(value, ring_desc_base(ring) + offset);
}

static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset)
{
	iowrite32(value, ring_desc_base(ring) + offset);
	iowrite32(value >> 32, ring_desc_base(ring) + offset + 4);
}

static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite32(value, ring_options_base(ring) + offset);
}

static bool ring_full(struct tb_ring *ring)
{
	return ((ring->head + 1) % ring->size) == ring->tail;
}

static bool ring_empty(struct tb_ring *ring)
{
	return ring->head == ring->tail;
}

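/*
 * Note on the head/tail convention above: one descriptor slot is always
 * left unused, so a ring of size N holds at most N - 1 posted frames.
 * This is what lets the full and empty conditions be told apart without
 * a separate element counter.
 */
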
/**
 * ring_write_descriptors() - post frames from ring->queue to the controller
 *
 * ring->lock is held.
 */
static void ring_write_descriptors(struct tb_ring *ring)
{
	struct ring_frame *frame, *n;
	struct ring_desc *descriptor;
	list_for_each_entry_safe(frame, n, &ring->queue, list) {
		if (ring_full(ring))
			break;
		list_move_tail(&frame->list, &ring->in_flight);
		descriptor = &ring->descriptors[ring->head];
		descriptor->phys = frame->buffer_phy;
		descriptor->time = 0;
		descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT;
		if (ring->is_tx) {
			descriptor->length = frame->size;
			descriptor->eof = frame->eof;
			descriptor->sof = frame->sof;
		}
		ring->head = (ring->head + 1) % ring->size;
		ring_iowrite16desc(ring, ring->head, ring->is_tx ? 10 : 8);
	}
}

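/*
 * The 16-bit write of ring->head above effectively acts as the
 * doorbell: it publishes the new producer index to the hardware
 * (descriptor register offset 10 for TX rings, 8 for RX rings), which
 * is what lets the controller pick up the freshly written descriptors.
 */
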
/**
 * ring_work() - progress completed frames
 *
 * If the ring is shutting down then all frames are marked as canceled and
 * their callbacks are invoked.
 *
 * Otherwise we collect all completed frames from the ring buffer, write new
 * frames to the ring buffer and invoke the callbacks for the completed frames.
 */
static void ring_work(struct work_struct *work)
{
	struct tb_ring *ring = container_of(work, typeof(*ring), work);
	struct ring_frame *frame;
	bool canceled = false;
	LIST_HEAD(done);
	mutex_lock(&ring->lock);

	if (!ring->running) {
		/* Move all frames to done and mark them as canceled. */
		list_splice_tail_init(&ring->in_flight, &done);
		list_splice_tail_init(&ring->queue, &done);
		canceled = true;
		goto invoke_callback;
	}

	while (!ring_empty(ring)) {
		if (!(ring->descriptors[ring->tail].flags
				& RING_DESC_COMPLETED))
			break;
		frame = list_first_entry(&ring->in_flight, typeof(*frame),
					 list);
		list_move_tail(&frame->list, &done);
		if (!ring->is_tx) {
			frame->size = ring->descriptors[ring->tail].length;
			frame->eof = ring->descriptors[ring->tail].eof;
			frame->sof = ring->descriptors[ring->tail].sof;
			frame->flags = ring->descriptors[ring->tail].flags;
		}
		ring->tail = (ring->tail + 1) % ring->size;
	}
	ring_write_descriptors(ring);

invoke_callback:
	mutex_unlock(&ring->lock); /* allow callbacks to schedule new work */
	while (!list_empty(&done)) {
		frame = list_first_entry(&done, typeof(*frame), list);
		/*
		 * The callback may reenqueue or delete frame.
		 * Do not hold on to it.
		 */
		list_del_init(&frame->list);
		frame->callback(ring, frame, canceled);
	}
}

int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
{
	int ret = 0;
	mutex_lock(&ring->lock);
	if (ring->running) {
		list_add_tail(&frame->list, &ring->queue);
		ring_write_descriptors(ring);
	} else {
		ret = -ESHUTDOWN;
	}
	mutex_unlock(&ring->lock);
	return ret;
}

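/*
 * A minimal usage sketch for __ring_enqueue() (hypothetical caller,
 * not part of this file): the caller fills in a DMA-mapped buffer and
 * a completion callback before handing the frame over:
 *
 *	frame->buffer_phy = dma_addr;
 *	frame->callback = my_frame_done;
 *	ret = __ring_enqueue(ring, frame);
 *
 * The callback later runs from ring_work(), with canceled set to true
 * if the ring was stopped before the frame completed.
 */
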
static irqreturn_t ring_msix(int irq, void *data)
{
	struct tb_ring *ring = data;

	schedule_work(&ring->work);
	return IRQ_HANDLED;
}

static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
{
	struct tb_nhi *nhi = ring->nhi;
	unsigned long irqflags;
	int ret;

	if (!nhi->pdev->msix_enabled)
		return 0;

	ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
	if (ret < 0)
		return ret;

	ring->vector = ret;

	ring->irq = pci_irq_vector(ring->nhi->pdev, ring->vector);
	if (ring->irq < 0)
		return ring->irq;

	irqflags = no_suspend ? IRQF_NO_SUSPEND : 0;
	return request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
}

static void ring_release_msix(struct tb_ring *ring)
{
	if (ring->irq <= 0)
		return;

	free_irq(ring->irq, ring);
	ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
	ring->vector = 0;
	ring->irq = 0;
}

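/*
 * When MSI-X is not enabled ring_request_msix() is a no-op and
 * ring->irq stays 0; the ring is then serviced by the shared MSI
 * handler (nhi_msi() -> nhi_interrupt_work()) instead of a dedicated
 * per-ring vector.
 */
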
static struct tb_ring *ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
				  bool transmit, unsigned int flags,
				  u16 sof_mask, u16 eof_mask)
{
	struct tb_ring *ring = NULL;
	dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
		 transmit ? "TX" : "RX", hop, size);

	/* Tx Ring 2 is reserved for E2E workaround */
	if (transmit && hop == RING_E2E_UNUSED_HOPID)
		return NULL;

	mutex_lock(&nhi->lock);
	if (hop >= nhi->hop_count) {
		dev_WARN(&nhi->pdev->dev, "invalid hop: %d\n", hop);
		goto err;
	}
	if (transmit && nhi->tx_rings[hop]) {
		dev_WARN(&nhi->pdev->dev, "TX hop %d already allocated\n", hop);
		goto err;
	} else if (!transmit && nhi->rx_rings[hop]) {
		dev_WARN(&nhi->pdev->dev, "RX hop %d already allocated\n", hop);
		goto err;
	}
	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto err;

	mutex_init(&ring->lock);
	INIT_LIST_HEAD(&ring->queue);
	INIT_LIST_HEAD(&ring->in_flight);
	INIT_WORK(&ring->work, ring_work);

	ring->nhi = nhi;
	ring->hop = hop;
	ring->is_tx = transmit;
	ring->size = size;
	ring->flags = flags;
	ring->sof_mask = sof_mask;
	ring->eof_mask = eof_mask;
	ring->head = 0;
	ring->tail = 0;
	ring->running = false;

	if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
		goto err;

	ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
			size * sizeof(*ring->descriptors),
			&ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
	if (!ring->descriptors)
		goto err;

	if (transmit)
		nhi->tx_rings[hop] = ring;
	else
		nhi->rx_rings[hop] = ring;
	mutex_unlock(&nhi->lock);
	return ring;

err:
	if (ring)
		mutex_destroy(&ring->lock);
	kfree(ring);
	mutex_unlock(&nhi->lock);
	return NULL;
}

struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
			      unsigned int flags)
{
	return ring_alloc(nhi, hop, size, true, flags, 0, 0);
}

struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
			      unsigned int flags, u16 sof_mask, u16 eof_mask)
{
	return ring_alloc(nhi, hop, size, false, flags, sof_mask, eof_mask);
}

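/*
 * The sof/eof masks passed to ring_alloc_rx() are bitfields over the
 * 4-bit PDF values of received frames: bit N set means PDF value N is
 * accepted as a start (resp. end) of frame marker, so e.g. a mask of
 * BIT(1) | BIT(2) accepts PDF values 1 and 2. This reading is an
 * assumption based on how the masks are programmed into the RX options
 * register in ring_start() below.
 */
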
/**
 * ring_start() - enable a ring
 *
 * Must not be invoked in parallel with ring_stop().
 */
void ring_start(struct tb_ring *ring)
{
	u16 frame_size;
	u32 flags;

	mutex_lock(&ring->nhi->lock);
	mutex_lock(&ring->lock);
	if (ring->nhi->going_away)
		goto err;
	if (ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
		goto err;
	}
	dev_info(&ring->nhi->pdev->dev, "starting %s %d\n",
		 RING_TYPE(ring), ring->hop);

	if (ring->flags & RING_FLAG_FRAME) {
		/* Means 4096 */
		frame_size = 0;
		flags = RING_FLAG_ENABLE;
	} else {
		frame_size = TB_FRAME_SIZE;
		flags = RING_FLAG_ENABLE | RING_FLAG_RAW;
	}

	if (ring->flags & RING_FLAG_E2E && !ring->is_tx) {
		u32 hop;

		/*
		 * In order not to lose Rx packets we enable end-to-end
		 * workaround which transfers Rx credits to an unused Tx
		 * HopID.
		 */
		hop = RING_E2E_UNUSED_HOPID << REG_RX_OPTIONS_E2E_HOP_SHIFT;
		hop &= REG_RX_OPTIONS_E2E_HOP_MASK;
		flags |= hop | RING_FLAG_E2E_FLOW_CONTROL;
	}

	ring_iowrite64desc(ring, ring->descriptors_dma, 0);
	if (ring->is_tx) {
		ring_iowrite32desc(ring, ring->size, 12);
		ring_iowrite32options(ring, 0, 4); /* time related? */
		ring_iowrite32options(ring, flags, 0);
	} else {
		u32 sof_eof_mask = ring->sof_mask << 16 | ring->eof_mask;

		ring_iowrite32desc(ring, (frame_size << 16) | ring->size, 12);
		ring_iowrite32options(ring, sof_eof_mask, 4);
		ring_iowrite32options(ring, flags, 0);
	}
	ring_interrupt_active(ring, true);
	ring->running = true;
err:
	mutex_unlock(&ring->lock);
	mutex_unlock(&ring->nhi->lock);
}

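/*
 * Note the register programming order in ring_start(): the descriptor
 * base and ring size are written before the options dword that carries
 * RING_FLAG_ENABLE, so the ring is fully described before the hardware
 * is allowed to start using it.
 */
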
/**
 * ring_stop() - shutdown a ring
 *
 * Must not be invoked from a callback.
 *
 * This method will disable the ring. Further calls to ring_tx/ring_rx will
 * return -ESHUTDOWN until the ring is restarted with ring_start().
 *
 * All enqueued frames will be canceled and their callbacks will be executed
 * with frame->canceled set to true (on the callback thread). This method
 * returns only after all callback invocations have finished.
 */
void ring_stop(struct tb_ring *ring)
{
	mutex_lock(&ring->nhi->lock);
	mutex_lock(&ring->lock);
	dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n",
		 RING_TYPE(ring), ring->hop);
	if (ring->nhi->going_away)
		goto err;
	if (!ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
			 RING_TYPE(ring), ring->hop);
		goto err;
	}
	ring_interrupt_active(ring, false);

	ring_iowrite32options(ring, 0, 0);
	ring_iowrite64desc(ring, 0, 0);
	ring_iowrite16desc(ring, 0, ring->is_tx ? 10 : 8);
	ring_iowrite32desc(ring, 0, 12);
	ring->head = 0;
	ring->tail = 0;
	ring->running = false;

err:
	mutex_unlock(&ring->lock);
	mutex_unlock(&ring->nhi->lock);

	/*
	 * schedule ring->work to invoke callbacks on all remaining frames.
	 */
	schedule_work(&ring->work);
	flush_work(&ring->work);
}

/*
 * ring_free() - free ring
 *
 * When this method returns all invocations of ring->callback will have
 * finished.
 *
 * Ring must be stopped.
 *
 * Must NOT be called from ring_frame->callback!
 */
void ring_free(struct tb_ring *ring)
{
	mutex_lock(&ring->nhi->lock);
	/*
	 * Dissociate the ring from the NHI. This also ensures that
	 * nhi_interrupt_work cannot reschedule ring->work.
	 */
	if (ring->is_tx)
		ring->nhi->tx_rings[ring->hop] = NULL;
	else
		ring->nhi->rx_rings[ring->hop] = NULL;

	if (ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
			 RING_TYPE(ring), ring->hop);
	}

	ring_release_msix(ring);

	dma_free_coherent(&ring->nhi->pdev->dev,
			  ring->size * sizeof(*ring->descriptors),
			  ring->descriptors, ring->descriptors_dma);

	ring->descriptors = NULL;
	ring->descriptors_dma = 0;

	dev_info(&ring->nhi->pdev->dev,
		 "freeing %s %d\n",
		 RING_TYPE(ring),
		 ring->hop);

	mutex_unlock(&ring->nhi->lock);
	/*
	 * ring->work can no longer be scheduled (it is scheduled only
	 * by nhi_interrupt_work, ring_stop and ring_msix). Wait for it
	 * to finish before freeing the ring.
	 */
	flush_work(&ring->work);
	mutex_destroy(&ring->lock);
	kfree(ring);
}

/**
 * nhi_mailbox_cmd() - Send a command through NHI mailbox
 * @nhi: Pointer to the NHI structure
 * @cmd: Command to send
 * @data: Data to be sent with the command
 *
 * Sends mailbox command to the firmware running on NHI. Returns %0 in
 * case of success and negative errno in case of failure.
 */
int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data)
{
	ktime_t timeout;
	u32 val;

	iowrite32(data, nhi->iobase + REG_INMAIL_DATA);

	val = ioread32(nhi->iobase + REG_INMAIL_CMD);
	val &= ~(REG_INMAIL_CMD_MASK | REG_INMAIL_ERROR);
	val |= REG_INMAIL_OP_REQUEST | cmd;
	iowrite32(val, nhi->iobase + REG_INMAIL_CMD);

	timeout = ktime_add_ms(ktime_get(), NHI_MAILBOX_TIMEOUT);
	do {
		val = ioread32(nhi->iobase + REG_INMAIL_CMD);
		if (!(val & REG_INMAIL_OP_REQUEST))
			break;
		usleep_range(10, 20);
	} while (ktime_before(ktime_get(), timeout));

	if (val & REG_INMAIL_OP_REQUEST)
		return -ETIMEDOUT;
	if (val & REG_INMAIL_ERROR)
		return -EIO;

	return 0;
}

/**
 * nhi_mailbox_mode() - Return current firmware operation mode
 * @nhi: Pointer to the NHI structure
 *
 * The function reads current firmware operation mode using NHI mailbox
 * registers and returns it to the caller.
 */
enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi)
{
	u32 val;

	val = ioread32(nhi->iobase + REG_OUTMAIL_CMD);
	val &= REG_OUTMAIL_CMD_OPMODE_MASK;
	val >>= REG_OUTMAIL_CMD_OPMODE_SHIFT;

	return (enum nhi_fw_mode)val;
}

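/*
 * The mailbox handshake above relies on the firmware clearing
 * REG_INMAIL_OP_REQUEST once it has consumed the command; polling in
 * 10-20 us steps means a responsive firmware completes long before the
 * 500 ms NHI_MAILBOX_TIMEOUT, which is only hit when it does not
 * answer at all.
 */
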
static void nhi_interrupt_work(struct work_struct *work)
{
	struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
	int value = 0; /* Suppress uninitialized usage warning. */
	int bit;
	int hop = -1;
	int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */
	struct tb_ring *ring;

	mutex_lock(&nhi->lock);

	/*
	 * Starting at REG_RING_NOTIFY_BASE there are three status bitfields
	 * (TX, RX, RX overflow). We iterate over the bits and read new
	 * dwords as required. The registers are cleared on read.
	 */
	for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
		if (bit % 32 == 0)
			value = ioread32(nhi->iobase
					 + REG_RING_NOTIFY_BASE
					 + 4 * (bit / 32));
		if (++hop == nhi->hop_count) {
			hop = 0;
			type++;
		}
		if ((value & (1 << (bit % 32))) == 0)
			continue;
		if (type == 2) {
			dev_warn(&nhi->pdev->dev,
				 "RX overflow for ring %d\n",
				 hop);
			continue;
		}
		if (type == 0)
			ring = nhi->tx_rings[hop];
		else
			ring = nhi->rx_rings[hop];
		if (ring == NULL) {
			dev_warn(&nhi->pdev->dev,
				 "got interrupt for inactive %s ring %d\n",
				 type ? "RX" : "TX",
				 hop);
			continue;
		}
		/* we do not check ring->running, this is done in ring->work */
		schedule_work(&ring->work);
	}
	mutex_unlock(&nhi->lock);
}

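/*
 * Example of the bit decoding above, with hop_count == 12: bits 0-11
 * are TX completions for rings 0-11, bits 12-23 are RX completions and
 * bits 24-35 are RX overflows, so e.g. bit 14 means "RX ring 2".
 */
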
static irqreturn_t nhi_msi(int irq, void *data)
{
	struct tb_nhi *nhi = data;
	schedule_work(&nhi->interrupt_work);
	return IRQ_HANDLED;
}

static int nhi_suspend_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	return tb_domain_suspend_noirq(tb);
}

static void nhi_enable_int_throttling(struct tb_nhi *nhi)
{
	/* Throttling is specified in 256ns increments */
	u32 throttle = DIV_ROUND_UP(128 * NSEC_PER_USEC, 256);
	unsigned int i;

	/*
	 * Configure interrupt throttling for all vectors even if we
	 * only use a few.
	 */
	for (i = 0; i < MSIX_MAX_VECS; i++) {
		u32 reg = REG_INT_THROTTLING_RATE + i * 4;
		iowrite32(throttle, nhi->iobase + reg);
	}
}

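/*
 * Worked out, the throttle value above is DIV_ROUND_UP(128000, 256) =
 * 500 increments of 256 ns, i.e. at most one interrupt per 128 us per
 * vector.
 */
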
static int nhi_resume_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	/*
	 * Check that the device is still there. It may be that the user
	 * unplugged the last device which causes the host controller to
	 * go away on PCs.
	 */
	if (!pci_device_is_present(pdev))
		tb->nhi->going_away = true;
	else
		nhi_enable_int_throttling(tb->nhi);

	return tb_domain_resume_noirq(tb);
}

static int nhi_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	return tb_domain_suspend(tb);
}

static void nhi_complete(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	tb_domain_complete(tb);
}

static void nhi_shutdown(struct tb_nhi *nhi)
{
	int i;
	dev_info(&nhi->pdev->dev, "shutdown\n");

	for (i = 0; i < nhi->hop_count; i++) {
		if (nhi->tx_rings[i])
			dev_WARN(&nhi->pdev->dev,
				 "TX ring %d is still active\n", i);
		if (nhi->rx_rings[i])
			dev_WARN(&nhi->pdev->dev,
				 "RX ring %d is still active\n", i);
	}
	nhi_disable_interrupts(nhi);
	/*
	 * We have to release the irq before calling flush_work. Otherwise an
	 * already executing IRQ handler could call schedule_work again.
	 */
	if (!nhi->pdev->msix_enabled) {
		devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
		flush_work(&nhi->interrupt_work);
	}
	mutex_destroy(&nhi->lock);
	ida_destroy(&nhi->msix_ida);
}

static int nhi_init_msi(struct tb_nhi *nhi)
{
	struct pci_dev *pdev = nhi->pdev;
	int res, irq, nvec;

	/* In case someone left them on. */
	nhi_disable_interrupts(nhi);

	nhi_enable_int_throttling(nhi);

	ida_init(&nhi->msix_ida);

	/*
	 * The NHI has 16 MSI-X vectors or a single MSI. We first try to
	 * get all MSI-X vectors and if we succeed, each ring will have
	 * one MSI-X. If for some reason that does not work out, we
	 * fall back to a single MSI.
	 */
	nvec = pci_alloc_irq_vectors(pdev, MSIX_MIN_VECS, MSIX_MAX_VECS,
				     PCI_IRQ_MSIX);
	if (nvec < 0) {
		nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
		if (nvec < 0)
			return nvec;

		INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);

		irq = pci_irq_vector(nhi->pdev, 0);
		if (irq < 0)
			return irq;

		res = devm_request_irq(&pdev->dev, irq, nhi_msi,
				       IRQF_NO_SUSPEND, "thunderbolt", nhi);
		if (res) {
			dev_err(&pdev->dev, "request_irq failed, aborting\n");
			return res;
		}
	}

	return 0;
}

static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct tb_nhi *nhi;
	struct tb *tb;
	int res;

	res = pcim_enable_device(pdev);
	if (res) {
		dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
		return res;
	}

	res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
	if (res) {
		dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
		return res;
	}

	nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
	if (!nhi)
		return -ENOMEM;

	nhi->pdev = pdev;
	/* cannot fail - table is allocated by pcim_iomap_regions */
	nhi->iobase = pcim_iomap_table(pdev)[0];
	nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
	if (nhi->hop_count != 12 && nhi->hop_count != 32)
		dev_warn(&pdev->dev, "unexpected hop count: %d\n",
			 nhi->hop_count);

	nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
				     sizeof(*nhi->tx_rings), GFP_KERNEL);
	nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
				     sizeof(*nhi->rx_rings), GFP_KERNEL);
	if (!nhi->tx_rings || !nhi->rx_rings)
		return -ENOMEM;

	res = nhi_init_msi(nhi);
	if (res) {
		dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
		return res;
	}

	mutex_init(&nhi->lock);

	pci_set_master(pdev);

	tb = icm_probe(nhi);
	if (!tb)
		tb = tb_probe(nhi);
	if (!tb) {
		dev_err(&nhi->pdev->dev,
			"failed to determine connection manager, aborting\n");
		return -ENODEV;
	}

	dev_info(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");

	res = tb_domain_add(tb);
	if (res) {
		/*
		 * At this point the RX/TX rings might already have been
		 * activated. Do a proper shutdown.
		 */
		tb_domain_put(tb);
		nhi_shutdown(nhi);
		return -EIO;
	}
	pci_set_drvdata(pdev, tb);

	return 0;
}

static void nhi_remove(struct pci_dev *pdev)
{
	struct tb *tb = pci_get_drvdata(pdev);
	struct tb_nhi *nhi = tb->nhi;

	tb_domain_remove(tb);
	nhi_shutdown(nhi);
}

/*
 * The tunneled pci bridges are siblings of us. Use resume_noirq to reenable
 * the tunnels asap. A corresponding pci quirk blocks the downstream bridges
 * resume_noirq until we are done.
 */
static const struct dev_pm_ops nhi_pm_ops = {
	.suspend_noirq = nhi_suspend_noirq,
	.resume_noirq = nhi_resume_noirq,
	.freeze_noirq = nhi_suspend_noirq, /*
					    * we just disable hotplug, the
					    * pci-tunnels stay alive.
					    */
	.restore_noirq = nhi_resume_noirq,
	.suspend = nhi_suspend,
	.freeze = nhi_suspend,
	.poweroff = nhi_suspend,
	.complete = nhi_complete,
};

static struct pci_device_id nhi_ids[] = {
	/*
	 * We have to specify class, the TB bridges use the same device and
	 * vendor (sub)id on gen 1 and gen 2 controllers.
	 */
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
		.subvendor = 0x2222, .subdevice = 0x1111,
	},
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
		.subvendor = 0x2222, .subdevice = 0x1111,
	},
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI,
		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	},
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI,
		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	},

	/* Thunderbolt 3 */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) },

	{ 0,}
};

MODULE_DEVICE_TABLE(pci, nhi_ids);
MODULE_LICENSE("GPL");

static struct pci_driver nhi_driver = {
	.name = "thunderbolt",
	.id_table = nhi_ids,
	.probe = nhi_probe,
	.remove = nhi_remove,
	.driver.pm = &nhi_pm_ops,
};

static int __init nhi_init(void)
{
	int ret;

	ret = tb_domain_init();
	if (ret)
		return ret;
	ret = pci_register_driver(&nhi_driver);
	if (ret)
		tb_domain_exit();
	return ret;
}

static void __exit nhi_unload(void)
{
	pci_unregister_driver(&nhi_driver);
	tb_domain_exit();
}

module_init(nhi_init);
module_exit(nhi_unload);