/*
 * Thunderbolt Cactus Ridge driver - NHI driver
 *
 * The NHI (native host interface) is the PCI device that allows us to send
 * and receive frames from the thunderbolt bus.
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dmi.h>

#include "nhi.h"
#include "nhi_regs.h"
#include "tb.h"

#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")

/*
 * Minimum number of vectors when we use MSI-X. Two for control channel
 * Rx/Tx and the remaining four are for cross domain DMA paths.
 */
#define MSIX_MIN_VECS		6
#define MSIX_MAX_VECS		16

static int ring_interrupt_index(struct tb_ring *ring)
{
	int bit = ring->hop;
	if (!ring->is_tx)
		bit += ring->nhi->hop_count;
	return bit;
}
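
/*
 * For illustration (not part of the driver logic): TX rings occupy
 * interrupt bits [0, hop_count) and RX rings the bits directly above
 * them. With hop_count == 12, RX ring 3 maps to bit 3 + 12 = 15, which
 * ring_interrupt_active() below turns into register offset
 * REG_RING_INTERRUPT_BASE + 15 / 32 * 4 = REG_RING_INTERRUPT_BASE
 * and mask 1 << (15 & 31) = 1 << 15.
 */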

/**
 * ring_interrupt_active() - activate/deactivate interrupts for a single ring
 *
 * ring->nhi->lock must be held.
 */
static void ring_interrupt_active(struct tb_ring *ring, bool active)
{
	int reg = REG_RING_INTERRUPT_BASE +
		  ring_interrupt_index(ring) / 32 * 4;
	int bit = ring_interrupt_index(ring) & 31;
	int mask = 1 << bit;
	u32 old, new;

	if (ring->irq > 0) {
		u32 step, shift, ivr, misc;
		void __iomem *ivr_base;
		int index;

		if (ring->is_tx)
			index = ring->hop;
		else
			index = ring->hop + ring->nhi->hop_count;

		/*
		 * Ask the hardware to clear interrupt status bits automatically
		 * since we already know which interrupt was triggered.
		 */
		misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
		if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) {
			misc |= REG_DMA_MISC_INT_AUTO_CLEAR;
			iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
		}

		ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
		step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
		shift = index % REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
		ivr = ioread32(ivr_base + step);
		ivr &= ~(REG_INT_VEC_ALLOC_MASK << shift);
		if (active)
			ivr |= ring->vector << shift;
		iowrite32(ivr, ivr_base + step);
	}

	old = ioread32(ring->nhi->iobase + reg);
	if (active)
		new = old | mask;
	else
		new = old & ~mask;

	dev_info(&ring->nhi->pdev->dev,
		 "%s interrupt at register %#x bit %d (%#x -> %#x)\n",
		 active ? "enabling" : "disabling", reg, bit, old, new);

	if (new == old)
		dev_WARN(&ring->nhi->pdev->dev,
			 "interrupt for %s %d is already %s\n",
			 RING_TYPE(ring), ring->hop,
			 active ? "enabled" : "disabled");
	iowrite32(new, ring->nhi->iobase + reg);
}
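
/*
 * Worked example of the IVR math above, assuming eight 4-bit vector
 * fields per 32-bit register (REG_INT_VEC_ALLOC_REGS == 8 and
 * REG_INT_VEC_ALLOC_BITS == 4, as defined in nhi_regs.h): index 10
 * selects the dword at byte offset 10 / 8 * 4 = 4 and the field at
 * bit shift 10 % 8 * 4 = 8, i.e. bits [11:8] of the second IVR
 * register receive ring->vector.
 */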

/**
 * nhi_disable_interrupts() - disable interrupts for all rings
 *
 * Use only during init and shutdown.
 */
static void nhi_disable_interrupts(struct tb_nhi *nhi)
{
	int i = 0;
	/* disable interrupts */
	for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
		iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);

	/* clear interrupt status bits */
	for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
		ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
}

/* ring helper methods */

static void __iomem *ring_desc_base(struct tb_ring *ring)
{
	void __iomem *io = ring->nhi->iobase;
	io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE;
	io += ring->hop * 16;
	return io;
}

static void __iomem *ring_options_base(struct tb_ring *ring)
{
	void __iomem *io = ring->nhi->iobase;
	io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE;
	io += ring->hop * 32;
	return io;
}

static void ring_iowrite16desc(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite16(value, ring_desc_base(ring) + offset);
}

static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite32(value, ring_desc_base(ring) + offset);
}

static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset)
{
	iowrite32(value, ring_desc_base(ring) + offset);
	iowrite32(value >> 32, ring_desc_base(ring) + offset + 4);
}

static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite32(value, ring_options_base(ring) + offset);
}

static bool ring_full(struct tb_ring *ring)
{
	return ((ring->head + 1) % ring->size) == ring->tail;
}

static bool ring_empty(struct tb_ring *ring)
{
	return ring->head == ring->tail;
}
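
/*
 * Illustration only: the ring is a classic circular buffer that
 * sacrifices one slot to distinguish full from empty. With size == 8,
 * head == 7 and tail == 0 the ring reports full because
 * (7 + 1) % 8 == 0 == tail, even though one descriptor slot is still
 * unused; head == tail always means empty.
 */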

/**
 * ring_write_descriptors() - post frames from ring->queue to the controller
 *
 * ring->lock is held.
 */
static void ring_write_descriptors(struct tb_ring *ring)
{
	struct ring_frame *frame, *n;
	struct ring_desc *descriptor;
	list_for_each_entry_safe(frame, n, &ring->queue, list) {
		if (ring_full(ring))
			break;
		list_move_tail(&frame->list, &ring->in_flight);
		descriptor = &ring->descriptors[ring->head];
		descriptor->phys = frame->buffer_phy;
		descriptor->time = 0;
		descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT;
		if (ring->is_tx) {
			descriptor->length = frame->size;
			descriptor->eof = frame->eof;
			descriptor->sof = frame->sof;
		}
		ring->head = (ring->head + 1) % ring->size;
		ring_iowrite16desc(ring, ring->head, ring->is_tx ? 10 : 8);
	}
}

/**
 * ring_work() - progress completed frames
 *
 * If the ring is shutting down then all frames are marked as canceled and
 * their callbacks are invoked.
 *
 * Otherwise we collect all completed frames from the ring buffer, post new
 * frames to the ring buffer and invoke the callbacks for the completed
 * frames.
 */
static void ring_work(struct work_struct *work)
{
	struct tb_ring *ring = container_of(work, typeof(*ring), work);
	struct ring_frame *frame;
	bool canceled = false;
	LIST_HEAD(done);
	mutex_lock(&ring->lock);

	if (!ring->running) {
		/* Move all frames to done and mark them as canceled. */
		list_splice_tail_init(&ring->in_flight, &done);
		list_splice_tail_init(&ring->queue, &done);
		canceled = true;
		goto invoke_callback;
	}

	while (!ring_empty(ring)) {
		if (!(ring->descriptors[ring->tail].flags
				& RING_DESC_COMPLETED))
			break;
		frame = list_first_entry(&ring->in_flight, typeof(*frame),
					 list);
		list_move_tail(&frame->list, &done);
		if (!ring->is_tx) {
			frame->size = ring->descriptors[ring->tail].length;
			frame->eof = ring->descriptors[ring->tail].eof;
			frame->sof = ring->descriptors[ring->tail].sof;
			frame->flags = ring->descriptors[ring->tail].flags;
			if (frame->sof != 0)
				dev_WARN(&ring->nhi->pdev->dev,
					 "%s %d got unexpected SOF: %#x\n",
					 RING_TYPE(ring), ring->hop,
					 frame->sof);
			/*
			 * known flags:
			 * raw not enabled, interrupt not set: 0x2=0010
			 * raw enabled: 0xa=1010
			 * raw not enabled: 0xb=1011
			 * partial frame (>MAX_FRAME_SIZE): 0xe=1110
			 */
			if (frame->flags != 0xa)
				dev_WARN(&ring->nhi->pdev->dev,
					 "%s %d got unexpected flags: %#x\n",
					 RING_TYPE(ring), ring->hop,
					 frame->flags);
		}
		ring->tail = (ring->tail + 1) % ring->size;
	}
	ring_write_descriptors(ring);

invoke_callback:
	mutex_unlock(&ring->lock); /* allow callbacks to schedule new work */
	while (!list_empty(&done)) {
		frame = list_first_entry(&done, typeof(*frame), list);
		/*
		 * The callback may reenqueue or delete frame.
		 * Do not hold on to it.
		 */
		list_del_init(&frame->list);
		frame->callback(ring, frame, canceled);
	}
}
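
/*
 * A minimal callback sketch, for illustration only (my_rx_callback and
 * my_process are hypothetical names, not part of this driver): an RX
 * consumer typically copies the data out and immediately reposts the
 * frame, which is safe because ring_work() drops ring->lock before
 * invoking callbacks.
 *
 *	static void my_rx_callback(struct tb_ring *ring,
 *				   struct ring_frame *frame, bool canceled)
 *	{
 *		if (canceled)
 *			return; // ring is being stopped, drop the frame
 *		my_process(frame, frame->size, frame->eof);
 *		__ring_enqueue(ring, frame); // fails with -ESHUTDOWN if stopped
 *	}
 */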

int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
{
	int ret = 0;
	mutex_lock(&ring->lock);
	if (ring->running) {
		list_add_tail(&frame->list, &ring->queue);
		ring_write_descriptors(ring);
	} else {
		ret = -ESHUTDOWN;
	}
	mutex_unlock(&ring->lock);
	return ret;
}

static irqreturn_t ring_msix(int irq, void *data)
{
	struct tb_ring *ring = data;

	schedule_work(&ring->work);
	return IRQ_HANDLED;
}

static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
{
	struct tb_nhi *nhi = ring->nhi;
	unsigned long irqflags;
	int ret;

	if (!nhi->pdev->msix_enabled)
		return 0;

	ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
	if (ret < 0)
		return ret;

	ring->vector = ret;

	ring->irq = pci_irq_vector(ring->nhi->pdev, ring->vector);
	if (ring->irq < 0)
		return ring->irq;

	irqflags = no_suspend ? IRQF_NO_SUSPEND : 0;
	return request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
}

static void ring_release_msix(struct tb_ring *ring)
{
	if (ring->irq <= 0)
		return;

	free_irq(ring->irq, ring);
	ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
	ring->vector = 0;
	ring->irq = 0;
}

static struct tb_ring *ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
				  bool transmit, unsigned int flags)
{
	struct tb_ring *ring = NULL;
	dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
		 transmit ? "TX" : "RX", hop, size);

	mutex_lock(&nhi->lock);
	if (hop >= nhi->hop_count) {
		dev_WARN(&nhi->pdev->dev, "invalid hop: %d\n", hop);
		goto err;
	}
	if (transmit && nhi->tx_rings[hop]) {
		dev_WARN(&nhi->pdev->dev, "TX hop %d already allocated\n", hop);
		goto err;
	} else if (!transmit && nhi->rx_rings[hop]) {
		dev_WARN(&nhi->pdev->dev, "RX hop %d already allocated\n", hop);
		goto err;
	}
	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto err;

	mutex_init(&ring->lock);
	INIT_LIST_HEAD(&ring->queue);
	INIT_LIST_HEAD(&ring->in_flight);
	INIT_WORK(&ring->work, ring_work);

	ring->nhi = nhi;
	ring->hop = hop;
	ring->is_tx = transmit;
	ring->size = size;
	ring->flags = flags;
	ring->head = 0;
	ring->tail = 0;
	ring->running = false;

	if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
		goto err;

	ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
			size * sizeof(*ring->descriptors),
			&ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
	if (!ring->descriptors)
		goto err;

	if (transmit)
		nhi->tx_rings[hop] = ring;
	else
		nhi->rx_rings[hop] = ring;
	mutex_unlock(&nhi->lock);
	return ring;

err:
	if (ring)
		mutex_destroy(&ring->lock);
	kfree(ring);
	mutex_unlock(&nhi->lock);
	return NULL;
}

struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
			      unsigned int flags)
{
	return ring_alloc(nhi, hop, size, true, flags);
}

struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
			      unsigned int flags)
{
	return ring_alloc(nhi, hop, size, false, flags);
}
"TX" : "RX", hop, size); 32716603153SAndreas Noever 32816603153SAndreas Noever mutex_lock(&nhi->lock); 32916603153SAndreas Noever if (hop >= nhi->hop_count) { 33016603153SAndreas Noever dev_WARN(&nhi->pdev->dev, "invalid hop: %d\n", hop); 33116603153SAndreas Noever goto err; 33216603153SAndreas Noever } 33316603153SAndreas Noever if (transmit && nhi->tx_rings[hop]) { 33416603153SAndreas Noever dev_WARN(&nhi->pdev->dev, "TX hop %d already allocated\n", hop); 33516603153SAndreas Noever goto err; 33616603153SAndreas Noever } else if (!transmit && nhi->rx_rings[hop]) { 33716603153SAndreas Noever dev_WARN(&nhi->pdev->dev, "RX hop %d already allocated\n", hop); 33816603153SAndreas Noever goto err; 33916603153SAndreas Noever } 34016603153SAndreas Noever ring = kzalloc(sizeof(*ring), GFP_KERNEL); 34116603153SAndreas Noever if (!ring) 34216603153SAndreas Noever goto err; 34316603153SAndreas Noever 34416603153SAndreas Noever mutex_init(&ring->lock); 34516603153SAndreas Noever INIT_LIST_HEAD(&ring->queue); 34616603153SAndreas Noever INIT_LIST_HEAD(&ring->in_flight); 34716603153SAndreas Noever INIT_WORK(&ring->work, ring_work); 34816603153SAndreas Noever 34916603153SAndreas Noever ring->nhi = nhi; 35016603153SAndreas Noever ring->hop = hop; 35116603153SAndreas Noever ring->is_tx = transmit; 35216603153SAndreas Noever ring->size = size; 353046bee1fSMika Westerberg ring->flags = flags; 35416603153SAndreas Noever ring->head = 0; 35516603153SAndreas Noever ring->tail = 0; 35616603153SAndreas Noever ring->running = false; 357046bee1fSMika Westerberg 358046bee1fSMika Westerberg if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND)) 359046bee1fSMika Westerberg goto err; 360046bee1fSMika Westerberg 36116603153SAndreas Noever ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev, 36216603153SAndreas Noever size * sizeof(*ring->descriptors), 36316603153SAndreas Noever &ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO); 36416603153SAndreas Noever if (!ring->descriptors) 36516603153SAndreas Noever goto err; 36616603153SAndreas Noever 36716603153SAndreas Noever if (transmit) 36816603153SAndreas Noever nhi->tx_rings[hop] = ring; 36916603153SAndreas Noever else 37016603153SAndreas Noever nhi->rx_rings[hop] = ring; 37116603153SAndreas Noever mutex_unlock(&nhi->lock); 37216603153SAndreas Noever return ring; 37316603153SAndreas Noever 37416603153SAndreas Noever err: 37516603153SAndreas Noever if (ring) 37616603153SAndreas Noever mutex_destroy(&ring->lock); 37716603153SAndreas Noever kfree(ring); 37816603153SAndreas Noever mutex_unlock(&nhi->lock); 37916603153SAndreas Noever return NULL; 38016603153SAndreas Noever } 38116603153SAndreas Noever 382046bee1fSMika Westerberg struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size, 383046bee1fSMika Westerberg unsigned int flags) 38416603153SAndreas Noever { 385046bee1fSMika Westerberg return ring_alloc(nhi, hop, size, true, flags); 38616603153SAndreas Noever } 38716603153SAndreas Noever 388046bee1fSMika Westerberg struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size, 389046bee1fSMika Westerberg unsigned int flags) 39016603153SAndreas Noever { 391046bee1fSMika Westerberg return ring_alloc(nhi, hop, size, false, flags); 39216603153SAndreas Noever } 39316603153SAndreas Noever 39416603153SAndreas Noever /** 39516603153SAndreas Noever * ring_start() - enable a ring 39616603153SAndreas Noever * 39716603153SAndreas Noever * Must not be invoked in parallel with ring_stop(). 

/**
 * ring_stop() - shutdown a ring
 *
 * Must not be invoked from a callback.
 *
 * This method will disable the ring. Further calls to ring_tx/ring_rx will
 * return -ESHUTDOWN until the ring is restarted with ring_start().
 *
 * All enqueued frames will be canceled and their callbacks will be executed
 * with frame->canceled set to true (on the callback thread). This method
 * returns only after all callback invocations have finished.
 */
void ring_stop(struct tb_ring *ring)
{
	mutex_lock(&ring->nhi->lock);
	mutex_lock(&ring->lock);
	dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n",
		 RING_TYPE(ring), ring->hop);
	if (!ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
			 RING_TYPE(ring), ring->hop);
		goto err;
	}
	ring_interrupt_active(ring, false);

	ring_iowrite32options(ring, 0, 0);
	ring_iowrite64desc(ring, 0, 0);
	ring_iowrite16desc(ring, 0, ring->is_tx ? 10 : 8);
	ring_iowrite32desc(ring, 0, 12);
	ring->head = 0;
	ring->tail = 0;
	ring->running = false;

err:
	mutex_unlock(&ring->lock);
	mutex_unlock(&ring->nhi->lock);

	/*
	 * schedule ring->work to invoke callbacks on all remaining frames.
	 */
	schedule_work(&ring->work);
	flush_work(&ring->work);
}

/*
 * ring_free() - free ring
 *
 * When this method returns all invocations of ring->callback will have
 * finished.
 *
 * Ring must be stopped.
 *
 * Must NOT be called from ring_frame->callback!
 */
void ring_free(struct tb_ring *ring)
{
	mutex_lock(&ring->nhi->lock);
	/*
	 * Dissociate the ring from the NHI. This also ensures that
	 * nhi_interrupt_work cannot reschedule ring->work.
	 */
	if (ring->is_tx)
		ring->nhi->tx_rings[ring->hop] = NULL;
	else
		ring->nhi->rx_rings[ring->hop] = NULL;

	if (ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
			 RING_TYPE(ring), ring->hop);
	}

	ring_release_msix(ring);

	dma_free_coherent(&ring->nhi->pdev->dev,
			  ring->size * sizeof(*ring->descriptors),
			  ring->descriptors, ring->descriptors_dma);

	ring->descriptors = NULL;
	ring->descriptors_dma = 0;

	dev_info(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring),
		 ring->hop);

	mutex_unlock(&ring->nhi->lock);
	/*
	 * ring->work can no longer be scheduled (it is scheduled only
	 * by nhi_interrupt_work, ring_stop and ring_msix). Wait for it
	 * to finish before freeing the ring.
	 */
	flush_work(&ring->work);
	mutex_destroy(&ring->lock);
	kfree(ring);
}
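
/*
 * Typical ring lifecycle, as a hedged sketch (dma_addr and my_callback
 * are hypothetical; error handling elided):
 *
 *	struct tb_ring *ring = ring_alloc_rx(nhi, hop, 256, 0);
 *	ring_start(ring);
 *	frame->buffer_phy = dma_addr;	// caller-mapped DMA buffer
 *	frame->callback = my_callback;
 *	__ring_enqueue(ring, frame);	// -ESHUTDOWN once stopped
 *	...
 *	ring_stop(ring);	// cancels frames, runs their callbacks
 *	ring_free(ring);	// ring must already be stopped
 */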

static void nhi_interrupt_work(struct work_struct *work)
{
	struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
	int value = 0; /* Suppress uninitialized usage warning. */
	int bit;
	int hop = -1;
	int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */
	struct tb_ring *ring;

	mutex_lock(&nhi->lock);

	/*
	 * Starting at REG_RING_NOTIFY_BASE there are three status bitfields
	 * (TX, RX, RX overflow). We iterate over the bits and read new
	 * dwords as required. The registers are cleared on read.
	 */
	for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
		if (bit % 32 == 0)
			value = ioread32(nhi->iobase
					 + REG_RING_NOTIFY_BASE
					 + 4 * (bit / 32));
		if (++hop == nhi->hop_count) {
			hop = 0;
			type++;
		}
		if ((value & (1 << (bit % 32))) == 0)
			continue;
		if (type == 2) {
			dev_warn(&nhi->pdev->dev,
				 "RX overflow for ring %d\n",
				 hop);
			continue;
		}
		if (type == 0)
			ring = nhi->tx_rings[hop];
		else
			ring = nhi->rx_rings[hop];
		if (ring == NULL) {
			dev_warn(&nhi->pdev->dev,
				 "got interrupt for inactive %s ring %d\n",
				 type ? "RX" : "TX",
				 hop);
			continue;
		}
		/* we do not check ring->running, this is done in ring->work */
		schedule_work(&ring->work);
	}
	mutex_unlock(&nhi->lock);
}
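
/*
 * Decoding example, for illustration: with hop_count == 12 the status
 * bits are laid out as TX rings 0-11 (bits 0-11), RX rings 0-11
 * (bits 12-23) and RX overflow for rings 0-11 (bits 24-35). Bit 14
 * therefore means "RX ring 2 completed", and bit 32 ("RX overflow for
 * ring 8") is the first bit of the second notify dword, read from
 * REG_RING_NOTIFY_BASE + 4.
 */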
"RX" : "TX", 56916603153SAndreas Noever hop); 57016603153SAndreas Noever continue; 57116603153SAndreas Noever } 57216603153SAndreas Noever /* we do not check ring->running, this is done in ring->work */ 57316603153SAndreas Noever schedule_work(&ring->work); 57416603153SAndreas Noever } 57516603153SAndreas Noever mutex_unlock(&nhi->lock); 57616603153SAndreas Noever } 57716603153SAndreas Noever 57816603153SAndreas Noever static irqreturn_t nhi_msi(int irq, void *data) 57916603153SAndreas Noever { 58016603153SAndreas Noever struct tb_nhi *nhi = data; 58116603153SAndreas Noever schedule_work(&nhi->interrupt_work); 58216603153SAndreas Noever return IRQ_HANDLED; 58316603153SAndreas Noever } 58416603153SAndreas Noever 58523dd5bb4SAndreas Noever static int nhi_suspend_noirq(struct device *dev) 58623dd5bb4SAndreas Noever { 58723dd5bb4SAndreas Noever struct pci_dev *pdev = to_pci_dev(dev); 58823dd5bb4SAndreas Noever struct tb *tb = pci_get_drvdata(pdev); 5899d3cce0bSMika Westerberg 5909d3cce0bSMika Westerberg return tb_domain_suspend_noirq(tb); 59123dd5bb4SAndreas Noever } 59223dd5bb4SAndreas Noever 59323dd5bb4SAndreas Noever static int nhi_resume_noirq(struct device *dev) 59423dd5bb4SAndreas Noever { 59523dd5bb4SAndreas Noever struct pci_dev *pdev = to_pci_dev(dev); 59623dd5bb4SAndreas Noever struct tb *tb = pci_get_drvdata(pdev); 5979d3cce0bSMika Westerberg 5989d3cce0bSMika Westerberg return tb_domain_resume_noirq(tb); 59923dd5bb4SAndreas Noever } 60023dd5bb4SAndreas Noever 60116603153SAndreas Noever static void nhi_shutdown(struct tb_nhi *nhi) 60216603153SAndreas Noever { 60316603153SAndreas Noever int i; 60416603153SAndreas Noever dev_info(&nhi->pdev->dev, "shutdown\n"); 60516603153SAndreas Noever 60616603153SAndreas Noever for (i = 0; i < nhi->hop_count; i++) { 60716603153SAndreas Noever if (nhi->tx_rings[i]) 60816603153SAndreas Noever dev_WARN(&nhi->pdev->dev, 60916603153SAndreas Noever "TX ring %d is still active\n", i); 61016603153SAndreas Noever if (nhi->rx_rings[i]) 61116603153SAndreas Noever dev_WARN(&nhi->pdev->dev, 61216603153SAndreas Noever "RX ring %d is still active\n", i); 61316603153SAndreas Noever } 61416603153SAndreas Noever nhi_disable_interrupts(nhi); 61516603153SAndreas Noever /* 61616603153SAndreas Noever * We have to release the irq before calling flush_work. Otherwise an 61716603153SAndreas Noever * already executing IRQ handler could call schedule_work again. 61816603153SAndreas Noever */ 619046bee1fSMika Westerberg if (!nhi->pdev->msix_enabled) { 62016603153SAndreas Noever devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi); 62116603153SAndreas Noever flush_work(&nhi->interrupt_work); 622046bee1fSMika Westerberg } 62316603153SAndreas Noever mutex_destroy(&nhi->lock); 624046bee1fSMika Westerberg ida_destroy(&nhi->msix_ida); 625046bee1fSMika Westerberg } 626046bee1fSMika Westerberg 627046bee1fSMika Westerberg static int nhi_init_msi(struct tb_nhi *nhi) 628046bee1fSMika Westerberg { 629046bee1fSMika Westerberg struct pci_dev *pdev = nhi->pdev; 630046bee1fSMika Westerberg int res, irq, nvec; 631046bee1fSMika Westerberg 632046bee1fSMika Westerberg /* In case someone left them on. */ 633046bee1fSMika Westerberg nhi_disable_interrupts(nhi); 634046bee1fSMika Westerberg 635046bee1fSMika Westerberg ida_init(&nhi->msix_ida); 636046bee1fSMika Westerberg 637046bee1fSMika Westerberg /* 638046bee1fSMika Westerberg * The NHI has 16 MSI-X vectors or a single MSI. 

static int nhi_init_msi(struct tb_nhi *nhi)
{
	struct pci_dev *pdev = nhi->pdev;
	int res, irq, nvec;

	/* In case someone left them on. */
	nhi_disable_interrupts(nhi);

	ida_init(&nhi->msix_ida);

	/*
	 * The NHI has 16 MSI-X vectors or a single MSI. We first try to
	 * get all MSI-X vectors and if we succeed, each ring will have
	 * one MSI-X. If for some reason that does not work out, we
	 * fall back to a single MSI.
	 */
	nvec = pci_alloc_irq_vectors(pdev, MSIX_MIN_VECS, MSIX_MAX_VECS,
				     PCI_IRQ_MSIX);
	if (nvec < 0) {
		nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
		if (nvec < 0)
			return nvec;

		INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);

		irq = pci_irq_vector(nhi->pdev, 0);
		if (irq < 0)
			return irq;

		res = devm_request_irq(&pdev->dev, irq, nhi_msi,
				       IRQF_NO_SUSPEND, "thunderbolt", nhi);
		if (res) {
			dev_err(&pdev->dev, "request_irq failed, aborting\n");
			return res;
		}
	}

	return 0;
}
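
/*
 * Note, for illustration: pci_alloc_irq_vectors() may grant anywhere
 * between MSIX_MIN_VECS (6) and MSIX_MAX_VECS (16) MSI-X vectors; in
 * that mode rings claim individual vectors via ring_request_msix()
 * above and nhi_interrupt_work is never used. Only the single-MSI
 * fallback installs nhi_msi and funnels everything through the shared
 * nhi_interrupt_work path.
 */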

static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct tb_nhi *nhi;
	struct tb *tb;
	int res;

	res = pcim_enable_device(pdev);
	if (res) {
		dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
		return res;
	}

	res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
	if (res) {
		dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
		return res;
	}

	nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
	if (!nhi)
		return -ENOMEM;

	nhi->pdev = pdev;
	/* cannot fail - table is allocated by pcim_iomap_regions */
	nhi->iobase = pcim_iomap_table(pdev)[0];
	nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
	if (nhi->hop_count != 12 && nhi->hop_count != 32)
		dev_warn(&pdev->dev, "unexpected hop count: %d\n",
			 nhi->hop_count);

	nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
				     sizeof(*nhi->tx_rings), GFP_KERNEL);
	nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
				     sizeof(*nhi->rx_rings), GFP_KERNEL);
	if (!nhi->tx_rings || !nhi->rx_rings)
		return -ENOMEM;

	res = nhi_init_msi(nhi);
	if (res) {
		dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
		return res;
	}

	mutex_init(&nhi->lock);

	pci_set_master(pdev);

	/* magic value - clock related? */
	iowrite32(3906250 / 10000, nhi->iobase + 0x38c00);

	dev_info(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
	tb = tb_probe(nhi);
	if (!tb)
		return -ENODEV;

	res = tb_domain_add(tb);
	if (res) {
		/*
		 * At this point the RX/TX rings might already have been
		 * activated. Do a proper shutdown.
		 */
		tb_domain_put(tb);
		nhi_shutdown(nhi);
		return -EIO;
	}
	pci_set_drvdata(pdev, tb);

	return 0;
}

static void nhi_remove(struct pci_dev *pdev)
{
	struct tb *tb = pci_get_drvdata(pdev);
	struct tb_nhi *nhi = tb->nhi;

	tb_domain_remove(tb);
	nhi_shutdown(nhi);
}

/*
 * The tunneled pci bridges are siblings of us. Use resume_noirq to reenable
 * the tunnels asap. A corresponding pci quirk blocks the downstream bridges'
 * resume_noirq until we are done.
 */
static const struct dev_pm_ops nhi_pm_ops = {
	.suspend_noirq = nhi_suspend_noirq,
	.resume_noirq = nhi_resume_noirq,
	.freeze_noirq = nhi_suspend_noirq, /*
					    * we just disable hotplug, the
					    * pci-tunnels stay alive.
					    */
	.restore_noirq = nhi_resume_noirq,
};

static struct pci_device_id nhi_ids[] = {
	/*
	 * We have to specify class, as the TB bridges use the same device
	 * and vendor (sub)id on gen 1 and gen 2 controllers.
	 */
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
		.subvendor = 0x2222, .subdevice = 0x1111,
	},
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
		.subvendor = 0x2222, .subdevice = 0x1111,
	},
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI,
		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	},
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI,
		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	},

	/* Thunderbolt 3 */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) },

	{ 0,}
};

MODULE_DEVICE_TABLE(pci, nhi_ids);
MODULE_LICENSE("GPL");

static struct pci_driver nhi_driver = {
	.name = "thunderbolt",
	.id_table = nhi_ids,
	.probe = nhi_probe,
	.remove = nhi_remove,
	.driver.pm = &nhi_pm_ops,
};

static int __init nhi_init(void)
{
	int ret;

	if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
		return -ENOSYS;
	ret = tb_domain_init();
	if (ret)
		return ret;
	ret = pci_register_driver(&nhi_driver);
	if (ret)
		tb_domain_exit();
	return ret;
}

static void __exit nhi_unload(void)
{
	pci_unregister_driver(&nhi_driver);
	tb_domain_exit();
}

module_init(nhi_init);
module_exit(nhi_unload);