/*
 * Thunderbolt Cactus Ridge driver - NHI driver
 *
 * The NHI (native host interface) is the PCI device that allows us to
 * send and receive frames from the Thunderbolt bus.
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/delay.h>

#include "nhi.h"
#include "nhi_regs.h"
#include "tb.h"

#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")

/*
 * Used to enable end-to-end workaround for missing RX packets. Do not
 * use this ring for anything else.
 */
#define RING_E2E_UNUSED_HOPID	2

/*
 * Minimal number of vectors when we use MSI-X. Two are for the control
 * channel Rx/Tx and the remaining four are for cross-domain DMA paths.
 */
#define MSIX_MIN_VECS		6
#define MSIX_MAX_VECS		16

#define NHI_MAILBOX_TIMEOUT	500 /* ms */

static int ring_interrupt_index(struct tb_ring *ring)
{
	int bit = ring->hop;
	if (!ring->is_tx)
		bit += ring->nhi->hop_count;
	return bit;
}
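
/*
 * Note on the resulting layout: interrupt bits are grouped with all TX
 * rings first and all RX rings after them, so TX ring N uses bit N and
 * RX ring N uses bit hop_count + N. With the usual hop_count of 12, for
 * example, RX ring 3 maps to bit 15.
 */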

/**
 * ring_interrupt_active() - activate/deactivate interrupts for a single ring
 *
 * ring->nhi->lock must be held.
 */
static void ring_interrupt_active(struct tb_ring *ring, bool active)
{
	int reg = REG_RING_INTERRUPT_BASE +
		  ring_interrupt_index(ring) / 32 * 4;
	int bit = ring_interrupt_index(ring) & 31;
	int mask = 1 << bit;
	u32 old, new;

	if (ring->irq > 0) {
		u32 step, shift, ivr, misc;
		void __iomem *ivr_base;
		int index;

		if (ring->is_tx)
			index = ring->hop;
		else
			index = ring->hop + ring->nhi->hop_count;

		/*
		 * Ask the hardware to clear interrupt status bits automatically
		 * since we already know which interrupt was triggered.
		 */
		misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
		if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) {
			misc |= REG_DMA_MISC_INT_AUTO_CLEAR;
			iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
		}

		ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
		step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
		shift = index % REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
		ivr = ioread32(ivr_base + step);
		ivr &= ~(REG_INT_VEC_ALLOC_MASK << shift);
		if (active)
			ivr |= ring->vector << shift;
		iowrite32(ivr, ivr_base + step);
	}

	old = ioread32(ring->nhi->iobase + reg);
	if (active)
		new = old | mask;
	else
		new = old & ~mask;

	dev_info(&ring->nhi->pdev->dev,
		 "%s interrupt at register %#x bit %d (%#x -> %#x)\n",
		 active ? "enabling" : "disabling", reg, bit, old, new);

	if (new == old)
		dev_WARN(&ring->nhi->pdev->dev,
			 "interrupt for %s %d is already %s\n",
			 RING_TYPE(ring), ring->hop,
			 active ? "enabled" : "disabled");
	iowrite32(new, ring->nhi->iobase + reg);
}

/**
 * nhi_disable_interrupts() - disable interrupts for all rings
 *
 * Use only during init and shutdown.
 */
static void nhi_disable_interrupts(struct tb_nhi *nhi)
{
	int i = 0;
	/* disable interrupts */
	for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
		iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);

	/* clear interrupt status bits */
	for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
		ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
}

/* ring helper methods */

static void __iomem *ring_desc_base(struct tb_ring *ring)
{
	void __iomem *io = ring->nhi->iobase;
	io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE;
	io += ring->hop * 16;
	return io;
}

static void __iomem *ring_options_base(struct tb_ring *ring)
{
	void __iomem *io = ring->nhi->iobase;
	io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE;
	io += ring->hop * 32;
	return io;
}

static void ring_iowrite16desc(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite16(value, ring_desc_base(ring) + offset);
}

static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite32(value, ring_desc_base(ring) + offset);
}

static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset)
{
	iowrite32(value, ring_desc_base(ring) + offset);
	iowrite32(value >> 32, ring_desc_base(ring) + offset + 4);
}

static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite32(value, ring_options_base(ring) + offset);
}

static bool ring_full(struct tb_ring *ring)
{
	return ((ring->head + 1) % ring->size) == ring->tail;
}

static bool ring_empty(struct tb_ring *ring)
{
	return ring->head == ring->tail;
}
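
/*
 * Note that with this scheme one descriptor slot is always left unused:
 * a full ring has head one step behind tail, which keeps it
 * distinguishable from an empty ring (head == tail). A ring of size 16
 * therefore holds at most 15 frames in flight.
 */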

/**
 * ring_write_descriptors() - post frames from ring->queue to the controller
 *
 * ring->lock is held.
 */
static void ring_write_descriptors(struct tb_ring *ring)
{
	struct ring_frame *frame, *n;
	struct ring_desc *descriptor;
	list_for_each_entry_safe(frame, n, &ring->queue, list) {
		if (ring_full(ring))
			break;
		list_move_tail(&frame->list, &ring->in_flight);
		descriptor = &ring->descriptors[ring->head];
		descriptor->phys = frame->buffer_phy;
		descriptor->time = 0;
		descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT;
		if (ring->is_tx) {
			descriptor->length = frame->size;
			descriptor->eof = frame->eof;
			descriptor->sof = frame->sof;
		}
		ring->head = (ring->head + 1) % ring->size;
		ring_iowrite16desc(ring, ring->head, ring->is_tx ? 10 : 8);
	}
}

/**
 * ring_work() - progress completed frames
 *
 * If the ring is shutting down then all frames are marked as canceled and
 * their callbacks are invoked.
 *
 * Otherwise we collect all completed frames from the ring buffer, write new
 * frames to the ring buffer and invoke the callbacks for the completed frames.
 */
static void ring_work(struct work_struct *work)
{
	struct tb_ring *ring = container_of(work, typeof(*ring), work);
	struct ring_frame *frame;
	bool canceled = false;
	unsigned long flags;
	LIST_HEAD(done);

	spin_lock_irqsave(&ring->lock, flags);

	if (!ring->running) {
		/* Move all frames to done and mark them as canceled. */
		list_splice_tail_init(&ring->in_flight, &done);
		list_splice_tail_init(&ring->queue, &done);
		canceled = true;
		goto invoke_callback;
	}

	while (!ring_empty(ring)) {
		if (!(ring->descriptors[ring->tail].flags
				& RING_DESC_COMPLETED))
			break;
		frame = list_first_entry(&ring->in_flight, typeof(*frame),
					 list);
		list_move_tail(&frame->list, &done);
		if (!ring->is_tx) {
			frame->size = ring->descriptors[ring->tail].length;
			frame->eof = ring->descriptors[ring->tail].eof;
			frame->sof = ring->descriptors[ring->tail].sof;
			frame->flags = ring->descriptors[ring->tail].flags;
		}
		ring->tail = (ring->tail + 1) % ring->size;
	}
	ring_write_descriptors(ring);

invoke_callback:
	/* allow callbacks to schedule new work */
	spin_unlock_irqrestore(&ring->lock, flags);
	while (!list_empty(&done)) {
		frame = list_first_entry(&done, typeof(*frame), list);
		/*
		 * The callback may reenqueue or delete the frame.
		 * Do not hold on to it.
		 */
		list_del_init(&frame->list);
		if (frame->callback)
			frame->callback(ring, frame, canceled);
	}
}

int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ring->lock, flags);
	if (ring->running) {
		list_add_tail(&frame->list, &ring->queue);
		ring_write_descriptors(ring);
	} else {
		ret = -ESHUTDOWN;
	}
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(__tb_ring_enqueue);
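
/*
 * An illustrative sketch of how a client submits a frame (hypothetical
 * caller; the tb_ring_rx()/tb_ring_tx() wrappers funnel into
 * __tb_ring_enqueue()):
 *
 *	static void my_frame_done(struct tb_ring *ring,
 *				  struct ring_frame *frame, bool canceled)
 *	{
 *		// For Rx, frame->size/eof/sof are valid unless canceled
 *	}
 *
 *	frame->buffer_phy = dma_addr;		// DMA-mapped buffer
 *	frame->callback = my_frame_done;
 *	ret = __tb_ring_enqueue(ring, frame);	// -ESHUTDOWN if stopped
 */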

/**
 * tb_ring_poll() - Poll one completed frame from the ring
 * @ring: Ring to poll
 *
 * This function can be called when @start_poll callback of the @ring
 * has been called. It will read one completed frame from the ring and
 * return it to the caller. Returns %NULL if there are no more
 * completed frames.
 */
struct ring_frame *tb_ring_poll(struct tb_ring *ring)
{
	struct ring_frame *frame = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	if (!ring->running)
		goto unlock;
	if (ring_empty(ring))
		goto unlock;

	if (ring->descriptors[ring->tail].flags & RING_DESC_COMPLETED) {
		frame = list_first_entry(&ring->in_flight, typeof(*frame),
					 list);
		list_del_init(&frame->list);

		if (!ring->is_tx) {
			frame->size = ring->descriptors[ring->tail].length;
			frame->eof = ring->descriptors[ring->tail].eof;
			frame->sof = ring->descriptors[ring->tail].sof;
			frame->flags = ring->descriptors[ring->tail].flags;
		}

		ring->tail = (ring->tail + 1) % ring->size;
	}

unlock:
	spin_unlock_irqrestore(&ring->lock, flags);
	return frame;
}
EXPORT_SYMBOL_GPL(tb_ring_poll);

static void __ring_interrupt_mask(struct tb_ring *ring, bool mask)
{
	int idx = ring_interrupt_index(ring);
	int reg = REG_RING_INTERRUPT_BASE + idx / 32 * 4;
	int bit = idx % 32;
	u32 val;

	val = ioread32(ring->nhi->iobase + reg);
	if (mask)
		val &= ~BIT(bit);
	else
		val |= BIT(bit);
	iowrite32(val, ring->nhi->iobase + reg);
}

/* Both @nhi->lock and @ring->lock should be held */
static void __ring_interrupt(struct tb_ring *ring)
{
	if (!ring->running)
		return;

	if (ring->start_poll) {
		/*
		 * Mask the interrupt; the client re-enables it with
		 * tb_ring_poll_complete() once it is done polling.
		 */
		__ring_interrupt_mask(ring, true);
		ring->start_poll(ring->poll_data);
	} else {
		schedule_work(&ring->work);
	}
}

/**
 * tb_ring_poll_complete() - Re-start interrupt for the ring
 * @ring: Ring to re-start the interrupt
 *
 * This will re-start (unmask) the ring interrupt once the user is done
 * with polling.
 */
void tb_ring_poll_complete(struct tb_ring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->nhi->lock, flags);
	spin_lock(&ring->lock);
	if (ring->start_poll)
		__ring_interrupt_mask(ring, false);
	spin_unlock(&ring->lock);
	spin_unlock_irqrestore(&ring->nhi->lock, flags);
}
EXPORT_SYMBOL_GPL(tb_ring_poll_complete);
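
/*
 * A minimal usage sketch of the polling mode (hypothetical consumer;
 * the my_* names are illustrative only):
 *
 *	static void my_start_poll(void *data)
 *	{
 *		struct my_ctx *ctx = data;
 *
 *		// Called in hard irq context with the interrupt masked;
 *		// defer the actual work.
 *		schedule_work(&ctx->rx_work);
 *	}
 *
 *	static void my_rx_work(struct work_struct *work)
 *	{
 *		struct my_ctx *ctx = container_of(work, typeof(*ctx), rx_work);
 *		struct ring_frame *frame;
 *
 *		while ((frame = tb_ring_poll(ctx->ring)))
 *			my_consume_frame(ctx, frame);	// hypothetical helper
 *		tb_ring_poll_complete(ctx->ring);	// unmask the interrupt
 *	}
 */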

static irqreturn_t ring_msix(int irq, void *data)
{
	struct tb_ring *ring = data;

	spin_lock(&ring->nhi->lock);
	spin_lock(&ring->lock);
	__ring_interrupt(ring);
	spin_unlock(&ring->lock);
	spin_unlock(&ring->nhi->lock);

	return IRQ_HANDLED;
}

static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
{
	struct tb_nhi *nhi = ring->nhi;
	unsigned long irqflags;
	int ret;

	if (!nhi->pdev->msix_enabled)
		return 0;

	ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
	if (ret < 0)
		return ret;

	ring->vector = ret;

	ring->irq = pci_irq_vector(ring->nhi->pdev, ring->vector);
	if (ring->irq < 0)
		return ring->irq;

	irqflags = no_suspend ? IRQF_NO_SUSPEND : 0;
	return request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
}

static void ring_release_msix(struct tb_ring *ring)
{
	if (ring->irq <= 0)
		return;

	free_irq(ring->irq, ring);
	ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
	ring->vector = 0;
	ring->irq = 0;
}

static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
				     bool transmit, unsigned int flags,
				     u16 sof_mask, u16 eof_mask,
				     void (*start_poll)(void *),
				     void *poll_data)
{
	struct tb_ring *ring = NULL;
	dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
		 transmit ? "TX" : "RX", hop, size);

	/* Tx Ring 2 is reserved for E2E workaround */
	if (transmit && hop == RING_E2E_UNUSED_HOPID)
		return NULL;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return NULL;

	spin_lock_init(&ring->lock);
	INIT_LIST_HEAD(&ring->queue);
	INIT_LIST_HEAD(&ring->in_flight);
	INIT_WORK(&ring->work, ring_work);

	ring->nhi = nhi;
	ring->hop = hop;
	ring->is_tx = transmit;
	ring->size = size;
	ring->flags = flags;
	ring->sof_mask = sof_mask;
	ring->eof_mask = eof_mask;
	ring->head = 0;
	ring->tail = 0;
	ring->running = false;
	ring->start_poll = start_poll;
	ring->poll_data = poll_data;

	ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
			size * sizeof(*ring->descriptors),
			&ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
	if (!ring->descriptors)
		goto err_free_ring;

	if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
		goto err_free_descs;

	spin_lock_irq(&nhi->lock);
	if (hop >= nhi->hop_count) {
		dev_WARN(&nhi->pdev->dev, "invalid hop: %d\n", hop);
		goto err_release_msix;
	}
	if (transmit && nhi->tx_rings[hop]) {
		dev_WARN(&nhi->pdev->dev, "TX hop %d already allocated\n", hop);
		goto err_release_msix;
	} else if (!transmit && nhi->rx_rings[hop]) {
		dev_WARN(&nhi->pdev->dev, "RX hop %d already allocated\n", hop);
		goto err_release_msix;
	}
	if (transmit)
		nhi->tx_rings[hop] = ring;
	else
		nhi->rx_rings[hop] = ring;
	spin_unlock_irq(&nhi->lock);

	return ring;

err_release_msix:
	spin_unlock_irq(&nhi->lock);
	ring_release_msix(ring);
err_free_descs:
	dma_free_coherent(&ring->nhi->pdev->dev,
			  ring->size * sizeof(*ring->descriptors),
			  ring->descriptors, ring->descriptors_dma);
err_free_ring:
	kfree(ring);

	return NULL;
}

/**
 * tb_ring_alloc_tx() - Allocate DMA ring for transmit
 * @nhi: Pointer to the NHI the ring is to be allocated
 * @hop: HopID (ring) to allocate
 * @size: Number of entries in the ring
 * @flags: Flags for the ring
 */
struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags)
{
	return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(tb_ring_alloc_tx);

/**
 * tb_ring_alloc_rx() - Allocate DMA ring for receive
 * @nhi: Pointer to the NHI the ring is to be allocated
 * @hop: HopID (ring) to allocate
 * @size: Number of entries in the ring
 * @flags: Flags for the ring
 * @sof_mask: Mask of PDF values that start a frame
 * @eof_mask: Mask of PDF values that end a frame
 * @start_poll: If not %NULL the ring will call this function when an
 *		interrupt is triggered and masked, instead of invoking
 *		the callback for each received frame.
 * @poll_data: Optional data passed to @start_poll
 */
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags, u16 sof_mask, u16 eof_mask,
				 void (*start_poll)(void *), void *poll_data)
{
	return tb_ring_alloc(nhi, hop, size, false, flags, sof_mask, eof_mask,
			     start_poll, poll_data);
}
EXPORT_SYMBOL_GPL(tb_ring_alloc_rx);
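
/*
 * Example call (hypothetical hop/size/mask values): allocate a raw Rx
 * ring that treats PDF value 1 as both start and end of frame and uses
 * the polling mode sketched above instead of per-frame callbacks (pass
 * NULL, NULL for the last two arguments to use callbacks instead):
 *
 *	ring = tb_ring_alloc_rx(nhi, 1, 256, RING_FLAG_FRAME,
 *				BIT(1), BIT(1), my_start_poll, ctx);
 */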

/**
 * tb_ring_start() - enable a ring
 *
 * Must not be invoked in parallel with tb_ring_stop().
 */
void tb_ring_start(struct tb_ring *ring)
{
	u16 frame_size;
	u32 flags;

	spin_lock_irq(&ring->nhi->lock);
	spin_lock(&ring->lock);
	if (ring->nhi->going_away)
		goto err;
	if (ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
		goto err;
	}
	dev_info(&ring->nhi->pdev->dev, "starting %s %d\n",
		 RING_TYPE(ring), ring->hop);

	if (ring->flags & RING_FLAG_FRAME) {
		/* A frame size of 0 means 4096 bytes */
		frame_size = 0;
		flags = RING_FLAG_ENABLE;
	} else {
		frame_size = TB_FRAME_SIZE;
		flags = RING_FLAG_ENABLE | RING_FLAG_RAW;
	}

	if (ring->flags & RING_FLAG_E2E && !ring->is_tx) {
		u32 hop;

		/*
		 * In order not to lose Rx packets we enable the end-to-end
		 * workaround, which transfers Rx credits to an unused Tx
		 * HopID.
		 */
		hop = RING_E2E_UNUSED_HOPID << REG_RX_OPTIONS_E2E_HOP_SHIFT;
		hop &= REG_RX_OPTIONS_E2E_HOP_MASK;
		flags |= hop | RING_FLAG_E2E_FLOW_CONTROL;
	}

	ring_iowrite64desc(ring, ring->descriptors_dma, 0);
	if (ring->is_tx) {
		ring_iowrite32desc(ring, ring->size, 12);
		ring_iowrite32options(ring, 0, 4); /* time related? */
		ring_iowrite32options(ring, flags, 0);
	} else {
		u32 sof_eof_mask = ring->sof_mask << 16 | ring->eof_mask;

		ring_iowrite32desc(ring, (frame_size << 16) | ring->size, 12);
		ring_iowrite32options(ring, sof_eof_mask, 4);
		ring_iowrite32options(ring, flags, 0);
	}
	ring_interrupt_active(ring, true);
	ring->running = true;
err:
	spin_unlock(&ring->lock);
	spin_unlock_irq(&ring->nhi->lock);
}
EXPORT_SYMBOL_GPL(tb_ring_start);

/**
 * tb_ring_stop() - shutdown a ring
 *
 * Must not be invoked from a callback.
 *
 * This method will disable the ring. Further calls to
 * tb_ring_tx/tb_ring_rx will return -ESHUTDOWN until the ring is
 * restarted with tb_ring_start().
 *
 * All enqueued frames will be canceled and their callbacks will be executed
 * with frame->canceled set to true (on the callback thread). This method
 * returns only after all callback invocations have finished.
 */
void tb_ring_stop(struct tb_ring *ring)
{
	spin_lock_irq(&ring->nhi->lock);
	spin_lock(&ring->lock);
	dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n",
		 RING_TYPE(ring), ring->hop);
	if (ring->nhi->going_away)
		goto err;
	if (!ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
			 RING_TYPE(ring), ring->hop);
		goto err;
	}
	ring_interrupt_active(ring, false);

	ring_iowrite32options(ring, 0, 0);
	ring_iowrite64desc(ring, 0, 0);
	ring_iowrite16desc(ring, 0, ring->is_tx ? 10 : 8);
	ring_iowrite32desc(ring, 0, 12);
	ring->head = 0;
	ring->tail = 0;
	ring->running = false;

err:
	spin_unlock(&ring->lock);
	spin_unlock_irq(&ring->nhi->lock);

	/*
	 * schedule ring->work to invoke callbacks on all remaining frames.
	 */
	schedule_work(&ring->work);
	flush_work(&ring->work);
}
EXPORT_SYMBOL_GPL(tb_ring_stop);

/**
 * tb_ring_free() - free ring
 *
 * When this method returns all invocations of ring->callback will have
 * finished.
 *
 * Ring must be stopped.
 *
 * Must NOT be called from ring_frame->callback!
 */
void tb_ring_free(struct tb_ring *ring)
{
	spin_lock_irq(&ring->nhi->lock);
	/*
	 * Dissociate the ring from the NHI. This also ensures that
	 * nhi_interrupt_work cannot reschedule ring->work.
	 */
	if (ring->is_tx)
		ring->nhi->tx_rings[ring->hop] = NULL;
	else
		ring->nhi->rx_rings[ring->hop] = NULL;

	if (ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
			 RING_TYPE(ring), ring->hop);
	}
	spin_unlock_irq(&ring->nhi->lock);

	ring_release_msix(ring);

	dma_free_coherent(&ring->nhi->pdev->dev,
			  ring->size * sizeof(*ring->descriptors),
			  ring->descriptors, ring->descriptors_dma);

	ring->descriptors = NULL;
	ring->descriptors_dma = 0;

	dev_info(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring),
		 ring->hop);

	/*
	 * ring->work can no longer be scheduled (it is scheduled only
	 * by nhi_interrupt_work, tb_ring_stop and ring_msix). Wait for
	 * it to finish before freeing the ring.
	 */
	flush_work(&ring->work);
	kfree(ring);
}
EXPORT_SYMBOL_GPL(tb_ring_free);

/**
 * nhi_mailbox_cmd() - Send a command through NHI mailbox
 * @nhi: Pointer to the NHI structure
 * @cmd: Command to send
 * @data: Data to be sent with the command
 *
 * Sends a mailbox command to the firmware running on NHI. Returns %0 in
 * case of success and negative errno in case of failure.
 */
int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data)
{
	ktime_t timeout;
	u32 val;

	iowrite32(data, nhi->iobase + REG_INMAIL_DATA);

	val = ioread32(nhi->iobase + REG_INMAIL_CMD);
	val &= ~(REG_INMAIL_CMD_MASK | REG_INMAIL_ERROR);
	val |= REG_INMAIL_OP_REQUEST | cmd;
	iowrite32(val, nhi->iobase + REG_INMAIL_CMD);

	timeout = ktime_add_ms(ktime_get(), NHI_MAILBOX_TIMEOUT);
	do {
		val = ioread32(nhi->iobase + REG_INMAIL_CMD);
		if (!(val & REG_INMAIL_OP_REQUEST))
			break;
		usleep_range(10, 20);
	} while (ktime_before(ktime_get(), timeout));

	if (val & REG_INMAIL_OP_REQUEST)
		return -ETIMEDOUT;
	if (val & REG_INMAIL_ERROR)
		return -EIO;

	return 0;
}

/**
 * nhi_mailbox_mode() - Return current firmware operation mode
 * @nhi: Pointer to the NHI structure
 *
 * The function reads the current firmware operation mode using NHI
 * mailbox registers and returns it to the caller.
 */
enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi)
{
	u32 val;

	val = ioread32(nhi->iobase + REG_OUTMAIL_CMD);
	val &= REG_OUTMAIL_CMD_OPMODE_MASK;
	val >>= REG_OUTMAIL_CMD_OPMODE_SHIFT;

	return (enum nhi_fw_mode)val;
}

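/*
 * A hedged usage sketch for the two mailbox helpers (the command and
 * mode constants are assumed to come from nhi.h):
 *
 *	// Tell the firmware the driver is unloading and check which
 *	// operation mode the firmware ends up in.
 *	ret = nhi_mailbox_cmd(nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
 *	if (!ret && nhi_mailbox_mode(nhi) != NHI_FW_CM_MODE)
 *		...
 */
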
static void nhi_interrupt_work(struct work_struct *work)
{
	struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
	int value = 0; /* Suppress uninitialized usage warning. */
	int bit;
	int hop = -1;
	int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */
	struct tb_ring *ring;

	spin_lock_irq(&nhi->lock);

	/*
	 * Starting at REG_RING_NOTIFY_BASE there are three status bitfields
	 * (TX, RX, RX overflow). We iterate over the bits and read new
	 * dwords as required. The registers are cleared on read.
	 */
	for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
		if (bit % 32 == 0)
			value = ioread32(nhi->iobase
					 + REG_RING_NOTIFY_BASE
					 + 4 * (bit / 32));
		if (++hop == nhi->hop_count) {
			hop = 0;
			type++;
		}
		if ((value & (1 << (bit % 32))) == 0)
			continue;
		if (type == 2) {
			dev_warn(&nhi->pdev->dev,
				 "RX overflow for ring %d\n",
				 hop);
			continue;
		}
		if (type == 0)
			ring = nhi->tx_rings[hop];
		else
			ring = nhi->rx_rings[hop];
		if (ring == NULL) {
			dev_warn(&nhi->pdev->dev,
				 "got interrupt for inactive %s ring %d\n",
				 type ? "RX" : "TX",
				 hop);
			continue;
		}

		spin_lock(&ring->lock);
		__ring_interrupt(ring);
		spin_unlock(&ring->lock);
	}
	spin_unlock_irq(&nhi->lock);
}

static irqreturn_t nhi_msi(int irq, void *data)
{
	struct tb_nhi *nhi = data;
	schedule_work(&nhi->interrupt_work);
	return IRQ_HANDLED;
}

static int nhi_suspend_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	return tb_domain_suspend_noirq(tb);
}

static void nhi_enable_int_throttling(struct tb_nhi *nhi)
{
	/* Throttling is specified in 256ns increments */
	u32 throttle = DIV_ROUND_UP(128 * NSEC_PER_USEC, 256);
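	/* i.e. 128 us works out to DIV_ROUND_UP(128000, 256) == 500 units */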
	unsigned int i;

	/*
	 * Configure interrupt throttling for all vectors even if we
	 * only use a few.
	 */
	for (i = 0; i < MSIX_MAX_VECS; i++) {
		u32 reg = REG_INT_THROTTLING_RATE + i * 4;
		iowrite32(throttle, nhi->iobase + reg);
	}
}

static int nhi_resume_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	/*
	 * Check that the device is still there. It may be that the user
	 * unplugged the last device which causes the host controller to
	 * go away on PCs.
	 */
	if (!pci_device_is_present(pdev))
		tb->nhi->going_away = true;
	else
		nhi_enable_int_throttling(tb->nhi);

	return tb_domain_resume_noirq(tb);
}

static int nhi_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	return tb_domain_suspend(tb);
}

static void nhi_complete(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	tb_domain_complete(tb);
}

static void nhi_shutdown(struct tb_nhi *nhi)
{
	int i;
	dev_info(&nhi->pdev->dev, "shutdown\n");

	for (i = 0; i < nhi->hop_count; i++) {
		if (nhi->tx_rings[i])
			dev_WARN(&nhi->pdev->dev,
				 "TX ring %d is still active\n", i);
		if (nhi->rx_rings[i])
			dev_WARN(&nhi->pdev->dev,
				 "RX ring %d is still active\n", i);
	}
	nhi_disable_interrupts(nhi);
	/*
	 * We have to release the irq before calling flush_work. Otherwise an
	 * already executing IRQ handler could call schedule_work again.
	 */
	if (!nhi->pdev->msix_enabled) {
		devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
		flush_work(&nhi->interrupt_work);
	}
	ida_destroy(&nhi->msix_ida);
}

static int nhi_init_msi(struct tb_nhi *nhi)
{
	struct pci_dev *pdev = nhi->pdev;
	int res, irq, nvec;

	/* In case someone left them on. */
	nhi_disable_interrupts(nhi);

	nhi_enable_int_throttling(nhi);

	ida_init(&nhi->msix_ida);

	/*
	 * The NHI has 16 MSI-X vectors or a single MSI. We first try to
	 * get all MSI-X vectors and if we succeed, each ring will have
	 * one MSI-X. If for some reason that does not work out, we
	 * fall back to a single MSI.
	 */
	nvec = pci_alloc_irq_vectors(pdev, MSIX_MIN_VECS, MSIX_MAX_VECS,
				     PCI_IRQ_MSIX);
	if (nvec < 0) {
		nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
		if (nvec < 0)
			return nvec;

		INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);

		irq = pci_irq_vector(nhi->pdev, 0);
		if (irq < 0)
			return irq;

		res = devm_request_irq(&pdev->dev, irq, nhi_msi,
				       IRQF_NO_SUSPEND, "thunderbolt", nhi);
		if (res) {
			dev_err(&pdev->dev, "request_irq failed, aborting\n");
			return res;
		}
	}

	return 0;
}

static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct tb_nhi *nhi;
	struct tb *tb;
	int res;

	res = pcim_enable_device(pdev);
	if (res) {
		dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
		return res;
	}

	res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
	if (res) {
		dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
		return res;
	}

	nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
	if (!nhi)
		return -ENOMEM;

	nhi->pdev = pdev;
	/* cannot fail - table is allocated in pcim_iomap_regions() */
	nhi->iobase = pcim_iomap_table(pdev)[0];
	nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
	if (nhi->hop_count != 12 && nhi->hop_count != 32)
		dev_warn(&pdev->dev, "unexpected hop count: %d\n",
			 nhi->hop_count);

	nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
				     sizeof(*nhi->tx_rings), GFP_KERNEL);
	nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
				     sizeof(*nhi->rx_rings), GFP_KERNEL);
	if (!nhi->tx_rings || !nhi->rx_rings)
		return -ENOMEM;

	res = nhi_init_msi(nhi);
	if (res) {
		dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
		return res;
	}

	spin_lock_init(&nhi->lock);

	pci_set_master(pdev);

	tb = icm_probe(nhi);
	if (!tb)
		tb = tb_probe(nhi);
	if (!tb) {
		dev_err(&nhi->pdev->dev,
			"failed to determine connection manager, aborting\n");
		return -ENODEV;
	}

	dev_info(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");

	res = tb_domain_add(tb);
	if (res) {
		/*
		 * At this point the RX/TX rings might already have been
		 * activated. Do a proper shutdown.
		 */
		tb_domain_put(tb);
		nhi_shutdown(nhi);
		return -EIO;
	}
	pci_set_drvdata(pdev, tb);

	return 0;
}

static void nhi_remove(struct pci_dev *pdev)
{
	struct tb *tb = pci_get_drvdata(pdev);
	struct tb_nhi *nhi = tb->nhi;

	tb_domain_remove(tb);
	nhi_shutdown(nhi);
}
101216603153SAndreas Noever 
101323dd5bb4SAndreas Noever /*
101423dd5bb4SAndreas Noever  * The tunneled pci bridges are siblings of us. Use resume_noirq to reenable
101523dd5bb4SAndreas Noever  * the tunnels asap. A corresponding pci quirk blocks the downstream bridges
101623dd5bb4SAndreas Noever  * resume_noirq until we are done.
101723dd5bb4SAndreas Noever  */
101823dd5bb4SAndreas Noever static const struct dev_pm_ops nhi_pm_ops = {
101923dd5bb4SAndreas Noever 	.suspend_noirq = nhi_suspend_noirq,
102023dd5bb4SAndreas Noever 	.resume_noirq = nhi_resume_noirq,
102123dd5bb4SAndreas Noever 	.freeze_noirq = nhi_suspend_noirq, /*
102223dd5bb4SAndreas Noever 					    * we just disable hotplug, the
102323dd5bb4SAndreas Noever 					    * pci-tunnels stay alive.
102423dd5bb4SAndreas Noever 					    */
102523dd5bb4SAndreas Noever 	.restore_noirq = nhi_resume_noirq,
1026f67cf491SMika Westerberg 	.suspend = nhi_suspend,
1027f67cf491SMika Westerberg 	.freeze = nhi_suspend,
1028f67cf491SMika Westerberg 	.poweroff = nhi_suspend,
1029f67cf491SMika Westerberg 	.complete = nhi_complete,
103023dd5bb4SAndreas Noever };

static struct pci_device_id nhi_ids[] = {
	/*
	 * We have to specify the class; the TB bridges use the same device
	 * and vendor (sub)IDs on gen 1 and gen 2 controllers.
	 */
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
		.subvendor = 0x2222, .subdevice = 0x1111,
	},
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
		.subvendor = 0x2222, .subdevice = 0x1111,
	},
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI,
		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	},
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI,
		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	},

	/* Thunderbolt 3 */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) },

	{ 0,}
};

MODULE_DEVICE_TABLE(pci, nhi_ids);
MODULE_LICENSE("GPL");

static struct pci_driver nhi_driver = {
	.name = "thunderbolt",
	.id_table = nhi_ids,
	.probe = nhi_probe,
	.remove = nhi_remove,
	.driver.pm = &nhi_pm_ops,
};

static int __init nhi_init(void)
{
	int ret;

	ret = tb_domain_init();
	if (ret)
		return ret;
	ret = pci_register_driver(&nhi_driver);
	if (ret)
		tb_domain_exit();
	return ret;
}

static void __exit nhi_unload(void)
{
	pci_unregister_driver(&nhi_driver);
	tb_domain_exit();
}

module_init(nhi_init);
module_exit(nhi_unload);