xref: /openbmc/linux/drivers/thunderbolt/nhi.c (revision 943795219d3cb9f8ce6ce51cad3ffe1f61e95c6b)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Thunderbolt driver - NHI driver
 *
 * The NHI (native host interface) is the pci device that allows us to send and
 * receive frames from the thunderbolt bus.
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/delay.h>

#include "nhi.h"
#include "nhi_regs.h"
#include "tb.h"

#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")

/*
 * Used to enable end-to-end workaround for missing RX packets. Do not
 * use this ring for anything else.
 */
#define RING_E2E_UNUSED_HOPID	2
#define RING_FIRST_USABLE_HOPID	TB_PATH_MIN_HOPID

/*
 * Minimal number of vectors when we use MSI-X. Two are for the control
 * channel Rx/Tx and the remaining four are for cross-domain DMA paths.
 */
#define MSIX_MIN_VECS		6
#define MSIX_MAX_VECS		16

#define NHI_MAILBOX_TIMEOUT	500 /* ms */

static int ring_interrupt_index(struct tb_ring *ring)
{
	int bit = ring->hop;
	if (!ring->is_tx)
		bit += ring->nhi->hop_count;
	return bit;
}
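
/*
 * Interrupt bits are laid out with all TX rings first, then all RX
 * rings: TX ring N uses bit N and RX ring N uses bit hop_count + N.
 * For example, with hop_count == 12, RX ring 3 maps to bit 15 of the
 * first 32-bit interrupt register.
 */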

/**
 * ring_interrupt_active() - activate/deactivate interrupts for a single ring
 *
 * ring->nhi->lock must be held.
 */
static void ring_interrupt_active(struct tb_ring *ring, bool active)
{
	int reg = REG_RING_INTERRUPT_BASE +
		  ring_interrupt_index(ring) / 32 * 4;
	int bit = ring_interrupt_index(ring) & 31;
	int mask = 1 << bit;
	u32 old, new;

	if (ring->irq > 0) {
		u32 step, shift, ivr, misc;
		void __iomem *ivr_base;
		int index;

		if (ring->is_tx)
			index = ring->hop;
		else
			index = ring->hop + ring->nhi->hop_count;

		/*
		 * Ask the hardware to clear interrupt status bits automatically
		 * since we already know which interrupt was triggered.
		 */
		misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
		if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) {
			misc |= REG_DMA_MISC_INT_AUTO_CLEAR;
			iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
		}

		ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
		step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
		shift = index % REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
		ivr = ioread32(ivr_base + step);
		ivr &= ~(REG_INT_VEC_ALLOC_MASK << shift);
		if (active)
			ivr |= ring->vector << shift;
		iowrite32(ivr, ivr_base + step);
	}

	old = ioread32(ring->nhi->iobase + reg);
	if (active)
		new = old | mask;
	else
		new = old & ~mask;

	dev_dbg(&ring->nhi->pdev->dev,
		"%s interrupt at register %#x bit %d (%#x -> %#x)\n",
		active ? "enabling" : "disabling", reg, bit, old, new);

	if (new == old)
		dev_WARN(&ring->nhi->pdev->dev,
			 "interrupt for %s %d is already %s\n",
			 RING_TYPE(ring), ring->hop,
			 active ? "enabled" : "disabled");
	iowrite32(new, ring->nhi->iobase + reg);
}

/**
 * nhi_disable_interrupts() - disable interrupts for all rings
 *
 * Use only during init and shutdown.
 */
static void nhi_disable_interrupts(struct tb_nhi *nhi)
{
	int i = 0;
	/* disable interrupts */
	for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
		iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);

	/* clear interrupt status bits */
	for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
		ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
}

/* ring helper methods */

static void __iomem *ring_desc_base(struct tb_ring *ring)
{
	void __iomem *io = ring->nhi->iobase;
	io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE;
	io += ring->hop * 16;
	return io;
}

static void __iomem *ring_options_base(struct tb_ring *ring)
{
	void __iomem *io = ring->nhi->iobase;
	io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE;
	io += ring->hop * 32;
	return io;
}

static void ring_iowrite_cons(struct tb_ring *ring, u16 cons)
{
	/*
	 * The other 16 bits in the register are read-only and writes to
	 * them are ignored by the hardware, so we can save one ioread32()
	 * by filling the read-only bits with zeroes.
	 */
	iowrite32(cons, ring_desc_base(ring) + 8);
}

static void ring_iowrite_prod(struct tb_ring *ring, u16 prod)
{
	/* See ring_iowrite_cons() above for explanation */
	iowrite32(prod << 16, ring_desc_base(ring) + 8);
}
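
/*
 * Producer and consumer indexes share the single 32-bit register at
 * offset 8 of the ring descriptor block: the consumer index lives in
 * the low 16 bits and the producer index in the high 16 bits. Software
 * owns only one half per ring direction (producer for TX, consumer for
 * RX); the other half is read-only and maintained by the hardware.
 */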

static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite32(value, ring_desc_base(ring) + offset);
}

static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset)
{
	iowrite32(value, ring_desc_base(ring) + offset);
	iowrite32(value >> 32, ring_desc_base(ring) + offset + 4);
}

static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite32(value, ring_options_base(ring) + offset);
}

static bool ring_full(struct tb_ring *ring)
{
	return ((ring->head + 1) % ring->size) == ring->tail;
}

static bool ring_empty(struct tb_ring *ring)
{
	return ring->head == ring->tail;
}
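
/*
 * head == tail is reserved to mean "empty", so one descriptor always
 * stays unused: a ring of @size entries holds at most size - 1 posted
 * frames at any time.
 */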

/**
 * ring_write_descriptors() - post frames from ring->queue to the controller
 *
 * ring->lock is held.
 */
static void ring_write_descriptors(struct tb_ring *ring)
{
	struct ring_frame *frame, *n;
	struct ring_desc *descriptor;
	list_for_each_entry_safe(frame, n, &ring->queue, list) {
		if (ring_full(ring))
			break;
		list_move_tail(&frame->list, &ring->in_flight);
		descriptor = &ring->descriptors[ring->head];
		descriptor->phys = frame->buffer_phy;
		descriptor->time = 0;
		descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT;
		if (ring->is_tx) {
			descriptor->length = frame->size;
			descriptor->eof = frame->eof;
			descriptor->sof = frame->sof;
		}
		ring->head = (ring->head + 1) % ring->size;
		if (ring->is_tx)
			ring_iowrite_prod(ring, ring->head);
		else
			ring_iowrite_cons(ring, ring->head);
	}
}

/**
 * ring_work() - progress completed frames
 *
 * If the ring is shutting down then all frames are marked as canceled and
 * their callbacks are invoked.
 *
 * Otherwise we collect all completed frames from the ring buffer, write new
 * frames to the ring buffer and invoke the callbacks for the completed frames.
 */
static void ring_work(struct work_struct *work)
{
	struct tb_ring *ring = container_of(work, typeof(*ring), work);
	struct ring_frame *frame;
	bool canceled = false;
	unsigned long flags;
	LIST_HEAD(done);

	spin_lock_irqsave(&ring->lock, flags);

	if (!ring->running) {
		/* Move all frames to done and mark them as canceled. */
		list_splice_tail_init(&ring->in_flight, &done);
		list_splice_tail_init(&ring->queue, &done);
		canceled = true;
		goto invoke_callback;
	}

	while (!ring_empty(ring)) {
		if (!(ring->descriptors[ring->tail].flags
				& RING_DESC_COMPLETED))
			break;
		frame = list_first_entry(&ring->in_flight, typeof(*frame),
					 list);
		list_move_tail(&frame->list, &done);
		if (!ring->is_tx) {
			frame->size = ring->descriptors[ring->tail].length;
			frame->eof = ring->descriptors[ring->tail].eof;
			frame->sof = ring->descriptors[ring->tail].sof;
			frame->flags = ring->descriptors[ring->tail].flags;
		}
		ring->tail = (ring->tail + 1) % ring->size;
	}
	ring_write_descriptors(ring);

invoke_callback:
	/* allow callbacks to schedule new work */
	spin_unlock_irqrestore(&ring->lock, flags);
	while (!list_empty(&done)) {
		frame = list_first_entry(&done, typeof(*frame), list);
		/*
		 * The callback may reenqueue or delete frame.
		 * Do not hold on to it.
		 */
		list_del_init(&frame->list);
		if (frame->callback)
			frame->callback(ring, frame, canceled);
	}
}

int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ring->lock, flags);
	if (ring->running) {
		list_add_tail(&frame->list, &ring->queue);
		ring_write_descriptors(ring);
	} else {
		ret = -ESHUTDOWN;
	}
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(__tb_ring_enqueue);
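
/*
 * A minimal sketch of a transmit enqueue as a client might do it.
 * Illustrative only: dev, my_buf, len, sof_pdf, eof_pdf and
 * my_frame_done are hypothetical, and in-tree users go through the
 * tb_ring_tx()/tb_ring_rx() wrappers in include/linux/thunderbolt.h,
 * which call __tb_ring_enqueue(). The callback runs from ring_work()
 * once the descriptor completes (or when the ring is stopped, with
 * canceled set to true):
 *
 *	struct ring_frame frame = {
 *		.buffer_phy = dma_map_single(dev, my_buf, len, DMA_TO_DEVICE),
 *		.callback = my_frame_done,
 *		.size = len,
 *		.sof = sof_pdf,
 *		.eof = eof_pdf,
 *	};
 *
 *	ret = __tb_ring_enqueue(tx_ring, &frame);
 *
 * A non-zero return (-ESHUTDOWN) means the ring was already stopped.
 */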

/**
 * tb_ring_poll() - Poll one completed frame from the ring
 * @ring: Ring to poll
 *
 * This function can be called when the @start_poll callback of the
 * @ring has been called. It will read one completed frame from the ring
 * and return it to the caller. Returns %NULL if there are no more
 * completed frames.
 */
struct ring_frame *tb_ring_poll(struct tb_ring *ring)
{
	struct ring_frame *frame = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	if (!ring->running)
		goto unlock;
	if (ring_empty(ring))
		goto unlock;

	if (ring->descriptors[ring->tail].flags & RING_DESC_COMPLETED) {
		frame = list_first_entry(&ring->in_flight, typeof(*frame),
					 list);
		list_del_init(&frame->list);

		if (!ring->is_tx) {
			frame->size = ring->descriptors[ring->tail].length;
			frame->eof = ring->descriptors[ring->tail].eof;
			frame->sof = ring->descriptors[ring->tail].sof;
			frame->flags = ring->descriptors[ring->tail].flags;
		}

		ring->tail = (ring->tail + 1) % ring->size;
	}

unlock:
	spin_unlock_irqrestore(&ring->lock, flags);
	return frame;
}
EXPORT_SYMBOL_GPL(tb_ring_poll);

static void __ring_interrupt_mask(struct tb_ring *ring, bool mask)
{
	int idx = ring_interrupt_index(ring);
	int reg = REG_RING_INTERRUPT_BASE + idx / 32 * 4;
	int bit = idx % 32;
	u32 val;

	val = ioread32(ring->nhi->iobase + reg);
	if (mask)
		val &= ~BIT(bit);
	else
		val |= BIT(bit);
	iowrite32(val, ring->nhi->iobase + reg);
}

/* Both @nhi->lock and @ring->lock should be held */
static void __ring_interrupt(struct tb_ring *ring)
{
	if (!ring->running)
		return;

	if (ring->start_poll) {
		__ring_interrupt_mask(ring, true);
		ring->start_poll(ring->poll_data);
	} else {
		schedule_work(&ring->work);
	}
}

/**
 * tb_ring_poll_complete() - Re-start interrupt for the ring
 * @ring: Ring to re-start the interrupt
 *
 * This will re-start (unmask) the ring interrupt once the user is done
 * with polling.
 */
void tb_ring_poll_complete(struct tb_ring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->nhi->lock, flags);
	spin_lock(&ring->lock);
	if (ring->start_poll)
		__ring_interrupt_mask(ring, false);
	spin_unlock(&ring->lock);
	spin_unlock_irqrestore(&ring->nhi->lock, flags);
}
EXPORT_SYMBOL_GPL(tb_ring_poll_complete);
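
/*
 * Together with @start_poll these helpers enable a NAPI-style flow.
 * A sketch, assuming a hypothetical client with my_start_poll() that
 * defers to my_poll_work() (the in-tree Thunderbolt networking driver
 * follows the same pattern from its NAPI poll function):
 *
 *	ring = tb_ring_alloc_rx(nhi, -1, 256, flags, sof_mask, eof_mask,
 *				my_start_poll, priv);
 *
 *	my_poll_work():
 *		while ((frame = tb_ring_poll(ring)) != NULL)
 *			handle the completed frame
 *		tb_ring_poll_complete(ring);	unmask the interrupt again
 */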

static irqreturn_t ring_msix(int irq, void *data)
{
	struct tb_ring *ring = data;

	spin_lock(&ring->nhi->lock);
	spin_lock(&ring->lock);
	__ring_interrupt(ring);
	spin_unlock(&ring->lock);
	spin_unlock(&ring->nhi->lock);

	return IRQ_HANDLED;
}

static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
{
	struct tb_nhi *nhi = ring->nhi;
	unsigned long irqflags;
	int ret;

	if (!nhi->pdev->msix_enabled)
		return 0;

	ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
	if (ret < 0)
		return ret;

	ring->vector = ret;

	ring->irq = pci_irq_vector(ring->nhi->pdev, ring->vector);
	if (ring->irq < 0)
		return ring->irq;

	irqflags = no_suspend ? IRQF_NO_SUSPEND : 0;
	return request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
}

static void ring_release_msix(struct tb_ring *ring)
{
	if (ring->irq <= 0)
		return;

	free_irq(ring->irq, ring);
	ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
	ring->vector = 0;
	ring->irq = 0;
}

static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
{
	int ret = 0;

	spin_lock_irq(&nhi->lock);

	if (ring->hop < 0) {
		unsigned int i;

		/*
		 * Automatically allocate HopID from the non-reserved
		 * range 8 .. hop_count - 1.
		 */
		for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) {
			if (ring->is_tx) {
				if (!nhi->tx_rings[i]) {
					ring->hop = i;
					break;
				}
			} else {
				if (!nhi->rx_rings[i]) {
					ring->hop = i;
					break;
				}
			}
		}
	}

	if (ring->hop < 0 || ring->hop >= nhi->hop_count) {
		dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
		ret = -EINVAL;
		goto err_unlock;
	}
	if (ring->is_tx && nhi->tx_rings[ring->hop]) {
		dev_warn(&nhi->pdev->dev, "TX hop %d already allocated\n",
			 ring->hop);
		ret = -EBUSY;
		goto err_unlock;
	} else if (!ring->is_tx && nhi->rx_rings[ring->hop]) {
		dev_warn(&nhi->pdev->dev, "RX hop %d already allocated\n",
			 ring->hop);
		ret = -EBUSY;
		goto err_unlock;
	}

	if (ring->is_tx)
		nhi->tx_rings[ring->hop] = ring;
	else
		nhi->rx_rings[ring->hop] = ring;

err_unlock:
	spin_unlock_irq(&nhi->lock);

	return ret;
}

static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
				     bool transmit, unsigned int flags,
				     u16 sof_mask, u16 eof_mask,
				     void (*start_poll)(void *),
				     void *poll_data)
{
	struct tb_ring *ring = NULL;

	dev_dbg(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
		transmit ? "TX" : "RX", hop, size);

	/* Tx Ring 2 is reserved for E2E workaround */
	if (transmit && hop == RING_E2E_UNUSED_HOPID)
		return NULL;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return NULL;

	spin_lock_init(&ring->lock);
	INIT_LIST_HEAD(&ring->queue);
	INIT_LIST_HEAD(&ring->in_flight);
	INIT_WORK(&ring->work, ring_work);

	ring->nhi = nhi;
	ring->hop = hop;
	ring->is_tx = transmit;
	ring->size = size;
	ring->flags = flags;
	ring->sof_mask = sof_mask;
	ring->eof_mask = eof_mask;
	ring->head = 0;
	ring->tail = 0;
	ring->running = false;
	ring->start_poll = start_poll;
	ring->poll_data = poll_data;

	ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
			size * sizeof(*ring->descriptors),
			&ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
	if (!ring->descriptors)
		goto err_free_ring;

	if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
		goto err_free_descs;

	if (nhi_alloc_hop(nhi, ring))
		goto err_release_msix;

	return ring;

err_release_msix:
	ring_release_msix(ring);
err_free_descs:
	dma_free_coherent(&ring->nhi->pdev->dev,
			  ring->size * sizeof(*ring->descriptors),
			  ring->descriptors, ring->descriptors_dma);
err_free_ring:
	kfree(ring);

	return NULL;
}

/**
 * tb_ring_alloc_tx() - Allocate DMA ring for transmit
 * @nhi: Pointer to the NHI the ring is to be allocated from
 * @hop: HopID (ring) to allocate
 * @size: Number of entries in the ring
 * @flags: Flags for the ring
 */
struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags)
{
	return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(tb_ring_alloc_tx);

/**
 * tb_ring_alloc_rx() - Allocate DMA ring for receive
 * @nhi: Pointer to the NHI the ring is to be allocated from
 * @hop: HopID (ring) to allocate. Pass %-1 for automatic allocation.
 * @size: Number of entries in the ring
 * @flags: Flags for the ring
 * @sof_mask: Mask of PDF values that start a frame
 * @eof_mask: Mask of PDF values that end a frame
 * @start_poll: If not %NULL the ring will call this function when an
 *		interrupt is triggered and masked, instead of invoking
 *		the callback for each Rx frame.
 * @poll_data: Optional data passed to @start_poll
 */
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags, u16 sof_mask, u16 eof_mask,
				 void (*start_poll)(void *), void *poll_data)
{
	return tb_ring_alloc(nhi, hop, size, false, flags, sof_mask, eof_mask,
			     start_poll, poll_data);
}
EXPORT_SYMBOL_GPL(tb_ring_alloc_rx);
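
/*
 * A minimal allocation sketch (illustrative only; the sizes and flags
 * are the ones the control channel in ctl.c happens to use, with a
 * fixed HopID 0 and no frame polling):
 *
 *	tx = tb_ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
 *	rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND,
 *			      0xffff, 0xffff, NULL, NULL);
 *	if (!tx || !rx)
 *		bail out, calling tb_ring_free() on whichever succeeded
 */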

/**
 * tb_ring_start() - enable a ring
 *
 * Must not be invoked in parallel with tb_ring_stop().
 */
void tb_ring_start(struct tb_ring *ring)
{
	u16 frame_size;
	u32 flags;

	spin_lock_irq(&ring->nhi->lock);
	spin_lock(&ring->lock);
	if (ring->nhi->going_away)
		goto err;
	if (ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
		goto err;
	}
	dev_dbg(&ring->nhi->pdev->dev, "starting %s %d\n",
		RING_TYPE(ring), ring->hop);

	if (ring->flags & RING_FLAG_FRAME) {
		/* Means 4096 */
		frame_size = 0;
		flags = RING_FLAG_ENABLE;
	} else {
		frame_size = TB_FRAME_SIZE;
		flags = RING_FLAG_ENABLE | RING_FLAG_RAW;
	}

	if (ring->flags & RING_FLAG_E2E && !ring->is_tx) {
		u32 hop;

		/*
		 * In order not to lose Rx packets we enable end-to-end
		 * workaround which transfers Rx credits to an unused Tx
		 * HopID.
		 */
		hop = RING_E2E_UNUSED_HOPID << REG_RX_OPTIONS_E2E_HOP_SHIFT;
		hop &= REG_RX_OPTIONS_E2E_HOP_MASK;
		flags |= hop | RING_FLAG_E2E_FLOW_CONTROL;
	}

	ring_iowrite64desc(ring, ring->descriptors_dma, 0);
	if (ring->is_tx) {
		ring_iowrite32desc(ring, ring->size, 12);
		ring_iowrite32options(ring, 0, 4); /* time related? */
		ring_iowrite32options(ring, flags, 0);
	} else {
		u32 sof_eof_mask = ring->sof_mask << 16 | ring->eof_mask;

		ring_iowrite32desc(ring, (frame_size << 16) | ring->size, 12);
		ring_iowrite32options(ring, sof_eof_mask, 4);
		ring_iowrite32options(ring, flags, 0);
	}
	ring_interrupt_active(ring, true);
	ring->running = true;
err:
	spin_unlock(&ring->lock);
	spin_unlock_irq(&ring->nhi->lock);
}
EXPORT_SYMBOL_GPL(tb_ring_start);

/**
 * tb_ring_stop() - shutdown a ring
 *
 * Must not be invoked from a callback.
 *
 * This method will disable the ring. Further calls to
 * tb_ring_tx/tb_ring_rx will return -ESHUTDOWN until the ring is
 * started again with tb_ring_start().
 *
 * All enqueued frames will be canceled and their callbacks will be executed
 * with frame->canceled set to true (on the callback thread). This method
 * returns only after all callback invocations have finished.
 */
void tb_ring_stop(struct tb_ring *ring)
{
	spin_lock_irq(&ring->nhi->lock);
	spin_lock(&ring->lock);
	dev_dbg(&ring->nhi->pdev->dev, "stopping %s %d\n",
		RING_TYPE(ring), ring->hop);
	if (ring->nhi->going_away)
		goto err;
	if (!ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
			 RING_TYPE(ring), ring->hop);
		goto err;
	}
	ring_interrupt_active(ring, false);

	ring_iowrite32options(ring, 0, 0);
	ring_iowrite64desc(ring, 0, 0);
	ring_iowrite32desc(ring, 0, 8);
	ring_iowrite32desc(ring, 0, 12);
	ring->head = 0;
	ring->tail = 0;
	ring->running = false;

err:
	spin_unlock(&ring->lock);
	spin_unlock_irq(&ring->nhi->lock);

	/*
	 * schedule ring->work to invoke callbacks on all remaining frames.
	 */
	schedule_work(&ring->work);
	flush_work(&ring->work);
}
EXPORT_SYMBOL_GPL(tb_ring_stop);

/*
 * tb_ring_free() - free ring
 *
 * When this method returns all invocations of ring->callback will have
 * finished.
 *
 * Ring must be stopped.
 *
 * Must NOT be called from ring_frame->callback!
 */
void tb_ring_free(struct tb_ring *ring)
{
	spin_lock_irq(&ring->nhi->lock);
	/*
	 * Dissociate the ring from the NHI. This also ensures that
	 * nhi_interrupt_work cannot reschedule ring->work.
	 */
	if (ring->is_tx)
		ring->nhi->tx_rings[ring->hop] = NULL;
	else
		ring->nhi->rx_rings[ring->hop] = NULL;

	if (ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
			 RING_TYPE(ring), ring->hop);
	}
	spin_unlock_irq(&ring->nhi->lock);

	ring_release_msix(ring);

	dma_free_coherent(&ring->nhi->pdev->dev,
			  ring->size * sizeof(*ring->descriptors),
			  ring->descriptors, ring->descriptors_dma);

	ring->descriptors = NULL;
	ring->descriptors_dma = 0;

	dev_dbg(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring),
		ring->hop);

	/*
	 * ring->work can no longer be scheduled (it is scheduled only
	 * by nhi_interrupt_work, ring_stop and ring_msix). Wait for it
	 * to finish before freeing the ring.
	 */
	flush_work(&ring->work);
	kfree(ring);
}
EXPORT_SYMBOL_GPL(tb_ring_free);
74716603153SAndreas Noever 
748cd446ee2SMika Westerberg /**
749cd446ee2SMika Westerberg  * nhi_mailbox_cmd() - Send a command through NHI mailbox
750cd446ee2SMika Westerberg  * @nhi: Pointer to the NHI structure
751cd446ee2SMika Westerberg  * @cmd: Command to send
752cd446ee2SMika Westerberg  * @data: Data to be send with the command
753cd446ee2SMika Westerberg  *
754cd446ee2SMika Westerberg  * Sends mailbox command to the firmware running on NHI. Returns %0 in
755cd446ee2SMika Westerberg  * case of success and negative errno in case of failure.
756cd446ee2SMika Westerberg  */
int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data)
{
	ktime_t timeout;
	u32 val;

	iowrite32(data, nhi->iobase + REG_INMAIL_DATA);

	val = ioread32(nhi->iobase + REG_INMAIL_CMD);
	val &= ~(REG_INMAIL_CMD_MASK | REG_INMAIL_ERROR);
	val |= REG_INMAIL_OP_REQUEST | cmd;
	iowrite32(val, nhi->iobase + REG_INMAIL_CMD);

	timeout = ktime_add_ms(ktime_get(), NHI_MAILBOX_TIMEOUT);
	do {
		val = ioread32(nhi->iobase + REG_INMAIL_CMD);
		if (!(val & REG_INMAIL_OP_REQUEST))
			break;
		usleep_range(10, 20);
	} while (ktime_before(ktime_get(), timeout));

	if (val & REG_INMAIL_OP_REQUEST)
		return -ETIMEDOUT;
	if (val & REG_INMAIL_ERROR)
		return -EIO;

	return 0;
}
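
/*
 * A sketch of a typical exchange (illustrative only; the ICM support
 * code in icm.c uses this helper, for instance when telling the
 * firmware that the driver is unloading):
 *
 *	ret = nhi_mailbox_cmd(nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
 *	if (ret)
 *		return ret;	-ETIMEDOUT or -EIO from above
 */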

/**
 * nhi_mailbox_mode() - Return current firmware operation mode
 * @nhi: Pointer to the NHI structure
 *
 * The function reads the current firmware operation mode using NHI
 * mailbox registers and returns it to the caller.
 */
enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi)
{
	u32 val;

	val = ioread32(nhi->iobase + REG_OUTMAIL_CMD);
	val &= REG_OUTMAIL_CMD_OPMODE_MASK;
	val >>= REG_OUTMAIL_CMD_OPMODE_SHIFT;

	return (enum nhi_fw_mode)val;
}

static void nhi_interrupt_work(struct work_struct *work)
{
	struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
	int value = 0; /* Suppress uninitialized usage warning. */
	int bit;
	int hop = -1;
	int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */
	struct tb_ring *ring;

	spin_lock_irq(&nhi->lock);

	/*
	 * Starting at REG_RING_NOTIFY_BASE there are three status bitfields
	 * (TX, RX, RX overflow). We iterate over the bits and read new
	 * dwords as required. The registers are cleared on read.
	 */
	for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
		if (bit % 32 == 0)
			value = ioread32(nhi->iobase
					 + REG_RING_NOTIFY_BASE
					 + 4 * (bit / 32));
		if (++hop == nhi->hop_count) {
			hop = 0;
			type++;
		}
		if ((value & (1 << (bit % 32))) == 0)
			continue;
		if (type == 2) {
			dev_warn(&nhi->pdev->dev,
				 "RX overflow for ring %d\n",
				 hop);
			continue;
		}
		if (type == 0)
			ring = nhi->tx_rings[hop];
		else
			ring = nhi->rx_rings[hop];
		if (ring == NULL) {
			dev_warn(&nhi->pdev->dev,
				 "got interrupt for inactive %s ring %d\n",
				 type ? "RX" : "TX",
				 hop);
			continue;
		}

		spin_lock(&ring->lock);
		__ring_interrupt(ring);
		spin_unlock(&ring->lock);
	}
	spin_unlock_irq(&nhi->lock);
}

static irqreturn_t nhi_msi(int irq, void *data)
{
	struct tb_nhi *nhi = data;
	schedule_work(&nhi->interrupt_work);
	return IRQ_HANDLED;
}

static int nhi_suspend_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	return tb_domain_suspend_noirq(tb);
}

static void nhi_enable_int_throttling(struct tb_nhi *nhi)
{
	/* Throttling is specified in 256ns increments */
	u32 throttle = DIV_ROUND_UP(128 * NSEC_PER_USEC, 256);
	unsigned int i;

	/*
	 * Configure interrupt throttling for all vectors even if we
	 * only use a few.
	 */
	for (i = 0; i < MSIX_MAX_VECS; i++) {
		u32 reg = REG_INT_THROTTLING_RATE + i * 4;
		iowrite32(throttle, nhi->iobase + reg);
	}
}
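
/*
 * With the 128 us target above this writes DIV_ROUND_UP(128 * 1000, 256)
 * == 500 units of 256 ns into each REG_INT_THROTTLING_RATE register,
 * i.e. at most one interrupt per vector roughly every 128 us.
 */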
8858c6bba10SMika Westerberg 
88623dd5bb4SAndreas Noever static int nhi_resume_noirq(struct device *dev)
88723dd5bb4SAndreas Noever {
88823dd5bb4SAndreas Noever 	struct pci_dev *pdev = to_pci_dev(dev);
88923dd5bb4SAndreas Noever 	struct tb *tb = pci_get_drvdata(pdev);
8909d3cce0bSMika Westerberg 
	/*
	 * Check that the device is still there. It may be that the user
	 * unplugged the last device which causes the host controller to
	 * go away on PCs.
	 */
	if (!pci_device_is_present(pdev))
		tb->nhi->going_away = true;
	else
		nhi_enable_int_throttling(tb->nhi);

	return tb_domain_resume_noirq(tb);
}

static int nhi_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	return tb_domain_suspend(tb);
}

static void nhi_complete(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	/*
	 * If we were runtime suspended when system suspend started,
	 * schedule runtime resume now. It should bring the domain back
	 * to functional state.
	 */
	if (pm_runtime_suspended(&pdev->dev))
		pm_runtime_resume(&pdev->dev);
	else
		tb_domain_complete(tb);
}

static int nhi_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	return tb_domain_runtime_suspend(tb);
}

static int nhi_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	nhi_enable_int_throttling(tb->nhi);
	return tb_domain_runtime_resume(tb);
}

static void nhi_shutdown(struct tb_nhi *nhi)
{
	int i;

	dev_dbg(&nhi->pdev->dev, "shutdown\n");

	for (i = 0; i < nhi->hop_count; i++) {
		if (nhi->tx_rings[i])
			dev_WARN(&nhi->pdev->dev,
				 "TX ring %d is still active\n", i);
		if (nhi->rx_rings[i])
			dev_WARN(&nhi->pdev->dev,
				 "RX ring %d is still active\n", i);
	}
	nhi_disable_interrupts(nhi);
	/*
	 * We have to release the irq before calling flush_work. Otherwise an
	 * already executing IRQ handler could call schedule_work again.
	 */
	if (!nhi->pdev->msix_enabled) {
		devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
		flush_work(&nhi->interrupt_work);
	}
	ida_destroy(&nhi->msix_ida);
}

static int nhi_init_msi(struct tb_nhi *nhi)
{
	struct pci_dev *pdev = nhi->pdev;
	int res, irq, nvec;

	/* In case someone left them on. */
	nhi_disable_interrupts(nhi);

	nhi_enable_int_throttling(nhi);

	ida_init(&nhi->msix_ida);

	/*
	 * The NHI has 16 MSI-X vectors or a single MSI. We first try to
	 * get all MSI-X vectors and if we succeed, each ring will have
	 * one MSI-X vector. If for some reason that does not work out,
	 * we fall back to a single MSI.
	 */
	nvec = pci_alloc_irq_vectors(pdev, MSIX_MIN_VECS, MSIX_MAX_VECS,
				     PCI_IRQ_MSIX);
	if (nvec < 0) {
		nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
		if (nvec < 0)
			return nvec;

		INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);

		irq = pci_irq_vector(nhi->pdev, 0);
		if (irq < 0)
			return irq;

		res = devm_request_irq(&pdev->dev, irq, nhi_msi,
				       IRQF_NO_SUSPEND, "thunderbolt", nhi);
		if (res) {
			dev_err(&pdev->dev, "request_irq failed, aborting\n");
			return res;
		}
	}

	return 0;
}

static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct tb_nhi *nhi;
	struct tb *tb;
	int res;

	res = pcim_enable_device(pdev);
	if (res) {
		dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
		return res;
	}

	res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
	if (res) {
		dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
		return res;
	}

	nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
	if (!nhi)
		return -ENOMEM;

	nhi->pdev = pdev;
	/* cannot fail - table is allocated in pcim_iomap_regions */
103716603153SAndreas Noever 	nhi->iobase = pcim_iomap_table(pdev)[0];
103816603153SAndreas Noever 	nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
103919bf4d4fSLukas Wunner 	if (nhi->hop_count != 12 && nhi->hop_count != 32)
104016603153SAndreas Noever 		dev_warn(&pdev->dev, "unexpected hop count: %d\n",
104116603153SAndreas Noever 			 nhi->hop_count);
104216603153SAndreas Noever 
10432a211f32SHimangi Saraogi 	nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
10442a211f32SHimangi Saraogi 				     sizeof(*nhi->tx_rings), GFP_KERNEL);
10452a211f32SHimangi Saraogi 	nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
10462a211f32SHimangi Saraogi 				     sizeof(*nhi->rx_rings), GFP_KERNEL);
104716603153SAndreas Noever 	if (!nhi->tx_rings || !nhi->rx_rings)
104816603153SAndreas Noever 		return -ENOMEM;
104916603153SAndreas Noever 
1050046bee1fSMika Westerberg 	res = nhi_init_msi(nhi);
105116603153SAndreas Noever 	if (res) {
1052046bee1fSMika Westerberg 		dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
105316603153SAndreas Noever 		return res;
105416603153SAndreas Noever 	}
105516603153SAndreas Noever 
105659120e06SMika Westerberg 	spin_lock_init(&nhi->lock);
105716603153SAndreas Noever 
1058dba3caf6SMika Westerberg 	res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1059dba3caf6SMika Westerberg 	if (res)
1060dba3caf6SMika Westerberg 		res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1061dba3caf6SMika Westerberg 	if (res) {
1062dba3caf6SMika Westerberg 		dev_err(&pdev->dev, "failed to set DMA mask\n");
1063dba3caf6SMika Westerberg 		return res;
1064dba3caf6SMika Westerberg 	}
1065dba3caf6SMika Westerberg 
106616603153SAndreas Noever 	pci_set_master(pdev);
106716603153SAndreas Noever 
1068f67cf491SMika Westerberg 	tb = icm_probe(nhi);
10699d3cce0bSMika Westerberg 	if (!tb)
1070f67cf491SMika Westerberg 		tb = tb_probe(nhi);
1071f67cf491SMika Westerberg 	if (!tb) {
1072f67cf491SMika Westerberg 		dev_err(&nhi->pdev->dev,
1073f67cf491SMika Westerberg 			"failed to determine connection manager, aborting\n");
10749d3cce0bSMika Westerberg 		return -ENODEV;
1075f67cf491SMika Westerberg 	}
1076f67cf491SMika Westerberg 
1077daa5140fSMika Westerberg 	dev_dbg(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
10789d3cce0bSMika Westerberg 
10799d3cce0bSMika Westerberg 	res = tb_domain_add(tb);
10809d3cce0bSMika Westerberg 	if (res) {
1081d6cc51cdSAndreas Noever 		/*
1082d6cc51cdSAndreas Noever 		 * At this point the RX/TX rings might already have been
1083d6cc51cdSAndreas Noever 		 * activated. Do a proper shutdown.
1084d6cc51cdSAndreas Noever 		 */
10859d3cce0bSMika Westerberg 		tb_domain_put(tb);
1086d6cc51cdSAndreas Noever 		nhi_shutdown(nhi);
108768a7a2acSMika Westerberg 		return res;
1088d6cc51cdSAndreas Noever 	}
1089d6cc51cdSAndreas Noever 	pci_set_drvdata(pdev, tb);
109016603153SAndreas Noever 
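	/*
	 * Enable runtime PM: pm_runtime_allow() lifts the PCI core's default
	 * block on runtime suspend, and the device may then autosuspend once
	 * it has been idle for TB_AUTOSUSPEND_DELAY.
	 */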
10912d8ff0b5SMika Westerberg 	pm_runtime_allow(&pdev->dev);
10922d8ff0b5SMika Westerberg 	pm_runtime_set_autosuspend_delay(&pdev->dev, TB_AUTOSUSPEND_DELAY);
10932d8ff0b5SMika Westerberg 	pm_runtime_use_autosuspend(&pdev->dev);
10942d8ff0b5SMika Westerberg 	pm_runtime_put_autosuspend(&pdev->dev);
10952d8ff0b5SMika Westerberg 
109616603153SAndreas Noever 	return 0;
109716603153SAndreas Noever }
109816603153SAndreas Noever 
109916603153SAndreas Noever static void nhi_remove(struct pci_dev *pdev)
110016603153SAndreas Noever {
1101d6cc51cdSAndreas Noever 	struct tb *tb = pci_get_drvdata(pdev);
1102d6cc51cdSAndreas Noever 	struct tb_nhi *nhi = tb->nhi;
11039d3cce0bSMika Westerberg 
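	/*
	 * Resume the device and undo the runtime PM enablement done in
	 * nhi_probe() so it stays powered during the teardown below.
	 */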
11042d8ff0b5SMika Westerberg 	pm_runtime_get_sync(&pdev->dev);
11052d8ff0b5SMika Westerberg 	pm_runtime_dont_use_autosuspend(&pdev->dev);
11062d8ff0b5SMika Westerberg 	pm_runtime_forbid(&pdev->dev);
11072d8ff0b5SMika Westerberg 
11089d3cce0bSMika Westerberg 	tb_domain_remove(tb);
110916603153SAndreas Noever 	nhi_shutdown(nhi);
111016603153SAndreas Noever }
111116603153SAndreas Noever 
111223dd5bb4SAndreas Noever /*
111323dd5bb4SAndreas Noever  * The tunneled PCI bridges are siblings of ours. Use resume_noirq to
111423dd5bb4SAndreas Noever  * re-enable the tunnels ASAP. A corresponding PCI quirk blocks the
111523dd5bb4SAndreas Noever  * downstream bridges' resume_noirq until we are done.
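 * (The quirk in question appears to be quirk_apple_wait_for_thunderbolt()
 * in drivers/pci/quirks.c.)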
111623dd5bb4SAndreas Noever  */
111723dd5bb4SAndreas Noever static const struct dev_pm_ops nhi_pm_ops = {
111823dd5bb4SAndreas Noever 	.suspend_noirq = nhi_suspend_noirq,
111923dd5bb4SAndreas Noever 	.resume_noirq = nhi_resume_noirq,
112023dd5bb4SAndreas Noever 	.freeze_noirq = nhi_suspend_noirq, /*
112123dd5bb4SAndreas Noever 					    * we just disable hotplug, the
112223dd5bb4SAndreas Noever 					    * PCI tunnels stay alive.
112323dd5bb4SAndreas Noever 					    */
1124f2a659f7SMika Westerberg 	.thaw_noirq = nhi_resume_noirq,
112523dd5bb4SAndreas Noever 	.restore_noirq = nhi_resume_noirq,
1126f67cf491SMika Westerberg 	.suspend = nhi_suspend,
1127f67cf491SMika Westerberg 	.freeze = nhi_suspend,
1128f67cf491SMika Westerberg 	.poweroff = nhi_suspend,
1129f67cf491SMika Westerberg 	.complete = nhi_complete,
11302d8ff0b5SMika Westerberg 	.runtime_suspend = nhi_runtime_suspend,
11312d8ff0b5SMika Westerberg 	.runtime_resume = nhi_runtime_resume,
113223dd5bb4SAndreas Noever };
113323dd5bb4SAndreas Noever 
1134620863f7SSachin Kamat static const struct pci_device_id nhi_ids[] = {
113516603153SAndreas Noever 	/*
113616603153SAndreas Noever 	 * We have to specify class, as the TB bridges use the same device and
11371d111406SLukas Wunner 	 * vendor (sub)ids on gen 1 and gen 2 controllers.
113816603153SAndreas Noever 	 */
113916603153SAndreas Noever 	{
114016603153SAndreas Noever 		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
11411d111406SLukas Wunner 		.vendor = PCI_VENDOR_ID_INTEL,
114219bf4d4fSLukas Wunner 		.device = PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
114319bf4d4fSLukas Wunner 		.subvendor = 0x2222, .subdevice = 0x1111,
114419bf4d4fSLukas Wunner 	},
114519bf4d4fSLukas Wunner 	{
114619bf4d4fSLukas Wunner 		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
114719bf4d4fSLukas Wunner 		.vendor = PCI_VENDOR_ID_INTEL,
11481d111406SLukas Wunner 		.device = PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
114916603153SAndreas Noever 		.subvendor = 0x2222, .subdevice = 0x1111,
115016603153SAndreas Noever 	},
115116603153SAndreas Noever 	{
115216603153SAndreas Noever 		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
11531d111406SLukas Wunner 		.vendor = PCI_VENDOR_ID_INTEL,
115482a6a81cSXavier Gnata 		.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI,
115582a6a81cSXavier Gnata 		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
115682a6a81cSXavier Gnata 	},
115782a6a81cSXavier Gnata 	{
115882a6a81cSXavier Gnata 		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
115982a6a81cSXavier Gnata 		.vendor = PCI_VENDOR_ID_INTEL,
11601d111406SLukas Wunner 		.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI,
1161a42fb351SKnuth Posern 		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
116216603153SAndreas Noever 	},
11635e2781bcSMika Westerberg 
11645e2781bcSMika Westerberg 	/* Thunderbolt 3 */
11655e2781bcSMika Westerberg 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI) },
11665e2781bcSMika Westerberg 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI) },
11675e2781bcSMika Westerberg 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI) },
11685e2781bcSMika Westerberg 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI) },
11695e2781bcSMika Westerberg 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI) },
11705e2781bcSMika Westerberg 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI) },
11715e2781bcSMika Westerberg 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI) },
11725e2781bcSMika Westerberg 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) },
11734bac471dSRadion Mirchevsky 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI) },
11744bac471dSRadion Mirchevsky 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI) },
11755e2781bcSMika Westerberg 
117616603153SAndreas Noever 	{ 0,}
117716603153SAndreas Noever };
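
/*
 * Note: PCI_VDEVICE(INTEL, id) above is shorthand that matches the Intel
 * vendor ID and the given device ID with subvendor/subdevice wildcarded,
 * roughly equivalent to:
 *
 *	.vendor = PCI_VENDOR_ID_INTEL, .device = (id),
 *	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
 */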
117816603153SAndreas Noever 
117916603153SAndreas Noever MODULE_DEVICE_TABLE(pci, nhi_ids);
118016603153SAndreas Noever MODULE_LICENSE("GPL");
118116603153SAndreas Noever 
118216603153SAndreas Noever static struct pci_driver nhi_driver = {
118316603153SAndreas Noever 	.name = "thunderbolt",
118416603153SAndreas Noever 	.id_table = nhi_ids,
118516603153SAndreas Noever 	.probe = nhi_probe,
118616603153SAndreas Noever 	.remove = nhi_remove,
118723dd5bb4SAndreas Noever 	.driver.pm = &nhi_pm_ops,
118816603153SAndreas Noever };
118916603153SAndreas Noever 
119016603153SAndreas Noever static int __init nhi_init(void)
119116603153SAndreas Noever {
11929d3cce0bSMika Westerberg 	int ret;
11939d3cce0bSMika Westerberg 
11949d3cce0bSMika Westerberg 	ret = tb_domain_init();
11959d3cce0bSMika Westerberg 	if (ret)
11969d3cce0bSMika Westerberg 		return ret;
11979d3cce0bSMika Westerberg 	ret = pci_register_driver(&nhi_driver);
11989d3cce0bSMika Westerberg 	if (ret)
11999d3cce0bSMika Westerberg 		tb_domain_exit();
12009d3cce0bSMika Westerberg 	return ret;
120116603153SAndreas Noever }
120216603153SAndreas Noever 
120316603153SAndreas Noever static void __exit nhi_unload(void)
120416603153SAndreas Noever {
120516603153SAndreas Noever 	pci_unregister_driver(&nhi_driver);
12069d3cce0bSMika Westerberg 	tb_domain_exit();
120716603153SAndreas Noever }
120816603153SAndreas Noever 
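/*
 * rootfs_initcall() runs earlier in boot than device_initcall(), so a
 * built-in NHI driver is registered before most ordinary PCI drivers.
 */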
1209eafa717bSMika Westerberg rootfs_initcall(nhi_init);
121016603153SAndreas Noever module_exit(nhi_unload);
1211