xref: /openbmc/linux/drivers/net/wireless/ath/ath12k/pci.c (revision c005e2f62f8421b13b9a31adb9db7281f1a19e68)
1  // SPDX-License-Identifier: BSD-3-Clause-Clear
2  /*
3   * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
4   * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
5   */
6  
7  #include <linux/module.h>
8  #include <linux/msi.h>
9  #include <linux/pci.h>
10  
11  #include "pci.h"
12  #include "core.h"
13  #include "hif.h"
14  #include "mhi.h"
15  #include "debug.h"
16  
/* BAR used for all register access and its DMA addressing width */
#define ATH12K_PCI_BAR_NUM		0
#define ATH12K_PCI_DMA_MASK		32

/* First CE interrupt slot in ab->irq_num[] (after BHI + 2 MHI vectors) */
#define ATH12K_PCI_IRQ_CE0_OFFSET		3

/* Register window machinery: bits 24:19 of a register offset select a
 * 512 KiB window; bits 18:0 address within it. Static (sticky) window
 * selections live in bits 31:6 of the cached window value.
 */
#define WINDOW_ENABLE_BIT		0x40000000
#define WINDOW_REG_ADDRESS		0x310c
#define WINDOW_VALUE_MASK		GENMASK(24, 19)
#define WINDOW_START			0x80000
#define WINDOW_RANGE_MASK		GENMASK(18, 0)
#define WINDOW_STATIC_MASK		GENMASK(31, 6)

/* TCSR register reporting the SoC hardware revision */
#define TCSR_SOC_HW_VERSION		0x1B00000
#define TCSR_SOC_HW_VERSION_MAJOR_MASK	GENMASK(11, 8)
#define TCSR_SOC_HW_VERSION_MINOR_MASK	GENMASK(7, 4)

/* BAR0 + 4k is always accessible, and no
 * need to force wakeup.
 * 4K - 32 = 0xFE0
 */
#define ACCESS_ALWAYS_OFF 0xFE0

/* PCI device IDs of the chips this driver binds to */
#define QCN9274_DEVICE_ID		0x1109
#define WCN7850_DEVICE_ID		0x1107
41  
/* PCI IDs this driver claims; exported for module autoloading */
static const struct pci_device_id ath12k_pci_id_table[] = {
	{ PCI_VDEVICE(QCOM, QCN9274_DEVICE_ID) },
	{ PCI_VDEVICE(QCOM, WCN7850_DEVICE_ID) },
	{0}
};

MODULE_DEVICE_TABLE(pci, ath12k_pci_id_table);
49  
/* TODO: revisit IRQ mapping for new SRNG's */
/* MSI vector layout: 16 total vectors split between MHI (control),
 * CE (copy engines) and DP (data path ext groups).
 */
static const struct ath12k_msi_config ath12k_msi_config[] = {
	{
		.total_vectors = 16,
		.total_users = 3,
		.users = (struct ath12k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 5, .base_vector = 3 },
			{ .name = "DP", .num_vectors = 8, .base_vector = 8 },
		},
	},
};
62  
/* Human-readable names indexed by IRQ slot, used when requesting IRQs.
 * Order must match the ab->irq_num[] slot layout (BHI/MHI, CEs, DP rings).
 */
static const char *irq_name[ATH12K_IRQ_NUM_MAX] = {
	"bhi",
	"mhi-er0",
	"mhi-er1",
	"ce0",
	"ce1",
	"ce2",
	"ce3",
	"ce4",
	"ce5",
	"ce6",
	"ce7",
	"ce8",
	"ce9",
	"ce10",
	"ce11",
	"ce12",
	"ce13",
	"ce14",
	"ce15",
	"host2wbm-desc-feed",
	"host2reo-re-injection",
	"host2reo-command",
	"host2rxdma-monitor-ring3",
	"host2rxdma-monitor-ring2",
	"host2rxdma-monitor-ring1",
	"reo2ost-exception",
	"wbm2host-rx-release",
	"reo2host-status",
	"reo2host-destination-ring4",
	"reo2host-destination-ring3",
	"reo2host-destination-ring2",
	"reo2host-destination-ring1",
	"rxdma2host-monitor-destination-mac3",
	"rxdma2host-monitor-destination-mac2",
	"rxdma2host-monitor-destination-mac1",
	"ppdu-end-interrupts-mac3",
	"ppdu-end-interrupts-mac2",
	"ppdu-end-interrupts-mac1",
	"rxdma2host-monitor-status-ring-mac3",
	"rxdma2host-monitor-status-ring-mac2",
	"rxdma2host-monitor-status-ring-mac1",
	"host2rxdma-host-buf-ring-mac3",
	"host2rxdma-host-buf-ring-mac2",
	"host2rxdma-host-buf-ring-mac1",
	"rxdma2host-destination-ring-mac3",
	"rxdma2host-destination-ring-mac2",
	"rxdma2host-destination-ring-mac1",
	"host2tcl-input-ring4",
	"host2tcl-input-ring3",
	"host2tcl-input-ring2",
	"host2tcl-input-ring1",
	"wbm2host-tx-completions-ring4",
	"wbm2host-tx-completions-ring3",
	"wbm2host-tx-completions-ring2",
	"wbm2host-tx-completions-ring1",
	"tcl2host-status-ring",
};
121  
ath12k_pci_bus_wake_up(struct ath12k_base * ab)122  static int ath12k_pci_bus_wake_up(struct ath12k_base *ab)
123  {
124  	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
125  
126  	return mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
127  }
128  
ath12k_pci_bus_release(struct ath12k_base * ab)129  static void ath12k_pci_bus_release(struct ath12k_base *ab)
130  {
131  	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
132  
133  	mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
134  }
135  
/* QCN9274: registers are always accessible, no wakeup handshake needed */
static const struct ath12k_pci_ops ath12k_pci_ops_qcn9274 = {
	.wakeup = NULL,
	.release = NULL,
};

/* WCN7850: must wake the MHI device before touching most registers */
static const struct ath12k_pci_ops ath12k_pci_ops_wcn7850 = {
	.wakeup = ath12k_pci_bus_wake_up,
	.release = ath12k_pci_bus_release,
};
145  
/* Point the dynamic register window at the 512 KiB region containing
 * @offset, keeping the statically programmed window bits intact.
 * Caller must hold ab_pci->window_lock. The readback after iowrite32()
 * flushes the posted write so the window switch is complete before the
 * caller accesses the windowed region.
 */
static void ath12k_pci_select_window(struct ath12k_pci *ab_pci, u32 offset)
{
	struct ath12k_base *ab = ab_pci->ab;

	u32 window = u32_get_bits(offset, WINDOW_VALUE_MASK);
	u32 static_window;

	lockdep_assert_held(&ab_pci->window_lock);

	/* Preserve the static window configuration and reset only dynamic window */
	static_window = ab_pci->register_window & WINDOW_STATIC_MASK;
	window |= static_window;

	/* skip the (slow) register write if the window is already selected */
	if (window != ab_pci->register_window) {
		iowrite32(WINDOW_ENABLE_BIT | window,
			  ab->mem + WINDOW_REG_ADDRESS);
		ioread32(ab->mem + WINDOW_REG_ADDRESS);
		ab_pci->register_window = window;
	}
}
166  
/* Program sticky windows for the UMAC and CE register ranges so they stay
 * reachable without re-selecting the dynamic window on every access.
 */
static void ath12k_pci_select_static_window(struct ath12k_pci *ab_pci)
{
	u32 umac_window = u32_get_bits(HAL_SEQ_WCSS_UMAC_OFFSET, WINDOW_VALUE_MASK);
	u32 ce_window = u32_get_bits(HAL_CE_WFSS_CE_REG_BASE, WINDOW_VALUE_MASK);
	u32 window;

	/* slot layout: UMAC at bits 12+, CE at bits 6+, dynamic in low bits */
	window = (umac_window << 12) | (ce_window << 6);

	/* cache under window_lock so ath12k_pci_select_window() preserves
	 * these bits via WINDOW_STATIC_MASK
	 */
	spin_lock_bh(&ab_pci->window_lock);
	ab_pci->register_window = window;
	spin_unlock_bh(&ab_pci->window_lock);

	iowrite32(WINDOW_ENABLE_BIT | window, ab_pci->ab->mem + WINDOW_REG_ADDRESS);
}
181  
/* Map a register offset to the BAR offset of the window through which it
 * must be accessed. The test "(offset ^ base) < WINDOW_RANGE_MASK" is true
 * iff offset lies in the same 512 KiB-aligned region as base.
 */
static u32 ath12k_pci_get_window_start(struct ath12k_base *ab,
				       u32 offset)
{
	u32 window_start;

	/* If offset lies within DP register range, use 3rd window */
	if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < WINDOW_RANGE_MASK)
		window_start = 3 * WINDOW_START;
	/* If offset lies within CE register range, use 2nd window */
	else if ((offset ^ HAL_CE_WFSS_CE_REG_BASE) < WINDOW_RANGE_MASK)
		window_start = 2 * WINDOW_START;
	/* If offset lies within PCI_BAR_WINDOW0_BASE and within PCI_SOC_PCI_REG_BASE
	 * use 0th window
	 */
	else if (((offset ^ PCI_BAR_WINDOW0_BASE) < WINDOW_RANGE_MASK) &&
		 !((offset ^ PCI_SOC_PCI_REG_BASE) < PCI_SOC_RANGE_MASK))
		window_start = 0;
	else
		window_start = WINDOW_START;

	return window_start;
}
204  
/* Pulse the PCIe SoC global reset bit: set V, wait, clear V, wait, then
 * read back to detect a dead link (all-ones read).
 */
static void ath12k_pci_soc_global_reset(struct ath12k_base *ab)
{
	u32 val, delay;

	val = ath12k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET);

	val |= PCIE_SOC_GLOBAL_RESET_V;

	ath12k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val);

	/* TODO: exact time to sleep is uncertain */
	delay = 10;
	mdelay(delay);

	/* Need to toggle V bit back otherwise stuck in reset status */
	val &= ~PCIE_SOC_GLOBAL_RESET_V;

	ath12k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val);

	mdelay(delay);

	/* 0xffffffff from a PCI read means the device fell off the bus */
	val = ath12k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET);
	if (val == 0xffffffff)
		ath12k_warn(ab, "link down error during global reset\n");
}
230  
/* Scrub firmware scratch/debug registers so the Q6 does not resume down a
 * stale warm-boot path after reset. Reads are logged for debugging only.
 */
static void ath12k_pci_clear_dbg_registers(struct ath12k_base *ab)
{
	u32 val;

	/* read cookie */
	val = ath12k_pci_read32(ab, PCIE_Q6_COOKIE_ADDR);
	ath12k_dbg(ab, ATH12K_DBG_PCI, "cookie:0x%x\n", val);

	val = ath12k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
	ath12k_dbg(ab, ATH12K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);

	/* TODO: exact time to sleep is uncertain */
	mdelay(10);

	/* write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from
	 * continuing warm path and entering dead loop.
	 */
	ath12k_pci_write32(ab, WLAON_WARM_SW_ENTRY, 0);
	mdelay(10);

	val = ath12k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
	ath12k_dbg(ab, ATH12K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);

	/* A read clear register. clear the register to prevent
	 * Q6 from entering wrong code path.
	 */
	val = ath12k_pci_read32(ab, WLAON_SOC_RESET_CAUSE_REG);
	ath12k_dbg(ab, ATH12K_DBG_PCI, "soc reset cause:%d\n", val);
}
260  
/* Re-enable PCIe LTSSM (link training) after a hot reset and assert the
 * GCC hot-reset bit. Retries the LTSSM write a few times because the link
 * can be unstable (all-ones reads) right after hot reset.
 */
static void ath12k_pci_enable_ltssm(struct ath12k_base *ab)
{
	u32 val;
	int i;

	val = ath12k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM);

	/* PCIE link seems very unstable after the Hot Reset*/
	for (i = 0; val != PARM_LTSSM_VALUE && i < 5; i++) {
		if (val == 0xffffffff)
			mdelay(5);

		ath12k_pci_write32(ab, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE);
		val = ath12k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM);
	}

	ath12k_dbg(ab, ATH12K_DBG_PCI, "pci ltssm 0x%x\n", val);

	val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);
	val |= GCC_GCC_PCIE_HOT_RST_VAL;
	ath12k_pci_write32(ab, GCC_GCC_PCIE_HOT_RST, val);
	/* read back to post the write before the settle delay */
	val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);

	ath12k_dbg(ab, ATH12K_DBG_PCI, "pci pcie_hot_rst 0x%x\n", val);

	mdelay(5);
}
288  
static void ath12k_pci_clear_all_intrs(struct ath12k_base *ab)
{
	/* This is a WAR for PCIE Hotreset.
	 * When target receive Hotreset, but will set the interrupt.
	 * So when download SBL again, SBL will open Interrupt and
	 * receive it, and crash immediately.
	 */
	ath12k_pci_write32(ab, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL);
}
298  
/* Clear the VDD4BLOW bit in the WLAON QFPROM power-control register
 * (read-modify-write; other bits are preserved).
 */
static void ath12k_pci_set_wlaon_pwr_ctrl(struct ath12k_base *ab)
{
	u32 val;

	val = ath12k_pci_read32(ab, WLAON_QFPROM_PWR_CTRL_REG);
	val &= ~QFPROM_PWR_CTRL_VDD4BLOW_MASK;
	ath12k_pci_write32(ab, WLAON_QFPROM_PWR_CTRL_REG, val);
}
307  
/* Force the SoC awake via the PCIe local wake register; the 5 ms delay
 * gives the hardware time to come out of low power before register access.
 */
static void ath12k_pci_force_wake(struct ath12k_base *ab)
{
	ath12k_pci_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1);
	mdelay(5);
}
313  
/* Software-reset the target. On first power-on, additionally recover the
 * link (LTSSM), drop stale interrupts and fix up WLAON power control
 * before the common reset sequence. Order of these steps matters.
 */
static void ath12k_pci_sw_reset(struct ath12k_base *ab, bool power_on)
{
	if (power_on) {
		ath12k_pci_enable_ltssm(ab);
		ath12k_pci_clear_all_intrs(ab);
		ath12k_pci_set_wlaon_pwr_ctrl(ab);
	}

	ath12k_mhi_clear_vector(ab);
	ath12k_pci_clear_dbg_registers(ab);
	ath12k_pci_soc_global_reset(ab);
	ath12k_mhi_set_mhictrl_reset(ab);
}
327  
ath12k_pci_free_ext_irq(struct ath12k_base * ab)328  static void ath12k_pci_free_ext_irq(struct ath12k_base *ab)
329  {
330  	int i, j;
331  
332  	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
333  		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
334  
335  		for (j = 0; j < irq_grp->num_irq; j++)
336  			free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
337  
338  		netif_napi_del(&irq_grp->napi);
339  	}
340  }
341  
ath12k_pci_free_irq(struct ath12k_base * ab)342  static void ath12k_pci_free_irq(struct ath12k_base *ab)
343  {
344  	int i, irq_idx;
345  
346  	for (i = 0; i < ab->hw_params->ce_count; i++) {
347  		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
348  			continue;
349  		irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;
350  		free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
351  	}
352  
353  	ath12k_pci_free_ext_irq(ab);
354  }
355  
/* Re-enable the MSI line backing a single copy engine. */
static void ath12k_pci_ce_irq_enable(struct ath12k_base *ab, u16 ce_id)
{
	enable_irq(ab->irq_num[ATH12K_PCI_IRQ_CE0_OFFSET + ce_id]);
}
363  
/* Mask a single copy engine's IRQ without waiting for a running handler. */
static void ath12k_pci_ce_irq_disable(struct ath12k_base *ab, u16 ce_id)
{
	disable_irq_nosync(ab->irq_num[ATH12K_PCI_IRQ_CE0_OFFSET + ce_id]);
}
371  
ath12k_pci_ce_irqs_disable(struct ath12k_base * ab)372  static void ath12k_pci_ce_irqs_disable(struct ath12k_base *ab)
373  {
374  	int i;
375  
376  	clear_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
377  
378  	for (i = 0; i < ab->hw_params->ce_count; i++) {
379  		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
380  			continue;
381  		ath12k_pci_ce_irq_disable(ab, i);
382  	}
383  }
384  
ath12k_pci_sync_ce_irqs(struct ath12k_base * ab)385  static void ath12k_pci_sync_ce_irqs(struct ath12k_base *ab)
386  {
387  	int i;
388  	int irq_idx;
389  
390  	for (i = 0; i < ab->hw_params->ce_count; i++) {
391  		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
392  			continue;
393  
394  		irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;
395  		synchronize_irq(ab->irq_num[irq_idx]);
396  	}
397  }
398  
ath12k_pci_ce_tasklet(struct tasklet_struct * t)399  static void ath12k_pci_ce_tasklet(struct tasklet_struct *t)
400  {
401  	struct ath12k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
402  
403  	ath12k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
404  
405  	ath12k_pci_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
406  }
407  
ath12k_pci_ce_interrupt_handler(int irq,void * arg)408  static irqreturn_t ath12k_pci_ce_interrupt_handler(int irq, void *arg)
409  {
410  	struct ath12k_ce_pipe *ce_pipe = arg;
411  	struct ath12k_base *ab = ce_pipe->ab;
412  
413  	if (!test_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags))
414  		return IRQ_HANDLED;
415  
416  	/* last interrupt received for this CE */
417  	ce_pipe->timestamp = jiffies;
418  
419  	ath12k_pci_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);
420  	tasklet_schedule(&ce_pipe->intr_tq);
421  
422  	return IRQ_HANDLED;
423  }
424  
ath12k_pci_ext_grp_disable(struct ath12k_ext_irq_grp * irq_grp)425  static void ath12k_pci_ext_grp_disable(struct ath12k_ext_irq_grp *irq_grp)
426  {
427  	int i;
428  
429  	for (i = 0; i < irq_grp->num_irq; i++)
430  		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
431  }
432  
__ath12k_pci_ext_irq_disable(struct ath12k_base * ab)433  static void __ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
434  {
435  	int i;
436  
437  	if (!test_and_clear_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
438  		return;
439  
440  	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
441  		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
442  
443  		ath12k_pci_ext_grp_disable(irq_grp);
444  
445  		napi_synchronize(&irq_grp->napi);
446  		napi_disable(&irq_grp->napi);
447  	}
448  }
449  
ath12k_pci_ext_grp_enable(struct ath12k_ext_irq_grp * irq_grp)450  static void ath12k_pci_ext_grp_enable(struct ath12k_ext_irq_grp *irq_grp)
451  {
452  	int i;
453  
454  	for (i = 0; i < irq_grp->num_irq; i++)
455  		enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
456  }
457  
ath12k_pci_sync_ext_irqs(struct ath12k_base * ab)458  static void ath12k_pci_sync_ext_irqs(struct ath12k_base *ab)
459  {
460  	int i, j, irq_idx;
461  
462  	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
463  		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
464  
465  		for (j = 0; j < irq_grp->num_irq; j++) {
466  			irq_idx = irq_grp->irqs[j];
467  			synchronize_irq(ab->irq_num[irq_idx]);
468  		}
469  	}
470  }
471  
ath12k_pci_ext_grp_napi_poll(struct napi_struct * napi,int budget)472  static int ath12k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget)
473  {
474  	struct ath12k_ext_irq_grp *irq_grp = container_of(napi,
475  						struct ath12k_ext_irq_grp,
476  						napi);
477  	struct ath12k_base *ab = irq_grp->ab;
478  	int work_done;
479  
480  	work_done = ath12k_dp_service_srng(ab, irq_grp, budget);
481  	if (work_done < budget) {
482  		napi_complete_done(napi, work_done);
483  		ath12k_pci_ext_grp_enable(irq_grp);
484  	}
485  
486  	if (work_done > budget)
487  		work_done = budget;
488  
489  	return work_done;
490  }
491  
ath12k_pci_ext_interrupt_handler(int irq,void * arg)492  static irqreturn_t ath12k_pci_ext_interrupt_handler(int irq, void *arg)
493  {
494  	struct ath12k_ext_irq_grp *irq_grp = arg;
495  	struct ath12k_base *ab = irq_grp->ab;
496  
497  	if (!test_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
498  		return IRQ_HANDLED;
499  
500  	ath12k_dbg(irq_grp->ab, ATH12K_DBG_PCI, "ext irq:%d\n", irq);
501  
502  	/* last interrupt received for this group */
503  	irq_grp->timestamp = jiffies;
504  
505  	ath12k_pci_ext_grp_disable(irq_grp);
506  
507  	napi_schedule(&irq_grp->napi);
508  
509  	return IRQ_HANDLED;
510  }
511  
/* Configure the DP ("external") interrupt groups: one NAPI context per
 * group and at most one IRQ, mapped round-robin onto the DP MSI vectors.
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): on request_irq() failure this returns without freeing IRQs
 * requested for earlier groups — presumably the caller tears everything
 * down on error; confirm against the probe error path.
 */
static int ath12k_pci_ext_irq_config(struct ath12k_base *ab)
{
	int i, j, ret, num_vectors = 0;
	u32 user_base_data = 0, base_vector = 0, base_idx;

	/* ext IRQ slots in ab->irq_num[] start right after the CE slots */
	base_idx = ATH12K_PCI_IRQ_CE0_OFFSET + CE_COUNT_MAX;
	ret = ath12k_pci_get_user_msi_assignment(ab, "DP",
						 &num_vectors,
						 &user_base_data,
						 &base_vector);
	if (ret < 0)
		return ret;

	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
		u32 num_irq = 0;

		irq_grp->ab = ab;
		irq_grp->grp_id = i;
		/* NAPI needs a netdev; use a dummy one since this is not a
		 * real network interface
		 */
		init_dummy_netdev(&irq_grp->napi_ndev);
		netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
			       ath12k_pci_ext_grp_napi_poll);

		/* a group gets an IRQ only if some ring mask routes to it */
		if (ab->hw_params->ring_mask->tx[i] ||
		    ab->hw_params->ring_mask->rx[i] ||
		    ab->hw_params->ring_mask->rx_err[i] ||
		    ab->hw_params->ring_mask->rx_wbm_rel[i] ||
		    ab->hw_params->ring_mask->reo_status[i] ||
		    ab->hw_params->ring_mask->host2rxdma[i] ||
		    ab->hw_params->ring_mask->rx_mon_dest[i]) {
			num_irq = 1;
		}

		irq_grp->num_irq = num_irq;
		irq_grp->irqs[0] = base_idx + i;

		for (j = 0; j < irq_grp->num_irq; j++) {
			int irq_idx = irq_grp->irqs[j];
			/* groups share the DP vectors round-robin */
			int vector = (i % num_vectors) + base_vector;
			int irq = ath12k_pci_get_msi_irq(ab->dev, vector);

			ab->irq_num[irq_idx] = irq;

			ath12k_dbg(ab, ATH12K_DBG_PCI,
				   "irq:%d group:%d\n", irq, i);

			/* disable_irq() must mask at the chip, not lazily */
			irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
			ret = request_irq(irq, ath12k_pci_ext_interrupt_handler,
					  IRQF_SHARED,
					  "DP_EXT_IRQ", irq_grp);
			if (ret) {
				ath12k_err(ab, "failed request irq %d: %d\n",
					   vector, ret);
				return ret;
			}

			/* stay masked until ath12k_pci_ext_irq_enable() */
			disable_irq_nosync(ab->irq_num[irq_idx]);
		}
	}

	return 0;
}
574  
/* Request one IRQ per interrupt-capable copy engine (round-robin over the
 * CE MSI vectors), set up its tasklet, and leave it masked; then configure
 * the DP ext IRQs. Returns 0 on success or a negative errno.
 *
 * NOTE(review): on request_irq() failure, IRQs already requested for
 * earlier CEs are not freed here — presumably the probe error path handles
 * that; confirm with the caller.
 */
static int ath12k_pci_config_irq(struct ath12k_base *ab)
{
	struct ath12k_ce_pipe *ce_pipe;
	u32 msi_data_start;
	u32 msi_data_count, msi_data_idx;
	u32 msi_irq_start;
	unsigned int msi_data;
	int irq, i, ret, irq_idx;

	ret = ath12k_pci_get_user_msi_assignment(ab,
						 "CE", &msi_data_count,
						 &msi_data_start, &msi_irq_start);
	if (ret)
		return ret;

	/* Configure CE irqs */

	for (i = 0, msi_data_idx = 0; i < ab->hw_params->ce_count; i++) {
		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		/* interrupt-capable CEs share the CE vectors round-robin */
		msi_data = (msi_data_idx % msi_data_count) + msi_irq_start;
		irq = ath12k_pci_get_msi_irq(ab->dev, msi_data);
		ce_pipe = &ab->ce.ce_pipe[i];

		irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;

		tasklet_setup(&ce_pipe->intr_tq, ath12k_pci_ce_tasklet);

		ret = request_irq(irq, ath12k_pci_ce_interrupt_handler,
				  IRQF_SHARED, irq_name[irq_idx],
				  ce_pipe);
		if (ret) {
			ath12k_err(ab, "failed to request irq %d: %d\n",
				   irq_idx, ret);
			return ret;
		}

		ab->irq_num[irq_idx] = irq;
		msi_data_idx++;

		/* stay masked until ath12k_pci_ce_irqs_enable() */
		ath12k_pci_ce_irq_disable(ab, i);
	}

	ret = ath12k_pci_ext_irq_config(ab);
	if (ret)
		return ret;

	return 0;
}
625  
ath12k_pci_init_qmi_ce_config(struct ath12k_base * ab)626  static void ath12k_pci_init_qmi_ce_config(struct ath12k_base *ab)
627  {
628  	struct ath12k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
629  
630  	cfg->tgt_ce = ab->hw_params->target_ce_config;
631  	cfg->tgt_ce_len = ab->hw_params->target_ce_count;
632  
633  	cfg->svc_to_ce_map = ab->hw_params->svc_to_ce_map;
634  	cfg->svc_to_ce_map_len = ab->hw_params->svc_to_ce_map_len;
635  	ab->qmi.service_ins_id = ab->hw_params->qmi_service_ins_id;
636  }
637  
ath12k_pci_ce_irqs_enable(struct ath12k_base * ab)638  static void ath12k_pci_ce_irqs_enable(struct ath12k_base *ab)
639  {
640  	int i;
641  
642  	set_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
643  
644  	for (i = 0; i < ab->hw_params->ce_count; i++) {
645  		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
646  			continue;
647  		ath12k_pci_ce_irq_enable(ab, i);
648  	}
649  }
650  
/* Toggle the MSI enable bit in the device's MSI capability, preserving
 * the other control bits (read-modify-write of PCI_MSI_FLAGS).
 */
static void ath12k_pci_msi_config(struct ath12k_pci *ab_pci, bool enable)
{
	struct pci_dev *pdev = ab_pci->pdev;
	u16 ctrl;

	pci_read_config_word(pdev, pdev->msi_cap + PCI_MSI_FLAGS, &ctrl);

	if (enable)
		ctrl |= PCI_MSI_FLAGS_ENABLE;
	else
		ctrl &= ~PCI_MSI_FLAGS_ENABLE;

	pci_write_config_word(pdev, pdev->msi_cap + PCI_MSI_FLAGS, ctrl);
}
665  
ath12k_pci_msi_enable(struct ath12k_pci * ab_pci)666  static void ath12k_pci_msi_enable(struct ath12k_pci *ab_pci)
667  {
668  	ath12k_pci_msi_config(ab_pci, true);
669  }
670  
ath12k_pci_msi_disable(struct ath12k_pci * ab_pci)671  static void ath12k_pci_msi_disable(struct ath12k_pci *ab_pci)
672  {
673  	ath12k_pci_msi_config(ab_pci, false);
674  }
675  
/* Allocate the full MSI vector set the chip's msi_config demands (all or
 * nothing), capture the endpoint base data and 64-bit-address capability
 * from the first vector's descriptor, and leave MSI disabled until the
 * firmware handshake enables it. Returns 0 or a negative errno.
 */
static int ath12k_pci_msi_alloc(struct ath12k_pci *ab_pci)
{
	struct ath12k_base *ab = ab_pci->ab;
	const struct ath12k_msi_config *msi_config = ab_pci->msi_config;
	struct msi_desc *msi_desc;
	int num_vectors;
	int ret;

	/* min == max: partial allocations are not usable */
	num_vectors = pci_alloc_irq_vectors(ab_pci->pdev,
					    msi_config->total_vectors,
					    msi_config->total_vectors,
					    PCI_IRQ_MSI);
	if (num_vectors != msi_config->total_vectors) {
		ath12k_err(ab, "failed to get %d MSI vectors, only %d available",
			   msi_config->total_vectors, num_vectors);

		if (num_vectors >= 0)
			return -EINVAL;
		else
			return num_vectors;
	}

	/* keep MSI masked until the target is ready for interrupts */
	ath12k_pci_msi_disable(ab_pci);

	msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
	if (!msi_desc) {
		ath12k_err(ab, "msi_desc is NULL!\n");
		ret = -EINVAL;
		goto free_msi_vector;
	}

	/* base MSI data value; per-user vectors are offsets from this */
	ab_pci->msi_ep_base_data = msi_desc->msg.data;
	if (msi_desc->pci.msi_attrib.is_64)
		set_bit(ATH12K_PCI_FLAG_IS_MSI_64, &ab_pci->flags);

	ath12k_dbg(ab, ATH12K_DBG_PCI, "msi base data is %d\n", ab_pci->msi_ep_base_data);

	return 0;

free_msi_vector:
	pci_free_irq_vectors(ab_pci->pdev);

	return ret;
}
720  
ath12k_pci_msi_free(struct ath12k_pci * ab_pci)721  static void ath12k_pci_msi_free(struct ath12k_pci *ab_pci)
722  {
723  	pci_free_irq_vectors(ab_pci->pdev);
724  }
725  
/* Claim the PCI device: sanity-check the device ID, enable the device,
 * reserve and map BAR0, set the DMA mask and enable bus mastering.
 * Resources are unwound in reverse order via the goto chain on failure.
 * Returns 0 or a negative errno.
 */
static int ath12k_pci_claim(struct ath12k_pci *ab_pci, struct pci_dev *pdev)
{
	struct ath12k_base *ab = ab_pci->ab;
	u16 device_id;
	int ret = 0;

	/* guard against binding to an unexpected device */
	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
	if (device_id != ab_pci->dev_id)  {
		ath12k_err(ab, "pci device id mismatch: 0x%x 0x%x\n",
			   device_id, ab_pci->dev_id);
		ret = -EIO;
		goto out;
	}

	ret = pci_assign_resource(pdev, ATH12K_PCI_BAR_NUM);
	if (ret) {
		ath12k_err(ab, "failed to assign pci resource: %d\n", ret);
		goto out;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		ath12k_err(ab, "failed to enable pci device: %d\n", ret);
		goto out;
	}

	ret = pci_request_region(pdev, ATH12K_PCI_BAR_NUM, "ath12k_pci");
	if (ret) {
		ath12k_err(ab, "failed to request pci region: %d\n", ret);
		goto disable_device;
	}

	/* device DMA is limited to 32-bit addressing */
	ret = dma_set_mask_and_coherent(&pdev->dev,
					DMA_BIT_MASK(ATH12K_PCI_DMA_MASK));
	if (ret) {
		ath12k_err(ab, "failed to set pci dma mask to %d: %d\n",
			   ATH12K_PCI_DMA_MASK, ret);
		goto release_region;
	}

	pci_set_master(pdev);

	ab->mem_len = pci_resource_len(pdev, ATH12K_PCI_BAR_NUM);
	ab->mem = pci_iomap(pdev, ATH12K_PCI_BAR_NUM, 0);
	if (!ab->mem) {
		ath12k_err(ab, "failed to map pci bar %d\n", ATH12K_PCI_BAR_NUM);
		ret = -EIO;
		goto release_region;
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot pci_mem 0x%pK\n", ab->mem);
	return 0;

release_region:
	pci_release_region(pdev, ATH12K_PCI_BAR_NUM);
disable_device:
	pci_disable_device(pdev);
out:
	return ret;
}
786  
ath12k_pci_free_region(struct ath12k_pci * ab_pci)787  static void ath12k_pci_free_region(struct ath12k_pci *ab_pci)
788  {
789  	struct ath12k_base *ab = ab_pci->ab;
790  	struct pci_dev *pci_dev = ab_pci->pdev;
791  
792  	pci_iounmap(pci_dev, ab->mem);
793  	ab->mem = NULL;
794  	pci_release_region(pci_dev, ATH12K_PCI_BAR_NUM);
795  	if (pci_is_enabled(pci_dev))
796  		pci_disable_device(pci_dev);
797  }
798  
/* Snapshot the link-control register, then turn off ASPM L0s/L1 (low power
 * link states can disturb firmware bring-up). The saved value lets
 * ath12k_pci_aspm_restore() put the original policy back later.
 */
static void ath12k_pci_aspm_disable(struct ath12k_pci *ab_pci)
{
	struct ath12k_base *ab = ab_pci->ab;

	pcie_capability_read_word(ab_pci->pdev, PCI_EXP_LNKCTL,
				  &ab_pci->link_ctl);

	ath12k_dbg(ab, ATH12K_DBG_PCI, "pci link_ctl 0x%04x L0s %d L1 %d\n",
		   ab_pci->link_ctl,
		   u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L0S),
		   u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1));

	/* disable L0s and L1 */
	pcie_capability_clear_word(ab_pci->pdev, PCI_EXP_LNKCTL,
				   PCI_EXP_LNKCTL_ASPMC);

	/* remember to restore the saved ASPM bits on the next start */
	set_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags);
}
817  
ath12k_pci_aspm_restore(struct ath12k_pci * ab_pci)818  static void ath12k_pci_aspm_restore(struct ath12k_pci *ab_pci)
819  {
820  	if (test_and_clear_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags))
821  		pcie_capability_clear_and_set_word(ab_pci->pdev, PCI_EXP_LNKCTL,
822  						   PCI_EXP_LNKCTL_ASPMC,
823  						   ab_pci->link_ctl &
824  						   PCI_EXP_LNKCTL_ASPMC);
825  }
826  
ath12k_pci_kill_tasklets(struct ath12k_base * ab)827  static void ath12k_pci_kill_tasklets(struct ath12k_base *ab)
828  {
829  	int i;
830  
831  	for (i = 0; i < ab->hw_params->ce_count; i++) {
832  		struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
833  
834  		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
835  			continue;
836  
837  		tasklet_kill(&ce_pipe->intr_tq);
838  	}
839  }
840  
/* Fully quiesce CE interrupts: mask them, wait out running handlers,
 * then kill the bottom-half tasklets.
 */
static void ath12k_pci_ce_irq_disable_sync(struct ath12k_base *ab)
{
	ath12k_pci_ce_irqs_disable(ab);
	ath12k_pci_sync_ce_irqs(ab);
	ath12k_pci_kill_tasklets(ab);
}
847  
/* Look up the uplink and downlink CE pipes for @service_id in the chip's
 * service-to-CE map. Both directions must resolve; returns 0 on success
 * or -ENOENT (with a WARN) if either is missing. WARNs also fire on
 * duplicate entries for a direction.
 */
int ath12k_pci_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
				   u8 *ul_pipe, u8 *dl_pipe)
{
	bool found_ul = false, found_dl = false;
	const struct service_to_pipe *map;
	int i;

	for (i = 0; i < ab->hw_params->svc_to_ce_map_len; i++) {
		map = &ab->hw_params->svc_to_ce_map[i];

		if (__le32_to_cpu(map->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(map->pipedir)) {
		case PIPEDIR_IN:
			WARN_ON(found_dl);
			*dl_pipe = __le32_to_cpu(map->pipenum);
			found_dl = true;
			break;
		case PIPEDIR_OUT:
			WARN_ON(found_ul);
			*ul_pipe = __le32_to_cpu(map->pipenum);
			found_ul = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(found_dl);
			WARN_ON(found_ul);
			*dl_pipe = __le32_to_cpu(map->pipenum);
			*ul_pipe = __le32_to_cpu(map->pipenum);
			found_dl = true;
			found_ul = true;
			break;
		case PIPEDIR_NONE:
		default:
			break;
		}
	}

	if (WARN_ON(!found_ul || !found_dl))
		return -ENOENT;

	return 0;
}
890  
/* Translate an MSI vector index into the Linux IRQ number for @dev. */
int ath12k_pci_get_msi_irq(struct device *dev, unsigned int vector)
{
	return pci_irq_vector(to_pci_dev(dev), vector);
}
897  
/* Look up the MSI slice assigned to @user_name ("MHI", "CE" or "DP") in
 * the chip's msi_config and report its vector count, base vector, and the
 * base MSI data value (endpoint base + user offset). Returns 0 on success
 * or -EINVAL if the user name is unknown.
 */
int ath12k_pci_get_user_msi_assignment(struct ath12k_base *ab, char *user_name,
				       int *num_vectors, u32 *user_base_data,
				       u32 *base_vector)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	const struct ath12k_msi_config *msi_config = ab_pci->msi_config;
	const struct ath12k_msi_user *user;
	int i;

	for (i = 0; i < msi_config->total_users; i++) {
		user = &msi_config->users[i];

		if (strcmp(user_name, user->name) != 0)
			continue;

		*num_vectors = user->num_vectors;
		*user_base_data = user->base_vector + ab_pci->msi_ep_base_data;
		*base_vector = user->base_vector;

		ath12k_dbg(ab, ATH12K_DBG_PCI, "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
			   user_name, *num_vectors, *user_base_data,
			   *base_vector);

		return 0;
	}

	ath12k_err(ab, "Failed to find MSI assignment for %s!\n", user_name);

	return -EINVAL;
}
925  
/* Read the MSI target address from the device's MSI capability. The high
 * dword exists only for 64-bit-capable MSI; otherwise it is reported as 0.
 */
void ath12k_pci_get_msi_address(struct ath12k_base *ab, u32 *msi_addr_lo,
				u32 *msi_addr_hi)
{
	struct pci_dev *pci_dev = to_pci_dev(ab->dev);
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);

	pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
			      msi_addr_lo);

	if (!test_bit(ATH12K_PCI_FLAG_IS_MSI_64, &ab_pci->flags)) {
		*msi_addr_hi = 0;
		return;
	}

	pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
			      msi_addr_hi);
}
942  
/* Compute the MSI data index for @ce_id: the count of interrupt-capable
 * CEs preceding it (interrupt-less CEs consume no MSI slot).
 */
void ath12k_pci_get_ce_msi_idx(struct ath12k_base *ab, u32 ce_id,
			       u32 *msi_idx)
{
	u32 id, data_idx;

	for (id = 0, data_idx = 0; id < ab->hw_params->ce_count; id++) {
		if (ath12k_ce_get_attr_flags(ab, id) & CE_ATTR_DIS_INTR)
			continue;

		if (id == ce_id)
			break;

		data_idx++;
	}

	*msi_idx = data_idx;
}
959  
/* HIF op: enable all copy-engine interrupts. */
void ath12k_pci_hif_ce_irq_enable(struct ath12k_base *ab)
{
	ath12k_pci_ce_irqs_enable(ab);
}
964  
/* HIF op: disable all copy-engine interrupts and wait for them to drain. */
void ath12k_pci_hif_ce_irq_disable(struct ath12k_base *ab)
{
	ath12k_pci_ce_irq_disable_sync(ab);
}
969  
/* HIF op: enable the DP (ext) interrupt groups.
 *
 * The EXT_IRQ_ENABLED flag is set before the per-group enables so that
 * any interrupt firing immediately afterwards sees the armed state.
 */
void ath12k_pci_ext_irq_enable(struct ath12k_base *ab)
{
	int i;

	set_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);

	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		/* NAPI must be ready before the group's IRQs can fire */
		napi_enable(&irq_grp->napi);
		ath12k_pci_ext_grp_enable(irq_grp);
	}
}
983  
/* HIF op: disable all DP interrupt groups and synchronize so no handler
 * is still running when this returns.
 */
void ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
{
	__ath12k_pci_ext_irq_disable(ab);
	ath12k_pci_sync_ext_irqs(ab);
}
989  
/* HIF op: suspend the MHI channel.
 *
 * Always returns 0; ath12k_mhi_suspend() reports no error here.
 */
int ath12k_pci_hif_suspend(struct ath12k_base *ab)
{
	/* renamed from ar_pci: this is the ab-level PCI private data,
	 * matching the ab_pci convention used throughout this file
	 */
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);

	ath12k_mhi_suspend(ab_pci);

	return 0;
}
998  
/* HIF op: resume the MHI channel after a suspend.
 *
 * Always returns 0; ath12k_mhi_resume() reports no error here.
 */
int ath12k_pci_hif_resume(struct ath12k_base *ab)
{
	/* renamed from ar_pci: this is the ab-level PCI private data,
	 * matching the ab_pci convention used throughout this file
	 */
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);

	ath12k_mhi_resume(ab_pci);

	return 0;
}
1007  
/* HIF op: stop the data path — quiesce CE interrupts first, then drain
 * and clean up the CE pipes.
 */
void ath12k_pci_stop(struct ath12k_base *ab)
{
	ath12k_pci_ce_irq_disable_sync(ab);
	ath12k_ce_cleanup_pipes(ab);
}
1013  
/* HIF op: start the data path after firmware is up.
 *
 * Always returns 0.
 */
int ath12k_pci_start(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);

	/* From here on, accesses beyond ACCESS_ALWAYS_OFF may require an
	 * MHI wakeup (see ath12k_pci_read32()/ath12k_pci_write32()).
	 */
	set_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);

	/* ASPM was disabled for firmware download; restore it now */
	ath12k_pci_aspm_restore(ab_pci);

	ath12k_pci_ce_irqs_enable(ab);
	ath12k_ce_rx_post_buf(ab);

	return 0;
}
1027  
/* Read a 32-bit register, handling MHI wakeup and register windowing.
 *
 * Offsets below WINDOW_START are directly mapped in BAR0. Higher
 * offsets go through either a statically programmed window (QCN9274)
 * or the dynamic sliding window selected under window_lock.
 */
u32 ath12k_pci_read32(struct ath12k_base *ab, u32 offset)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	u32 val, window_start;
	int ret = 0;

	/* for offset beyond BAR + 4K - 32, may
	 * need to wakeup MHI to access.
	 */
	if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->wakeup)
		ret = ab_pci->pci_ops->wakeup(ab);

	if (offset < WINDOW_START) {
		/* directly mapped region, no windowing needed */
		val = ioread32(ab->mem + offset);
	} else {
		if (ab->static_window_map)
			window_start = ath12k_pci_get_window_start(ab, offset);
		else
			window_start = WINDOW_START;

		if (window_start == WINDOW_START) {
			/* dynamic window: program the window register and
			 * read through it, all under window_lock
			 */
			spin_lock_bh(&ab_pci->window_lock);
			ath12k_pci_select_window(ab_pci, offset);
			val = ioread32(ab->mem + window_start +
				       (offset & WINDOW_RANGE_MASK));
			spin_unlock_bh(&ab_pci->window_lock);
		} else {
			/* static mapping: the MHI register region is
			 * remapped to the start of the BAR, so rebase
			 * offsets that fall inside it
			 */
			if ((!window_start) &&
			    (offset >= PCI_MHIREGLEN_REG &&
			     offset <= PCI_MHI_REGION_END))
				offset = offset - PCI_MHIREGLEN_REG;

			val = ioread32(ab->mem + window_start +
				       (offset & WINDOW_RANGE_MASK));
		}
	}

	/* release the wake only if the wakeup above succeeded (!ret).
	 * NOTE(review): the rebase above may have shrunk offset below
	 * ACCESS_ALWAYS_OFF, skipping this release — verify intended.
	 */
	if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->release &&
	    !ret)
		ab_pci->pci_ops->release(ab);
	return val;
}
1072  
/* Write a 32-bit register, handling MHI wakeup and register windowing.
 *
 * Mirrors ath12k_pci_read32(): offsets below WINDOW_START are directly
 * mapped; higher offsets go through the static (QCN9274) or dynamic
 * sliding window, the latter under window_lock.
 */
void ath12k_pci_write32(struct ath12k_base *ab, u32 offset, u32 value)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	u32 window_start;
	int ret = 0;

	/* for offset beyond BAR + 4K - 32, may
	 * need to wakeup MHI to access.
	 */
	if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->wakeup)
		ret = ab_pci->pci_ops->wakeup(ab);

	if (offset < WINDOW_START) {
		/* directly mapped region, no windowing needed */
		iowrite32(value, ab->mem + offset);
	} else {
		if (ab->static_window_map)
			window_start = ath12k_pci_get_window_start(ab, offset);
		else
			window_start = WINDOW_START;

		if (window_start == WINDOW_START) {
			/* dynamic window: program the window register and
			 * write through it, all under window_lock
			 */
			spin_lock_bh(&ab_pci->window_lock);
			ath12k_pci_select_window(ab_pci, offset);
			iowrite32(value, ab->mem + window_start +
				  (offset & WINDOW_RANGE_MASK));
			spin_unlock_bh(&ab_pci->window_lock);
		} else {
			/* static mapping: rebase offsets inside the MHI
			 * register region, which is remapped to the start
			 * of the BAR
			 */
			if ((!window_start) &&
			    (offset >= PCI_MHIREGLEN_REG &&
			     offset <= PCI_MHI_REGION_END))
				offset = offset - PCI_MHIREGLEN_REG;

			iowrite32(value, ab->mem + window_start +
				  (offset & WINDOW_RANGE_MASK));
		}
	}

	/* release the wake only if the wakeup above succeeded (!ret).
	 * NOTE(review): the rebase above may have shrunk offset below
	 * ACCESS_ALWAYS_OFF, skipping this release — verify intended.
	 */
	if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->release &&
	    !ret)
		ab_pci->pci_ops->release(ab);
}
1116  
/* HIF op: power up the device.
 *
 * Resets the SoC, temporarily disables ASPM for the firmware download,
 * enables MSI and starts MHI to bring the firmware up.
 *
 * Returns 0 on success or a negative error code if MHI fails to start.
 */
int ath12k_pci_power_up(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	int ret;

	/* reset cached window state; the SoC reset clears the hardware's
	 * window register
	 */
	ab_pci->register_window = 0;
	clear_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
	ath12k_pci_sw_reset(ab_pci->ab, true);

	/* Disable ASPM during firmware download due to problems switching
	 * to AMSS state.
	 */
	ath12k_pci_aspm_disable(ab_pci);

	ath12k_pci_msi_enable(ab_pci);

	ret = ath12k_mhi_start(ab_pci);
	if (ret) {
		ath12k_err(ab, "failed to start mhi: %d\n", ret);
		return ret;
	}

	/* QCN9274 uses fixed window mappings; program them once here */
	if (ab->static_window_map)
		ath12k_pci_select_static_window(ab_pci);

	return 0;
}
1144  
/* HIF op: power down the device — reverse of ath12k_pci_power_up() */
void ath12k_pci_power_down(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);

	/* restore aspm in case firmware bootup fails */
	ath12k_pci_aspm_restore(ab_pci);

	/* keep the target awake while MSI and MHI are torn down */
	ath12k_pci_force_wake(ab_pci->ab);
	ath12k_pci_msi_disable(ab_pci);
	ath12k_mhi_stop(ab_pci);
	clear_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
	ath12k_pci_sw_reset(ab_pci->ab, false);
}
1158  
/* PCI implementation of the HIF (host interface) ops used by the core */
static const struct ath12k_hif_ops ath12k_pci_hif_ops = {
	.start = ath12k_pci_start,
	.stop = ath12k_pci_stop,
	.read32 = ath12k_pci_read32,
	.write32 = ath12k_pci_write32,
	.power_down = ath12k_pci_power_down,
	.power_up = ath12k_pci_power_up,
	.suspend = ath12k_pci_hif_suspend,
	.resume = ath12k_pci_hif_resume,
	.irq_enable = ath12k_pci_ext_irq_enable,
	.irq_disable = ath12k_pci_ext_irq_disable,
	.get_msi_address = ath12k_pci_get_msi_address,
	.get_user_msi_vector = ath12k_pci_get_user_msi_assignment,
	.map_service_to_pipe = ath12k_pci_map_service_to_pipe,
	.ce_irq_enable = ath12k_pci_hif_ce_irq_enable,
	.ce_irq_disable = ath12k_pci_hif_ce_irq_disable,
	.get_ce_msi_idx = ath12k_pci_get_ce_msi_idx,
};
1177  
1178  static
ath12k_pci_read_hw_version(struct ath12k_base * ab,u32 * major,u32 * minor)1179  void ath12k_pci_read_hw_version(struct ath12k_base *ab, u32 *major, u32 *minor)
1180  {
1181  	u32 soc_hw_version;
1182  
1183  	soc_hw_version = ath12k_pci_read32(ab, TCSR_SOC_HW_VERSION);
1184  	*major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK,
1185  			   soc_hw_version);
1186  	*minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK,
1187  			   soc_hw_version);
1188  
1189  	ath12k_dbg(ab, ATH12K_DBG_PCI,
1190  		   "pci tcsr_soc_hw_version major %d minor %d\n",
1191  		    *major, *minor);
1192  }
1193  
ath12k_pci_probe(struct pci_dev * pdev,const struct pci_device_id * pci_dev)1194  static int ath12k_pci_probe(struct pci_dev *pdev,
1195  			    const struct pci_device_id *pci_dev)
1196  {
1197  	struct ath12k_base *ab;
1198  	struct ath12k_pci *ab_pci;
1199  	u32 soc_hw_version_major, soc_hw_version_minor;
1200  	int ret;
1201  
1202  	ab = ath12k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH12K_BUS_PCI);
1203  	if (!ab) {
1204  		dev_err(&pdev->dev, "failed to allocate ath12k base\n");
1205  		return -ENOMEM;
1206  	}
1207  
1208  	ab->dev = &pdev->dev;
1209  	pci_set_drvdata(pdev, ab);
1210  	ab_pci = ath12k_pci_priv(ab);
1211  	ab_pci->dev_id = pci_dev->device;
1212  	ab_pci->ab = ab;
1213  	ab_pci->pdev = pdev;
1214  	ab->hif.ops = &ath12k_pci_hif_ops;
1215  	pci_set_drvdata(pdev, ab);
1216  	spin_lock_init(&ab_pci->window_lock);
1217  
1218  	ret = ath12k_pci_claim(ab_pci, pdev);
1219  	if (ret) {
1220  		ath12k_err(ab, "failed to claim device: %d\n", ret);
1221  		goto err_free_core;
1222  	}
1223  
1224  	switch (pci_dev->device) {
1225  	case QCN9274_DEVICE_ID:
1226  		ab_pci->msi_config = &ath12k_msi_config[0];
1227  		ab->static_window_map = true;
1228  		ab_pci->pci_ops = &ath12k_pci_ops_qcn9274;
1229  		ath12k_pci_read_hw_version(ab, &soc_hw_version_major,
1230  					   &soc_hw_version_minor);
1231  		switch (soc_hw_version_major) {
1232  		case ATH12K_PCI_SOC_HW_VERSION_2:
1233  			ab->hw_rev = ATH12K_HW_QCN9274_HW20;
1234  			break;
1235  		case ATH12K_PCI_SOC_HW_VERSION_1:
1236  			ab->hw_rev = ATH12K_HW_QCN9274_HW10;
1237  			break;
1238  		default:
1239  			dev_err(&pdev->dev,
1240  				"Unknown hardware version found for QCN9274: 0x%x\n",
1241  				soc_hw_version_major);
1242  			ret = -EOPNOTSUPP;
1243  			goto err_pci_free_region;
1244  		}
1245  		break;
1246  	case WCN7850_DEVICE_ID:
1247  		ab_pci->msi_config = &ath12k_msi_config[0];
1248  		ab->static_window_map = false;
1249  		ab_pci->pci_ops = &ath12k_pci_ops_wcn7850;
1250  		ath12k_pci_read_hw_version(ab, &soc_hw_version_major,
1251  					   &soc_hw_version_minor);
1252  		switch (soc_hw_version_major) {
1253  		case ATH12K_PCI_SOC_HW_VERSION_2:
1254  			ab->hw_rev = ATH12K_HW_WCN7850_HW20;
1255  			break;
1256  		default:
1257  			dev_err(&pdev->dev,
1258  				"Unknown hardware version found for WCN7850: 0x%x\n",
1259  				soc_hw_version_major);
1260  			ret = -EOPNOTSUPP;
1261  			goto err_pci_free_region;
1262  		}
1263  		break;
1264  
1265  	default:
1266  		dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
1267  			pci_dev->device);
1268  		ret = -EOPNOTSUPP;
1269  		goto err_pci_free_region;
1270  	}
1271  
1272  	ret = ath12k_pci_msi_alloc(ab_pci);
1273  	if (ret) {
1274  		ath12k_err(ab, "failed to alloc msi: %d\n", ret);
1275  		goto err_pci_free_region;
1276  	}
1277  
1278  	ret = ath12k_core_pre_init(ab);
1279  	if (ret)
1280  		goto err_pci_msi_free;
1281  
1282  	ret = ath12k_mhi_register(ab_pci);
1283  	if (ret) {
1284  		ath12k_err(ab, "failed to register mhi: %d\n", ret);
1285  		goto err_pci_msi_free;
1286  	}
1287  
1288  	ret = ath12k_hal_srng_init(ab);
1289  	if (ret)
1290  		goto err_mhi_unregister;
1291  
1292  	ret = ath12k_ce_alloc_pipes(ab);
1293  	if (ret) {
1294  		ath12k_err(ab, "failed to allocate ce pipes: %d\n", ret);
1295  		goto err_hal_srng_deinit;
1296  	}
1297  
1298  	ath12k_pci_init_qmi_ce_config(ab);
1299  
1300  	ret = ath12k_pci_config_irq(ab);
1301  	if (ret) {
1302  		ath12k_err(ab, "failed to config irq: %d\n", ret);
1303  		goto err_ce_free;
1304  	}
1305  
1306  	ret = ath12k_core_init(ab);
1307  	if (ret) {
1308  		ath12k_err(ab, "failed to init core: %d\n", ret);
1309  		goto err_free_irq;
1310  	}
1311  	return 0;
1312  
1313  err_free_irq:
1314  	ath12k_pci_free_irq(ab);
1315  
1316  err_ce_free:
1317  	ath12k_ce_free_pipes(ab);
1318  
1319  err_hal_srng_deinit:
1320  	ath12k_hal_srng_deinit(ab);
1321  
1322  err_mhi_unregister:
1323  	ath12k_mhi_unregister(ab_pci);
1324  
1325  err_pci_msi_free:
1326  	ath12k_pci_msi_free(ab_pci);
1327  
1328  err_pci_free_region:
1329  	ath12k_pci_free_region(ab_pci);
1330  
1331  err_free_core:
1332  	ath12k_core_free(ab);
1333  
1334  	return ret;
1335  }
1336  
/* PCI remove: tear down everything set up in ath12k_pci_probe() */
static void ath12k_pci_remove(struct pci_dev *pdev)
{
	struct ath12k_base *ab = pci_get_drvdata(pdev);
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);

	/* if QMI setup failed the core never fully registered; only
	 * power-down and QMI teardown are needed before freeing resources
	 */
	if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) {
		ath12k_pci_power_down(ab);
		ath12k_qmi_deinit_service(ab);
		goto qmi_fail;
	}

	set_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags);

	/* make sure no reset work races with the teardown below */
	cancel_work_sync(&ab->reset_work);
	ath12k_core_deinit(ab);

qmi_fail:
	ath12k_mhi_unregister(ab_pci);

	ath12k_pci_free_irq(ab);
	ath12k_pci_msi_free(ab_pci);
	ath12k_pci_free_region(ab_pci);

	ath12k_hal_srng_deinit(ab);
	ath12k_ce_free_pipes(ab);
	ath12k_core_free(ab);
}
1364  
/* PCI shutdown: power the device down for system reboot/poweroff */
static void ath12k_pci_shutdown(struct pci_dev *pdev)
{
	struct ath12k_base *ab = pci_get_drvdata(pdev);

	ath12k_pci_power_down(ab);
}
1371  
/* PM op: suspend the core; logs a warning and propagates any failure */
static __maybe_unused int ath12k_pci_pm_suspend(struct device *dev)
{
	struct ath12k_base *ab = dev_get_drvdata(dev);
	int ret;

	ret = ath12k_core_suspend(ab);
	if (ret)
		ath12k_warn(ab, "failed to suspend core: %d\n", ret);

	return ret;
}
1383  
/* PM op: resume the core; logs a warning and propagates any failure */
static __maybe_unused int ath12k_pci_pm_resume(struct device *dev)
{
	struct ath12k_base *ab = dev_get_drvdata(dev);
	int ret;

	ret = ath12k_core_resume(ab);
	if (ret)
		ath12k_warn(ab, "failed to resume core: %d\n", ret);

	return ret;
}
1395  
/* system sleep PM hooks (suspend/resume only, no runtime PM) */
static SIMPLE_DEV_PM_OPS(ath12k_pci_pm_ops,
			 ath12k_pci_pm_suspend,
			 ath12k_pci_pm_resume);
1399  
/* PCI driver registration for the QCN9274/WCN7850 device ids above */
static struct pci_driver ath12k_pci_driver = {
	.name = "ath12k_pci",
	.id_table = ath12k_pci_id_table,
	.probe = ath12k_pci_probe,
	.remove = ath12k_pci_remove,
	.shutdown = ath12k_pci_shutdown,
	.driver.pm = &ath12k_pci_pm_ops,
};
1408  
ath12k_pci_init(void)1409  static int ath12k_pci_init(void)
1410  {
1411  	int ret;
1412  
1413  	ret = pci_register_driver(&ath12k_pci_driver);
1414  	if (ret) {
1415  		pr_err("failed to register ath12k pci driver: %d\n",
1416  		       ret);
1417  		return ret;
1418  	}
1419  
1420  	return 0;
1421  }
1422  module_init(ath12k_pci_init);
1423  
/* Module exit: unregister the PCI driver */
static void ath12k_pci_exit(void)
{
	pci_unregister_driver(&ath12k_pci_driver);
}

module_exit(ath12k_pci_exit);
1430  
1431  MODULE_DESCRIPTION("Driver support for Qualcomm Technologies PCIe 802.11be WLAN devices");
1432  MODULE_LICENSE("Dual BSD/GPL");
1433