// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * MHI PCI driver - MHI over PCI controller driver
 *
 * This module is a generic driver for registering MHI-over-PCI devices,
 * such as PCIe QCOM modems.
 *
 * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
 */

#include <linux/aer.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

#define MHI_PCI_DEFAULT_BAR_NUM 0

#define MHI_POST_RESET_DELAY_MS 2000

#define HEALTH_CHECK_PERIOD (HZ * 2)

/**
 * struct mhi_pci_dev_info - MHI PCI device specific information
 * @config: MHI controller configuration
 * @name: name of the PCI module
 * @fw: firmware path (if any)
 * @edl: emergency download mode firmware path (if any)
 * @bar_num: PCI base address register to use for MHI MMIO register space
 * @dma_data_width: DMA transfer word size (32 or 64 bits)
 * @mru_default: default MRU size for MBIM network packets
 * @sideband_wake: set for devices that use a dedicated sideband GPIO for
 *		   wakeup instead of inband wake support (such as sdx24)
 */
struct mhi_pci_dev_info {
	const struct mhi_controller_config *config;
	const char *name;
	const char *fw;
	const char *edl;
	unsigned int bar_num;
	unsigned int dma_data_width;
	unsigned int mru_default;
	bool sideband_wake;
};

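/* The helper macros below expand to struct mhi_channel_config and
 * struct mhi_event_config initializers so that the per-modem channel and
 * event ring tables further down stay compact. For example,
 * MHI_CHANNEL_CONFIG_UL(4, "DIAG", 16, 1) describes uplink channel 4 named
 * "DIAG" with 16 ring elements, served by event ring 1, exposed in the AMSS
 * execution environment and using non-burst doorbells.
 */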
#define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \
	{						\
		.num = ch_num,				\
		.name = ch_name,			\
		.num_elements = el_count,		\
		.event_ring = ev_ring,			\
		.dir = DMA_TO_DEVICE,			\
		.ee_mask = BIT(MHI_EE_AMSS),		\
		.pollcfg = 0,				\
		.doorbell = MHI_DB_BRST_DISABLE,	\
		.lpm_notify = false,			\
		.offload_channel = false,		\
		.doorbell_mode_switch = false,		\
	}

#define MHI_CHANNEL_CONFIG_DL(ch_num, ch_name, el_count, ev_ring) \
	{						\
		.num = ch_num,				\
		.name = ch_name,			\
		.num_elements = el_count,		\
		.event_ring = ev_ring,			\
		.dir = DMA_FROM_DEVICE,			\
		.ee_mask = BIT(MHI_EE_AMSS),		\
		.pollcfg = 0,				\
		.doorbell = MHI_DB_BRST_DISABLE,	\
		.lpm_notify = false,			\
		.offload_channel = false,		\
		.doorbell_mode_switch = false,		\
	}

#define MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(ch_num, ch_name, el_count, ev_ring) \
	{						\
		.num = ch_num,				\
		.name = ch_name,			\
		.num_elements = el_count,		\
		.event_ring = ev_ring,			\
		.dir = DMA_FROM_DEVICE,			\
		.ee_mask = BIT(MHI_EE_AMSS),		\
		.pollcfg = 0,				\
		.doorbell = MHI_DB_BRST_DISABLE,	\
		.lpm_notify = false,			\
		.offload_channel = false,		\
		.doorbell_mode_switch = false,		\
		.auto_queue = true,			\
	}

#define MHI_EVENT_CONFIG_CTRL(ev_ring, el_count) \
	{					\
		.num_elements = el_count,	\
		.irq_moderation_ms = 0,		\
		.irq = (ev_ring) + 1,		\
		.priority = 1,			\
		.mode = MHI_DB_BRST_DISABLE,	\
		.data_type = MHI_ER_CTRL,	\
		.hardware_event = false,	\
		.client_managed = false,	\
		.offload_channel = false,	\
	}

#define MHI_CHANNEL_CONFIG_HW_UL(ch_num, ch_name, el_count, ev_ring) \
	{						\
		.num = ch_num,				\
		.name = ch_name,			\
		.num_elements = el_count,		\
		.event_ring = ev_ring,			\
		.dir = DMA_TO_DEVICE,			\
		.ee_mask = BIT(MHI_EE_AMSS),		\
		.pollcfg = 0,				\
		.doorbell = MHI_DB_BRST_ENABLE,		\
		.lpm_notify = false,			\
		.offload_channel = false,		\
		.doorbell_mode_switch = true,		\
	}

#define MHI_CHANNEL_CONFIG_HW_DL(ch_num, ch_name, el_count, ev_ring) \
	{						\
		.num = ch_num,				\
		.name = ch_name,			\
		.num_elements = el_count,		\
		.event_ring = ev_ring,			\
		.dir = DMA_FROM_DEVICE,			\
		.ee_mask = BIT(MHI_EE_AMSS),		\
		.pollcfg = 0,				\
		.doorbell = MHI_DB_BRST_ENABLE,		\
		.lpm_notify = false,			\
		.offload_channel = false,		\
		.doorbell_mode_switch = true,		\
	}

#define MHI_CHANNEL_CONFIG_UL_SBL(ch_num, ch_name, el_count, ev_ring) \
	{						\
		.num = ch_num,				\
		.name = ch_name,			\
		.num_elements = el_count,		\
		.event_ring = ev_ring,			\
		.dir = DMA_TO_DEVICE,			\
		.ee_mask = BIT(MHI_EE_SBL),		\
		.pollcfg = 0,				\
		.doorbell = MHI_DB_BRST_DISABLE,	\
		.lpm_notify = false,			\
		.offload_channel = false,		\
		.doorbell_mode_switch = false,		\
	}

#define MHI_CHANNEL_CONFIG_DL_SBL(ch_num, ch_name, el_count, ev_ring) \
	{						\
		.num = ch_num,				\
		.name = ch_name,			\
		.num_elements = el_count,		\
		.event_ring = ev_ring,			\
		.dir = DMA_FROM_DEVICE,			\
		.ee_mask = BIT(MHI_EE_SBL),		\
		.pollcfg = 0,				\
		.doorbell = MHI_DB_BRST_DISABLE,	\
		.lpm_notify = false,			\
		.offload_channel = false,		\
		.doorbell_mode_switch = false,		\
	}

#define MHI_CHANNEL_CONFIG_UL_FP(ch_num, ch_name, el_count, ev_ring) \
	{						\
		.num = ch_num,				\
		.name = ch_name,			\
		.num_elements = el_count,		\
		.event_ring = ev_ring,			\
		.dir = DMA_TO_DEVICE,			\
		.ee_mask = BIT(MHI_EE_FP),		\
		.pollcfg = 0,				\
		.doorbell = MHI_DB_BRST_DISABLE,	\
		.lpm_notify = false,			\
		.offload_channel = false,		\
		.doorbell_mode_switch = false,		\
	}

#define MHI_CHANNEL_CONFIG_DL_FP(ch_num, ch_name, el_count, ev_ring) \
	{						\
		.num = ch_num,				\
		.name = ch_name,			\
		.num_elements = el_count,		\
		.event_ring = ev_ring,			\
		.dir = DMA_FROM_DEVICE,			\
		.ee_mask = BIT(MHI_EE_FP),		\
		.pollcfg = 0,				\
		.doorbell = MHI_DB_BRST_DISABLE,	\
		.lpm_notify = false,			\
		.offload_channel = false,		\
		.doorbell_mode_switch = false,		\
	}

#define MHI_EVENT_CONFIG_DATA(ev_ring, el_count) \
	{					\
		.num_elements = el_count,	\
		.irq_moderation_ms = 5,		\
		.irq = (ev_ring) + 1,		\
		.priority = 1,			\
		.mode = MHI_DB_BRST_DISABLE,	\
		.data_type = MHI_ER_DATA,	\
		.hardware_event = false,	\
		.client_managed = false,	\
		.offload_channel = false,	\
	}

#define MHI_EVENT_CONFIG_HW_DATA(ev_ring, el_count, ch_num) \
	{					\
		.num_elements = el_count,	\
		.irq_moderation_ms = 1,		\
		.irq = (ev_ring) + 1,		\
		.priority = 1,			\
		.mode = MHI_DB_BRST_DISABLE,	\
		.data_type = MHI_ER_DATA,	\
		.hardware_event = true,		\
		.client_managed = false,	\
		.offload_channel = false,	\
		.channel = ch_num,		\
	}

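/* Channel and event ring layout shared by the Qualcomm SDX24/SDX55/SDX65
 * based modems registered below.
 */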
static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 16, 1),
	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 16, 1),
	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 4, 0),
	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 4, 0),
	MHI_CHANNEL_CONFIG_UL(14, "QMI", 4, 0),
	MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0),
	MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0),
	MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 8, 0),
	MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
	MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 2),
	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 3),
};

static struct mhi_event_config modem_qcom_v1_mhi_events[] = {
	/* first ring is control+data ring */
	MHI_EVENT_CONFIG_CTRL(0, 64),
	/* DIAG dedicated event ring */
	MHI_EVENT_CONFIG_DATA(1, 128),
	/* Hardware channels request dedicated hardware event rings */
	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
	MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101)
};

static const struct mhi_controller_config modem_qcom_v1_mhiv_config = {
	.max_channels = 128,
	.timeout_ms = 8000,
	.num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels),
	.ch_cfg = modem_qcom_v1_mhi_channels,
	.num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events),
	.event_cfg = modem_qcom_v1_mhi_events,
};

static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = {
	.name = "qcom-sdx65m",
	.fw = "qcom/sdx65m/xbl.elf",
	.edl = "qcom/sdx65m/edl.mbn",
	.config = &modem_qcom_v1_mhiv_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.sideband_wake = false,
};

static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {
	.name = "qcom-sdx55m",
	.fw = "qcom/sdx55m/sbl1.mbn",
	.edl = "qcom/sdx55m/edl.mbn",
	.config = &modem_qcom_v1_mhiv_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
	.sideband_wake = false,
};

static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = {
	.name = "qcom-sdx24",
	.edl = "qcom/prog_firehose_sdx24.mbn",
	.config = &modem_qcom_v1_mhiv_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.sideband_wake = true,
};

static const struct mhi_channel_config mhi_quectel_em1xx_channels[] = {
	MHI_CHANNEL_CONFIG_UL(0, "NMEA", 32, 0),
	MHI_CHANNEL_CONFIG_DL(1, "NMEA", 32, 0),
	MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
	MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1),
	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
	/* The EDL firmware is a flash programmer exposing the Firehose protocol */
	MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
	MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
};

static struct mhi_event_config mhi_quectel_em1xx_events[] = {
	MHI_EVENT_CONFIG_CTRL(0, 128),
	MHI_EVENT_CONFIG_DATA(1, 128),
	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
	MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101)
};

static const struct mhi_controller_config modem_quectel_em1xx_config = {
	.max_channels = 128,
	.timeout_ms = 20000,
	.num_channels = ARRAY_SIZE(mhi_quectel_em1xx_channels),
	.ch_cfg = mhi_quectel_em1xx_channels,
	.num_events = ARRAY_SIZE(mhi_quectel_em1xx_events),
	.event_cfg = mhi_quectel_em1xx_events,
};

static const struct mhi_pci_dev_info mhi_quectel_em1xx_info = {
	.name = "quectel-em1xx",
	.edl = "qcom/prog_firehose_sdx24.mbn",
	.config = &modem_quectel_em1xx_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
	.sideband_wake = true,
};

static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = {
	MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 32, 0),
	MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 32, 0),
	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1),
	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
};

static struct mhi_event_config mhi_foxconn_sdx55_events[] = {
	MHI_EVENT_CONFIG_CTRL(0, 128),
	MHI_EVENT_CONFIG_DATA(1, 128),
	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
	MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101)
};

static const struct mhi_controller_config modem_foxconn_sdx55_config = {
	.max_channels = 128,
	.timeout_ms = 20000,
	.num_channels = ARRAY_SIZE(mhi_foxconn_sdx55_channels),
	.ch_cfg = mhi_foxconn_sdx55_channels,
	.num_events = ARRAY_SIZE(mhi_foxconn_sdx55_events),
	.event_cfg = mhi_foxconn_sdx55_events,
};

static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
	.name = "foxconn-sdx55",
	.fw = "qcom/sdx55m/sbl1.mbn",
	.edl = "qcom/sdx55m/edl.mbn",
	.config = &modem_foxconn_sdx55_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
	.sideband_wake = false,
};

static const struct mhi_pci_dev_info mhi_foxconn_sdx65_info = {
	.name = "foxconn-sdx65",
	.config = &modem_foxconn_sdx55_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
	.sideband_wake = false,
};

static const struct mhi_channel_config mhi_mv3x_channels[] = {
	MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 64, 0),
	MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 64, 0),
	/* MBIM Control Channel */
	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 64, 0),
	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 64, 0),
	/* MBIM Data Channel */
	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 512, 2),
	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 512, 3),
};

static struct mhi_event_config mhi_mv3x_events[] = {
	MHI_EVENT_CONFIG_CTRL(0, 256),
	MHI_EVENT_CONFIG_DATA(1, 256),
	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
	MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101),
};

static const struct mhi_controller_config modem_mv3x_config = {
	.max_channels = 128,
	.timeout_ms = 20000,
	.num_channels = ARRAY_SIZE(mhi_mv3x_channels),
	.ch_cfg = mhi_mv3x_channels,
	.num_events = ARRAY_SIZE(mhi_mv3x_events),
	.event_cfg = mhi_mv3x_events,
};

static const struct mhi_pci_dev_info mhi_mv31_info = {
	.name = "cinterion-mv31",
	.config = &modem_mv3x_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
};

static const struct mhi_pci_dev_info mhi_mv32_info = {
	.name = "cinterion-mv32",
	.config = &modem_mv3x_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
};

static const struct mhi_channel_config mhi_sierra_em919x_channels[] = {
	MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
	MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 256, 0),
	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 0),
	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 0),
	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 128, 0),
	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 128, 0),
	MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
	MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 512, 1),
	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 512, 2),
};

static struct mhi_event_config modem_sierra_em919x_mhi_events[] = {
	/* first ring is control+data and DIAG ring */
	MHI_EVENT_CONFIG_CTRL(0, 2048),
	/* Hardware channels request dedicated hardware event rings */
	MHI_EVENT_CONFIG_HW_DATA(1, 2048, 100),
	MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
};

static const struct mhi_controller_config modem_sierra_em919x_config = {
	.max_channels = 128,
	.timeout_ms = 24000,
	.num_channels = ARRAY_SIZE(mhi_sierra_em919x_channels),
	.ch_cfg = mhi_sierra_em919x_channels,
	.num_events = ARRAY_SIZE(modem_sierra_em919x_mhi_events),
	.event_cfg = modem_sierra_em919x_mhi_events,
};

static const struct mhi_pci_dev_info mhi_sierra_em919x_info = {
	.name = "sierra-em919x",
	.config = &modem_sierra_em919x_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.sideband_wake = false,
};

static const struct mhi_channel_config mhi_telit_fn980_hw_v1_channels[] = {
	MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
	MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
	MHI_CHANNEL_CONFIG_UL(20, "IPCR", 16, 0),
	MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 16, 0),
	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 1),
	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 2),
};

static struct mhi_event_config mhi_telit_fn980_hw_v1_events[] = {
	MHI_EVENT_CONFIG_CTRL(0, 128),
	MHI_EVENT_CONFIG_HW_DATA(1, 1024, 100),
	MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
};

static struct mhi_controller_config modem_telit_fn980_hw_v1_config = {
	.max_channels = 128,
	.timeout_ms = 20000,
	.num_channels = ARRAY_SIZE(mhi_telit_fn980_hw_v1_channels),
	.ch_cfg = mhi_telit_fn980_hw_v1_channels,
	.num_events = ARRAY_SIZE(mhi_telit_fn980_hw_v1_events),
	.event_cfg = mhi_telit_fn980_hw_v1_events,
};

static const struct mhi_pci_dev_info mhi_telit_fn980_hw_v1_info = {
	.name = "telit-fn980-hwv1",
	.fw = "qcom/sdx55m/sbl1.mbn",
	.edl = "qcom/sdx55m/edl.mbn",
	.config = &modem_telit_fn980_hw_v1_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
	.sideband_wake = false,
};

static const struct mhi_channel_config mhi_telit_fn990_channels[] = {
	MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
	MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 1),
	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 1),
	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
	MHI_CHANNEL_CONFIG_UL(92, "DUN2", 32, 1),
	MHI_CHANNEL_CONFIG_DL(93, "DUN2", 32, 1),
	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
};

static struct mhi_event_config mhi_telit_fn990_events[] = {
	MHI_EVENT_CONFIG_CTRL(0, 128),
	MHI_EVENT_CONFIG_DATA(1, 128),
	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
	MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101)
};

static const struct mhi_controller_config modem_telit_fn990_config = {
	.max_channels = 128,
	.timeout_ms = 20000,
	.num_channels = ARRAY_SIZE(mhi_telit_fn990_channels),
	.ch_cfg = mhi_telit_fn990_channels,
	.num_events = ARRAY_SIZE(mhi_telit_fn990_events),
	.event_cfg = mhi_telit_fn990_events,
};

static const struct mhi_pci_dev_info mhi_telit_fn990_info = {
	.name = "telit-fn990",
	.config = &modem_telit_fn990_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.sideband_wake = false,
	.mru_default = 32768,
};

/* Keep the list sorted by PID. New VIDs should be added as the last entries */
static const struct pci_device_id mhi_pci_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304),
		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info },
	/* EM919x (sdx55), use the same vid:pid as qcom-sdx55m */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x18d7, 0x0200),
		.driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info },
	/* Telit FN980 hardware revision v1 */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x1C5D, 0x2000),
		.driver_data = (kernel_ulong_t) &mhi_telit_fn980_hw_v1_info },
	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306),
		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info },
	/* Telit FN990 */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010),
		.driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
	{ PCI_DEVICE(0x1eac, 0x1001), /* EM120R-GL (sdx24) */
		.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
	{ PCI_DEVICE(0x1eac, 0x1002), /* EM160R-GL (sdx24) */
		.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
	{ PCI_DEVICE(0x1eac, 0x2001), /* EM120R-GL for FCCL (sdx24) */
		.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
	/* T99W175 (sdx55), Both for eSIM and Non-eSIM */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0ab),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
	/* DW5930e (sdx55), With eSIM, It's also T99W175 */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b0),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
	/* DW5930e (sdx55), Non-eSIM, It's also T99W175 */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b1),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
	/* T99W175 (sdx55), Based on Qualcomm new baseline */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0bf),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
	/* T99W175 (sdx55) */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0c3),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
	/* T99W368 (sdx65) */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d8),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
	/* T99W373 (sdx62) */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d9),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
	/* MV31-W (Cinterion) */
	{ PCI_DEVICE(0x1269, 0x00b3),
		.driver_data = (kernel_ulong_t) &mhi_mv31_info },
	/* MV31-W (Cinterion), based on new baseline */
	{ PCI_DEVICE(0x1269, 0x00b4),
		.driver_data = (kernel_ulong_t) &mhi_mv31_info },
	/* MV32-WA (Cinterion) */
	{ PCI_DEVICE(0x1269, 0x00ba),
		.driver_data = (kernel_ulong_t) &mhi_mv32_info },
	/* MV32-WB (Cinterion) */
	{ PCI_DEVICE(0x1269, 0x00bb),
		.driver_data = (kernel_ulong_t) &mhi_mv32_info },
	{  }
};
MODULE_DEVICE_TABLE(pci, mhi_pci_id_table);

enum mhi_pci_device_status {
	MHI_PCI_DEV_STARTED,
	MHI_PCI_DEV_SUSPENDED,
};

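/**
 * struct mhi_pci_device - per-device driver state
 * @mhi_cntrl: embedded MHI controller instance
 * @pci_state: PCI config space saved at probe time, used for recovery
 * @recovery_work: work item restarting the device after a failure
 * @health_check_timer: periodic timer checking that the device is alive
 * @status: MHI_PCI_DEV_* state bits
 */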
struct mhi_pci_device {
	struct mhi_controller mhi_cntrl;
	struct pci_saved_state *pci_state;
	struct work_struct recovery_work;
	struct timer_list health_check_timer;
	unsigned long status;
};

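/* MMIO register accessors provided to the MHI core */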
static int mhi_pci_read_reg(struct mhi_controller *mhi_cntrl,
			    void __iomem *addr, u32 *out)
{
	*out = readl(addr);
	return 0;
}

static void mhi_pci_write_reg(struct mhi_controller *mhi_cntrl,
			      void __iomem *addr, u32 val)
{
	writel(val, addr);
}

static void mhi_pci_status_cb(struct mhi_controller *mhi_cntrl,
			      enum mhi_callback cb)
{
	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);

	/* Gate runtime PM on the state reported by the MHI core */
	switch (cb) {
	case MHI_CB_FATAL_ERROR:
	case MHI_CB_SYS_ERROR:
		dev_warn(&pdev->dev, "firmware crashed (%u)\n", cb);
		pm_runtime_forbid(&pdev->dev);
		break;
	case MHI_CB_EE_MISSION_MODE:
		pm_runtime_allow(&pdev->dev);
		break;
	default:
		break;
	}
}

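/* Wake hooks are stubbed out for devices that use a dedicated sideband wake
 * GPIO instead of inband wake (see info->sideband_wake in probe).
 */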
static void mhi_pci_wake_get_nop(struct mhi_controller *mhi_cntrl, bool force)
{
	/* no-op */
}

static void mhi_pci_wake_put_nop(struct mhi_controller *mhi_cntrl, bool override)
{
	/* no-op */
}

static void mhi_pci_wake_toggle_nop(struct mhi_controller *mhi_cntrl)
{
	/* no-op */
}

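/* Check that the device is still reachable: a config space read failing,
 * returning all ones or a zero vendor ID means the device is gone or the
 * link is down.
 */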
static bool mhi_pci_is_alive(struct mhi_controller *mhi_cntrl)
{
	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
	u16 vendor = 0;

	if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor))
		return false;

	if (vendor == (u16) ~0 || vendor == 0)
		return false;

	return true;
}

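/* Enable the PCI device, map the BAR holding the MHI MMIO registers and
 * configure the DMA mask. Resources are device-managed (pcim_*), so no
 * explicit teardown is needed on removal.
 */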
static int mhi_pci_claim(struct mhi_controller *mhi_cntrl,
			 unsigned int bar_num, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
	int err;

	err = pci_assign_resource(pdev, bar_num);
	if (err)
		return err;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device: %d\n", err);
		return err;
	}

	err = pcim_iomap_regions(pdev, 1 << bar_num, pci_name(pdev));
	if (err) {
		dev_err(&pdev->dev, "failed to map pci region: %d\n", err);
		return err;
	}
	mhi_cntrl->regs = pcim_iomap_table(pdev)[bar_num];
	mhi_cntrl->reg_len = pci_resource_len(pdev, bar_num);

	err = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
	if (err) {
		dev_err(&pdev->dev, "Cannot set proper DMA mask\n");
		return err;
	}

	pci_set_master(pdev);

	return 0;
}

static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl,
			    const struct mhi_controller_config *mhi_cntrl_config)
{
	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
	int nr_vectors, i;
	int *irq;

	/*
	 * Ideally, allocate one MSI vector for BHI plus one vector per event
	 * ring. No explicit pci_free_irq_vectors() is required, it is done by
	 * pcim_release().
	 */
	mhi_cntrl->nr_irqs = 1 + mhi_cntrl_config->num_events;

	nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSI);
	if (nr_vectors < 0) {
		dev_err(&pdev->dev, "Error allocating MSI vectors %d\n",
			nr_vectors);
		return nr_vectors;
	}

	if (nr_vectors < mhi_cntrl->nr_irqs) {
		dev_warn(&pdev->dev, "using shared MSI\n");

		/* Patch msi vectors, use only one (shared) */
		for (i = 0; i < mhi_cntrl_config->num_events; i++)
			mhi_cntrl_config->event_cfg[i].irq = 0;
		mhi_cntrl->nr_irqs = 1;
	}

	irq = devm_kcalloc(&pdev->dev, mhi_cntrl->nr_irqs, sizeof(int), GFP_KERNEL);
	if (!irq)
		return -ENOMEM;

	for (i = 0; i < mhi_cntrl->nr_irqs; i++) {
		int vector = i >= nr_vectors ? (nr_vectors - 1) : i;

		irq[i] = pci_irq_vector(pdev, vector);
	}

	mhi_cntrl->irq = irq;

	return 0;
}

static int mhi_pci_runtime_get(struct mhi_controller *mhi_cntrl)
{
	/* The runtime_get() MHI callback means:
	 *    Do whatever is requested to leave M3.
	 */
	return pm_runtime_get(mhi_cntrl->cntrl_dev);
}

static void mhi_pci_runtime_put(struct mhi_controller *mhi_cntrl)
{
	/* The runtime_put() MHI callback means:
	 *    the device can be moved to the M3 state.
	 */
	pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev);
	pm_runtime_put(mhi_cntrl->cntrl_dev);
}

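/* Recovery: tear down any active MHI state, restore the PCI config space
 * saved at probe time and power the device back up. If the device does not
 * come back, fall back to a PCI function reset.
 */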
static void mhi_pci_recovery_work(struct work_struct *work)
{
	struct mhi_pci_device *mhi_pdev = container_of(work, struct mhi_pci_device,
						       recovery_work);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
	int err;

	dev_warn(&pdev->dev, "device recovery started\n");

	del_timer(&mhi_pdev->health_check_timer);
	pm_runtime_forbid(&pdev->dev);

	/* Clean up MHI state */
	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
		mhi_power_down(mhi_cntrl, false);
		mhi_unprepare_after_power_down(mhi_cntrl);
	}

	pci_set_power_state(pdev, PCI_D0);
	pci_load_saved_state(pdev, mhi_pdev->pci_state);
	pci_restore_state(pdev);

	if (!mhi_pci_is_alive(mhi_cntrl))
		goto err_try_reset;

	err = mhi_prepare_for_power_up(mhi_cntrl);
	if (err)
		goto err_try_reset;

	err = mhi_sync_power_up(mhi_cntrl);
	if (err)
		goto err_unprepare;

	dev_dbg(&pdev->dev, "Recovery completed\n");

	set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
	return;

err_unprepare:
	mhi_unprepare_after_power_down(mhi_cntrl);
err_try_reset:
	if (pci_reset_function(pdev))
		dev_err(&pdev->dev, "Recovery failed\n");
}

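/* Periodic health check: if a started (and not suspended) device stops
 * responding, schedule the recovery work.
 */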
static void health_check(struct timer_list *t)
{
	struct mhi_pci_device *mhi_pdev = from_timer(mhi_pdev, t, health_check_timer);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;

	if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
			test_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
		return;

	if (!mhi_pci_is_alive(mhi_cntrl)) {
		dev_err(mhi_cntrl->cntrl_dev, "Device died\n");
		queue_work(system_long_wq, &mhi_pdev->recovery_work);
		return;
	}

	/* reschedule in two seconds */
	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
}

static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	const struct mhi_pci_dev_info *info = (struct mhi_pci_dev_info *) id->driver_data;
	const struct mhi_controller_config *mhi_cntrl_config;
	struct mhi_pci_device *mhi_pdev;
	struct mhi_controller *mhi_cntrl;
	int err;

	dev_info(&pdev->dev, "MHI PCI device found: %s\n", info->name);

	/* mhi_pdev.mhi_cntrl must be zero-initialized */
	mhi_pdev = devm_kzalloc(&pdev->dev, sizeof(*mhi_pdev), GFP_KERNEL);
	if (!mhi_pdev)
		return -ENOMEM;

	INIT_WORK(&mhi_pdev->recovery_work, mhi_pci_recovery_work);
	timer_setup(&mhi_pdev->health_check_timer, health_check, 0);

	mhi_cntrl_config = info->config;
	mhi_cntrl = &mhi_pdev->mhi_cntrl;

	mhi_cntrl->cntrl_dev = &pdev->dev;
	mhi_cntrl->iova_start = 0;
	mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(info->dma_data_width);
	mhi_cntrl->fw_image = info->fw;
	mhi_cntrl->edl_image = info->edl;

	mhi_cntrl->read_reg = mhi_pci_read_reg;
	mhi_cntrl->write_reg = mhi_pci_write_reg;
	mhi_cntrl->status_cb = mhi_pci_status_cb;
	mhi_cntrl->runtime_get = mhi_pci_runtime_get;
	mhi_cntrl->runtime_put = mhi_pci_runtime_put;
	mhi_cntrl->mru = info->mru_default;

	if (info->sideband_wake) {
		mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
		mhi_cntrl->wake_put = mhi_pci_wake_put_nop;
		mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
	}

	err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
	if (err)
		return err;

	err = mhi_pci_get_irqs(mhi_cntrl, mhi_cntrl_config);
	if (err)
		return err;

	pci_set_drvdata(pdev, mhi_pdev);

	/* Keep the saved PCI config space at hand for restore after a sudden
	 * PCI error: cache the state locally and discard the PCI core copy.
	 */
	pci_save_state(pdev);
	mhi_pdev->pci_state = pci_store_saved_state(pdev);
	pci_load_saved_state(pdev, NULL);

	pci_enable_pcie_error_reporting(pdev);

	err = mhi_register_controller(mhi_cntrl, mhi_cntrl_config);
	if (err)
		goto err_disable_reporting;

	/* MHI bus does not power up the controller by default */
	err = mhi_prepare_for_power_up(mhi_cntrl);
	if (err) {
		dev_err(&pdev->dev, "failed to prepare MHI controller\n");
		goto err_unregister;
	}

	err = mhi_sync_power_up(mhi_cntrl);
	if (err) {
		dev_err(&pdev->dev, "failed to power up MHI controller\n");
		goto err_unprepare;
	}

	set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);

	/* start health check */
	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);

	/* Only allow runtime-suspend if PME capable (for wakeup) */
	if (pci_pme_capable(pdev, PCI_D3hot)) {
		pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
		pm_runtime_use_autosuspend(&pdev->dev);
		pm_runtime_mark_last_busy(&pdev->dev);
		pm_runtime_put_noidle(&pdev->dev);
	}

	return 0;

err_unprepare:
	mhi_unprepare_after_power_down(mhi_cntrl);
err_unregister:
	mhi_unregister_controller(mhi_cntrl);
err_disable_reporting:
	pci_disable_pcie_error_reporting(pdev);

	return err;
}

static void mhi_pci_remove(struct pci_dev *pdev)
{
	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;

	del_timer_sync(&mhi_pdev->health_check_timer);
	cancel_work_sync(&mhi_pdev->recovery_work);

	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
		mhi_power_down(mhi_cntrl, true);
		mhi_unprepare_after_power_down(mhi_cntrl);
	}

	/* balancing probe put_noidle */
	if (pci_pme_capable(pdev, PCI_D3hot))
		pm_runtime_get_noresume(&pdev->dev);

	mhi_unregister_controller(mhi_cntrl);
	pci_disable_pcie_error_reporting(pdev);
}

static void mhi_pci_shutdown(struct pci_dev *pdev)
{
	mhi_pci_remove(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
}

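/* Called before a PCI function reset: stop MHI activity, request an internal
 * SoC reset and give the device time to complete it.
 */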
static void mhi_pci_reset_prepare(struct pci_dev *pdev)
{
	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;

	dev_info(&pdev->dev, "reset\n");

	del_timer(&mhi_pdev->health_check_timer);

	/* Clean up MHI state */
	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
		mhi_power_down(mhi_cntrl, false);
		mhi_unprepare_after_power_down(mhi_cntrl);
	}

	/* cause internal device reset */
	mhi_soc_reset(mhi_cntrl);

	/* Be sure device reset has been executed */
	msleep(MHI_POST_RESET_DELAY_MS);
}

static void mhi_pci_reset_done(struct pci_dev *pdev)
{
	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
	int err;

	/* Restore initial known working PCI state */
	pci_load_saved_state(pdev, mhi_pdev->pci_state);
	pci_restore_state(pdev);

	/* Is the device status available? */
	if (!mhi_pci_is_alive(mhi_cntrl)) {
		dev_err(&pdev->dev, "reset failed\n");
		return;
	}

	err = mhi_prepare_for_power_up(mhi_cntrl);
	if (err) {
		dev_err(&pdev->dev, "failed to prepare MHI controller\n");
		return;
	}

	err = mhi_sync_power_up(mhi_cntrl);
	if (err) {
		dev_err(&pdev->dev, "failed to power up MHI controller\n");
		mhi_unprepare_after_power_down(mhi_cntrl);
		return;
	}

	set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
}

static pci_ers_result_t mhi_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;

	dev_err(&pdev->dev, "PCI error detected, state = %u\n", state);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	/* Clean up MHI state */
	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
		mhi_power_down(mhi_cntrl, false);
		mhi_unprepare_after_power_down(mhi_cntrl);
	} else {
		/* Nothing to do */
		return PCI_ERS_RESULT_RECOVERED;
	}

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t mhi_pci_slot_reset(struct pci_dev *pdev)
{
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

static void mhi_pci_io_resume(struct pci_dev *pdev)
{
	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);

	dev_err(&pdev->dev, "PCI slot reset done\n");

	queue_work(system_long_wq, &mhi_pdev->recovery_work);
}

static const struct pci_error_handlers mhi_pci_err_handler = {
	.error_detected = mhi_pci_error_detected,
	.slot_reset = mhi_pci_slot_reset,
	.resume = mhi_pci_io_resume,
	.reset_prepare = mhi_pci_reset_prepare,
	.reset_done = mhi_pci_reset_done,
};

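/* Runtime PM: suspend moves the link to M3 (when the device is up and in
 * mission mode) and makes the PCI device wake-capable from D3; resume
 * reverses this and restarts the health check, deferring to the recovery
 * work if the device lost power in the meantime.
 */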
static int  __maybe_unused mhi_pci_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
	int err;

	if (test_and_set_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
		return 0;

	del_timer(&mhi_pdev->health_check_timer);
	cancel_work_sync(&mhi_pdev->recovery_work);

	if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
			mhi_cntrl->ee != MHI_EE_AMSS)
		goto pci_suspend; /* Nothing to do at MHI level */

	/* Transition to M3 state */
	err = mhi_pm_suspend(mhi_cntrl);
	if (err) {
		dev_err(&pdev->dev, "failed to suspend device: %d\n", err);
		clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status);
		return -EBUSY;
	}

pci_suspend:
	pci_disable_device(pdev);
	pci_wake_from_d3(pdev, true);

	return 0;
}

static int __maybe_unused mhi_pci_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
	int err;

	if (!test_and_clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
		return 0;

	err = pci_enable_device(pdev);
	if (err)
		goto err_recovery;

	pci_set_master(pdev);
	pci_wake_from_d3(pdev, false);

	if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
			mhi_cntrl->ee != MHI_EE_AMSS)
		return 0; /* Nothing to do at MHI level */

	/* Exit M3, transition to M0 state */
	err = mhi_pm_resume(mhi_cntrl);
	if (err) {
		dev_err(&pdev->dev, "failed to resume device: %d\n", err);
		goto err_recovery;
	}

	/* Resume health check */
	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);

	/* This may be a remote wakeup (no MHI runtime_get), so update the access time */
	pm_runtime_mark_last_busy(dev);

	return 0;

err_recovery:
	/* Do not return an error here, to avoid messing up the PCI device
	 * state; the device likely lost power (D3cold) and simply needs to be
	 * reset from the recovery procedure. Trigger the recovery
	 * asynchronously so that system suspend exit is not delayed.
	 */
	queue_work(system_long_wq, &mhi_pdev->recovery_work);
	pm_runtime_mark_last_busy(dev);

	return 0;
}

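/* System sleep reuses the runtime PM callbacks, with runtime PM disabled
 * across the suspend/resume transition.
 */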
static int  __maybe_unused mhi_pci_suspend(struct device *dev)
{
	pm_runtime_disable(dev);
	return mhi_pci_runtime_suspend(dev);
}

static int __maybe_unused mhi_pci_resume(struct device *dev)
{
	int ret;

	/* Depending on the platform, the device may have lost power (D3cold);
	 * resume it now to check its state and recover if necessary.
	 */
	ret = mhi_pci_runtime_resume(dev);
	pm_runtime_enable(dev);

	return ret;
}

static int __maybe_unused mhi_pci_freeze(struct device *dev)
{
	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;

	/* We want to stop all operations: hibernation does not guarantee that
	 * the device will be in the same state as before freezing, especially
	 * if the intermediate restore kernel reinitializes the MHI device with
	 * a new context.
	 */
	flush_work(&mhi_pdev->recovery_work);
	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
		mhi_power_down(mhi_cntrl, true);
		mhi_unprepare_after_power_down(mhi_cntrl);
	}

	return 0;
}

static int __maybe_unused mhi_pci_restore(struct device *dev)
{
	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);

	/* Reinitialize the device */
	queue_work(system_long_wq, &mhi_pdev->recovery_work);

	return 0;
}

static const struct dev_pm_ops mhi_pci_pm_ops = {
	SET_RUNTIME_PM_OPS(mhi_pci_runtime_suspend, mhi_pci_runtime_resume, NULL)
#ifdef CONFIG_PM_SLEEP
	.suspend = mhi_pci_suspend,
	.resume = mhi_pci_resume,
	.freeze = mhi_pci_freeze,
	.thaw = mhi_pci_restore,
	.poweroff = mhi_pci_freeze,
	.restore = mhi_pci_restore,
#endif
};

static struct pci_driver mhi_pci_driver = {
	.name		= "mhi-pci-generic",
	.id_table	= mhi_pci_id_table,
	.probe		= mhi_pci_probe,
	.remove		= mhi_pci_remove,
	.shutdown	= mhi_pci_shutdown,
	.err_handler	= &mhi_pci_err_handler,
	.driver.pm	= &mhi_pci_pm_ops
};
module_pci_driver(mhi_pci_driver);

MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
MODULE_DESCRIPTION("Modem Host Interface (MHI) PCI controller driver");
MODULE_LICENSE("GPL");