xref: /openbmc/linux/drivers/bus/mhi/host/pci_generic.c (revision cf41d18b)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * MHI PCI driver - MHI over PCI controller driver
4  *
5  * This module is a generic driver for registering MHI-over-PCI devices,
6  * such as PCIe QCOM modems.
7  *
8  * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
9  */
10 
11 #include <linux/aer.h>
12 #include <linux/delay.h>
13 #include <linux/device.h>
14 #include <linux/mhi.h>
15 #include <linux/module.h>
16 #include <linux/pci.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/timer.h>
19 #include <linux/workqueue.h>
20 
/* PCI BAR used by default to expose the MHI MMIO register space */
#define MHI_PCI_DEFAULT_BAR_NUM 0

/* Delay (ms) granted to the device after a reset before talking to it */
#define MHI_POST_RESET_DELAY_MS 2000

/* Liveness-polling period of the health-check timer (jiffies) */
#define HEALTH_CHECK_PERIOD (HZ * 2)
26 
27 /* PCI VID definitions */
28 #define PCI_VENDOR_ID_THALES	0x1269
29 #define PCI_VENDOR_ID_QUECTEL	0x1eac
30 
/**
 * struct mhi_pci_dev_info - MHI PCI device specific information
 * @config: MHI controller configuration
 * @name: name of the PCI module
 * @fw: firmware path (if any)
 * @edl: emergency download mode firmware path (if any)
 * @bar_num: PCI base address register to use for MHI MMIO register space
 * @dma_data_width: DMA transfer word size (32 or 64 bits)
 * @mru_default: default MRU size for MBIM network packets
 * @sideband_wake: Devices using dedicated sideband GPIO for wakeup instead
 *		   of inband wake support (such as sdx24)
 *
 * One instance of this structure describes each supported device; it is
 * attached to the matching entry of mhi_pci_id_table via driver_data.
 */
struct mhi_pci_dev_info {
	const struct mhi_controller_config *config;
	const char *name;
	const char *fw;
	const char *edl;
	unsigned int bar_num;
	unsigned int dma_data_width;
	unsigned int mru_default;
	bool sideband_wake;
};
53 
/* Uplink (host-to-device) channel, available in the AMSS execution env. */
#define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \
	{						\
		.num = ch_num,				\
		.name = ch_name,			\
		.num_elements = el_count,		\
		.event_ring = ev_ring,			\
		.dir = DMA_TO_DEVICE,			\
		.ee_mask = BIT(MHI_EE_AMSS),		\
		.pollcfg = 0,				\
		.doorbell = MHI_DB_BRST_DISABLE,	\
		.lpm_notify = false,			\
		.offload_channel = false,		\
		.doorbell_mode_switch = false,		\
	}						\

/* Downlink (device-to-host) channel, available in the AMSS execution env. */
#define MHI_CHANNEL_CONFIG_DL(ch_num, ch_name, el_count, ev_ring) \
	{						\
		.num = ch_num,				\
		.name = ch_name,			\
		.num_elements = el_count,		\
		.event_ring = ev_ring,			\
		.dir = DMA_FROM_DEVICE,			\
		.ee_mask = BIT(MHI_EE_AMSS),		\
		.pollcfg = 0,				\
		.doorbell = MHI_DB_BRST_DISABLE,	\
		.lpm_notify = false,			\
		.offload_channel = false,		\
		.doorbell_mode_switch = false,		\
	}

/* Downlink channel with MHI core auto-queuing of receive buffers */
#define MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(ch_num, ch_name, el_count, ev_ring) \
	{						\
		.num = ch_num,				\
		.name = ch_name,			\
		.num_elements = el_count,		\
		.event_ring = ev_ring,			\
		.dir = DMA_FROM_DEVICE,			\
		.ee_mask = BIT(MHI_EE_AMSS),		\
		.pollcfg = 0,				\
		.doorbell = MHI_DB_BRST_DISABLE,	\
		.lpm_notify = false,			\
		.offload_channel = false,		\
		.doorbell_mode_switch = false,		\
		.auto_queue = true,			\
	}
99 
/* Control event ring (MHI_ER_CTRL), no IRQ moderation */
#define MHI_EVENT_CONFIG_CTRL(ev_ring, el_count) \
	{					\
		.num_elements = el_count,	\
		.irq_moderation_ms = 0,		\
		.irq = (ev_ring) + 1,		\
		.priority = 1,			\
		.mode = MHI_DB_BRST_DISABLE,	\
		.data_type = MHI_ER_CTRL,	\
		.hardware_event = false,	\
		.client_managed = false,	\
		.offload_channel = false,	\
	}
112 
/* Hardware-accelerated uplink channel: burst doorbell with mode switching */
#define MHI_CHANNEL_CONFIG_HW_UL(ch_num, ch_name, el_count, ev_ring) \
	{						\
		.num = ch_num,				\
		.name = ch_name,			\
		.num_elements = el_count,		\
		.event_ring = ev_ring,			\
		.dir = DMA_TO_DEVICE,			\
		.ee_mask = BIT(MHI_EE_AMSS),		\
		.pollcfg = 0,				\
		.doorbell = MHI_DB_BRST_ENABLE,	\
		.lpm_notify = false,			\
		.offload_channel = false,		\
		.doorbell_mode_switch = true,		\
	}						\

/* Hardware-accelerated downlink channel: burst doorbell with mode switching */
#define MHI_CHANNEL_CONFIG_HW_DL(ch_num, ch_name, el_count, ev_ring) \
	{						\
		.num = ch_num,				\
		.name = ch_name,			\
		.num_elements = el_count,		\
		.event_ring = ev_ring,			\
		.dir = DMA_FROM_DEVICE,			\
		.ee_mask = BIT(MHI_EE_AMSS),		\
		.pollcfg = 0,				\
		.doorbell = MHI_DB_BRST_ENABLE,	\
		.lpm_notify = false,			\
		.offload_channel = false,		\
		.doorbell_mode_switch = true,		\
	}
142 
/* Uplink channel available only in the SBL (secondary boot loader) env. */
#define MHI_CHANNEL_CONFIG_UL_SBL(ch_num, ch_name, el_count, ev_ring) \
	{						\
		.num = ch_num,				\
		.name = ch_name,			\
		.num_elements = el_count,		\
		.event_ring = ev_ring,			\
		.dir = DMA_TO_DEVICE,			\
		.ee_mask = BIT(MHI_EE_SBL),		\
		.pollcfg = 0,				\
		.doorbell = MHI_DB_BRST_DISABLE,	\
		.lpm_notify = false,			\
		.offload_channel = false,		\
		.doorbell_mode_switch = false,		\
	}						\

/* Downlink channel available only in the SBL (secondary boot loader) env. */
#define MHI_CHANNEL_CONFIG_DL_SBL(ch_num, ch_name, el_count, ev_ring) \
	{						\
		.num = ch_num,				\
		.name = ch_name,			\
		.num_elements = el_count,		\
		.event_ring = ev_ring,			\
		.dir = DMA_FROM_DEVICE,			\
		.ee_mask = BIT(MHI_EE_SBL),		\
		.pollcfg = 0,				\
		.doorbell = MHI_DB_BRST_DISABLE,	\
		.lpm_notify = false,			\
		.offload_channel = false,		\
		.doorbell_mode_switch = false,		\
	}
172 
/* Uplink channel available only in the FP (flash programmer) environment */
#define MHI_CHANNEL_CONFIG_UL_FP(ch_num, ch_name, el_count, ev_ring) \
	{						\
		.num = ch_num,				\
		.name = ch_name,			\
		.num_elements = el_count,		\
		.event_ring = ev_ring,			\
		.dir = DMA_TO_DEVICE,			\
		.ee_mask = BIT(MHI_EE_FP),		\
		.pollcfg = 0,				\
		.doorbell = MHI_DB_BRST_DISABLE,	\
		.lpm_notify = false,			\
		.offload_channel = false,		\
		.doorbell_mode_switch = false,		\
	}						\

/* Downlink channel available only in the FP (flash programmer) environment */
#define MHI_CHANNEL_CONFIG_DL_FP(ch_num, ch_name, el_count, ev_ring) \
	{						\
		.num = ch_num,				\
		.name = ch_name,			\
		.num_elements = el_count,		\
		.event_ring = ev_ring,			\
		.dir = DMA_FROM_DEVICE,			\
		.ee_mask = BIT(MHI_EE_FP),		\
		.pollcfg = 0,				\
		.doorbell = MHI_DB_BRST_DISABLE,	\
		.lpm_notify = false,			\
		.offload_channel = false,		\
		.doorbell_mode_switch = false,		\
	}
202 
/* Software data event ring with 5 ms IRQ moderation */
#define MHI_EVENT_CONFIG_DATA(ev_ring, el_count) \
	{					\
		.num_elements = el_count,	\
		.irq_moderation_ms = 5,		\
		.irq = (ev_ring) + 1,		\
		.priority = 1,			\
		.mode = MHI_DB_BRST_DISABLE,	\
		.data_type = MHI_ER_DATA,	\
		.hardware_event = false,	\
		.client_managed = false,	\
		.offload_channel = false,	\
	}

/* Hardware data event ring dedicated to channel @ch_num, 1 ms moderation */
#define MHI_EVENT_CONFIG_HW_DATA(ev_ring, el_count, ch_num) \
	{					\
		.num_elements = el_count,	\
		.irq_moderation_ms = 1,		\
		.irq = (ev_ring) + 1,		\
		.priority = 1,			\
		.mode = MHI_DB_BRST_DISABLE,	\
		.data_type = MHI_ER_DATA,	\
		.hardware_event = true,		\
		.client_managed = false,	\
		.offload_channel = false,	\
		.channel = ch_num,		\
	}
229 
/* Channel set shared by the Qualcomm SDX24/SDX55/SDX65 modem family */
static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 16, 1),
	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 16, 1),
	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 4, 0),
	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 4, 0),
	MHI_CHANNEL_CONFIG_UL(14, "QMI", 4, 0),
	MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0),
	MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0),
	MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 8, 0),
	MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
	MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 2),
	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 3),
};

/* Not const: .irq may be patched by mhi_pci_get_irqs() when MSI is shared */
static struct mhi_event_config modem_qcom_v1_mhi_events[] = {
	/* first ring is control+data ring */
	MHI_EVENT_CONFIG_CTRL(0, 64),
	/* DIAG dedicated event ring */
	MHI_EVENT_CONFIG_DATA(1, 128),
	/* Hardware channels request dedicated hardware event rings */
	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
	MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101)
};

static const struct mhi_controller_config modem_qcom_v1_mhiv_config = {
	.max_channels = 128,
	.timeout_ms = 8000,
	.num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels),
	.ch_cfg = modem_qcom_v1_mhi_channels,
	.num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events),
	.event_cfg = modem_qcom_v1_mhi_events,
};
263 
/* Qualcomm SDX65 based modem */
static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = {
	.name = "qcom-sdx65m",
	.fw = "qcom/sdx65m/xbl.elf",
	.edl = "qcom/sdx65m/edl.mbn",
	.config = &modem_qcom_v1_mhiv_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.sideband_wake = false,
};

/* Qualcomm SDX55 based modem */
static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {
	.name = "qcom-sdx55m",
	.fw = "qcom/sdx55m/sbl1.mbn",
	.edl = "qcom/sdx55m/edl.mbn",
	.config = &modem_qcom_v1_mhiv_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
	.sideband_wake = false,
};

/* Qualcomm SDX24 based modem, uses sideband GPIO wake instead of inband */
static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = {
	.name = "qcom-sdx24",
	.edl = "qcom/prog_firehose_sdx24.mbn",
	.config = &modem_qcom_v1_mhiv_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.sideband_wake = true,
};
293 
/* Quectel EM1xx (sdx24 based) channel layout */
static const struct mhi_channel_config mhi_quectel_em1xx_channels[] = {
	MHI_CHANNEL_CONFIG_UL(0, "NMEA", 32, 0),
	MHI_CHANNEL_CONFIG_DL(1, "NMEA", 32, 0),
	MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
	MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1),
	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
	/* The EDL firmware is a flash-programmer exposing firehose protocol */
	MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
	MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
};

/* Not const: .irq may be patched by mhi_pci_get_irqs() when MSI is shared */
static struct mhi_event_config mhi_quectel_em1xx_events[] = {
	MHI_EVENT_CONFIG_CTRL(0, 128),
	MHI_EVENT_CONFIG_DATA(1, 128),
	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
	MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101)
};

static const struct mhi_controller_config modem_quectel_em1xx_config = {
	.max_channels = 128,
	.timeout_ms = 20000,
	.num_channels = ARRAY_SIZE(mhi_quectel_em1xx_channels),
	.ch_cfg = mhi_quectel_em1xx_channels,
	.num_events = ARRAY_SIZE(mhi_quectel_em1xx_events),
	.event_cfg = mhi_quectel_em1xx_events,
};

static const struct mhi_pci_dev_info mhi_quectel_em1xx_info = {
	.name = "quectel-em1xx",
	.edl = "qcom/prog_firehose_sdx24.mbn",
	.config = &modem_quectel_em1xx_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
	.sideband_wake = true,
};
337 
/* Foxconn SDX55/SDX65 (T99Wxxx / DW5930e) channel layout */
static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = {
	MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 32, 0),
	MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 32, 0),
	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1),
	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
	MHI_CHANNEL_CONFIG_UL(92, "DUN2", 32, 1),
	MHI_CHANNEL_CONFIG_DL(93, "DUN2", 32, 1),
	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
};

/* Not const: .irq may be patched by mhi_pci_get_irqs() when MSI is shared */
static struct mhi_event_config mhi_foxconn_sdx55_events[] = {
	MHI_EVENT_CONFIG_CTRL(0, 128),
	MHI_EVENT_CONFIG_DATA(1, 128),
	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
	MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101)
};

static const struct mhi_controller_config modem_foxconn_sdx55_config = {
	.max_channels = 128,
	.timeout_ms = 20000,
	.num_channels = ARRAY_SIZE(mhi_foxconn_sdx55_channels),
	.ch_cfg = mhi_foxconn_sdx55_channels,
	.num_events = ARRAY_SIZE(mhi_foxconn_sdx55_events),
	.event_cfg = mhi_foxconn_sdx55_events,
};

static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
	.name = "foxconn-sdx55",
	.fw = "qcom/sdx55m/sbl1.mbn",
	.edl = "qcom/sdx55m/edl.mbn",
	.config = &modem_foxconn_sdx55_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
	.sideband_wake = false,
};

/* SDX65 variant reuses the SDX55 controller configuration */
static const struct mhi_pci_dev_info mhi_foxconn_sdx65_info = {
	.name = "foxconn-sdx65",
	.config = &modem_foxconn_sdx55_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
	.sideband_wake = false,
};
388 
/* Cinterion MV3x channel layout */
static const struct mhi_channel_config mhi_mv3x_channels[] = {
	MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 64, 0),
	MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 64, 0),
	/* MBIM Control Channel */
	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 64, 0),
	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 64, 0),
	/* MBIM Data Channel */
	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 512, 2),
	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 512, 3),
};

/* Not const: .irq may be patched by mhi_pci_get_irqs() when MSI is shared */
static struct mhi_event_config mhi_mv3x_events[] = {
	MHI_EVENT_CONFIG_CTRL(0, 256),
	MHI_EVENT_CONFIG_DATA(1, 256),
	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
	MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101),
};

static const struct mhi_controller_config modem_mv3x_config = {
	.max_channels = 128,
	.timeout_ms = 20000,
	.num_channels = ARRAY_SIZE(mhi_mv3x_channels),
	.ch_cfg = mhi_mv3x_channels,
	.num_events = ARRAY_SIZE(mhi_mv3x_events),
	.event_cfg = mhi_mv3x_events,
};

static const struct mhi_pci_dev_info mhi_mv31_info = {
	.name = "cinterion-mv31",
	.config = &modem_mv3x_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
};

static const struct mhi_pci_dev_info mhi_mv32_info = {
	.name = "cinterion-mv32",
	.config = &modem_mv3x_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
};
431 
/* Sierra Wireless EM919x (sdx55 based) channel layout */
static const struct mhi_channel_config mhi_sierra_em919x_channels[] = {
	MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
	MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 256, 0),
	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 0),
	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 0),
	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 128, 0),
	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 128, 0),
	MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
	MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 512, 1),
	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 512, 2),
};

/* Not const: .irq may be patched by mhi_pci_get_irqs() when MSI is shared */
static struct mhi_event_config modem_sierra_em919x_mhi_events[] = {
	/* first ring is control+data and DIAG ring */
	MHI_EVENT_CONFIG_CTRL(0, 2048),
	/* Hardware channels request dedicated hardware event rings */
	MHI_EVENT_CONFIG_HW_DATA(1, 2048, 100),
	MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
};

static const struct mhi_controller_config modem_sierra_em919x_config = {
	.max_channels = 128,
	.timeout_ms = 24000,
	.num_channels = ARRAY_SIZE(mhi_sierra_em919x_channels),
	.ch_cfg = mhi_sierra_em919x_channels,
	.num_events = ARRAY_SIZE(modem_sierra_em919x_mhi_events),
	.event_cfg = modem_sierra_em919x_mhi_events,
};

static const struct mhi_pci_dev_info mhi_sierra_em919x_info = {
	.name = "sierra-em919x",
	.config = &modem_sierra_em919x_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.sideband_wake = false,
};
471 
/* Telit FN980 hardware revision v1 channel layout */
static const struct mhi_channel_config mhi_telit_fn980_hw_v1_channels[] = {
	MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
	MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
	MHI_CHANNEL_CONFIG_UL(20, "IPCR", 16, 0),
	MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 16, 0),
	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 1),
	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 2),
};

/* Not const: .irq may be patched by mhi_pci_get_irqs() when MSI is shared */
static struct mhi_event_config mhi_telit_fn980_hw_v1_events[] = {
	MHI_EVENT_CONFIG_CTRL(0, 128),
	MHI_EVENT_CONFIG_HW_DATA(1, 1024, 100),
	MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
};
486 
487 static struct mhi_controller_config modem_telit_fn980_hw_v1_config = {
488 	.max_channels = 128,
489 	.timeout_ms = 20000,
490 	.num_channels = ARRAY_SIZE(mhi_telit_fn980_hw_v1_channels),
491 	.ch_cfg = mhi_telit_fn980_hw_v1_channels,
492 	.num_events = ARRAY_SIZE(mhi_telit_fn980_hw_v1_events),
493 	.event_cfg = mhi_telit_fn980_hw_v1_events,
494 };
495 
/* Telit FN980 hardware revision v1 (sdx55 based) */
static const struct mhi_pci_dev_info mhi_telit_fn980_hw_v1_info = {
	.name = "telit-fn980-hwv1",
	.fw = "qcom/sdx55m/sbl1.mbn",
	.edl = "qcom/sdx55m/edl.mbn",
	.config = &modem_telit_fn980_hw_v1_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
	.sideband_wake = false,
};
506 
/* Telit FN990 (sdx65 based) channel layout */
static const struct mhi_channel_config mhi_telit_fn990_channels[] = {
	MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
	MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 1),
	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 1),
	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
	MHI_CHANNEL_CONFIG_UL(92, "DUN2", 32, 1),
	MHI_CHANNEL_CONFIG_DL(93, "DUN2", 32, 1),
	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
};

/* Not const: .irq may be patched by mhi_pci_get_irqs() when MSI is shared */
static struct mhi_event_config mhi_telit_fn990_events[] = {
	MHI_EVENT_CONFIG_CTRL(0, 128),
	MHI_EVENT_CONFIG_DATA(1, 128),
	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
	MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101)
};

static const struct mhi_controller_config modem_telit_fn990_config = {
	.max_channels = 128,
	.timeout_ms = 20000,
	.num_channels = ARRAY_SIZE(mhi_telit_fn990_channels),
	.ch_cfg = mhi_telit_fn990_channels,
	.num_events = ARRAY_SIZE(mhi_telit_fn990_events),
	.event_cfg = mhi_telit_fn990_events,
};

static const struct mhi_pci_dev_info mhi_telit_fn990_info = {
	.name = "telit-fn990",
	.config = &modem_telit_fn990_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.sideband_wake = false,
	.mru_default = 32768,
};
546 
/* Keep the list sorted based on the PID. New VID should be added as the last entry */
static const struct pci_device_id mhi_pci_id_table[] = {
	/* SDX24 based modem */
	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304),
		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info },
	/* Foxconn sdx55 with Foxconn subsystem ID */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, PCI_VENDOR_ID_QCOM, 0x010c),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
	/* EM919x (sdx55), use the same vid:pid as qcom-sdx55m */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x18d7, 0x0200),
		.driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info },
	/* Telit FN980 hardware revision v1 */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x1C5D, 0x2000),
		.driver_data = (kernel_ulong_t) &mhi_telit_fn980_hw_v1_info },
	/* Generic SDX55, must come after the subsystem-specific entries above */
	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306),
		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info },
	/* Telit FN990 */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010),
		.driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
	/* Generic SDX65, must come after the subsystem-specific entry above */
	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
	{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1001), /* EM120R-GL (sdx24) */
		.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
	{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1002), /* EM160R-GL (sdx24) */
		.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
	{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x2001), /* EM120R-GL for FCCL (sdx24) */
		.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
	/* T99W175 (sdx55), Both for eSIM and Non-eSIM */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0ab),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
	/* DW5930e (sdx55), With eSIM, It's also T99W175 */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b0),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
	/* DW5930e (sdx55), Non-eSIM, It's also T99W175 */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b1),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
	/* T99W175 (sdx55), Based on Qualcomm new baseline */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0bf),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
	/* T99W175 (sdx55) */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0c3),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
	/* T99W368 (sdx65) */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d8),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
	/* T99W373 (sdx62) */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d9),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
	/* MV31-W (Cinterion) */
	{ PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00b3),
		.driver_data = (kernel_ulong_t) &mhi_mv31_info },
	/* MV31-W (Cinterion), based on new baseline */
	{ PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00b4),
		.driver_data = (kernel_ulong_t) &mhi_mv31_info },
	/* MV32-WA (Cinterion) */
	{ PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00ba),
		.driver_data = (kernel_ulong_t) &mhi_mv32_info },
	/* MV32-WB (Cinterion) */
	{ PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00bb),
		.driver_data = (kernel_ulong_t) &mhi_mv32_info },
	/* T99W175 (sdx55), HP variant */
	{ PCI_DEVICE(0x03f0, 0x0a6c),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
	{  }
};
MODULE_DEVICE_TABLE(pci, mhi_pci_id_table);
611 
/* Runtime state bits stored in struct mhi_pci_device::status */
enum mhi_pci_device_status {
	MHI_PCI_DEV_STARTED,
	MHI_PCI_DEV_SUSPENDED,
};
616 
/* Per-device driver state, allocated in mhi_pci_probe() */
struct mhi_pci_device {
	struct mhi_controller mhi_cntrl;	/* embedded MHI controller */
	struct pci_saved_state *pci_state;	/* config space snapshot for recovery */
	struct work_struct recovery_work;	/* restarts the device after a crash */
	struct timer_list health_check_timer;	/* periodic liveness polling */
	unsigned long status;			/* enum mhi_pci_device_status bits */
};
624 
/* MHI register read accessor: plain MMIO read, cannot fail */
static int mhi_pci_read_reg(struct mhi_controller *mhi_cntrl,
			    void __iomem *addr, u32 *out)
{
	*out = readl(addr);
	return 0;
}
631 
/* MHI register write accessor: plain MMIO write */
static void mhi_pci_write_reg(struct mhi_controller *mhi_cntrl,
			      void __iomem *addr, u32 val)
{
	writel(val, addr);
}
637 
/*
 * MHI core status callback: forbid runtime PM while the device is in an
 * error state, and allow it again once mission mode is reached.
 */
static void mhi_pci_status_cb(struct mhi_controller *mhi_cntrl,
			      enum mhi_callback cb)
{
	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);

	/* Only the PM-related callbacks are handled here */
	switch (cb) {
	case MHI_CB_FATAL_ERROR:
	case MHI_CB_SYS_ERROR:
		dev_warn(&pdev->dev, "firmware crashed (%u)\n", cb);
		pm_runtime_forbid(&pdev->dev);
		break;
	case MHI_CB_EE_MISSION_MODE:
		pm_runtime_allow(&pdev->dev);
		break;
	default:
		break;
	}
}
657 
/* Stub wake_get hook for sideband-wake devices (set up in mhi_pci_probe) */
static void mhi_pci_wake_get_nop(struct mhi_controller *mhi_cntrl, bool force)
{
	/* no-op */
}
662 
/* Stub wake_put hook for sideband-wake devices (set up in mhi_pci_probe) */
static void mhi_pci_wake_put_nop(struct mhi_controller *mhi_cntrl, bool override)
{
	/* no-op */
}
667 
/* Stub wake_toggle hook for sideband-wake devices (set up in mhi_pci_probe) */
static void mhi_pci_wake_toggle_nop(struct mhi_controller *mhi_cntrl)
{
	/* no-op */
}
672 
673 static bool mhi_pci_is_alive(struct mhi_controller *mhi_cntrl)
674 {
675 	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
676 	u16 vendor = 0;
677 
678 	if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor))
679 		return false;
680 
681 	if (vendor == (u16) ~0 || vendor == 0)
682 		return false;
683 
684 	return true;
685 }
686 
/*
 * Claim and map the PCI resources needed by the MHI controller: enable
 * the device, iomap @bar_num into mhi_cntrl->regs, set the DMA mask and
 * enable bus mastering. All resources are managed (pcim_*), so no
 * explicit unwind is needed on failure or removal.
 */
static int mhi_pci_claim(struct mhi_controller *mhi_cntrl,
			 unsigned int bar_num, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
	int err;

	err = pci_assign_resource(pdev, bar_num);
	if (err)
		return err;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device: %d\n", err);
		return err;
	}

	err = pcim_iomap_regions(pdev, 1 << bar_num, pci_name(pdev));
	if (err) {
		dev_err(&pdev->dev, "failed to map pci region: %d\n", err);
		return err;
	}
	mhi_cntrl->regs = pcim_iomap_table(pdev)[bar_num];
	mhi_cntrl->reg_len = pci_resource_len(pdev, bar_num);

	err = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
	if (err) {
		dev_err(&pdev->dev, "Cannot set proper DMA mask\n");
		return err;
	}

	pci_set_master(pdev);

	return 0;
}
721 
/*
 * Allocate MSI vectors and build the mhi_cntrl->irq table: ideally one
 * vector for BHI plus one per event ring; if fewer vectors are granted,
 * fall back to a single shared vector and patch the (shared, static)
 * event configs accordingly.
 */
static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl,
			    const struct mhi_controller_config *mhi_cntrl_config)
{
	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
	int nr_vectors, i;
	int *irq;

	/*
	 * Alloc one MSI vector for BHI + one vector per event ring, ideally...
	 * No explicit pci_free_irq_vectors required, done by pcim_release.
	 */
	mhi_cntrl->nr_irqs = 1 + mhi_cntrl_config->num_events;

	nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSI);
	if (nr_vectors < 0) {
		dev_err(&pdev->dev, "Error allocating MSI vectors %d\n",
			nr_vectors);
		return nr_vectors;
	}

	if (nr_vectors < mhi_cntrl->nr_irqs) {
		dev_warn(&pdev->dev, "using shared MSI\n");

		/* Patch msi vectors, use only one (shared) */
		for (i = 0; i < mhi_cntrl_config->num_events; i++)
			mhi_cntrl_config->event_cfg[i].irq = 0;
		mhi_cntrl->nr_irqs = 1;
	}

	irq = devm_kcalloc(&pdev->dev, mhi_cntrl->nr_irqs, sizeof(int), GFP_KERNEL);
	if (!irq)
		return -ENOMEM;

	for (i = 0; i < mhi_cntrl->nr_irqs; i++) {
		/* Clamp to the last granted vector if fewer were allocated */
		int vector = i >= nr_vectors ? (nr_vectors - 1) : i;

		irq[i] = pci_irq_vector(pdev, vector);
	}

	mhi_cntrl->irq = irq;

	return 0;
}
765 
/* MHI runtime_get callback: request async runtime resume (leave M3) */
static int mhi_pci_runtime_get(struct mhi_controller *mhi_cntrl)
{
	/* The runtime_get() MHI callback means:
	 *    Do whatever is requested to leave M3.
	 */
	return pm_runtime_get(mhi_cntrl->cntrl_dev);
}
773 
/* MHI runtime_put callback: drop the usage count, allowing entry to M3 */
static void mhi_pci_runtime_put(struct mhi_controller *mhi_cntrl)
{
	/* The runtime_put() MHI callback means:
	 *    Device can be moved in M3 state.
	 */
	pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev);
	pm_runtime_put(mhi_cntrl->cntrl_dev);
}
782 
/*
 * Recovery worker, queued by health_check() when the device stops
 * responding: tear down MHI, restore the saved PCI config space and try
 * to power the device back up; fall back to a PCI function reset if the
 * device is still unreachable.
 */
static void mhi_pci_recovery_work(struct work_struct *work)
{
	struct mhi_pci_device *mhi_pdev = container_of(work, struct mhi_pci_device,
						       recovery_work);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
	int err;

	dev_warn(&pdev->dev, "device recovery started\n");

	del_timer(&mhi_pdev->health_check_timer);
	pm_runtime_forbid(&pdev->dev);

	/* Clean up MHI state */
	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
		mhi_power_down(mhi_cntrl, false);
		mhi_unprepare_after_power_down(mhi_cntrl);
	}

	/* Restore the config space snapshot taken at probe time */
	pci_set_power_state(pdev, PCI_D0);
	pci_load_saved_state(pdev, mhi_pdev->pci_state);
	pci_restore_state(pdev);

	if (!mhi_pci_is_alive(mhi_cntrl))
		goto err_try_reset;

	err = mhi_prepare_for_power_up(mhi_cntrl);
	if (err)
		goto err_try_reset;

	err = mhi_sync_power_up(mhi_cntrl);
	if (err)
		goto err_unprepare;

	dev_dbg(&pdev->dev, "Recovery completed\n");

	/* Back in service: re-arm the periodic health check */
	set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
	return;

err_unprepare:
	mhi_unprepare_after_power_down(mhi_cntrl);
err_try_reset:
	if (pci_reset_function(pdev))
		dev_err(&pdev->dev, "Recovery failed\n");
}
829 
/*
 * Periodic liveness timer: queue recovery_work if the device no longer
 * answers config-space reads, otherwise re-arm for the next period.
 * Does nothing while the device is not started or is suspended.
 */
static void health_check(struct timer_list *t)
{
	struct mhi_pci_device *mhi_pdev = from_timer(mhi_pdev, t, health_check_timer);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;

	if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
			test_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
		return;

	if (!mhi_pci_is_alive(mhi_cntrl)) {
		dev_err(mhi_cntrl->cntrl_dev, "Device died\n");
		queue_work(system_long_wq, &mhi_pdev->recovery_work);
		return;
	}

	/* reschedule in two seconds */
	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
}
848 
/*
 * Probe: allocate per-device state, claim PCI resources, register the
 * MHI controller described by the matched mhi_pci_dev_info, power it up
 * and start the health-check timer. Runtime PM is only enabled when the
 * device can signal wakeup via PME.
 */
static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	const struct mhi_pci_dev_info *info = (struct mhi_pci_dev_info *) id->driver_data;
	const struct mhi_controller_config *mhi_cntrl_config;
	struct mhi_pci_device *mhi_pdev;
	struct mhi_controller *mhi_cntrl;
	int err;

	dev_info(&pdev->dev, "MHI PCI device found: %s\n", info->name);

	/* mhi_pdev.mhi_cntrl must be zero-initialized */
	mhi_pdev = devm_kzalloc(&pdev->dev, sizeof(*mhi_pdev), GFP_KERNEL);
	if (!mhi_pdev)
		return -ENOMEM;

	INIT_WORK(&mhi_pdev->recovery_work, mhi_pci_recovery_work);
	timer_setup(&mhi_pdev->health_check_timer, health_check, 0);

	mhi_cntrl_config = info->config;
	mhi_cntrl = &mhi_pdev->mhi_cntrl;

	mhi_cntrl->cntrl_dev = &pdev->dev;
	mhi_cntrl->iova_start = 0;
	mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(info->dma_data_width);
	mhi_cntrl->fw_image = info->fw;
	mhi_cntrl->edl_image = info->edl;

	mhi_cntrl->read_reg = mhi_pci_read_reg;
	mhi_cntrl->write_reg = mhi_pci_write_reg;
	mhi_cntrl->status_cb = mhi_pci_status_cb;
	mhi_cntrl->runtime_get = mhi_pci_runtime_get;
	mhi_cntrl->runtime_put = mhi_pci_runtime_put;
	mhi_cntrl->mru = info->mru_default;

	/* Sideband-wake devices use stub wake hooks instead of inband wake */
	if (info->sideband_wake) {
		mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
		mhi_cntrl->wake_put = mhi_pci_wake_put_nop;
		mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
	}

	err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
	if (err)
		return err;

	err = mhi_pci_get_irqs(mhi_cntrl, mhi_cntrl_config);
	if (err)
		return err;

	pci_set_drvdata(pdev, mhi_pdev);

	/* Have stored pci confspace at hand for restore in sudden PCI error.
	 * cache the state locally and discard the PCI core one.
	 */
	pci_save_state(pdev);
	mhi_pdev->pci_state = pci_store_saved_state(pdev);
	pci_load_saved_state(pdev, NULL);

	pci_enable_pcie_error_reporting(pdev);

	err = mhi_register_controller(mhi_cntrl, mhi_cntrl_config);
	if (err)
		goto err_disable_reporting;

	/* MHI bus does not power up the controller by default */
	err = mhi_prepare_for_power_up(mhi_cntrl);
	if (err) {
		dev_err(&pdev->dev, "failed to prepare MHI controller\n");
		goto err_unregister;
	}

	err = mhi_sync_power_up(mhi_cntrl);
	if (err) {
		dev_err(&pdev->dev, "failed to power up MHI controller\n");
		goto err_unprepare;
	}

	set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);

	/* start health check */
	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);

	/* Only allow runtime-suspend if PME capable (for wakeup) */
	if (pci_pme_capable(pdev, PCI_D3hot)) {
		pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
		pm_runtime_use_autosuspend(&pdev->dev);
		pm_runtime_mark_last_busy(&pdev->dev);
		pm_runtime_put_noidle(&pdev->dev);
	}

	return 0;

err_unprepare:
	mhi_unprepare_after_power_down(mhi_cntrl);
err_unregister:
	mhi_unregister_controller(mhi_cntrl);
err_disable_reporting:
	pci_disable_pcie_error_reporting(pdev);

	return err;
}
949 
/*
 * PCI remove callback: stop all asynchronous driver activity (health-check
 * timer, recovery worker) before tearing down the MHI stack, so nothing can
 * touch the controller while it is being destroyed.
 */
static void mhi_pci_remove(struct pci_dev *pdev)
{
	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;

	del_timer_sync(&mhi_pdev->health_check_timer);
	cancel_work_sync(&mhi_pdev->recovery_work);

	/* Graceful power down (true) only if the device was actually started */
	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
		mhi_power_down(mhi_cntrl, true);
		mhi_unprepare_after_power_down(mhi_cntrl);
	}

	/* balancing probe put_noidle */
	if (pci_pme_capable(pdev, PCI_D3hot))
		pm_runtime_get_noresume(&pdev->dev);

	mhi_unregister_controller(mhi_cntrl);
	pci_disable_pcie_error_reporting(pdev);
}
970 
/*
 * System shutdown callback: perform the full remove-path teardown, then
 * park the PCI function in D3hot for power-off.
 */
static void mhi_pci_shutdown(struct pci_dev *pdev)
{
	mhi_pci_remove(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
}
976 
/*
 * Called by the PCI core before a controlled function/link reset: quiesce
 * driver-side activity and bring the modem down so the reset starts from a
 * clean state.
 */
static void mhi_pci_reset_prepare(struct pci_dev *pdev)
{
	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;

	dev_info(&pdev->dev, "reset\n");

	del_timer(&mhi_pdev->health_check_timer);

	/* Clean up MHI state */
	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
		/* Non-graceful (false): the device is about to be reset anyway */
		mhi_power_down(mhi_cntrl, false);
		mhi_unprepare_after_power_down(mhi_cntrl);
	}

	/* cause internal device reset */
	mhi_soc_reset(mhi_cntrl);

	/* Be sure device reset has been executed */
	msleep(MHI_POST_RESET_DELAY_MS);
}
998 
/*
 * Called by the PCI core after the reset completed: restore the config
 * space snapshot taken at probe time and restart the MHI stack if the
 * device came back alive. On failure the device is simply left stopped
 * (MHI_PCI_DEV_STARTED stays cleared).
 */
static void mhi_pci_reset_done(struct pci_dev *pdev)
{
	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
	int err;

	/* Restore initial known working PCI state */
	pci_load_saved_state(pdev, mhi_pdev->pci_state);
	pci_restore_state(pdev);

	/* Is device status available ? */
	if (!mhi_pci_is_alive(mhi_cntrl)) {
		dev_err(&pdev->dev, "reset failed\n");
		return;
	}

	err = mhi_prepare_for_power_up(mhi_cntrl);
	if (err) {
		dev_err(&pdev->dev, "failed to prepare MHI controller\n");
		return;
	}

	err = mhi_sync_power_up(mhi_cntrl);
	if (err) {
		dev_err(&pdev->dev, "failed to power up MHI controller\n");
		mhi_unprepare_after_power_down(mhi_cntrl);
		return;
	}

	set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
	/* Resume periodic health monitoring now that the device is up again */
	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
}
1031 
1032 static pci_ers_result_t mhi_pci_error_detected(struct pci_dev *pdev,
1033 					       pci_channel_state_t state)
1034 {
1035 	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
1036 	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
1037 
1038 	dev_err(&pdev->dev, "PCI error detected, state = %u\n", state);
1039 
1040 	if (state == pci_channel_io_perm_failure)
1041 		return PCI_ERS_RESULT_DISCONNECT;
1042 
1043 	/* Clean up MHI state */
1044 	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
1045 		mhi_power_down(mhi_cntrl, false);
1046 		mhi_unprepare_after_power_down(mhi_cntrl);
1047 	} else {
1048 		/* Nothing to do */
1049 		return PCI_ERS_RESULT_RECOVERED;
1050 	}
1051 
1052 	pci_disable_device(pdev);
1053 
1054 	return PCI_ERS_RESULT_NEED_RESET;
1055 }
1056 
1057 static pci_ers_result_t mhi_pci_slot_reset(struct pci_dev *pdev)
1058 {
1059 	if (pci_enable_device(pdev)) {
1060 		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset.\n");
1061 		return PCI_ERS_RESULT_DISCONNECT;
1062 	}
1063 
1064 	return PCI_ERS_RESULT_RECOVERED;
1065 }
1066 
/*
 * AER resume callback (final recovery step): I/O is allowed again, so
 * schedule the recovery worker to re-initialize the MHI stack
 * asynchronously.
 */
static void mhi_pci_io_resume(struct pci_dev *pdev)
{
	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);

	/* NOTE(review): informational message logged at error level — dev_info may fit better */
	dev_err(&pdev->dev, "PCI slot reset done\n");

	queue_work(system_long_wq, &mhi_pdev->recovery_work);
}
1075 
/* AER and reset callbacks hooked into the PCI core error-recovery flow */
static const struct pci_error_handlers mhi_pci_err_handler = {
	.error_detected = mhi_pci_error_detected,
	.slot_reset = mhi_pci_slot_reset,
	.resume = mhi_pci_io_resume,
	.reset_prepare = mhi_pci_reset_prepare,
	.reset_done = mhi_pci_reset_done,
};
1083 
/*
 * Runtime suspend: move the modem to MHI M3 state (when applicable) and put
 * the PCI function into a wakeup-capable low-power state. Returns -EBUSY if
 * the M3 transition fails so the PM core can retry later.
 */
static int  __maybe_unused mhi_pci_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
	int err;

	/* Already suspended (e.g. via the system suspend path): nothing to do */
	if (test_and_set_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
		return 0;

	/* Quiesce async paths that could touch the device while suspended */
	del_timer(&mhi_pdev->health_check_timer);
	cancel_work_sync(&mhi_pdev->recovery_work);

	/* Skip the MHI transition unless the device is started and in AMSS EE */
	if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
			mhi_cntrl->ee != MHI_EE_AMSS)
		goto pci_suspend; /* Nothing to do at MHI level */

	/* Transition to M3 state */
	err = mhi_pm_suspend(mhi_cntrl);
	if (err) {
		dev_err(&pdev->dev, "failed to suspend device: %d\n", err);
		/* Roll back the SUSPENDED flag set above */
		clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status);
		return -EBUSY;
	}

pci_suspend:
	pci_disable_device(pdev);
	/* Arm PME so the modem can wake the host from D3 */
	pci_wake_from_d3(pdev, true);

	return 0;
}
1115 
/*
 * Runtime resume: re-enable the PCI function and bring the modem back from
 * M3 to M0. Always returns 0; if the device cannot be resumed (e.g. it lost
 * power in D3cold), recovery is triggered asynchronously instead of failing,
 * to keep the PM core's device state consistent.
 */
static int __maybe_unused mhi_pci_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
	int err;

	/* Not suspended (or already resumed): nothing to do */
	if (!test_and_clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
		return 0;

	err = pci_enable_device(pdev);
	if (err)
		goto err_recovery;

	pci_set_master(pdev);
	/* Disarm PME now that we are awake */
	pci_wake_from_d3(pdev, false);

	/* Skip the MHI transition unless the device is started and in AMSS EE */
	if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
			mhi_cntrl->ee != MHI_EE_AMSS)
		return 0; /* Nothing to do at MHI level */

	/* Exit M3, transition to M0 state */
	err = mhi_pm_resume(mhi_cntrl);
	if (err) {
		dev_err(&pdev->dev, "failed to resume device: %d\n", err);
		goto err_recovery;
	}

	/* Resume health check */
	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);

	/* It can be a remote wakeup (no mhi runtime_get), update access time */
	pm_runtime_mark_last_busy(dev);

	return 0;

err_recovery:
	/* Do not fail to not mess up our PCI device state, the device likely
	 * lost power (d3cold) and we simply need to reset it from the recovery
	 * procedure, trigger the recovery asynchronously to prevent system
	 * suspend exit delaying.
	 */
	queue_work(system_long_wq, &mhi_pdev->recovery_work);
	pm_runtime_mark_last_busy(dev);

	return 0;
}
1163 
/*
 * System suspend: disable runtime PM first so it cannot race with the
 * system-wide transition, then reuse the runtime suspend path.
 */
static int  __maybe_unused mhi_pci_suspend(struct device *dev)
{
	pm_runtime_disable(dev);
	return mhi_pci_runtime_suspend(dev);
}
1169 
/*
 * System resume: reuse the runtime resume path, then re-enable runtime PM
 * that was disabled in mhi_pci_suspend().
 */
static int __maybe_unused mhi_pci_resume(struct device *dev)
{
	int ret;

	/* Depending the platform, device may have lost power (d3cold), we need
	 * to resume it now to check its state and recover when necessary.
	 */
	ret = mhi_pci_runtime_resume(dev);
	pm_runtime_enable(dev);

	return ret;
}
1182 
/*
 * Hibernation freeze/poweroff: fully power the modem down instead of
 * suspending it, since the device context may not survive hibernation.
 */
static int __maybe_unused mhi_pci_freeze(struct device *dev)
{
	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;

	/* We want to stop all operations, hibernation does not guarantee that
	 * device will be in the same state as before freezing, especially if
	 * the intermediate restore kernel reinitializes MHI device with new
	 * context.
	 */
	flush_work(&mhi_pdev->recovery_work);
	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
		mhi_power_down(mhi_cntrl, true);
		mhi_unprepare_after_power_down(mhi_cntrl);
	}

	return 0;
}
1201 
/*
 * Hibernation restore/thaw: the modem was fully powered down in freeze, so
 * re-initialize it asynchronously through the recovery worker.
 */
static int __maybe_unused mhi_pci_restore(struct device *dev)
{
	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);

	/* Reinitialize the device */
	queue_work(system_long_wq, &mhi_pdev->recovery_work);

	return 0;
}
1211 
/*
 * PM callbacks: system sleep reuses the runtime PM paths; hibernation
 * (freeze/poweroff) powers the modem fully down and thaw/restore both
 * re-initialize it via the recovery worker.
 */
static const struct dev_pm_ops mhi_pci_pm_ops = {
	SET_RUNTIME_PM_OPS(mhi_pci_runtime_suspend, mhi_pci_runtime_resume, NULL)
#ifdef CONFIG_PM_SLEEP
	.suspend = mhi_pci_suspend,
	.resume = mhi_pci_resume,
	.freeze = mhi_pci_freeze,
	.thaw = mhi_pci_restore,
	.poweroff = mhi_pci_freeze,
	.restore = mhi_pci_restore,
#endif
};
1223 
/* PCI driver registration; module init/exit handled by module_pci_driver() */
static struct pci_driver mhi_pci_driver = {
	.name		= "mhi-pci-generic",
	.id_table	= mhi_pci_id_table,
	.probe		= mhi_pci_probe,
	.remove		= mhi_pci_remove,
	.shutdown	= mhi_pci_shutdown,
	.err_handler	= &mhi_pci_err_handler,
	.driver.pm	= &mhi_pci_pm_ops
};
module_pci_driver(mhi_pci_driver);
1234 
1235 MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
1236 MODULE_DESCRIPTION("Modem Host Interface (MHI) PCI controller driver");
1237 MODULE_LICENSE("GPL");
1238