xref: /openbmc/linux/drivers/bus/mhi/host/pci_generic.c (revision 66c98360)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * MHI PCI driver - MHI over PCI controller driver
4  *
5  * This module is a generic driver for registering MHI-over-PCI devices,
6  * such as PCIe QCOM modems.
7  *
8  * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
9  */
10 
11 #include <linux/delay.h>
12 #include <linux/device.h>
13 #include <linux/mhi.h>
14 #include <linux/module.h>
15 #include <linux/pci.h>
16 #include <linux/pm_runtime.h>
17 #include <linux/timer.h>
18 #include <linux/workqueue.h>
19 
/* MHI MMIO registers live in PCI BAR 0 unless the device info overrides it */
#define MHI_PCI_DEFAULT_BAR_NUM 0

/* Settle time after a device reset, in milliseconds (not referenced in this excerpt) */
#define MHI_POST_RESET_DELAY_MS 2000

/* Period of the device aliveness polling timer: two seconds */
#define HEALTH_CHECK_PERIOD (HZ * 2)

/* PCI VID definitions */
#define PCI_VENDOR_ID_THALES	0x1269
#define PCI_VENDOR_ID_QUECTEL	0x1eac
29 
/**
 * struct mhi_pci_dev_info - MHI PCI device specific information
 * @config: MHI controller configuration
 * @name: name of the PCI module
 * @fw: firmware path (if any)
 * @edl: emergency download mode firmware path (if any)
 * @bar_num: PCI base address register to use for MHI MMIO register space
 * @dma_data_width: DMA transfer word size (32 or 64 bits)
 * @mru_default: default MRU size for MBIM network packets
 * @sideband_wake: Devices using dedicated sideband GPIO for wakeup instead
 *		   of inband wake support (such as sdx24)
 *
 * One instance per supported modem; attached to the matching entry of
 * mhi_pci_id_table via .driver_data and consumed by mhi_pci_probe().
 */
struct mhi_pci_dev_info {
	const struct mhi_controller_config *config;
	const char *name;
	const char *fw;
	const char *edl;
	unsigned int bar_num;
	unsigned int dma_data_width;
	unsigned int mru_default;	/* 0 presumably keeps the MHI core default — TODO confirm */
	bool sideband_wake;
};
52 
/*
 * Channel configuration templates. Each macro expands to a struct
 * mhi_channel_config initializer: (ch_num, ch_name) identify the channel,
 * el_count sizes its transfer ring and ev_ring selects the servicing
 * event ring.
 *
 * Fixes vs. previous revision: stray trailing '\' continuations after the
 * closing braces (which silently extended the macros onto the following
 * line) are removed, and macro arguments are parenthesized.
 */

/* Software uplink (host -> device) channel, AMSS execution environment */
#define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \
	{						\
		.num = (ch_num),			\
		.name = (ch_name),			\
		.num_elements = (el_count),		\
		.event_ring = (ev_ring),		\
		.dir = DMA_TO_DEVICE,			\
		.ee_mask = BIT(MHI_EE_AMSS),		\
		.pollcfg = 0,				\
		.doorbell = MHI_DB_BRST_DISABLE,	\
		.lpm_notify = false,			\
		.offload_channel = false,		\
		.doorbell_mode_switch = false,		\
	}

/* Software downlink (device -> host) channel, AMSS execution environment */
#define MHI_CHANNEL_CONFIG_DL(ch_num, ch_name, el_count, ev_ring) \
	{						\
		.num = (ch_num),			\
		.name = (ch_name),			\
		.num_elements = (el_count),		\
		.event_ring = (ev_ring),		\
		.dir = DMA_FROM_DEVICE,			\
		.ee_mask = BIT(MHI_EE_AMSS),		\
		.pollcfg = 0,				\
		.doorbell = MHI_DB_BRST_DISABLE,	\
		.lpm_notify = false,			\
		.offload_channel = false,		\
		.doorbell_mode_switch = false,		\
	}

/* Downlink channel with buffers auto-queued by the MHI core */
#define MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(ch_num, ch_name, el_count, ev_ring) \
	{						\
		.num = (ch_num),			\
		.name = (ch_name),			\
		.num_elements = (el_count),		\
		.event_ring = (ev_ring),		\
		.dir = DMA_FROM_DEVICE,			\
		.ee_mask = BIT(MHI_EE_AMSS),		\
		.pollcfg = 0,				\
		.doorbell = MHI_DB_BRST_DISABLE,	\
		.lpm_notify = false,			\
		.offload_channel = false,		\
		.doorbell_mode_switch = false,		\
		.auto_queue = true,			\
	}

/* Control event ring: no IRQ moderation, MSI vector (ev_ring) + 1 */
#define MHI_EVENT_CONFIG_CTRL(ev_ring, el_count) \
	{					\
		.num_elements = (el_count),	\
		.irq_moderation_ms = 0,		\
		.irq = (ev_ring) + 1,		\
		.priority = 1,			\
		.mode = MHI_DB_BRST_DISABLE,	\
		.data_type = MHI_ER_CTRL,	\
		.hardware_event = false,	\
		.client_managed = false,	\
		.offload_channel = false,	\
	}

/* Hardware-accelerated uplink channel (burst doorbell, mode switch) */
#define MHI_CHANNEL_CONFIG_HW_UL(ch_num, ch_name, el_count, ev_ring) \
	{						\
		.num = (ch_num),			\
		.name = (ch_name),			\
		.num_elements = (el_count),		\
		.event_ring = (ev_ring),		\
		.dir = DMA_TO_DEVICE,			\
		.ee_mask = BIT(MHI_EE_AMSS),		\
		.pollcfg = 0,				\
		.doorbell = MHI_DB_BRST_ENABLE,		\
		.lpm_notify = false,			\
		.offload_channel = false,		\
		.doorbell_mode_switch = true,		\
	}

/* Hardware-accelerated downlink channel (burst doorbell, mode switch) */
#define MHI_CHANNEL_CONFIG_HW_DL(ch_num, ch_name, el_count, ev_ring) \
	{						\
		.num = (ch_num),			\
		.name = (ch_name),			\
		.num_elements = (el_count),		\
		.event_ring = (ev_ring),		\
		.dir = DMA_FROM_DEVICE,			\
		.ee_mask = BIT(MHI_EE_AMSS),		\
		.pollcfg = 0,				\
		.doorbell = MHI_DB_BRST_ENABLE,		\
		.lpm_notify = false,			\
		.offload_channel = false,		\
		.doorbell_mode_switch = true,		\
	}

/* Uplink channel available in the SBL (secondary bootloader) environment */
#define MHI_CHANNEL_CONFIG_UL_SBL(ch_num, ch_name, el_count, ev_ring) \
	{						\
		.num = (ch_num),			\
		.name = (ch_name),			\
		.num_elements = (el_count),		\
		.event_ring = (ev_ring),		\
		.dir = DMA_TO_DEVICE,			\
		.ee_mask = BIT(MHI_EE_SBL),		\
		.pollcfg = 0,				\
		.doorbell = MHI_DB_BRST_DISABLE,	\
		.lpm_notify = false,			\
		.offload_channel = false,		\
		.doorbell_mode_switch = false,		\
	}

/* Downlink channel available in the SBL environment */
#define MHI_CHANNEL_CONFIG_DL_SBL(ch_num, ch_name, el_count, ev_ring) \
	{						\
		.num = (ch_num),			\
		.name = (ch_name),			\
		.num_elements = (el_count),		\
		.event_ring = (ev_ring),		\
		.dir = DMA_FROM_DEVICE,			\
		.ee_mask = BIT(MHI_EE_SBL),		\
		.pollcfg = 0,				\
		.doorbell = MHI_DB_BRST_DISABLE,	\
		.lpm_notify = false,			\
		.offload_channel = false,		\
		.doorbell_mode_switch = false,		\
	}

/* Uplink channel available in the flash-programmer (EDL) environment */
#define MHI_CHANNEL_CONFIG_UL_FP(ch_num, ch_name, el_count, ev_ring) \
	{						\
		.num = (ch_num),			\
		.name = (ch_name),			\
		.num_elements = (el_count),		\
		.event_ring = (ev_ring),		\
		.dir = DMA_TO_DEVICE,			\
		.ee_mask = BIT(MHI_EE_FP),		\
		.pollcfg = 0,				\
		.doorbell = MHI_DB_BRST_DISABLE,	\
		.lpm_notify = false,			\
		.offload_channel = false,		\
		.doorbell_mode_switch = false,		\
	}

/* Downlink channel available in the flash-programmer (EDL) environment */
#define MHI_CHANNEL_CONFIG_DL_FP(ch_num, ch_name, el_count, ev_ring) \
	{						\
		.num = (ch_num),			\
		.name = (ch_name),			\
		.num_elements = (el_count),		\
		.event_ring = (ev_ring),		\
		.dir = DMA_FROM_DEVICE,			\
		.ee_mask = BIT(MHI_EE_FP),		\
		.pollcfg = 0,				\
		.doorbell = MHI_DB_BRST_DISABLE,	\
		.lpm_notify = false,			\
		.offload_channel = false,		\
		.doorbell_mode_switch = false,		\
	}

/* Software data event ring: 5 ms IRQ moderation */
#define MHI_EVENT_CONFIG_DATA(ev_ring, el_count) \
	{					\
		.num_elements = (el_count),	\
		.irq_moderation_ms = 5,		\
		.irq = (ev_ring) + 1,		\
		.priority = 1,			\
		.mode = MHI_DB_BRST_DISABLE,	\
		.data_type = MHI_ER_DATA,	\
		.hardware_event = false,	\
		.client_managed = false,	\
		.offload_channel = false,	\
	}

/* Hardware data event ring, dedicated to channel ch_num, 1 ms moderation */
#define MHI_EVENT_CONFIG_HW_DATA(ev_ring, el_count, ch_num) \
	{					\
		.num_elements = (el_count),	\
		.irq_moderation_ms = 1,		\
		.irq = (ev_ring) + 1,		\
		.priority = 1,			\
		.mode = MHI_DB_BRST_DISABLE,	\
		.data_type = MHI_ER_DATA,	\
		.hardware_event = true,		\
		.client_managed = false,	\
		.offload_channel = false,	\
		.channel = (ch_num),		\
	}
228 
/* Channel set shared by the Qualcomm SDX24/SDX55/SDX65 modem configs */
static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 16, 1),
	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 16, 1),
	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 4, 0),
	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 4, 0),
	MHI_CHANNEL_CONFIG_UL(14, "QMI", 4, 0),
	MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0),
	MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0),
	MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 8, 0),
	MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
	MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 2),
	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 3),
};

/*
 * Not const: mhi_pci_get_irqs() patches the .irq fields when fewer MSI
 * vectors than requested could be allocated (shared-MSI fallback).
 */
static struct mhi_event_config modem_qcom_v1_mhi_events[] = {
	/* first ring is control+data ring */
	MHI_EVENT_CONFIG_CTRL(0, 64),
	/* DIAG dedicated event ring */
	MHI_EVENT_CONFIG_DATA(1, 128),
	/* Hardware channels request dedicated hardware event rings */
	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
	MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101)
};

static const struct mhi_controller_config modem_qcom_v1_mhiv_config = {
	.max_channels = 128,
	.timeout_ms = 8000,
	.num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels),
	.ch_cfg = modem_qcom_v1_mhi_channels,
	.num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events),
	.event_cfg = modem_qcom_v1_mhi_events,
};
262 
/* Qualcomm SDX65-based modem */
static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = {
	.name = "qcom-sdx65m",
	.fw = "qcom/sdx65m/xbl.elf",
	.edl = "qcom/sdx65m/edl.mbn",
	.config = &modem_qcom_v1_mhiv_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.sideband_wake = false,
};

/* Qualcomm SDX55-based modem; 32 KiB default MRU for MBIM packets */
static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {
	.name = "qcom-sdx55m",
	.fw = "qcom/sdx55m/sbl1.mbn",
	.edl = "qcom/sdx55m/edl.mbn",
	.config = &modem_qcom_v1_mhiv_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
	.sideband_wake = false,
};

/* Qualcomm SDX24-based modem: sideband wake GPIO instead of inband wake */
static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = {
	.name = "qcom-sdx24",
	.edl = "qcom/prog_firehose_sdx24.mbn",
	.config = &modem_qcom_v1_mhiv_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.sideband_wake = true,
};
292 
/* Quectel EM1xx series (sdx24-based) channel layout */
static const struct mhi_channel_config mhi_quectel_em1xx_channels[] = {
	MHI_CHANNEL_CONFIG_UL(0, "NMEA", 32, 0),
	MHI_CHANNEL_CONFIG_DL(1, "NMEA", 32, 0),
	MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
	MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1),
	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
	/* The EDL firmware is a flash-programmer exposing firehose protocol */
	MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
	MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
};

/* Not const: .irq fields may be patched by mhi_pci_get_irqs() (shared MSI) */
static struct mhi_event_config mhi_quectel_em1xx_events[] = {
	MHI_EVENT_CONFIG_CTRL(0, 128),
	MHI_EVENT_CONFIG_DATA(1, 128),
	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
	MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101)
};

static const struct mhi_controller_config modem_quectel_em1xx_config = {
	.max_channels = 128,
	.timeout_ms = 20000,
	.num_channels = ARRAY_SIZE(mhi_quectel_em1xx_channels),
	.ch_cfg = mhi_quectel_em1xx_channels,
	.num_events = ARRAY_SIZE(mhi_quectel_em1xx_events),
	.event_cfg = mhi_quectel_em1xx_events,
};

/* Quectel EM120R-GL / EM160R-GL modules (see mhi_pci_id_table entries) */
static const struct mhi_pci_dev_info mhi_quectel_em1xx_info = {
	.name = "quectel-em1xx",
	.edl = "qcom/prog_firehose_sdx24.mbn",
	.config = &modem_quectel_em1xx_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
	.sideband_wake = true,
};
336 
/* Foxconn T99Wxxx / DW5930e family channel layout */
static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = {
	MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 32, 0),
	MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 32, 0),
	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1),
	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
};

/* Not const: .irq fields may be patched by mhi_pci_get_irqs() (shared MSI) */
static struct mhi_event_config mhi_foxconn_sdx55_events[] = {
	MHI_EVENT_CONFIG_CTRL(0, 128),
	MHI_EVENT_CONFIG_DATA(1, 128),
	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
	MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101)
};

/* Shared by the sdx24, sdx55 and sdx65 Foxconn variants below */
static const struct mhi_controller_config modem_foxconn_sdx55_config = {
	.max_channels = 128,
	.timeout_ms = 20000,
	.num_channels = ARRAY_SIZE(mhi_foxconn_sdx55_channels),
	.ch_cfg = mhi_foxconn_sdx55_channels,
	.num_events = ARRAY_SIZE(mhi_foxconn_sdx55_events),
	.event_cfg = mhi_foxconn_sdx55_events,
};

static const struct mhi_pci_dev_info mhi_foxconn_sdx24_info = {
	.name = "foxconn-sdx24",
	.config = &modem_foxconn_sdx55_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
	.sideband_wake = false,
};

/* Only the sdx55 variant has firmware/EDL images to load */
static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
	.name = "foxconn-sdx55",
	.fw = "qcom/sdx55m/sbl1.mbn",
	.edl = "qcom/sdx55m/edl.mbn",
	.config = &modem_foxconn_sdx55_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
	.sideband_wake = false,
};

static const struct mhi_pci_dev_info mhi_foxconn_sdx65_info = {
	.name = "foxconn-sdx65",
	.config = &modem_foxconn_sdx55_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
	.sideband_wake = false,
};
394 
/* Cinterion MV3x channel layout: MBIM only (no DIAG/QMI channels) */
static const struct mhi_channel_config mhi_mv3x_channels[] = {
	MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 64, 0),
	MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 64, 0),
	/* MBIM Control Channel */
	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 64, 0),
	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 64, 0),
	/* MBIM Data Channel */
	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 512, 2),
	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 512, 3),
};

/* Not const: .irq fields may be patched by mhi_pci_get_irqs() (shared MSI) */
static struct mhi_event_config mhi_mv3x_events[] = {
	MHI_EVENT_CONFIG_CTRL(0, 256),
	MHI_EVENT_CONFIG_DATA(1, 256),
	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
	MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101),
};

static const struct mhi_controller_config modem_mv3x_config = {
	.max_channels = 128,
	.timeout_ms = 20000,
	.num_channels = ARRAY_SIZE(mhi_mv3x_channels),
	.ch_cfg = mhi_mv3x_channels,
	.num_events = ARRAY_SIZE(mhi_mv3x_events),
	.event_cfg = mhi_mv3x_events,
};

/* .sideband_wake omitted on both MV3x entries: defaults to false */
static const struct mhi_pci_dev_info mhi_mv31_info = {
	.name = "cinterion-mv31",
	.config = &modem_mv3x_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
};

static const struct mhi_pci_dev_info mhi_mv32_info = {
	.name = "cinterion-mv32",
	.config = &modem_mv3x_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
};
437 
/*
 * Sierra Wireless EM919x (sdx55) channel layout. Hardware channels use
 * event rings 1 and 2; ring 0 serves control, software data and DIAG.
 */
static const struct mhi_channel_config mhi_sierra_em919x_channels[] = {
	MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
	MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 256, 0),
	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 0),
	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 0),
	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 128, 0),
	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 128, 0),
	MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
	MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 512, 1),
	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 512, 2),
};

/* Not const: .irq fields may be patched by mhi_pci_get_irqs() (shared MSI) */
static struct mhi_event_config modem_sierra_em919x_mhi_events[] = {
	/* first ring is control+data and DIAG ring */
	MHI_EVENT_CONFIG_CTRL(0, 2048),
	/* Hardware channels request dedicated hardware event rings */
	MHI_EVENT_CONFIG_HW_DATA(1, 2048, 100),
	MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
};

static const struct mhi_controller_config modem_sierra_em919x_config = {
	.max_channels = 128,
	.timeout_ms = 24000,
	.num_channels = ARRAY_SIZE(mhi_sierra_em919x_channels),
	.ch_cfg = mhi_sierra_em919x_channels,
	.num_events = ARRAY_SIZE(modem_sierra_em919x_mhi_events),
	.event_cfg = modem_sierra_em919x_mhi_events,
};

static const struct mhi_pci_dev_info mhi_sierra_em919x_info = {
	.name = "sierra-em919x",
	.config = &modem_sierra_em919x_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.sideband_wake = false,
};
477 
478 static const struct mhi_channel_config mhi_telit_fn980_hw_v1_channels[] = {
479 	MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
480 	MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
481 	MHI_CHANNEL_CONFIG_UL(20, "IPCR", 16, 0),
482 	MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 16, 0),
483 	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 1),
484 	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 2),
485 };
486 
487 static struct mhi_event_config mhi_telit_fn980_hw_v1_events[] = {
488 	MHI_EVENT_CONFIG_CTRL(0, 128),
489 	MHI_EVENT_CONFIG_HW_DATA(1, 1024, 100),
490 	MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
491 };
492 
493 static struct mhi_controller_config modem_telit_fn980_hw_v1_config = {
494 	.max_channels = 128,
495 	.timeout_ms = 20000,
496 	.num_channels = ARRAY_SIZE(mhi_telit_fn980_hw_v1_channels),
497 	.ch_cfg = mhi_telit_fn980_hw_v1_channels,
498 	.num_events = ARRAY_SIZE(mhi_telit_fn980_hw_v1_events),
499 	.event_cfg = mhi_telit_fn980_hw_v1_events,
500 };
501 
502 static const struct mhi_pci_dev_info mhi_telit_fn980_hw_v1_info = {
503 	.name = "telit-fn980-hwv1",
504 	.fw = "qcom/sdx55m/sbl1.mbn",
505 	.edl = "qcom/sdx55m/edl.mbn",
506 	.config = &modem_telit_fn980_hw_v1_config,
507 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
508 	.dma_data_width = 32,
509 	.mru_default = 32768,
510 	.sideband_wake = false,
511 };
512 
/* Telit FN990 (sdx65) channel layout */
static const struct mhi_channel_config mhi_telit_fn990_channels[] = {
	MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
	MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 1),
	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 1),
	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
	MHI_CHANNEL_CONFIG_UL(92, "DUN2", 32, 1),
	MHI_CHANNEL_CONFIG_DL(93, "DUN2", 32, 1),
	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
};

/* Not const: .irq fields may be patched by mhi_pci_get_irqs() (shared MSI) */
static struct mhi_event_config mhi_telit_fn990_events[] = {
	MHI_EVENT_CONFIG_CTRL(0, 128),
	MHI_EVENT_CONFIG_DATA(1, 128),
	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
	MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101)
};

static const struct mhi_controller_config modem_telit_fn990_config = {
	.max_channels = 128,
	.timeout_ms = 20000,
	.num_channels = ARRAY_SIZE(mhi_telit_fn990_channels),
	.ch_cfg = mhi_telit_fn990_channels,
	.num_events = ARRAY_SIZE(mhi_telit_fn990_events),
	.event_cfg = mhi_telit_fn990_events,
};

static const struct mhi_pci_dev_info mhi_telit_fn990_info = {
	.name = "telit-fn990",
	.config = &modem_telit_fn990_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.sideband_wake = false,
	.mru_default = 32768,
};
552 
/* Keep the list sorted based on the PID. New VID should be added as the last entry */
static const struct pci_device_id mhi_pci_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304),
		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info },
	/* Subsystem-specific 0x0306 entries must precede the generic one below */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, PCI_VENDOR_ID_QCOM, 0x010c),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
	/* EM919x (sdx55), use the same vid:pid as qcom-sdx55m */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x18d7, 0x0200),
		.driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info },
	/* Telit FN980 hardware revision v1 */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x1C5D, 0x2000),
		.driver_data = (kernel_ulong_t) &mhi_telit_fn980_hw_v1_info },
	/* Catch-all sdx55 entry */
	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306),
		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info },
	/* Telit FN990 */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010),
		.driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
	{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1001), /* EM120R-GL (sdx24) */
		.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
	{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1002), /* EM160R-GL (sdx24) */
		.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
	{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x2001), /* EM120R-GL for FCCL (sdx24) */
		.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
	/* T99W175 (sdx55), Both for eSIM and Non-eSIM */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0ab),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
	/* DW5930e (sdx55), With eSIM, It's also T99W175 */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b0),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
	/* DW5930e (sdx55), Non-eSIM, It's also T99W175 */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b1),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
	/* T99W175 (sdx55), Based on Qualcomm new baseline */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0bf),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
	/* T99W175 (sdx55) */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0c3),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
	/* T99W368 (sdx65) */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d8),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
	/* T99W373 (sdx62) */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d9),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
	/* T99W510 (sdx24), variant 1 */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f0),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx24_info },
	/* T99W510 (sdx24), variant 2 */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f1),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx24_info },
	/* T99W510 (sdx24), variant 3 */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f2),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx24_info },
	/* MV31-W (Cinterion) */
	{ PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00b3),
		.driver_data = (kernel_ulong_t) &mhi_mv31_info },
	/* MV31-W (Cinterion), based on new baseline */
	{ PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00b4),
		.driver_data = (kernel_ulong_t) &mhi_mv31_info },
	/* MV32-WA (Cinterion) */
	{ PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00ba),
		.driver_data = (kernel_ulong_t) &mhi_mv32_info },
	/* MV32-WB (Cinterion) */
	{ PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00bb),
		.driver_data = (kernel_ulong_t) &mhi_mv32_info },
	/* T99W175 (sdx55), HP variant; 0x03f0 is HP's PCI vendor ID */
	{ PCI_DEVICE(0x03f0, 0x0a6c),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
	{  }	/* terminating all-zero entry */
};
MODULE_DEVICE_TABLE(pci, mhi_pci_id_table);
626 
/* Bits for mhi_pci_device::status */
enum mhi_pci_device_status {
	MHI_PCI_DEV_STARTED,	/* MHI stack is powered up and running */
	MHI_PCI_DEV_SUSPENDED,	/* presumably set by suspend handlers (not in this excerpt) */
};
631 
/* Per-device driver context, allocated in probe and stored as pci drvdata */
struct mhi_pci_device {
	struct mhi_controller mhi_cntrl;	/* embedded controller, zeroed at alloc */
	struct pci_saved_state *pci_state;	/* config-space snapshot for recovery */
	struct work_struct recovery_work;	/* runs mhi_pci_recovery_work() */
	struct timer_list health_check_timer;	/* periodic aliveness poll */
	unsigned long status;			/* bitmask of enum mhi_pci_device_status */
};
639 
640 static int mhi_pci_read_reg(struct mhi_controller *mhi_cntrl,
641 			    void __iomem *addr, u32 *out)
642 {
643 	*out = readl(addr);
644 	return 0;
645 }
646 
/* MMIO write accessor for the MHI core: 32-bit write */
static void mhi_pci_write_reg(struct mhi_controller *mhi_cntrl,
			      void __iomem *addr, u32 val)
{
	writel(val, addr);
}
652 
/*
 * MHI core status callback: adjust the runtime PM policy according to
 * device state transitions.
 */
static void mhi_pci_status_cb(struct mhi_controller *mhi_cntrl,
			      enum mhi_callback cb)
{
	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);

	switch (cb) {
	case MHI_CB_FATAL_ERROR:
	case MHI_CB_SYS_ERROR:
		/* Keep the device awake while it is in an error state */
		dev_warn(&pdev->dev, "firmware crashed (%u)\n", cb);
		pm_runtime_forbid(&pdev->dev);
		break;
	case MHI_CB_EE_MISSION_MODE:
		/* Device reached mission mode: runtime suspend allowed again */
		pm_runtime_allow(&pdev->dev);
		break;
	default:
		break;
	}
}
672 
/* No-op wake hook, installed for sideband-wake devices (see mhi_pci_probe) */
static void mhi_pci_wake_get_nop(struct mhi_controller *mhi_cntrl, bool force)
{
	/* no-op */
}
677 
/* No-op wake hook, installed for sideband-wake devices (see mhi_pci_probe) */
static void mhi_pci_wake_put_nop(struct mhi_controller *mhi_cntrl, bool override)
{
	/* no-op */
}
682 
/* No-op wake hook, installed for sideband-wake devices (see mhi_pci_probe) */
static void mhi_pci_wake_toggle_nop(struct mhi_controller *mhi_cntrl)
{
	/* no-op */
}
687 
688 static bool mhi_pci_is_alive(struct mhi_controller *mhi_cntrl)
689 {
690 	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
691 	u16 vendor = 0;
692 
693 	if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor))
694 		return false;
695 
696 	if (vendor == (u16) ~0 || vendor == 0)
697 		return false;
698 
699 	return true;
700 }
701 
/*
 * Map the MHI MMIO BAR and prepare the PCI device for DMA.
 * @bar_num: BAR holding the MHI register space
 * @dma_mask: DMA addressing capability of the device
 *
 * Uses managed (pcim_*) helpers, so no explicit teardown is required.
 * Returns 0 on success or a negative errno.
 */
static int mhi_pci_claim(struct mhi_controller *mhi_cntrl,
			 unsigned int bar_num, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
	int err;

	/* Make sure the BAR has a valid resource assignment */
	err = pci_assign_resource(pdev, bar_num);
	if (err)
		return err;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device: %d\n", err);
		return err;
	}

	err = pcim_iomap_regions(pdev, 1 << bar_num, pci_name(pdev));
	if (err) {
		dev_err(&pdev->dev, "failed to map pci region: %d\n", err);
		return err;
	}
	/* Hand the mapped register window to the MHI core */
	mhi_cntrl->regs = pcim_iomap_table(pdev)[bar_num];
	mhi_cntrl->reg_len = pci_resource_len(pdev, bar_num);

	err = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
	if (err) {
		dev_err(&pdev->dev, "Cannot set proper DMA mask\n");
		return err;
	}

	/* Enable bus mastering so the device can DMA */
	pci_set_master(pdev);

	return 0;
}
736 
/*
 * Allocate MSI vectors and build the IRQ table handed to the MHI core:
 * vector 0 for BHI plus one vector per event ring when available,
 * falling back to a single shared vector otherwise.
 * Returns 0 on success or a negative errno.
 */
static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl,
			    const struct mhi_controller_config *mhi_cntrl_config)
{
	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
	int nr_vectors, i;
	int *irq;

	/*
	 * Alloc one MSI vector for BHI + one vector per event ring, ideally...
	 * No explicit pci_free_irq_vectors required, done by pcim_release.
	 */
	mhi_cntrl->nr_irqs = 1 + mhi_cntrl_config->num_events;

	nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSI);
	if (nr_vectors < 0) {
		dev_err(&pdev->dev, "Error allocating MSI vectors %d\n",
			nr_vectors);
		return nr_vectors;
	}

	if (nr_vectors < mhi_cntrl->nr_irqs) {
		dev_warn(&pdev->dev, "using shared MSI\n");

		/* Patch msi vectors, use only one (shared) */
		for (i = 0; i < mhi_cntrl_config->num_events; i++)
			mhi_cntrl_config->event_cfg[i].irq = 0;
		mhi_cntrl->nr_irqs = 1;
	}

	/* Managed allocation: freed automatically when the driver detaches */
	irq = devm_kcalloc(&pdev->dev, mhi_cntrl->nr_irqs, sizeof(int), GFP_KERNEL);
	if (!irq)
		return -ENOMEM;

	for (i = 0; i < mhi_cntrl->nr_irqs; i++) {
		/* Clamp to the last allocated vector if fewer were granted */
		int vector = i >= nr_vectors ? (nr_vectors - 1) : i;

		irq[i] = pci_irq_vector(pdev, vector);
	}

	mhi_cntrl->irq = irq;

	return 0;
}
780 
781 static int mhi_pci_runtime_get(struct mhi_controller *mhi_cntrl)
782 {
783 	/* The runtime_get() MHI callback means:
784 	 *    Do whatever is requested to leave M3.
785 	 */
786 	return pm_runtime_get(mhi_cntrl->cntrl_dev);
787 }
788 
789 static void mhi_pci_runtime_put(struct mhi_controller *mhi_cntrl)
790 {
791 	/* The runtime_put() MHI callback means:
792 	 *    Device can be moved in M3 state.
793 	 */
794 	pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev);
795 	pm_runtime_put(mhi_cntrl->cntrl_dev);
796 }
797 
/*
 * Attempt to bring back a dead device: tear down MHI state, restore the
 * PCI config space snapshot taken at probe time and power MHI back up.
 * Falls back to a PCI function reset if the device does not respond.
 */
static void mhi_pci_recovery_work(struct work_struct *work)
{
	struct mhi_pci_device *mhi_pdev = container_of(work, struct mhi_pci_device,
						       recovery_work);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
	int err;

	dev_warn(&pdev->dev, "device recovery started\n");

	/* Stop health polling and keep the device awake during recovery */
	del_timer(&mhi_pdev->health_check_timer);
	pm_runtime_forbid(&pdev->dev);

	/* Clean up MHI state */
	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
		mhi_power_down(mhi_cntrl, false);
		mhi_unprepare_after_power_down(mhi_cntrl);
	}

	/* Restore the config space snapshot cached in probe */
	pci_set_power_state(pdev, PCI_D0);
	pci_load_saved_state(pdev, mhi_pdev->pci_state);
	pci_restore_state(pdev);

	if (!mhi_pci_is_alive(mhi_cntrl))
		goto err_try_reset;

	err = mhi_prepare_for_power_up(mhi_cntrl);
	if (err)
		goto err_try_reset;

	err = mhi_sync_power_up(mhi_cntrl);
	if (err)
		goto err_unprepare;

	dev_dbg(&pdev->dev, "Recovery completed\n");

	set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
	/* Resume periodic health polling */
	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
	return;

err_unprepare:
	mhi_unprepare_after_power_down(mhi_cntrl);
err_try_reset:
	/* Last resort: full PCI function reset */
	if (pci_reset_function(pdev))
		dev_err(&pdev->dev, "Recovery failed\n");
}
844 
845 static void health_check(struct timer_list *t)
846 {
847 	struct mhi_pci_device *mhi_pdev = from_timer(mhi_pdev, t, health_check_timer);
848 	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
849 
850 	if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
851 			test_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
852 		return;
853 
854 	if (!mhi_pci_is_alive(mhi_cntrl)) {
855 		dev_err(mhi_cntrl->cntrl_dev, "Device died\n");
856 		queue_work(system_long_wq, &mhi_pdev->recovery_work);
857 		return;
858 	}
859 
860 	/* reschedule in two seconds */
861 	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
862 }
863 
/*
 * Probe: configure the MHI controller for the matched modem, register it
 * with the MHI core and power it up. Device-specific parameters come
 * from the mhi_pci_dev_info attached to the PCI id table entry.
 * Returns 0 on success or a negative errno.
 */
static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	const struct mhi_pci_dev_info *info = (struct mhi_pci_dev_info *) id->driver_data;
	const struct mhi_controller_config *mhi_cntrl_config;
	struct mhi_pci_device *mhi_pdev;
	struct mhi_controller *mhi_cntrl;
	int err;

	dev_info(&pdev->dev, "MHI PCI device found: %s\n", info->name);

	/* mhi_pdev.mhi_cntrl must be zero-initialized */
	mhi_pdev = devm_kzalloc(&pdev->dev, sizeof(*mhi_pdev), GFP_KERNEL);
	if (!mhi_pdev)
		return -ENOMEM;

	INIT_WORK(&mhi_pdev->recovery_work, mhi_pci_recovery_work);
	timer_setup(&mhi_pdev->health_check_timer, health_check, 0);

	mhi_cntrl_config = info->config;
	mhi_cntrl = &mhi_pdev->mhi_cntrl;

	/* DMA window is bounded by the device's addressing capability */
	mhi_cntrl->cntrl_dev = &pdev->dev;
	mhi_cntrl->iova_start = 0;
	mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(info->dma_data_width);
	mhi_cntrl->fw_image = info->fw;
	mhi_cntrl->edl_image = info->edl;

	mhi_cntrl->read_reg = mhi_pci_read_reg;
	mhi_cntrl->write_reg = mhi_pci_write_reg;
	mhi_cntrl->status_cb = mhi_pci_status_cb;
	mhi_cntrl->runtime_get = mhi_pci_runtime_get;
	mhi_cntrl->runtime_put = mhi_pci_runtime_put;
	mhi_cntrl->mru = info->mru_default;

	/* Sideband-wake devices get no-op inband wake hooks */
	if (info->sideband_wake) {
		mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
		mhi_cntrl->wake_put = mhi_pci_wake_put_nop;
		mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
	}

	err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
	if (err)
		return err;

	err = mhi_pci_get_irqs(mhi_cntrl, mhi_cntrl_config);
	if (err)
		return err;

	pci_set_drvdata(pdev, mhi_pdev);

	/* Have stored pci confspace at hand for restore in sudden PCI error.
	 * cache the state locally and discard the PCI core one.
	 */
	pci_save_state(pdev);
	mhi_pdev->pci_state = pci_store_saved_state(pdev);
	pci_load_saved_state(pdev, NULL);

	err = mhi_register_controller(mhi_cntrl, mhi_cntrl_config);
	if (err)
		return err;

	/* MHI bus does not power up the controller by default */
	err = mhi_prepare_for_power_up(mhi_cntrl);
	if (err) {
		dev_err(&pdev->dev, "failed to prepare MHI controller\n");
		goto err_unregister;
	}

	err = mhi_sync_power_up(mhi_cntrl);
	if (err) {
		dev_err(&pdev->dev, "failed to power up MHI controller\n");
		goto err_unprepare;
	}

	set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);

	/* start health check */
	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);

	/* Only allow runtime-suspend if PME capable (for wakeup) */
	if (pci_pme_capable(pdev, PCI_D3hot)) {
		pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
		pm_runtime_use_autosuspend(&pdev->dev);
		pm_runtime_mark_last_busy(&pdev->dev);
		pm_runtime_put_noidle(&pdev->dev);
	}

	return 0;

err_unprepare:
	mhi_unprepare_after_power_down(mhi_cntrl);
err_unregister:
	mhi_unregister_controller(mhi_cntrl);

	return err;
}
960 
/* Driver unbind: stop async monitors, power the MHI stack down (gracefully)
 * and unregister the controller. Mirrors the setup performed in probe.
 */
static void mhi_pci_remove(struct pci_dev *pdev)
{
	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;

	/* Stop health polling and any in-flight recovery before teardown so
	 * they cannot touch the controller while we dismantle it.
	 */
	del_timer_sync(&mhi_pdev->health_check_timer);
	cancel_work_sync(&mhi_pdev->recovery_work);

	/* Only power down if the device was actually started (bit cleared
	 * atomically so this runs at most once against other teardown paths).
	 */
	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
		mhi_power_down(mhi_cntrl, true);
		mhi_unprepare_after_power_down(mhi_cntrl);
	}

	/* balancing probe put_noidle (probe only dropped the usage count when
	 * the device was PME capable, so re-take it under the same condition)
	 */
	if (pci_pme_capable(pdev, PCI_D3hot))
		pm_runtime_get_noresume(&pdev->dev);

	mhi_unregister_controller(mhi_cntrl);
}
980 
/* System shutdown/reboot: reuse the full remove teardown, then park the
 * function in D3hot so it is quiesced across the reboot.
 */
static void mhi_pci_shutdown(struct pci_dev *pdev)
{
	mhi_pci_remove(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
}
986 
/* Called by the PCI core before a function/bus reset: tear the MHI stack
 * down and trigger the device's internal SoC reset so it comes back in a
 * clean state (mhi_pci_reset_done() rebuilds everything afterwards).
 */
static void mhi_pci_reset_prepare(struct pci_dev *pdev)
{
	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;

	dev_info(&pdev->dev, "reset\n");

	/* Stop health polling while the device goes through reset; no _sync
	 * variant needed here, the handler only queues recovery work.
	 */
	del_timer(&mhi_pdev->health_check_timer);

	/* Clean up MHI state */
	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
		/* Non-graceful (false) power down: device is being reset anyway */
		mhi_power_down(mhi_cntrl, false);
		mhi_unprepare_after_power_down(mhi_cntrl);
	}

	/* cause internal device reset */
	mhi_soc_reset(mhi_cntrl);

	/* Be sure device reset has been executed */
	msleep(MHI_POST_RESET_DELAY_MS);
}
1008 
/* Called by the PCI core after the reset: restore the config space snapshot
 * taken at probe time, then bring the MHI stack back up. On any failure the
 * device is simply left down (STARTED bit stays cleared).
 */
static void mhi_pci_reset_done(struct pci_dev *pdev)
{
	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
	int err;

	/* Restore initial known working PCI state */
	pci_load_saved_state(pdev, mhi_pdev->pci_state);
	pci_restore_state(pdev);

	/* Is device status available ? */
	if (!mhi_pci_is_alive(mhi_cntrl)) {
		dev_err(&pdev->dev, "reset failed\n");
		return;
	}

	err = mhi_prepare_for_power_up(mhi_cntrl);
	if (err) {
		dev_err(&pdev->dev, "failed to prepare MHI controller\n");
		return;
	}

	err = mhi_sync_power_up(mhi_cntrl);
	if (err) {
		dev_err(&pdev->dev, "failed to power up MHI controller\n");
		/* undo mhi_prepare_for_power_up() above */
		mhi_unprepare_after_power_down(mhi_cntrl);
		return;
	}

	/* Device is functional again: mark started and resume health polling */
	set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
}
1041 
1042 static pci_ers_result_t mhi_pci_error_detected(struct pci_dev *pdev,
1043 					       pci_channel_state_t state)
1044 {
1045 	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
1046 	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
1047 
1048 	dev_err(&pdev->dev, "PCI error detected, state = %u\n", state);
1049 
1050 	if (state == pci_channel_io_perm_failure)
1051 		return PCI_ERS_RESULT_DISCONNECT;
1052 
1053 	/* Clean up MHI state */
1054 	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
1055 		mhi_power_down(mhi_cntrl, false);
1056 		mhi_unprepare_after_power_down(mhi_cntrl);
1057 	} else {
1058 		/* Nothing to do */
1059 		return PCI_ERS_RESULT_RECOVERED;
1060 	}
1061 
1062 	pci_disable_device(pdev);
1063 
1064 	return PCI_ERS_RESULT_NEED_RESET;
1065 }
1066 
1067 static pci_ers_result_t mhi_pci_slot_reset(struct pci_dev *pdev)
1068 {
1069 	if (pci_enable_device(pdev)) {
1070 		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset.\n");
1071 		return PCI_ERS_RESULT_DISCONNECT;
1072 	}
1073 
1074 	return PCI_ERS_RESULT_RECOVERED;
1075 }
1076 
1077 static void mhi_pci_io_resume(struct pci_dev *pdev)
1078 {
1079 	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
1080 
1081 	dev_err(&pdev->dev, "PCI slot reset done\n");
1082 
1083 	queue_work(system_long_wq, &mhi_pdev->recovery_work);
1084 }
1085 
1086 static const struct pci_error_handlers mhi_pci_err_handler = {
1087 	.error_detected = mhi_pci_error_detected,
1088 	.slot_reset = mhi_pci_slot_reset,
1089 	.resume = mhi_pci_io_resume,
1090 	.reset_prepare = mhi_pci_reset_prepare,
1091 	.reset_done = mhi_pci_reset_done,
1092 };
1093 
/* Runtime (and, via mhi_pci_suspend(), system) suspend: move the MHI device
 * to the M3 low-power state and put the PCI function into a wakeup-capable
 * D3 state. The SUSPENDED status bit makes this idempotent against the
 * system-sleep wrapper calling in after a runtime suspend already happened.
 */
static int  __maybe_unused mhi_pci_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
	int err;

	/* Already suspended: nothing to do */
	if (test_and_set_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
		return 0;

	/* Quiesce the async monitors before touching MHI PM state */
	del_timer(&mhi_pdev->health_check_timer);
	cancel_work_sync(&mhi_pdev->recovery_work);

	/* Skip the MHI-level transition when the stack is down or the device
	 * is not in the mission-mode (AMSS) execution environment.
	 */
	if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
			mhi_cntrl->ee != MHI_EE_AMSS)
		goto pci_suspend; /* Nothing to do at MHI level */

	/* Transition to M3 state */
	err = mhi_pm_suspend(mhi_cntrl);
	if (err) {
		dev_err(&pdev->dev, "failed to suspend device: %d\n", err);
		/* Roll back the SUSPENDED bit so a later attempt can retry */
		clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status);
		return -EBUSY;
	}

pci_suspend:
	pci_disable_device(pdev);
	pci_wake_from_d3(pdev, true);

	return 0;
}
1125 
/* Runtime (and system) resume: re-enable the PCI function and bring the MHI
 * device from M3 back to M0. Never returns an error — failures are handed to
 * the asynchronous recovery work instead (see comment at err_recovery).
 */
static int __maybe_unused mhi_pci_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
	int err;

	/* Not suspended: nothing to undo */
	if (!test_and_clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
		return 0;

	err = pci_enable_device(pdev);
	if (err)
		goto err_recovery;

	pci_set_master(pdev);
	pci_wake_from_d3(pdev, false);

	/* Mirror of the suspend-side check: no MHI transition needed if the
	 * stack is down or the device is not in mission mode (AMSS).
	 */
	if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
			mhi_cntrl->ee != MHI_EE_AMSS)
		return 0; /* Nothing to do at MHI level */

	/* Exit M3, transition to M0 state */
	err = mhi_pm_resume(mhi_cntrl);
	if (err) {
		dev_err(&pdev->dev, "failed to resume device: %d\n", err);
		goto err_recovery;
	}

	/* Resume health check */
	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);

	/* It can be a remote wakeup (no mhi runtime_get), update access time */
	pm_runtime_mark_last_busy(dev);

	return 0;

err_recovery:
	/* Do not fail to not mess up our PCI device state, the device likely
	 * lost power (d3cold) and we simply need to reset it from the recovery
	 * procedure, trigger the recovery asynchronously to prevent system
	 * suspend exit delaying.
	 */
	queue_work(system_long_wq, &mhi_pdev->recovery_work);
	pm_runtime_mark_last_busy(dev);

	return 0;
}
1173 
/* System sleep entry: disable runtime PM callbacks first, then force the
 * shared runtime-suspend path (which is a no-op if the device is already
 * runtime-suspended, thanks to the SUSPENDED status bit).
 */
static int  __maybe_unused mhi_pci_suspend(struct device *dev)
{
	pm_runtime_disable(dev);
	return mhi_pci_runtime_suspend(dev);
}
1179 
/* System sleep exit: resume through the shared runtime path, then re-enable
 * runtime PM (balancing the pm_runtime_disable() in mhi_pci_suspend()).
 */
static int __maybe_unused mhi_pci_resume(struct device *dev)
{
	int ret;

	/* Depending the platform, device may have lost power (d3cold), we need
	 * to resume it now to check its state and recover when necessary.
	 */
	ret = mhi_pci_runtime_resume(dev);
	pm_runtime_enable(dev);

	return ret;
}
1192 
/* Hibernation freeze/poweroff: fully power the MHI stack down rather than
 * suspending it, since the restored kernel cannot rely on the device state.
 */
static int __maybe_unused mhi_pci_freeze(struct device *dev)
{
	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;

	/* We want to stop all operations, hibernation does not guarantee that
	 * device will be in the same state as before freezing, especially if
	 * the intermediate restore kernel reinitializes MHI device with new
	 * context.
	 */
	flush_work(&mhi_pdev->recovery_work);
	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
		/* Graceful (true) power down, then release MHI resources */
		mhi_power_down(mhi_cntrl, true);
		mhi_unprepare_after_power_down(mhi_cntrl);
	}

	return 0;
}
1211 
/* Hibernation restore/thaw: the device was powered down in mhi_pci_freeze(),
 * so hand re-initialization to the asynchronous recovery work.
 */
static int __maybe_unused mhi_pci_restore(struct device *dev)
{
	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);

	/* Reinitialize the device */
	queue_work(system_long_wq, &mhi_pdev->recovery_work);

	return 0;
}
1221 
/* Power-management callbacks: runtime PM plus system sleep / hibernation */
static const struct dev_pm_ops mhi_pci_pm_ops = {
	SET_RUNTIME_PM_OPS(mhi_pci_runtime_suspend, mhi_pci_runtime_resume, NULL)
#ifdef CONFIG_PM_SLEEP
	.suspend = mhi_pci_suspend,
	.resume = mhi_pci_resume,
	/* Hibernation powers the stack down (freeze) and rebuilds it through
	 * recovery work (restore); thaw needs the same rebuild since freeze
	 * already tore the MHI stack down.
	 */
	.freeze = mhi_pci_freeze,
	.thaw = mhi_pci_restore,
	.poweroff = mhi_pci_freeze,
	.restore = mhi_pci_restore,
#endif
};
1233 
1234 static struct pci_driver mhi_pci_driver = {
1235 	.name		= "mhi-pci-generic",
1236 	.id_table	= mhi_pci_id_table,
1237 	.probe		= mhi_pci_probe,
1238 	.remove		= mhi_pci_remove,
1239 	.shutdown	= mhi_pci_shutdown,
1240 	.err_handler	= &mhi_pci_err_handler,
1241 	.driver.pm	= &mhi_pci_pm_ops
1242 };
1243 module_pci_driver(mhi_pci_driver);
1244 
1245 MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
1246 MODULE_DESCRIPTION("Modem Host Interface (MHI) PCI controller driver");
1247 MODULE_LICENSE("GPL");
1248