xref: /openbmc/linux/drivers/misc/mei/hw-me.c (revision ae3473231e77a3f1909d48cd144cebe5e1d049b3)
1 /*
2  *
3  * Intel Management Engine Interface (Intel MEI) Linux driver
4  * Copyright (c) 2003-2012, Intel Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  */
16 
17 #include <linux/pci.h>
18 
19 #include <linux/kthread.h>
20 #include <linux/interrupt.h>
21 #include <linux/pm_runtime.h>
22 
23 #include "mei_dev.h"
24 #include "hbm.h"
25 
26 #include "hw-me.h"
27 #include "hw-me-regs.h"
28 
29 #include "mei-trace.h"
30 
31 /**
32  * mei_me_reg_read - Reads 32bit data from the mei device
33  *
34  * @hw: the me hardware structure
35  * @offset: offset from which to read the data
36  *
37  * Return: register value (u32)
38  */
39 static inline u32 mei_me_reg_read(const struct mei_me_hw *hw,
40 			       unsigned long offset)
41 {
42 	return ioread32(hw->mem_addr + offset);
43 }
44 
45 
46 /**
47  * mei_me_reg_write - Writes 32bit data to the mei device
48  *
49  * @hw: the me hardware structure
50  * @offset: offset from which to write the data
51  * @value: register value to write (u32)
52  */
53 static inline void mei_me_reg_write(const struct mei_me_hw *hw,
54 				 unsigned long offset, u32 value)
55 {
56 	iowrite32(value, hw->mem_addr + offset);
57 }
58 
59 /**
60  * mei_me_mecbrw_read - Reads 32bit data from ME circular buffer
61  *  read window register
62  *
63  * @dev: the device structure
64  *
65  * Return: ME_CB_RW register value (u32)
66  */
67 static inline u32 mei_me_mecbrw_read(const struct mei_device *dev)
68 {
69 	return mei_me_reg_read(to_me_hw(dev), ME_CB_RW);
70 }
71 
72 /**
73  * mei_me_hcbww_write - write 32bit data to the host circular buffer
74  *
75  * @dev: the device structure
76  * @data: 32bit data to be written to the host circular buffer
77  */
78 static inline void mei_me_hcbww_write(struct mei_device *dev, u32 data)
79 {
80 	mei_me_reg_write(to_me_hw(dev), H_CB_WW, data);
81 }
82 
83 /**
84  * mei_me_mecsr_read - Reads 32bit data from the ME CSR
85  *
86  * @dev: the device structure
87  *
88  * Return: ME_CSR_HA register value (u32)
89  */
90 static inline u32 mei_me_mecsr_read(const struct mei_device *dev)
91 {
92 	u32 reg;
93 
94 	reg = mei_me_reg_read(to_me_hw(dev), ME_CSR_HA);
95 	trace_mei_reg_read(dev->dev, "ME_CSR_HA", ME_CSR_HA, reg);
96 
97 	return reg;
98 }
99 
100 /**
101  * mei_hcsr_read - Reads 32bit data from the host CSR
102  *
103  * @dev: the device structure
104  *
105  * Return: H_CSR register value (u32)
106  */
107 static inline u32 mei_hcsr_read(const struct mei_device *dev)
108 {
109 	u32 reg;
110 
111 	reg = mei_me_reg_read(to_me_hw(dev), H_CSR);
112 	trace_mei_reg_read(dev->dev, "H_CSR", H_CSR, reg);
113 
114 	return reg;
115 }
116 
117 /**
118  * mei_hcsr_write - writes H_CSR register to the mei device
119  *
120  * @dev: the device structure
121  * @reg: new register value
122  */
123 static inline void mei_hcsr_write(struct mei_device *dev, u32 reg)
124 {
125 	trace_mei_reg_write(dev->dev, "H_CSR", H_CSR, reg);
126 	mei_me_reg_write(to_me_hw(dev), H_CSR, reg);
127 }
128 
129 /**
130  * mei_hcsr_set - writes H_CSR register to the mei device,
131  * and ignores the H_IS bit for it is write-one-to-zero.
132  *
133  * @dev: the device structure
134  * @reg: new register value
135  */
136 static inline void mei_hcsr_set(struct mei_device *dev, u32 reg)
137 {
138 	reg &= ~H_CSR_IS_MASK;
139 	mei_hcsr_write(dev, reg);
140 }
141 
142 /**
143  * mei_me_d0i3c_read - Reads 32bit data from the D0I3C register
144  *
145  * @dev: the device structure
146  *
147  * Return: H_D0I3C register value (u32)
148  */
149 static inline u32 mei_me_d0i3c_read(const struct mei_device *dev)
150 {
151 	u32 reg;
152 
153 	reg = mei_me_reg_read(to_me_hw(dev), H_D0I3C);
154 	trace_mei_reg_read(dev->dev, "H_D0I3C", H_D0I3C, reg);
155 
156 	return reg;
157 }
158 
159 /**
160  * mei_me_d0i3c_write - writes H_D0I3C register to device
161  *
162  * @dev: the device structure
163  * @reg: new register value
164  */
165 static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg)
166 {
167 	trace_mei_reg_write(dev->dev, "H_D0I3C", H_D0I3C, reg);
168 	mei_me_reg_write(to_me_hw(dev), H_D0I3C, reg);
169 }
170 
171 /**
172  * mei_me_fw_status - read fw status register from pci config space
173  *
174  * @dev: mei device
175  * @fw_status: fw status register values
176  *
177  * Return: 0 on success, error otherwise
178  */
179 static int mei_me_fw_status(struct mei_device *dev,
180 			    struct mei_fw_status *fw_status)
181 {
182 	struct pci_dev *pdev = to_pci_dev(dev->dev);
183 	struct mei_me_hw *hw = to_me_hw(dev);
184 	const struct mei_fw_status *fw_src = &hw->cfg->fw_status;
185 	int ret;
186 	int i;
187 
188 	if (!fw_status)
189 		return -EINVAL;
190 
191 	fw_status->count = fw_src->count;
192 	for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
193 		ret = pci_read_config_dword(pdev, fw_src->status[i],
194 					    &fw_status->status[i]);
195 		trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HSF_X",
196 				       fw_src->status[i],
197 				       fw_status->status[i]);
198 		if (ret)
199 			return ret;
200 	}
201 
202 	return 0;
203 }
204 
205 /**
206  * mei_me_hw_config - configure hw dependent settings
207  *
208  * @dev: mei device
209  */
210 static void mei_me_hw_config(struct mei_device *dev)
211 {
212 	struct pci_dev *pdev = to_pci_dev(dev->dev);
213 	struct mei_me_hw *hw = to_me_hw(dev);
214 	u32 hcsr, reg;
215 
216 	/* Doesn't change in runtime */
217 	hcsr = mei_hcsr_read(dev);
218 	dev->hbuf_depth = (hcsr & H_CBD) >> 24;
219 
220 	reg = 0;
221 	pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
222 	trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
223 	hw->d0i3_supported =
224 		((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK);
225 
226 	hw->pg_state = MEI_PG_OFF;
227 	if (hw->d0i3_supported) {
228 		reg = mei_me_d0i3c_read(dev);
229 		if (reg & H_D0I3C_I3)
230 			hw->pg_state = MEI_PG_ON;
231 	}
232 }
233 
234 /**
235  * mei_me_pg_state  - translate internal pg state
236  *   to the mei power gating state
237  *
238  * @dev:  mei device
239  *
240  * Return: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise
241  */
242 static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
243 {
244 	struct mei_me_hw *hw = to_me_hw(dev);
245 
246 	return hw->pg_state;
247 }
248 
/**
 * me_intr_src - extract the interrupt source bits from a host CSR value
 *
 * @hcsr: H_CSR register value
 *
 * Return: the H_CSR_IS_MASK bits of @hcsr; non-zero means an interrupt
 *	source is pending
 */
static inline u32 me_intr_src(u32 hcsr)
{
	return hcsr & H_CSR_IS_MASK;
}
253 
254 /**
255  * me_intr_disable - disables mei device interrupts
256  *      using supplied hcsr register value.
257  *
258  * @dev: the device structure
259  * @hcsr: supplied hcsr register value
260  */
261 static inline void me_intr_disable(struct mei_device *dev, u32 hcsr)
262 {
263 	hcsr &= ~H_CSR_IE_MASK;
264 	mei_hcsr_set(dev, hcsr);
265 }
266 
/**
 * me_intr_clear - clear and stop interrupts
 *
 * @dev: the device structure
 * @hcsr: supplied hcsr register value
 */
static inline void me_intr_clear(struct mei_device *dev, u32 hcsr)
{
	/* H_CSR_IS bits are write-one-to-clear: writing @hcsr back acks
	 * the pending interrupt sources; skip the write when none are set
	 */
	if (me_intr_src(hcsr))
		mei_hcsr_write(dev, hcsr);
}
278 
/**
 * mei_me_intr_clear - clear and stop interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_clear(struct mei_device *dev)
{
	me_intr_clear(dev, mei_hcsr_read(dev));
}
290 /**
291  * mei_me_intr_enable - enables mei device interrupts
292  *
293  * @dev: the device structure
294  */
295 static void mei_me_intr_enable(struct mei_device *dev)
296 {
297 	u32 hcsr = mei_hcsr_read(dev);
298 
299 	hcsr |= H_CSR_IE_MASK;
300 	mei_hcsr_set(dev, hcsr);
301 }
302 
/**
 * mei_me_intr_disable - disables mei device interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_disable(struct mei_device *dev)
{
	me_intr_disable(dev, mei_hcsr_read(dev));
}
314 
315 /**
316  * mei_me_synchronize_irq - wait for pending IRQ handlers
317  *
318  * @dev: the device structure
319  */
320 static void mei_me_synchronize_irq(struct mei_device *dev)
321 {
322 	struct pci_dev *pdev = to_pci_dev(dev->dev);
323 
324 	synchronize_irq(pdev->irq);
325 }
326 
327 /**
328  * mei_me_hw_reset_release - release device from the reset
329  *
330  * @dev: the device structure
331  */
332 static void mei_me_hw_reset_release(struct mei_device *dev)
333 {
334 	u32 hcsr = mei_hcsr_read(dev);
335 
336 	hcsr |= H_IG;
337 	hcsr &= ~H_RST;
338 	mei_hcsr_set(dev, hcsr);
339 
340 	/* complete this write before we set host ready on another CPU */
341 	mmiowb();
342 }
343 
344 /**
345  * mei_me_host_set_ready - enable device
346  *
347  * @dev: mei device
348  */
349 static void mei_me_host_set_ready(struct mei_device *dev)
350 {
351 	u32 hcsr = mei_hcsr_read(dev);
352 
353 	hcsr |= H_CSR_IE_MASK | H_IG | H_RDY;
354 	mei_hcsr_set(dev, hcsr);
355 }
356 
357 /**
358  * mei_me_host_is_ready - check whether the host has turned ready
359  *
360  * @dev: mei device
361  * Return: bool
362  */
363 static bool mei_me_host_is_ready(struct mei_device *dev)
364 {
365 	u32 hcsr = mei_hcsr_read(dev);
366 
367 	return (hcsr & H_RDY) == H_RDY;
368 }
369 
370 /**
371  * mei_me_hw_is_ready - check whether the me(hw) has turned ready
372  *
373  * @dev: mei device
374  * Return: bool
375  */
376 static bool mei_me_hw_is_ready(struct mei_device *dev)
377 {
378 	u32 mecsr = mei_me_mecsr_read(dev);
379 
380 	return (mecsr & ME_RDY_HRA) == ME_RDY_HRA;
381 }
382 
/**
 * mei_me_hw_ready_wait - wait until the me(hw) has turned ready
 *  or timeout is reached
 *
 * @dev: mei device
 * Return: 0 on success, -ETIME if the hw did not signal ready in time
 */
static int mei_me_hw_ready_wait(struct mei_device *dev)
{
	/* drop the device lock while sleeping so the interrupt thread can
	 * take it and set recvd_hw_ready / wake wait_hw_ready
	 */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_hw_ready,
			dev->recvd_hw_ready,
			mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
	mutex_lock(&dev->device_lock);
	if (!dev->recvd_hw_ready) {
		dev_err(dev->dev, "wait hw ready failed\n");
		return -ETIME;
	}

	/* hw is ready: release the reset bit and consume the event */
	mei_me_hw_reset_release(dev);
	dev->recvd_hw_ready = false;
	return 0;
}
406 
407 /**
408  * mei_me_hw_start - hw start routine
409  *
410  * @dev: mei device
411  * Return: 0 on success, error otherwise
412  */
413 static int mei_me_hw_start(struct mei_device *dev)
414 {
415 	int ret = mei_me_hw_ready_wait(dev);
416 
417 	if (ret)
418 		return ret;
419 	dev_dbg(dev->dev, "hw is ready\n");
420 
421 	mei_me_host_set_ready(dev);
422 	return ret;
423 }
424 
425 
426 /**
427  * mei_hbuf_filled_slots - gets number of device filled buffer slots
428  *
429  * @dev: the device structure
430  *
431  * Return: number of filled slots
432  */
433 static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
434 {
435 	u32 hcsr;
436 	char read_ptr, write_ptr;
437 
438 	hcsr = mei_hcsr_read(dev);
439 
440 	read_ptr = (char) ((hcsr & H_CBRP) >> 8);
441 	write_ptr = (char) ((hcsr & H_CBWP) >> 16);
442 
443 	return (unsigned char) (write_ptr - read_ptr);
444 }
445 
446 /**
447  * mei_me_hbuf_is_empty - checks if host buffer is empty.
448  *
449  * @dev: the device structure
450  *
451  * Return: true if empty, false - otherwise.
452  */
453 static bool mei_me_hbuf_is_empty(struct mei_device *dev)
454 {
455 	return mei_hbuf_filled_slots(dev) == 0;
456 }
457 
458 /**
459  * mei_me_hbuf_empty_slots - counts write empty slots.
460  *
461  * @dev: the device structure
462  *
463  * Return: -EOVERFLOW if overflow, otherwise empty slots count
464  */
465 static int mei_me_hbuf_empty_slots(struct mei_device *dev)
466 {
467 	unsigned char filled_slots, empty_slots;
468 
469 	filled_slots = mei_hbuf_filled_slots(dev);
470 	empty_slots = dev->hbuf_depth - filled_slots;
471 
472 	/* check for overflow */
473 	if (filled_slots > dev->hbuf_depth)
474 		return -EOVERFLOW;
475 
476 	return empty_slots;
477 }
478 
479 /**
480  * mei_me_hbuf_max_len - returns size of hw buffer.
481  *
482  * @dev: the device structure
483  *
484  * Return: size of hw buffer in bytes
485  */
486 static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
487 {
488 	return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr);
489 }
490 
491 
492 /**
493  * mei_me_hbuf_write - writes a message to host hw buffer.
494  *
495  * @dev: the device structure
496  * @header: mei HECI header of message
497  * @buf: message payload will be written
498  *
499  * Return: -EIO if write has failed
500  */
501 static int mei_me_hbuf_write(struct mei_device *dev,
502 			     struct mei_msg_hdr *header,
503 			     const unsigned char *buf)
504 {
505 	unsigned long rem;
506 	unsigned long length = header->length;
507 	u32 *reg_buf = (u32 *)buf;
508 	u32 hcsr;
509 	u32 dw_cnt;
510 	int i;
511 	int empty_slots;
512 
513 	dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header));
514 
515 	empty_slots = mei_hbuf_empty_slots(dev);
516 	dev_dbg(dev->dev, "empty slots = %hu.\n", empty_slots);
517 
518 	dw_cnt = mei_data2slots(length);
519 	if (empty_slots < 0 || dw_cnt > empty_slots)
520 		return -EMSGSIZE;
521 
522 	mei_me_hcbww_write(dev, *((u32 *) header));
523 
524 	for (i = 0; i < length / 4; i++)
525 		mei_me_hcbww_write(dev, reg_buf[i]);
526 
527 	rem = length & 0x3;
528 	if (rem > 0) {
529 		u32 reg = 0;
530 
531 		memcpy(&reg, &buf[length - rem], rem);
532 		mei_me_hcbww_write(dev, reg);
533 	}
534 
535 	hcsr = mei_hcsr_read(dev) | H_IG;
536 	mei_hcsr_set(dev, hcsr);
537 	if (!mei_me_hw_is_ready(dev))
538 		return -EIO;
539 
540 	return 0;
541 }
542 
543 /**
544  * mei_me_count_full_read_slots - counts read full slots.
545  *
546  * @dev: the device structure
547  *
548  * Return: -EOVERFLOW if overflow, otherwise filled slots count
549  */
550 static int mei_me_count_full_read_slots(struct mei_device *dev)
551 {
552 	u32 me_csr;
553 	char read_ptr, write_ptr;
554 	unsigned char buffer_depth, filled_slots;
555 
556 	me_csr = mei_me_mecsr_read(dev);
557 	buffer_depth = (unsigned char)((me_csr & ME_CBD_HRA) >> 24);
558 	read_ptr = (char) ((me_csr & ME_CBRP_HRA) >> 8);
559 	write_ptr = (char) ((me_csr & ME_CBWP_HRA) >> 16);
560 	filled_slots = (unsigned char) (write_ptr - read_ptr);
561 
562 	/* check for overflow */
563 	if (filled_slots > buffer_depth)
564 		return -EOVERFLOW;
565 
566 	dev_dbg(dev->dev, "filled_slots =%08x\n", filled_slots);
567 	return (int)filled_slots;
568 }
569 
570 /**
571  * mei_me_read_slots - reads a message from mei device.
572  *
573  * @dev: the device structure
574  * @buffer: message buffer will be written
575  * @buffer_length: message size will be read
576  *
577  * Return: always 0
578  */
579 static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
580 		    unsigned long buffer_length)
581 {
582 	u32 *reg_buf = (u32 *)buffer;
583 	u32 hcsr;
584 
585 	for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
586 		*reg_buf++ = mei_me_mecbrw_read(dev);
587 
588 	if (buffer_length > 0) {
589 		u32 reg = mei_me_mecbrw_read(dev);
590 
591 		memcpy(reg_buf, &reg, buffer_length);
592 	}
593 
594 	hcsr = mei_hcsr_read(dev) | H_IG;
595 	mei_hcsr_set(dev, hcsr);
596 	return 0;
597 }
598 
599 /**
600  * mei_me_pg_set - write pg enter register
601  *
602  * @dev: the device structure
603  */
604 static void mei_me_pg_set(struct mei_device *dev)
605 {
606 	struct mei_me_hw *hw = to_me_hw(dev);
607 	u32 reg;
608 
609 	reg = mei_me_reg_read(hw, H_HPG_CSR);
610 	trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
611 
612 	reg |= H_HPG_CSR_PGI;
613 
614 	trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
615 	mei_me_reg_write(hw, H_HPG_CSR, reg);
616 }
617 
618 /**
619  * mei_me_pg_unset - write pg exit register
620  *
621  * @dev: the device structure
622  */
623 static void mei_me_pg_unset(struct mei_device *dev)
624 {
625 	struct mei_me_hw *hw = to_me_hw(dev);
626 	u32 reg;
627 
628 	reg = mei_me_reg_read(hw, H_HPG_CSR);
629 	trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
630 
631 	WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n");
632 
633 	reg |= H_HPG_CSR_PGIHEXR;
634 
635 	trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
636 	mei_me_reg_write(hw, H_HPG_CSR, reg);
637 }
638 
/**
 * mei_me_pg_legacy_enter_sync - perform legacy pg entry procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;

	dev->pg_event = MEI_PG_EVENT_WAIT;

	/* request power gating isolation entry from the fw */
	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
	if (ret)
		return ret;

	/* drop the lock while waiting; the interrupt thread sets
	 * pg_event to MEI_PG_EVENT_RECEIVED and wakes wait_pg
	 */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
		/* fw agreed: latch the pg-enter bit in hw */
		mei_me_pg_set(dev);
		ret = 0;
	} else {
		ret = -ETIME;
	}

	/* pg_state is marked ON even on timeout — the exit or reset
	 * path resynchronizes with the hw
	 */
	dev->pg_event = MEI_PG_EVENT_IDLE;
	hw->pg_state = MEI_PG_ON;

	return ret;
}
675 
/**
 * mei_me_pg_legacy_exit_sync - perform legacy pg exit procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_pg_legacy_exit_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;

	/* the fw may have initiated the exit already */
	if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
		goto reply;

	dev->pg_event = MEI_PG_EVENT_WAIT;

	/* clear the pg bit in hw and wait for the fw exit request */
	mei_me_pg_unset(dev);

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

reply:
	if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		ret = -ETIME;
		goto out;
	}

	/* acknowledge the exit to the fw and wait for its interrupt */
	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
	if (ret)
		/* NOTE(review): this early return leaves pg_event at
		 * MEI_PG_EVENT_INTR_WAIT and pg_state unchanged — confirm
		 * the reset path restores them
		 */
		return ret;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED)
		ret = 0;
	else
		ret = -ETIME;

out:
	dev->pg_event = MEI_PG_EVENT_IDLE;
	hw->pg_state = MEI_PG_OFF;

	return ret;
}
728 
/**
 * mei_me_pg_in_transition - is device now in pg transition
 *
 * @dev: the device structure
 *
 * Return: true if in pg transition, false otherwise
 */
static bool mei_me_pg_in_transition(struct mei_device *dev)
{
	/* any event in [WAIT, INTR_WAIT] means a pg handshake is in
	 * flight; relies on the declaration order of enum mei_pg_event
	 */
	return dev->pg_event >= MEI_PG_EVENT_WAIT &&
	       dev->pg_event <= MEI_PG_EVENT_INTR_WAIT;
}
741 
742 /**
743  * mei_me_pg_is_enabled - detect if PG is supported by HW
744  *
745  * @dev: the device structure
746  *
747  * Return: true is pg supported, false otherwise
748  */
749 static bool mei_me_pg_is_enabled(struct mei_device *dev)
750 {
751 	struct mei_me_hw *hw = to_me_hw(dev);
752 	u32 reg = mei_me_mecsr_read(dev);
753 
754 	if (hw->d0i3_supported)
755 		return true;
756 
757 	if ((reg & ME_PGIC_HRA) == 0)
758 		goto notsupported;
759 
760 	if (!dev->hbm_f_pg_supported)
761 		goto notsupported;
762 
763 	return true;
764 
765 notsupported:
766 	dev_dbg(dev->dev, "pg: not supported: d0i3 = %d HGP = %d hbm version %d.%d ?= %d.%d\n",
767 		hw->d0i3_supported,
768 		!!(reg & ME_PGIC_HRA),
769 		dev->version.major_version,
770 		dev->version.minor_version,
771 		HBM_MAJOR_VERSION_PGI,
772 		HBM_MINOR_VERSION_PGI);
773 
774 	return false;
775 }
776 
777 /**
778  * mei_me_d0i3_set - write d0i3 register bit on mei device.
779  *
780  * @dev: the device structure
781  * @intr: ask for interrupt
782  *
783  * Return: D0I3C register value
784  */
785 static u32 mei_me_d0i3_set(struct mei_device *dev, bool intr)
786 {
787 	u32 reg = mei_me_d0i3c_read(dev);
788 
789 	reg |= H_D0I3C_I3;
790 	if (intr)
791 		reg |= H_D0I3C_IR;
792 	else
793 		reg &= ~H_D0I3C_IR;
794 	mei_me_d0i3c_write(dev, reg);
795 	/* read it to ensure HW consistency */
796 	reg = mei_me_d0i3c_read(dev);
797 	return reg;
798 }
799 
800 /**
801  * mei_me_d0i3_unset - clean d0i3 register bit on mei device.
802  *
803  * @dev: the device structure
804  *
805  * Return: D0I3C register value
806  */
807 static u32 mei_me_d0i3_unset(struct mei_device *dev)
808 {
809 	u32 reg = mei_me_d0i3c_read(dev);
810 
811 	reg &= ~H_D0I3C_I3;
812 	reg |= H_D0I3C_IR;
813 	mei_me_d0i3c_write(dev, reg);
814 	/* read it to ensure HW consistency */
815 	reg = mei_me_d0i3c_read(dev);
816 	return reg;
817 }
818 
/**
 * mei_me_d0i3_enter_sync - perform d0i3 entry procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_d0i3_enter_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long d0i3_timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
	unsigned long pgi_timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;
	u32 reg;

	reg = mei_me_d0i3c_read(dev);
	if (reg & H_D0I3C_I3) {
		/* we are in d0i3, nothing to do */
		dev_dbg(dev->dev, "d0i3 set not needed\n");
		ret = 0;
		goto on;
	}

	/* PGI entry procedure */
	dev->pg_event = MEI_PG_EVENT_WAIT;

	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
	if (ret)
		/* FIXME: should we reset here? */
		goto out;

	/* the interrupt thread sets pg_event to RECEIVED on fw reply */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, pgi_timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		ret = -ETIME;
		goto out;
	}
	/* end PGI entry procedure */

	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;

	/* set the d0i3 bit and ask the hw for a completion interrupt */
	reg = mei_me_d0i3_set(dev, true);
	if (!(reg & H_D0I3C_CIP)) {
		/* no command in progress: the transition completed at once */
		dev_dbg(dev->dev, "d0i3 enter wait not needed\n");
		ret = 0;
		goto on;
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, d0i3_timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
		/* no interrupt seen — check the register directly before
		 * declaring a timeout
		 */
		reg = mei_me_d0i3c_read(dev);
		if (!(reg & H_D0I3C_I3)) {
			ret = -ETIME;
			goto out;
		}
	}

	ret = 0;
on:
	hw->pg_state = MEI_PG_ON;
out:
	dev->pg_event = MEI_PG_EVENT_IDLE;
	dev_dbg(dev->dev, "d0i3 enter ret = %d\n", ret);
	return ret;
}
891 
892 /**
893  * mei_me_d0i3_enter - perform d0i3 entry procedure
894  *   no hbm PG handshake
895  *   no waiting for confirmation; runs with interrupts
896  *   disabled
897  *
898  * @dev: the device structure
899  *
900  * Return: 0 on success an error code otherwise
901  */
902 static int mei_me_d0i3_enter(struct mei_device *dev)
903 {
904 	struct mei_me_hw *hw = to_me_hw(dev);
905 	u32 reg;
906 
907 	reg = mei_me_d0i3c_read(dev);
908 	if (reg & H_D0I3C_I3) {
909 		/* we are in d0i3, nothing to do */
910 		dev_dbg(dev->dev, "already d0i3 : set not needed\n");
911 		goto on;
912 	}
913 
914 	mei_me_d0i3_set(dev, false);
915 on:
916 	hw->pg_state = MEI_PG_ON;
917 	dev->pg_event = MEI_PG_EVENT_IDLE;
918 	dev_dbg(dev->dev, "d0i3 enter\n");
919 	return 0;
920 }
921 
/**
 * mei_me_d0i3_exit_sync - perform d0i3 exit procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_d0i3_exit_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
	int ret;
	u32 reg;

	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;

	reg = mei_me_d0i3c_read(dev);
	if (!(reg & H_D0I3C_I3)) {
		/* we are not in d0i3, nothing to do */
		dev_dbg(dev->dev, "d0i3 exit not needed\n");
		ret = 0;
		goto off;
	}

	/* clear the d0i3 bit; interrupt was requested via H_D0I3C_IR */
	reg = mei_me_d0i3_unset(dev);
	if (!(reg & H_D0I3C_CIP)) {
		/* no command in progress: the transition completed at once */
		dev_dbg(dev->dev, "d0i3 exit wait not needed\n");
		ret = 0;
		goto off;
	}

	/* the interrupt thread sets pg_event to INTR_RECEIVED */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
		/* no interrupt seen — check the register directly before
		 * declaring a timeout
		 */
		reg = mei_me_d0i3c_read(dev);
		if (reg & H_D0I3C_I3) {
			ret = -ETIME;
			goto out;
		}
	}

	ret = 0;
off:
	hw->pg_state = MEI_PG_OFF;
out:
	dev->pg_event = MEI_PG_EVENT_IDLE;

	dev_dbg(dev->dev, "d0i3 exit ret = %d\n", ret);
	return ret;
}
975 
976 /**
977  * mei_me_pg_legacy_intr - perform legacy pg processing
978  *			   in interrupt thread handler
979  *
980  * @dev: the device structure
981  */
982 static void mei_me_pg_legacy_intr(struct mei_device *dev)
983 {
984 	struct mei_me_hw *hw = to_me_hw(dev);
985 
986 	if (dev->pg_event != MEI_PG_EVENT_INTR_WAIT)
987 		return;
988 
989 	dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
990 	hw->pg_state = MEI_PG_OFF;
991 	if (waitqueue_active(&dev->wait_pg))
992 		wake_up(&dev->wait_pg);
993 }
994 
/**
 * mei_me_d0i3_intr - perform d0i3 processing in interrupt thread handler
 *
 * @dev: the device structure
 * @intr_source: interrupt source
 */
static void mei_me_d0i3_intr(struct mei_device *dev, u32 intr_source)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	/* a d0i3 transition we were waiting on has completed:
	 * toggle the tracked pg state and wake the waiter
	 */
	if (dev->pg_event == MEI_PG_EVENT_INTR_WAIT &&
	    (intr_source & H_D0I3C_IS)) {
		dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
		if (hw->pg_state == MEI_PG_ON) {
			hw->pg_state = MEI_PG_OFF;
			if (dev->hbm_state != MEI_HBM_IDLE) {
				/*
				 * force H_RDY because it could be
				 * wiped off during PG
				 */
				dev_dbg(dev->dev, "d0i3 set host ready\n");
				mei_me_host_set_ready(dev);
			}
		} else {
			hw->pg_state = MEI_PG_ON;
		}

		wake_up(&dev->wait_pg);
	}

	if (hw->pg_state == MEI_PG_ON && (intr_source & H_IS)) {
		/*
		 * HW sent some data and we are in D0i3, so
		 * we got here because of HW initiated exit from D0i3.
		 * Start runtime pm resume sequence to exit low power state.
		 */
		dev_dbg(dev->dev, "d0i3 want resume\n");
		mei_hbm_pg_resume(dev);
	}
}
1035 
1036 /**
1037  * mei_me_pg_intr - perform pg processing in interrupt thread handler
1038  *
1039  * @dev: the device structure
1040  * @intr_source: interrupt source
1041  */
1042 static void mei_me_pg_intr(struct mei_device *dev, u32 intr_source)
1043 {
1044 	struct mei_me_hw *hw = to_me_hw(dev);
1045 
1046 	if (hw->d0i3_supported)
1047 		mei_me_d0i3_intr(dev, intr_source);
1048 	else
1049 		mei_me_pg_legacy_intr(dev);
1050 }
1051 
1052 /**
1053  * mei_me_pg_enter_sync - perform runtime pm entry procedure
1054  *
1055  * @dev: the device structure
1056  *
1057  * Return: 0 on success an error code otherwise
1058  */
1059 int mei_me_pg_enter_sync(struct mei_device *dev)
1060 {
1061 	struct mei_me_hw *hw = to_me_hw(dev);
1062 
1063 	if (hw->d0i3_supported)
1064 		return mei_me_d0i3_enter_sync(dev);
1065 	else
1066 		return mei_me_pg_legacy_enter_sync(dev);
1067 }
1068 
1069 /**
1070  * mei_me_pg_exit_sync - perform runtime pm exit procedure
1071  *
1072  * @dev: the device structure
1073  *
1074  * Return: 0 on success an error code otherwise
1075  */
1076 int mei_me_pg_exit_sync(struct mei_device *dev)
1077 {
1078 	struct mei_me_hw *hw = to_me_hw(dev);
1079 
1080 	if (hw->d0i3_supported)
1081 		return mei_me_d0i3_exit_sync(dev);
1082 	else
1083 		return mei_me_pg_legacy_exit_sync(dev);
1084 }
1085 
/**
 * mei_me_hw_reset - resets fw via mei csr register.
 *
 * @dev: the device structure
 * @intr_enable: if interrupt should be enabled after reset.
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	int ret;
	u32 hcsr;

	if (intr_enable) {
		mei_me_intr_enable(dev);
		/* d0i3 must be left before the hw can be reset */
		if (hw->d0i3_supported) {
			ret = mei_me_d0i3_exit_sync(dev);
			if (ret)
				return ret;
		}
	}

	pm_runtime_set_active(dev->dev);

	hcsr = mei_hcsr_read(dev);
	/* H_RST may be found lit before reset is started,
	 * for example if preceding reset flow hasn't completed.
	 * In that case asserting H_RST will be ignored, therefore
	 * we need to clean H_RST bit to start a successful reset sequence.
	 */
	if ((hcsr & H_RST) == H_RST) {
		dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
		hcsr &= ~H_RST;
		mei_hcsr_set(dev, hcsr);
		hcsr = mei_hcsr_read(dev);
	}

	/* assert reset; writing H_CSR_IS_MASK acks pending interrupts */
	hcsr |= H_RST | H_IG | H_CSR_IS_MASK;

	if (!intr_enable)
		hcsr &= ~H_CSR_IE_MASK;

	dev->recvd_hw_ready = false;
	mei_hcsr_write(dev, hcsr);

	/*
	 * Host reads the H_CSR once to ensure that the
	 * posted write to H_CSR completes.
	 */
	hcsr = mei_hcsr_read(dev);

	if ((hcsr & H_RST) == 0)
		dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);

	if ((hcsr & H_RDY) == H_RDY)
		dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr);

	if (!intr_enable) {
		/* no interrupt will ack the reset: release it here and
		 * park the hw back in d0i3 when supported
		 */
		mei_me_hw_reset_release(dev);
		if (hw->d0i3_supported) {
			ret = mei_me_d0i3_enter(dev);
			if (ret)
				return ret;
		}
	}
	return 0;
}
1154 
1155 /**
1156  * mei_me_irq_quick_handler - The ISR of the MEI device
1157  *
1158  * @irq: The irq number
1159  * @dev_id: pointer to the device structure
1160  *
1161  * Return: irqreturn_t
1162  */
1163 irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
1164 {
1165 	struct mei_device *dev = (struct mei_device *)dev_id;
1166 	u32 hcsr;
1167 
1168 	hcsr = mei_hcsr_read(dev);
1169 	if (!me_intr_src(hcsr))
1170 		return IRQ_NONE;
1171 
1172 	dev_dbg(dev->dev, "interrupt source 0x%08X\n", me_intr_src(hcsr));
1173 
1174 	/* disable interrupts on device */
1175 	me_intr_disable(dev, hcsr);
1176 	return IRQ_WAKE_THREAD;
1177 }
1178 
/**
 * mei_me_irq_thread_handler - function called after ISR to handle the interrupt
 * processing.
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * Return: irqreturn_t
 *
 */
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *) dev_id;
	struct mei_cl_cb complete_list;
	s32 slots;
	u32 hcsr;
	int rets = 0;

	dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n");
	/* initialize our complete list */
	mutex_lock(&dev->device_lock);

	/* ack the interrupt sources (write-one-to-clear) */
	hcsr = mei_hcsr_read(dev);
	me_intr_clear(dev, hcsr);

	mei_io_list_init(&complete_list);

	/* check if ME wants a reset */
	if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
		dev_warn(dev->dev, "FW not ready: resetting.\n");
		schedule_work(&dev->reset_work);
		goto end;
	}

	mei_me_pg_intr(dev, me_intr_src(hcsr));

	/*  check if we need to start the dev */
	if (!mei_host_is_ready(dev)) {
		if (mei_hw_is_ready(dev)) {
			dev_dbg(dev->dev, "we need to start the dev.\n");
			dev->recvd_hw_ready = true;
			wake_up(&dev->wait_hw_ready);
		} else {
			dev_dbg(dev->dev, "Spurious Interrupt\n");
		}
		goto end;
	}
	/* check slots available for reading */
	slots = mei_count_full_read_slots(dev);
	while (slots > 0) {
		dev_dbg(dev->dev, "slots to read = %08x\n", slots);
		rets = mei_irq_read_handler(dev, &complete_list, &slots);
		/* There is a race between ME write and interrupt delivery:
		 * Not all data is always available immediately after the
		 * interrupt, so try to read again on the next interrupt.
		 */
		if (rets == -ENODATA)
			break;

		if (rets && dev->dev_state != MEI_DEV_RESETTING) {
			dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n",
						rets);
			schedule_work(&dev->reset_work);
			goto end;
		}
	}

	dev->hbuf_is_ready = mei_hbuf_is_ready(dev);

	/*
	 * During PG handshake only allowed write is the replay to the
	 * PG exit message, so block calling write function
	 * if the pg event is in PG handshake
	 */
	if (dev->pg_event != MEI_PG_EVENT_WAIT &&
	    dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		rets = mei_irq_write_handler(dev, &complete_list);
		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
	}

	mei_irq_compl_handler(dev, &complete_list);

end:
	dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
	/* re-enable interrupts disabled by the quick handler */
	mei_me_intr_enable(dev);
	mutex_unlock(&dev->device_lock);
	return IRQ_HANDLED;
}
1267 
/*
 * mei_me_hw_ops - binds the generic mei layer to the ME hardware
 * specific implementations defined in this file.
 */
static const struct mei_hw_ops mei_me_hw_ops = {

	/* firmware status and power gating state */
	.fw_status = mei_me_fw_status,
	.pg_state  = mei_me_pg_state,

	.host_is_ready = mei_me_host_is_ready,

	/* hardware readiness, reset and configuration */
	.hw_is_ready = mei_me_hw_is_ready,
	.hw_reset = mei_me_hw_reset,
	.hw_config = mei_me_hw_config,
	.hw_start = mei_me_hw_start,

	.pg_in_transition = mei_me_pg_in_transition,
	.pg_is_enabled = mei_me_pg_is_enabled,

	/* interrupt management */
	.intr_clear = mei_me_intr_clear,
	.intr_enable = mei_me_intr_enable,
	.intr_disable = mei_me_intr_disable,
	.synchronize_irq = mei_me_synchronize_irq,

	/* host (write) circular buffer */
	.hbuf_free_slots = mei_me_hbuf_empty_slots,
	.hbuf_is_ready = mei_me_hbuf_is_empty,
	.hbuf_max_len = mei_me_hbuf_max_len,

	.write = mei_me_hbuf_write,

	/* ME (read) circular buffer */
	.rdbuf_full_slots = mei_me_count_full_read_slots,
	.read_hdr = mei_me_mecbrw_read,
	.read = mei_me_read_slots
};
1298 
1299 static bool mei_me_fw_type_nm(struct pci_dev *pdev)
1300 {
1301 	u32 reg;
1302 
1303 	pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg);
1304 	trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_2", PCI_CFG_HFS_2, reg);
1305 	/* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
1306 	return (reg & 0x600) == 0x200;
1307 }
1308 
/* config fragment: probe quirk rejecting devices that run NM firmware */
#define MEI_CFG_FW_NM                           \
	.quirk_probe = mei_me_fw_type_nm
1311 
1312 static bool mei_me_fw_type_sps(struct pci_dev *pdev)
1313 {
1314 	u32 reg;
1315 	unsigned int devfn;
1316 
1317 	/*
1318 	 * Read ME FW Status register to check for SPS Firmware
1319 	 * The SPS FW is only signaled in pci function 0
1320 	 */
1321 	devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
1322 	pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_1, &reg);
1323 	trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
1324 	/* if bits [19:16] = 15, running SPS Firmware */
1325 	return (reg & 0xf0000) == 0xf0000;
1326 }
1327 
/* config fragment: probe quirk rejecting devices that run SPS firmware */
#define MEI_CFG_FW_SPS                           \
	.quirk_probe = mei_me_fw_type_sps


/* no fw status registers exposed in PCI config space */
#define MEI_CFG_LEGACY_HFS                      \
	.fw_status.count = 0

/* one fw status register: HFS_1 */
#define MEI_CFG_ICH_HFS                        \
	.fw_status.count = 1,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1

/* two fw status registers: HFS_1 and HFS_2 */
#define MEI_CFG_PCH_HFS                         \
	.fw_status.count = 2,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1,   \
	.fw_status.status[1] = PCI_CFG_HFS_2

/* six fw status registers: HFS_1 through HFS_6 */
#define MEI_CFG_PCH8_HFS                        \
	.fw_status.count = 6,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1,   \
	.fw_status.status[1] = PCI_CFG_HFS_2,   \
	.fw_status.status[2] = PCI_CFG_HFS_3,   \
	.fw_status.status[3] = PCI_CFG_HFS_4,   \
	.fw_status.status[4] = PCI_CFG_HFS_5,   \
	.fw_status.status[5] = PCI_CFG_HFS_6
1352 
/* ICH Legacy devices - no fw status registers */
const struct mei_cfg mei_me_legacy_cfg = {
	MEI_CFG_LEGACY_HFS,
};

/* ICH devices - one fw status register */
const struct mei_cfg mei_me_ich_cfg = {
	MEI_CFG_ICH_HFS,
};

/* PCH devices - two fw status registers */
const struct mei_cfg mei_me_pch_cfg = {
	MEI_CFG_PCH_HFS,
};


/* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
	MEI_CFG_PCH_HFS,
	MEI_CFG_FW_NM,
};

/* PCH8 Lynx Point and newer devices - six fw status registers */
const struct mei_cfg mei_me_pch8_cfg = {
	MEI_CFG_PCH8_HFS,
};

/* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
const struct mei_cfg mei_me_pch8_sps_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_SPS,
};
1385 
1386 /**
1387  * mei_me_dev_init - allocates and initializes the mei device structure
1388  *
1389  * @pdev: The pci device structure
1390  * @cfg: per device generation config
1391  *
1392  * Return: The mei_device_device pointer on success, NULL on failure.
1393  */
1394 struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
1395 				   const struct mei_cfg *cfg)
1396 {
1397 	struct mei_device *dev;
1398 	struct mei_me_hw *hw;
1399 
1400 	dev = kzalloc(sizeof(struct mei_device) +
1401 			 sizeof(struct mei_me_hw), GFP_KERNEL);
1402 	if (!dev)
1403 		return NULL;
1404 	hw = to_me_hw(dev);
1405 
1406 	mei_device_init(dev, &pdev->dev, &mei_me_hw_ops);
1407 	hw->cfg = cfg;
1408 	return dev;
1409 }
1410 
1411