xref: /openbmc/linux/drivers/misc/mei/hw-me.c (revision 293d5b43)
/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <linux/pci.h>

#include <linux/kthread.h>
#include <linux/interrupt.h>

#include "mei_dev.h"
#include "hbm.h"

#include "hw-me.h"
#include "hw-me-regs.h"

#include "mei-trace.h"

/**
 * mei_me_reg_read - Reads 32bit data from the mei device
 *
 * @hw: the me hardware structure
 * @offset: offset from which to read the data
 *
 * Return: register value (u32)
 */
static inline u32 mei_me_reg_read(const struct mei_me_hw *hw,
			       unsigned long offset)
{
	return ioread32(hw->mem_addr + offset);
}


/**
 * mei_me_reg_write - Writes 32bit data to the mei device
 *
 * @hw: the me hardware structure
 * @offset: offset at which to write the data
 * @value: register value to write (u32)
 */
static inline void mei_me_reg_write(const struct mei_me_hw *hw,
				 unsigned long offset, u32 value)
{
	iowrite32(value, hw->mem_addr + offset);
}

/**
 * mei_me_mecbrw_read - Reads 32bit data from ME circular buffer
 *  read window register
 *
 * @dev: the device structure
 *
 * Return: ME_CB_RW register value (u32)
 */
static inline u32 mei_me_mecbrw_read(const struct mei_device *dev)
{
	return mei_me_reg_read(to_me_hw(dev), ME_CB_RW);
}

/**
 * mei_me_hcbww_write - write 32bit data to the host circular buffer
 *
 * @dev: the device structure
 * @data: 32bit data to be written to the host circular buffer
 */
static inline void mei_me_hcbww_write(struct mei_device *dev, u32 data)
{
	mei_me_reg_write(to_me_hw(dev), H_CB_WW, data);
}

/**
 * mei_me_mecsr_read - Reads 32bit data from the ME CSR
 *
 * @dev: the device structure
 *
 * Return: ME_CSR_HA register value (u32)
 */
static inline u32 mei_me_mecsr_read(const struct mei_device *dev)
{
	u32 reg;

	reg = mei_me_reg_read(to_me_hw(dev), ME_CSR_HA);
	trace_mei_reg_read(dev->dev, "ME_CSR_HA", ME_CSR_HA, reg);

	return reg;
}

/**
 * mei_hcsr_read - Reads 32bit data from the host CSR
 *
 * @dev: the device structure
 *
 * Return: H_CSR register value (u32)
 */
static inline u32 mei_hcsr_read(const struct mei_device *dev)
{
	u32 reg;

	reg = mei_me_reg_read(to_me_hw(dev), H_CSR);
	trace_mei_reg_read(dev->dev, "H_CSR", H_CSR, reg);

	return reg;
}

/**
 * mei_hcsr_write - writes H_CSR register to the mei device
 *
 * @dev: the device structure
 * @reg: new register value
 */
static inline void mei_hcsr_write(struct mei_device *dev, u32 reg)
{
	trace_mei_reg_write(dev->dev, "H_CSR", H_CSR, reg);
	mei_me_reg_write(to_me_hw(dev), H_CSR, reg);
}

/**
 * mei_hcsr_set - writes H_CSR register to the mei device,
 * masking out the H_IS bits, as they are write-one-to-clear
 * and would otherwise acknowledge pending interrupts.
 *
 * @dev: the device structure
 * @reg: new register value
 */
static inline void mei_hcsr_set(struct mei_device *dev, u32 reg)
{
	reg &= ~H_CSR_IS_MASK;
	mei_hcsr_write(dev, reg);
}
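
/*
 * The interrupt status bits in H_CSR are write-one-to-clear, so a naive
 * read-modify-write of H_CSR could acknowledge a pending interrupt by
 * accident. A minimal sketch of the update pattern used throughout this
 * file (illustrative only):
 *
 *	u32 hcsr = mei_hcsr_read(dev);
 *
 *	hcsr |= H_CSR_IE_MASK;
 *	mei_hcsr_set(dev, hcsr);
 *
 * mei_hcsr_set() strips H_CSR_IS_MASK before the write; mei_hcsr_write()
 * is used only where clearing the status bits is the intent.
 */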

/**
 * mei_me_d0i3c_read - Reads 32bit data from the D0I3C register
 *
 * @dev: the device structure
 *
 * Return: H_D0I3C register value (u32)
 */
static inline u32 mei_me_d0i3c_read(const struct mei_device *dev)
{
	u32 reg;

	reg = mei_me_reg_read(to_me_hw(dev), H_D0I3C);
	trace_mei_reg_read(dev->dev, "H_D0I3C", H_D0I3C, reg);

	return reg;
}

/**
 * mei_me_d0i3c_write - writes H_D0I3C register to device
 *
 * @dev: the device structure
 * @reg: new register value
 */
static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg)
{
	trace_mei_reg_write(dev->dev, "H_D0I3C", H_D0I3C, reg);
	mei_me_reg_write(to_me_hw(dev), H_D0I3C, reg);
}

/**
 * mei_me_fw_status - read fw status register from pci config space
 *
 * @dev: mei device
 * @fw_status: fw status register values
 *
 * Return: 0 on success, error otherwise
 */
static int mei_me_fw_status(struct mei_device *dev,
			    struct mei_fw_status *fw_status)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct mei_me_hw *hw = to_me_hw(dev);
	const struct mei_fw_status *fw_src = &hw->cfg->fw_status;
	int ret;
	int i;

	if (!fw_status)
		return -EINVAL;

	fw_status->count = fw_src->count;
	for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
		ret = pci_read_config_dword(pdev, fw_src->status[i],
					    &fw_status->status[i]);
		trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_X",
				       fw_src->status[i],
				       fw_status->status[i]);
		if (ret)
			return ret;
	}

	return 0;
}
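
/*
 * A hedged usage sketch (not driver code): the mei core reaches this
 * routine through the .fw_status op, and a caller could dump the status
 * words along these lines, assuming a valid mei_device:
 *
 *	struct mei_fw_status fw_status;
 *	int i, ret = mei_me_fw_status(dev, &fw_status);
 *
 *	if (!ret)
 *		for (i = 0; i < fw_status.count; i++)
 *			dev_dbg(dev->dev, "HFS_%d = 0x%08X\n",
 *				i + 1, fw_status.status[i]);
 */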

/**
 * mei_me_hw_config - configure hw dependent settings
 *
 * @dev: mei device
 */
static void mei_me_hw_config(struct mei_device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 hcsr, reg;

	/* Doesn't change at runtime */
	hcsr = mei_hcsr_read(dev);
	dev->hbuf_depth = (hcsr & H_CBD) >> 24;

	reg = 0;
	pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
	trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
	hw->d0i3_supported =
		((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK);

	hw->pg_state = MEI_PG_OFF;
	if (hw->d0i3_supported) {
		reg = mei_me_d0i3c_read(dev);
		if (reg & H_D0I3C_I3)
			hw->pg_state = MEI_PG_ON;
	}
}
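
/*
 * Worked example for the H_CBD extraction above: with hcsr = 0x20000808
 * the depth field (bits 31:24) gives (hcsr & H_CBD) >> 24 = 0x20, i.e. a
 * host circular buffer of 32 slots of 4 bytes each. The value is latched
 * once here because it does not change at runtime.
 */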

/**
 * mei_me_pg_state  - translate internal pg state
 *   to the mei power gating state
 *
 * @dev:  mei device
 *
 * Return: the current cached power gating state (hw->pg_state)
 */
static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	return hw->pg_state;
}

/**
 * mei_me_intr_clear - clear and stop interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_clear(struct mei_device *dev)
{
	u32 hcsr = mei_hcsr_read(dev);

	if (hcsr & H_CSR_IS_MASK)
		mei_hcsr_write(dev, hcsr);
}

/**
 * mei_me_intr_enable - enables mei device interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_enable(struct mei_device *dev)
{
	u32 hcsr = mei_hcsr_read(dev);

	hcsr |= H_CSR_IE_MASK;
	mei_hcsr_set(dev, hcsr);
}

/**
 * mei_me_intr_disable - disables mei device interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_disable(struct mei_device *dev)
{
	u32 hcsr = mei_hcsr_read(dev);

	hcsr &= ~H_CSR_IE_MASK;
	mei_hcsr_set(dev, hcsr);
}

/**
 * mei_me_hw_reset_release - release device from the reset
 *
 * @dev: the device structure
 */
static void mei_me_hw_reset_release(struct mei_device *dev)
{
	u32 hcsr = mei_hcsr_read(dev);

	hcsr |= H_IG;
	hcsr &= ~H_RST;
	mei_hcsr_set(dev, hcsr);

	/* complete this write before we set host ready on another CPU */
	mmiowb();
}

/**
 * mei_me_host_set_ready - enable device
 *
 * @dev: mei device
 */
static void mei_me_host_set_ready(struct mei_device *dev)
{
	u32 hcsr = mei_hcsr_read(dev);

	hcsr |= H_CSR_IE_MASK | H_IG | H_RDY;
	mei_hcsr_set(dev, hcsr);
}

/**
 * mei_me_host_is_ready - check whether the host has turned ready
 *
 * @dev: mei device
 * Return: true if H_RDY is set, false otherwise
 */
static bool mei_me_host_is_ready(struct mei_device *dev)
{
	u32 hcsr = mei_hcsr_read(dev);

	return (hcsr & H_RDY) == H_RDY;
}

/**
 * mei_me_hw_is_ready - check whether the me(hw) has turned ready
 *
 * @dev: mei device
 * Return: true if ME_RDY_HRA is set, false otherwise
 */
static bool mei_me_hw_is_ready(struct mei_device *dev)
{
	u32 mecsr = mei_me_mecsr_read(dev);

	return (mecsr & ME_RDY_HRA) == ME_RDY_HRA;
}

/**
 * mei_me_hw_ready_wait - wait until the me(hw) has turned ready
 *  or timeout is reached
 *
 * @dev: mei device
 * Return: 0 on success, error otherwise
 */
static int mei_me_hw_ready_wait(struct mei_device *dev)
{
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_hw_ready,
			dev->recvd_hw_ready,
			mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
	mutex_lock(&dev->device_lock);
	if (!dev->recvd_hw_ready) {
		dev_err(dev->dev, "wait hw ready failed\n");
		return -ETIME;
	}

	mei_me_hw_reset_release(dev);
	dev->recvd_hw_ready = false;
	return 0;
}

/**
 * mei_me_hw_start - hw start routine
 *
 * @dev: mei device
 * Return: 0 on success, error otherwise
 */
static int mei_me_hw_start(struct mei_device *dev)
{
	int ret = mei_me_hw_ready_wait(dev);

	if (ret)
		return ret;
	dev_dbg(dev->dev, "hw is ready\n");

	mei_me_host_set_ready(dev);
	return ret;
}


/**
 * mei_hbuf_filled_slots - gets number of device filled buffer slots
 *
 * @dev: the device structure
 *
 * Return: number of filled slots
 */
static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
{
	u32 hcsr;
	char read_ptr, write_ptr;

	hcsr = mei_hcsr_read(dev);

	read_ptr = (char) ((hcsr & H_CBRP) >> 8);
	write_ptr = (char) ((hcsr & H_CBWP) >> 16);

	return (unsigned char) (write_ptr - read_ptr);
}
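
/*
 * The read and write pointers above are free-running 8-bit counters, so
 * the signed-char subtraction stays correct across wraparound. Worked
 * example: with write_ptr = 0x02 and read_ptr = 0xfe,
 * (unsigned char)(0x02 - 0xfe) = 4 filled slots.
 */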

/**
 * mei_me_hbuf_is_empty - checks if host buffer is empty.
 *
 * @dev: the device structure
 *
 * Return: true if empty, false otherwise.
 */
static bool mei_me_hbuf_is_empty(struct mei_device *dev)
{
	return mei_hbuf_filled_slots(dev) == 0;
}

/**
 * mei_me_hbuf_empty_slots - counts write empty slots.
 *
 * @dev: the device structure
 *
 * Return: -EOVERFLOW if overflow, otherwise empty slots count
 */
static int mei_me_hbuf_empty_slots(struct mei_device *dev)
{
	unsigned char filled_slots, empty_slots;

	filled_slots = mei_hbuf_filled_slots(dev);
	empty_slots = dev->hbuf_depth - filled_slots;

	/* check for overflow */
	if (filled_slots > dev->hbuf_depth)
		return -EOVERFLOW;

	return empty_slots;
}

/**
 * mei_me_hbuf_max_len - returns size of hw buffer.
 *
 * @dev: the device structure
 *
 * Return: size of hw buffer in bytes
 */
static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
{
	return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr);
}
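
/*
 * Example: with dev->hbuf_depth = 32 the buffer holds 32 * 4 = 128 bytes;
 * sizeof(struct mei_msg_hdr) (4 bytes) is reserved for the header, which
 * leaves 124 bytes of payload per message.
 */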


/**
 * mei_me_write_message - writes a message to mei device.
 *
 * @dev: the device structure
 * @header: mei HECI header of message
 * @buf: message payload to be written
 *
 * Return: 0 on success, -EMSGSIZE if the message does not fit in the
 *   host buffer, -EIO if the device dropped from the ready state
 */
static int mei_me_write_message(struct mei_device *dev,
			struct mei_msg_hdr *header,
			unsigned char *buf)
{
	unsigned long rem;
	unsigned long length = header->length;
	u32 *reg_buf = (u32 *)buf;
	u32 hcsr;
	u32 dw_cnt;
	int i;
	int empty_slots;

	dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header));

	empty_slots = mei_hbuf_empty_slots(dev);
	dev_dbg(dev->dev, "empty slots = %d.\n", empty_slots);

	dw_cnt = mei_data2slots(length);
	if (empty_slots < 0 || dw_cnt > empty_slots)
		return -EMSGSIZE;

	mei_me_hcbww_write(dev, *((u32 *) header));

	for (i = 0; i < length / 4; i++)
		mei_me_hcbww_write(dev, reg_buf[i]);

	rem = length & 0x3;
	if (rem > 0) {
		u32 reg = 0;

		memcpy(&reg, &buf[length - rem], rem);
		mei_me_hcbww_write(dev, reg);
	}

	hcsr = mei_hcsr_read(dev) | H_IG;
	mei_hcsr_set(dev, hcsr);
	if (!mei_me_hw_is_ready(dev))
		return -EIO;

	return 0;
}
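
/*
 * A minimal usage sketch (illustrative, not driver code): sending a
 * complete message through this routine, assuming the caller holds
 * dev->device_lock and host_id/me_id identify a connected client pair
 * (both names are placeholders here):
 *
 *	struct mei_msg_hdr hdr = {
 *		.host_addr = host_id,
 *		.me_addr = me_id,
 *		.length = len,
 *		.msg_complete = 1,
 *	};
 *
 *	ret = mei_me_write_message(dev, &hdr, buf);
 */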

/**
 * mei_me_count_full_read_slots - counts read full slots.
 *
 * @dev: the device structure
 *
 * Return: -EOVERFLOW if overflow, otherwise filled slots count
 */
static int mei_me_count_full_read_slots(struct mei_device *dev)
{
	u32 me_csr;
	char read_ptr, write_ptr;
	unsigned char buffer_depth, filled_slots;

	me_csr = mei_me_mecsr_read(dev);
	buffer_depth = (unsigned char)((me_csr & ME_CBD_HRA) >> 24);
	read_ptr = (char) ((me_csr & ME_CBRP_HRA) >> 8);
	write_ptr = (char) ((me_csr & ME_CBWP_HRA) >> 16);
	filled_slots = (unsigned char) (write_ptr - read_ptr);

	/* check for overflow */
	if (filled_slots > buffer_depth)
		return -EOVERFLOW;

	dev_dbg(dev->dev, "filled_slots = %08x\n", filled_slots);
	return (int)filled_slots;
}

/**
 * mei_me_read_slots - reads a message from mei device.
 *
 * @dev: the device structure
 * @buffer: message buffer to be filled
 * @buffer_length: length of message to read in bytes
 *
 * Return: always 0
 */
static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
		    unsigned long buffer_length)
{
	u32 *reg_buf = (u32 *)buffer;
	u32 hcsr;

	for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
		*reg_buf++ = mei_me_mecbrw_read(dev);

	if (buffer_length > 0) {
		u32 reg = mei_me_mecbrw_read(dev);

		memcpy(reg_buf, &reg, buffer_length);
	}

	hcsr = mei_hcsr_read(dev) | H_IG;
	mei_hcsr_set(dev, hcsr);
	return 0;
}
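
/*
 * Read path sketch: the interrupt thread first pulls one slot with
 * mei_me_mecbrw_read() (wired up as the .read_hdr op below), interprets
 * it as the message header, and then drains hdr.length payload bytes:
 *
 *	u32 hdr = mei_me_mecbrw_read(dev);
 *	mei_me_read_slots(dev, buffer, length);
 *
 * A trailing 1-3 bytes are fetched as a full slot and copied partially.
 */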

/**
 * mei_me_pg_set - write pg enter register
 *
 * @dev: the device structure
 */
static void mei_me_pg_set(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg;

	reg = mei_me_reg_read(hw, H_HPG_CSR);
	trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);

	reg |= H_HPG_CSR_PGI;

	trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
	mei_me_reg_write(hw, H_HPG_CSR, reg);
}

/**
 * mei_me_pg_unset - write pg exit register
 *
 * @dev: the device structure
 */
static void mei_me_pg_unset(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg;

	reg = mei_me_reg_read(hw, H_HPG_CSR);
	trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);

	WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n");

	reg |= H_HPG_CSR_PGIHEXR;

	trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
	mei_me_reg_write(hw, H_HPG_CSR, reg);
}

/**
 * mei_me_pg_legacy_enter_sync - perform legacy pg entry procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success, an error code otherwise
 */
static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;

	dev->pg_event = MEI_PG_EVENT_WAIT;

	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
	if (ret)
		return ret;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
		mei_me_pg_set(dev);
		ret = 0;
	} else {
		ret = -ETIME;
	}

	dev->pg_event = MEI_PG_EVENT_IDLE;
	hw->pg_state = MEI_PG_ON;

	return ret;
}

/**
 * mei_me_pg_legacy_exit_sync - perform legacy pg exit procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success, an error code otherwise
 */
static int mei_me_pg_legacy_exit_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;

	if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
		goto reply;

	dev->pg_event = MEI_PG_EVENT_WAIT;

	mei_me_pg_unset(dev);

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

reply:
	if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		ret = -ETIME;
		goto out;
	}

	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
	if (ret)
		return ret;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED)
		ret = 0;
	else
		ret = -ETIME;

out:
	dev->pg_event = MEI_PG_EVENT_IDLE;
	hw->pg_state = MEI_PG_OFF;

	return ret;
}

/**
 * mei_me_pg_in_transition - is device now in pg transition
 *
 * @dev: the device structure
 *
 * Return: true if in pg transition, false otherwise
 */
static bool mei_me_pg_in_transition(struct mei_device *dev)
{
	return dev->pg_event >= MEI_PG_EVENT_WAIT &&
	       dev->pg_event <= MEI_PG_EVENT_INTR_WAIT;
}
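
/*
 * The range check above leans on the declaration order of enum
 * mei_pg_event in mei_dev.h, where the transitional states sit between
 * MEI_PG_EVENT_WAIT and MEI_PG_EVENT_INTR_WAIT, along these lines
 * (a sketch of the assumed layout; mei_dev.h is authoritative):
 *
 *	enum mei_pg_event {
 *		MEI_PG_EVENT_IDLE,
 *		MEI_PG_EVENT_WAIT,
 *		MEI_PG_EVENT_RECEIVED,
 *		MEI_PG_EVENT_INTR_WAIT,
 *		MEI_PG_EVENT_INTR_RECEIVED,
 *	};
 */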

/**
 * mei_me_pg_is_enabled - detect if PG is supported by HW
 *
 * @dev: the device structure
 *
 * Return: true if pg is supported, false otherwise
 */
static bool mei_me_pg_is_enabled(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg = mei_me_mecsr_read(dev);

	if (hw->d0i3_supported)
		return true;

	if ((reg & ME_PGIC_HRA) == 0)
		goto notsupported;

	if (!dev->hbm_f_pg_supported)
		goto notsupported;

	return true;

notsupported:
	dev_dbg(dev->dev, "pg: not supported: d0i3 = %d PGI = %d hbm version %d.%d ?= %d.%d\n",
		hw->d0i3_supported,
		!!(reg & ME_PGIC_HRA),
		dev->version.major_version,
		dev->version.minor_version,
		HBM_MAJOR_VERSION_PGI,
		HBM_MINOR_VERSION_PGI);

	return false;
}

/**
 * mei_me_d0i3_set - write d0i3 register bit on mei device.
 *
 * @dev: the device structure
 * @intr: ask for interrupt
 *
 * Return: D0I3C register value
 */
static u32 mei_me_d0i3_set(struct mei_device *dev, bool intr)
{
	u32 reg = mei_me_d0i3c_read(dev);

	reg |= H_D0I3C_I3;
	if (intr)
		reg |= H_D0I3C_IR;
	else
		reg &= ~H_D0I3C_IR;
	mei_me_d0i3c_write(dev, reg);
	/* read it back to ensure HW consistency */
	reg = mei_me_d0i3c_read(dev);
	return reg;
}

/**
 * mei_me_d0i3_unset - clear d0i3 register bit on mei device.
 *
 * @dev: the device structure
 *
 * Return: D0I3C register value
 */
static u32 mei_me_d0i3_unset(struct mei_device *dev)
{
	u32 reg = mei_me_d0i3c_read(dev);

	reg &= ~H_D0I3C_I3;
	reg |= H_D0I3C_IR;
	mei_me_d0i3c_write(dev, reg);
	/* read it back to ensure HW consistency */
	reg = mei_me_d0i3c_read(dev);
	return reg;
}

/**
 * mei_me_d0i3_enter_sync - perform d0i3 entry procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success, an error code otherwise
 */
static int mei_me_d0i3_enter_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long d0i3_timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
	unsigned long pgi_timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;
	u32 reg;

	reg = mei_me_d0i3c_read(dev);
	if (reg & H_D0I3C_I3) {
		/* we are in d0i3, nothing to do */
		dev_dbg(dev->dev, "d0i3 set not needed\n");
		ret = 0;
		goto on;
	}

	/* PGI entry procedure */
	dev->pg_event = MEI_PG_EVENT_WAIT;

	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
	if (ret)
		/* FIXME: should we reset here? */
		goto out;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, pgi_timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		ret = -ETIME;
		goto out;
	}
	/* end PGI entry procedure */

	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;

	reg = mei_me_d0i3_set(dev, true);
	if (!(reg & H_D0I3C_CIP)) {
		dev_dbg(dev->dev, "d0i3 enter wait not needed\n");
		ret = 0;
		goto on;
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, d0i3_timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
		reg = mei_me_d0i3c_read(dev);
		if (!(reg & H_D0I3C_I3)) {
			ret = -ETIME;
			goto out;
		}
	}

	ret = 0;
on:
	hw->pg_state = MEI_PG_ON;
out:
	dev->pg_event = MEI_PG_EVENT_IDLE;
	dev_dbg(dev->dev, "d0i3 enter ret = %d\n", ret);
	return ret;
}

/**
 * mei_me_d0i3_enter - perform d0i3 entry procedure
 *   no hbm PG handshake
 *   no waiting for confirmation; runs with interrupts
 *   disabled
 *
 * @dev: the device structure
 *
 * Return: always 0
 */
static int mei_me_d0i3_enter(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg;

	reg = mei_me_d0i3c_read(dev);
	if (reg & H_D0I3C_I3) {
		/* we are in d0i3, nothing to do */
		dev_dbg(dev->dev, "already d0i3 : set not needed\n");
		goto on;
	}

	mei_me_d0i3_set(dev, false);
on:
	hw->pg_state = MEI_PG_ON;
	dev->pg_event = MEI_PG_EVENT_IDLE;
	dev_dbg(dev->dev, "d0i3 enter\n");
	return 0;
}

/**
 * mei_me_d0i3_exit_sync - perform d0i3 exit procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success, an error code otherwise
 */
static int mei_me_d0i3_exit_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
	int ret;
	u32 reg;

	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;

	reg = mei_me_d0i3c_read(dev);
	if (!(reg & H_D0I3C_I3)) {
		/* we are not in d0i3, nothing to do */
		dev_dbg(dev->dev, "d0i3 exit not needed\n");
		ret = 0;
		goto off;
	}

	reg = mei_me_d0i3_unset(dev);
	if (!(reg & H_D0I3C_CIP)) {
		dev_dbg(dev->dev, "d0i3 exit wait not needed\n");
		ret = 0;
		goto off;
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
		reg = mei_me_d0i3c_read(dev);
		if (reg & H_D0I3C_I3) {
			ret = -ETIME;
			goto out;
		}
	}

	ret = 0;
off:
	hw->pg_state = MEI_PG_OFF;
out:
	dev->pg_event = MEI_PG_EVENT_IDLE;

	dev_dbg(dev->dev, "d0i3 exit ret = %d\n", ret);
	return ret;
}

/**
 * mei_me_pg_legacy_intr - perform legacy pg processing
 *			   in interrupt thread handler
 *
 * @dev: the device structure
 */
static void mei_me_pg_legacy_intr(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	if (dev->pg_event != MEI_PG_EVENT_INTR_WAIT)
		return;

	dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
	hw->pg_state = MEI_PG_OFF;
	if (waitqueue_active(&dev->wait_pg))
		wake_up(&dev->wait_pg);
}

/**
 * mei_me_d0i3_intr - perform d0i3 processing in interrupt thread handler
 *
 * @dev: the device structure
 */
static void mei_me_d0i3_intr(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	if (dev->pg_event == MEI_PG_EVENT_INTR_WAIT &&
	    (hw->intr_source & H_D0I3C_IS)) {
		dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
		if (hw->pg_state == MEI_PG_ON) {
			hw->pg_state = MEI_PG_OFF;
			if (dev->hbm_state != MEI_HBM_IDLE) {
				/*
				 * force H_RDY because it could be
				 * wiped off during PG
				 */
				dev_dbg(dev->dev, "d0i3 set host ready\n");
				mei_me_host_set_ready(dev);
			}
		} else {
			hw->pg_state = MEI_PG_ON;
		}

		wake_up(&dev->wait_pg);
	}

	if (hw->pg_state == MEI_PG_ON && (hw->intr_source & H_IS)) {
		/*
		 * HW sent some data and we are in D0i3, so
		 * we got here because of HW initiated exit from D0i3.
		 * Start runtime pm resume sequence to exit low power state.
		 */
		dev_dbg(dev->dev, "d0i3 want resume\n");
		mei_hbm_pg_resume(dev);
	}
}

/**
 * mei_me_pg_intr - perform pg processing in interrupt thread handler
 *
 * @dev: the device structure
 */
static void mei_me_pg_intr(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	if (hw->d0i3_supported)
		mei_me_d0i3_intr(dev);
	else
		mei_me_pg_legacy_intr(dev);
}

/**
 * mei_me_pg_enter_sync - perform runtime pm entry procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success, an error code otherwise
 */
int mei_me_pg_enter_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	if (hw->d0i3_supported)
		return mei_me_d0i3_enter_sync(dev);
	else
		return mei_me_pg_legacy_enter_sync(dev);
}

/**
 * mei_me_pg_exit_sync - perform runtime pm exit procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success, an error code otherwise
 */
int mei_me_pg_exit_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	if (hw->d0i3_supported)
		return mei_me_d0i3_exit_sync(dev);
	else
		return mei_me_pg_legacy_exit_sync(dev);
}

/**
 * mei_me_hw_reset - resets fw via mei csr register.
 *
 * @dev: the device structure
 * @intr_enable: if interrupt should be enabled after reset.
 *
 * Return: 0 on success, an error code otherwise
 */
static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	int ret;
	u32 hcsr;

	if (intr_enable) {
		mei_me_intr_enable(dev);
		if (hw->d0i3_supported) {
			ret = mei_me_d0i3_exit_sync(dev);
			if (ret)
				return ret;
		}
	}

	hcsr = mei_hcsr_read(dev);
	/* H_RST may be found lit before reset is started,
	 * for example if the preceding reset flow hasn't completed.
	 * In that case asserting H_RST will be ignored, therefore
	 * we need to clear the H_RST bit to start a successful reset sequence.
	 */
	if ((hcsr & H_RST) == H_RST) {
		dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
		hcsr &= ~H_RST;
		mei_hcsr_set(dev, hcsr);
		hcsr = mei_hcsr_read(dev);
	}

	hcsr |= H_RST | H_IG | H_CSR_IS_MASK;

	if (!intr_enable)
		hcsr &= ~H_CSR_IE_MASK;

	dev->recvd_hw_ready = false;
	mei_hcsr_write(dev, hcsr);

	/*
	 * Host reads the H_CSR once to ensure that the
	 * posted write to H_CSR completes.
	 */
	hcsr = mei_hcsr_read(dev);

	if ((hcsr & H_RST) == 0)
		dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);

	if ((hcsr & H_RDY) == H_RDY)
		dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr);

	if (!intr_enable) {
		mei_me_hw_reset_release(dev);
		if (hw->d0i3_supported) {
			ret = mei_me_d0i3_enter(dev);
			if (ret)
				return ret;
		}
	}
	return 0;
}
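
/*
 * The reset handshake at a glance (a sketch of the flow implemented by
 * this routine together with mei_me_hw_start()):
 *
 *	mei_me_hw_reset(dev, true);	assert H_RST, drop H_RDY
 *	mei_me_hw_ready_wait(dev);	ME raises ME_RDY_HRA and interrupts;
 *					wait for dev->recvd_hw_ready
 *	mei_me_hw_reset_release(dev);	deassert H_RST, raise H_IG
 *	mei_me_host_set_ready(dev);	advertise H_RDY back to the ME
 */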

/**
 * mei_me_irq_quick_handler - The ISR of the MEI device
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * Return: IRQ_WAKE_THREAD if the interrupt was ours, IRQ_NONE otherwise
 */
irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *)dev_id;
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 hcsr;

	hcsr = mei_hcsr_read(dev);
	if (!(hcsr & H_CSR_IS_MASK))
		return IRQ_NONE;

	hw->intr_source = hcsr & H_CSR_IS_MASK;
	dev_dbg(dev->dev, "interrupt source 0x%08X.\n", hw->intr_source);

	/* clear H_IS and H_D0I3C_IS bits in H_CSR to clear the interrupts */
	mei_hcsr_write(dev, hcsr);

	return IRQ_WAKE_THREAD;
}
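
/*
 * This handler is meant to be paired with mei_me_irq_thread_handler()
 * as a threaded interrupt, along these lines (a sketch; the actual
 * wiring lives in the PCI glue code):
 *
 *	ret = request_threaded_irq(pdev->irq,
 *			mei_me_irq_quick_handler,
 *			mei_me_irq_thread_handler,
 *			IRQF_SHARED, KBUILD_MODNAME, dev);
 *
 * The quick handler acknowledges the interrupt and latches its source in
 * hw->intr_source; the thread handler does the heavy lifting.
 */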

/**
 * mei_me_irq_thread_handler - thread handler called after the ISR
 *  to handle the interrupt processing.
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * Return: IRQ_HANDLED
 */
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *) dev_id;
	struct mei_cl_cb complete_list;
	s32 slots;
	int rets = 0;

	dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n");
	/* initialize our complete list */
	mutex_lock(&dev->device_lock);
	mei_io_list_init(&complete_list);

	/* check if ME wants a reset */
	if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
		dev_warn(dev->dev, "FW not ready: resetting.\n");
		schedule_work(&dev->reset_work);
		goto end;
	}

	mei_me_pg_intr(dev);

	/* check if we need to start the dev */
	if (!mei_host_is_ready(dev)) {
		if (mei_hw_is_ready(dev)) {
			dev_dbg(dev->dev, "we need to start the dev.\n");
			dev->recvd_hw_ready = true;
			wake_up(&dev->wait_hw_ready);
		} else {
			dev_dbg(dev->dev, "Spurious Interrupt\n");
		}
		goto end;
	}
	/* check slots available for reading */
	slots = mei_count_full_read_slots(dev);
	while (slots > 0) {
		dev_dbg(dev->dev, "slots to read = %08x\n", slots);
		rets = mei_irq_read_handler(dev, &complete_list, &slots);
		/* There is a race between ME write and interrupt delivery:
		 * Not all data is always available immediately after the
		 * interrupt, so try to read again on the next interrupt.
		 */
		if (rets == -ENODATA)
			break;

		if (rets && dev->dev_state != MEI_DEV_RESETTING) {
			dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n",
						rets);
			schedule_work(&dev->reset_work);
			goto end;
		}
	}

	dev->hbuf_is_ready = mei_hbuf_is_ready(dev);

	/*
	 * During the PG handshake the only allowed write is the reply to
	 * the PG exit message, so block calling the write function
	 * while the pg event is in the PG handshake
	 */
	if (dev->pg_event != MEI_PG_EVENT_WAIT &&
	    dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		rets = mei_irq_write_handler(dev, &complete_list);
		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
	}

	mei_irq_compl_handler(dev, &complete_list);

end:
	dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
	mutex_unlock(&dev->device_lock);
	return IRQ_HANDLED;
}

static const struct mei_hw_ops mei_me_hw_ops = {

	.fw_status = mei_me_fw_status,
	.pg_state  = mei_me_pg_state,

	.host_is_ready = mei_me_host_is_ready,

	.hw_is_ready = mei_me_hw_is_ready,
	.hw_reset = mei_me_hw_reset,
	.hw_config = mei_me_hw_config,
	.hw_start = mei_me_hw_start,

	.pg_in_transition = mei_me_pg_in_transition,
	.pg_is_enabled = mei_me_pg_is_enabled,

	.intr_clear = mei_me_intr_clear,
	.intr_enable = mei_me_intr_enable,
	.intr_disable = mei_me_intr_disable,

	.hbuf_free_slots = mei_me_hbuf_empty_slots,
	.hbuf_is_ready = mei_me_hbuf_is_empty,
	.hbuf_max_len = mei_me_hbuf_max_len,

	.write = mei_me_write_message,

	.rdbuf_full_slots = mei_me_count_full_read_slots,
	.read_hdr = mei_me_mecbrw_read,
	.read = mei_me_read_slots
};
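
/*
 * The mei core never calls the mei_me_* routines directly; it dispatches
 * through this ops table via thin wrappers, in the spirit of (assumed
 * wrapper, shown for illustration):
 *
 *	static inline int mei_hw_reset(struct mei_device *dev, bool enable)
 *	{
 *		return dev->ops->hw_reset(dev, enable);
 *	}
 *
 * which is what lets different hardware back ends share one core.
 */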

static bool mei_me_fw_type_nm(struct pci_dev *pdev)
{
	u32 reg;

	pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg);
	trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_2", PCI_CFG_HFS_2, reg);
	/* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
	return (reg & 0x600) == 0x200;
}

#define MEI_CFG_FW_NM                           \
	.quirk_probe = mei_me_fw_type_nm

static bool mei_me_fw_type_sps(struct pci_dev *pdev)
{
	u32 reg;

	/* Read ME FW Status register to check for SPS Firmware */
	pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
	trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
	/* if bits [19:16] = 15, running SPS Firmware */
	return (reg & 0xf0000) == 0xf0000;
}

#define MEI_CFG_FW_SPS                           \
	.quirk_probe = mei_me_fw_type_sps


#define MEI_CFG_LEGACY_HFS                      \
	.fw_status.count = 0

#define MEI_CFG_ICH_HFS                        \
	.fw_status.count = 1,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1

#define MEI_CFG_PCH_HFS                         \
	.fw_status.count = 2,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1,   \
	.fw_status.status[1] = PCI_CFG_HFS_2

#define MEI_CFG_PCH8_HFS                        \
	.fw_status.count = 6,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1,   \
	.fw_status.status[1] = PCI_CFG_HFS_2,   \
	.fw_status.status[2] = PCI_CFG_HFS_3,   \
	.fw_status.status[3] = PCI_CFG_HFS_4,   \
	.fw_status.status[4] = PCI_CFG_HFS_5,   \
	.fw_status.status[5] = PCI_CFG_HFS_6
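
/*
 * These macros are designated-initializer fragments; combined in a
 * struct mei_cfg definition they expand, for example, to:
 *
 *	const struct mei_cfg mei_me_pch8_sps_cfg = {
 *		.fw_status.count = 6,
 *		.fw_status.status[0] = PCI_CFG_HFS_1,
 *		...
 *		.fw_status.status[5] = PCI_CFG_HFS_6,
 *		.quirk_probe = mei_me_fw_type_sps,
 *	};
 */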

/* ICH Legacy devices */
const struct mei_cfg mei_me_legacy_cfg = {
	MEI_CFG_LEGACY_HFS,
};

/* ICH devices */
const struct mei_cfg mei_me_ich_cfg = {
	MEI_CFG_ICH_HFS,
};

/* PCH devices */
const struct mei_cfg mei_me_pch_cfg = {
	MEI_CFG_PCH_HFS,
};


/* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
	MEI_CFG_PCH_HFS,
	MEI_CFG_FW_NM,
};

/* PCH8 Lynx Point and newer devices */
const struct mei_cfg mei_me_pch8_cfg = {
	MEI_CFG_PCH8_HFS,
};

/* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
const struct mei_cfg mei_me_pch8_sps_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_SPS,
};

/**
 * mei_me_dev_init - allocates and initializes the mei device structure
 *
 * @pdev: The pci device structure
 * @cfg: per device generation config
 *
 * Return: The mei_device pointer on success, NULL on failure.
 */
struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
				   const struct mei_cfg *cfg)
{
	struct mei_device *dev;
	struct mei_me_hw *hw;

	dev = kzalloc(sizeof(struct mei_device) +
			 sizeof(struct mei_me_hw), GFP_KERNEL);
	if (!dev)
		return NULL;
	hw = to_me_hw(dev);

	mei_device_init(dev, &pdev->dev, &mei_me_hw_ops);
	hw->cfg = cfg;
	return dev;
}
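
/*
 * A usage sketch (illustrative; the real sequence lives in the PCI probe
 * path): allocate the combined device, map the registers, then hand
 * control to the mei core:
 *
 *	dev = mei_me_dev_init(pdev, cfg);
 *	if (!dev)
 *		return -ENOMEM;
 *	hw = to_me_hw(dev);
 *	hw->mem_addr = pcim_iomap_table(pdev)[0];
 *	...
 *	mei_start(dev);
 */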
1356