xref: /openbmc/linux/drivers/misc/mei/hw-me.c (revision af958a38)
1 /*
2  *
3  * Intel Management Engine Interface (Intel MEI) Linux driver
4  * Copyright (c) 2003-2012, Intel Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  */
16 
17 #include <linux/pci.h>
18 
19 #include <linux/kthread.h>
20 #include <linux/interrupt.h>
21 
22 #include "mei_dev.h"
23 #include "hbm.h"
24 
25 #include "hw-me.h"
26 #include "hw-me-regs.h"
27 
28 /**
29  * mei_me_reg_read - Reads 32bit data from the mei device
30  *
31  * @dev: the device structure
32  * @offset: offset from which to read the data
33  *
34  * returns register value (u32)
35  */
36 static inline u32 mei_me_reg_read(const struct mei_me_hw *hw,
37 			       unsigned long offset)
38 {
39 	return ioread32(hw->mem_addr + offset);
40 }
41 
42 
43 /**
44  * mei_me_reg_write - Writes 32bit data to the mei device
45  *
46  * @dev: the device structure
47  * @offset: offset from which to write the data
48  * @value: register value to write (u32)
49  */
50 static inline void mei_me_reg_write(const struct mei_me_hw *hw,
51 				 unsigned long offset, u32 value)
52 {
53 	iowrite32(value, hw->mem_addr + offset);
54 }
55 
56 /**
57  * mei_me_mecbrw_read - Reads 32bit data from ME circular buffer
58  *  read window register
59  *
60  * @dev: the device structure
61  *
62  * returns ME_CB_RW register value (u32)
63  */
64 static u32 mei_me_mecbrw_read(const struct mei_device *dev)
65 {
66 	return mei_me_reg_read(to_me_hw(dev), ME_CB_RW);
67 }
68 /**
69  * mei_me_mecsr_read - Reads 32bit data from the ME CSR
70  *
71  * @dev: the device structure
72  *
73  * returns ME_CSR_HA register value (u32)
74  */
75 static inline u32 mei_me_mecsr_read(const struct mei_me_hw *hw)
76 {
77 	return mei_me_reg_read(hw, ME_CSR_HA);
78 }
79 
80 /**
81  * mei_hcsr_read - Reads 32bit data from the host CSR
82  *
83  * @dev: the device structure
84  *
85  * returns H_CSR register value (u32)
86  */
87 static inline u32 mei_hcsr_read(const struct mei_me_hw *hw)
88 {
89 	return mei_me_reg_read(hw, H_CSR);
90 }
91 
92 /**
93  * mei_hcsr_set - writes H_CSR register to the mei device,
94  * and ignores the H_IS bit for it is write-one-to-zero.
95  *
96  * @dev: the device structure
97  */
98 static inline void mei_hcsr_set(struct mei_me_hw *hw, u32 hcsr)
99 {
100 	hcsr &= ~H_IS;
101 	mei_me_reg_write(hw, H_CSR, hcsr);
102 }
103 
104 
105 /**
106  * mei_me_hw_config - configure hw dependent settings
107  *
108  * @dev: mei device
109  */
110 static void mei_me_hw_config(struct mei_device *dev)
111 {
112 	struct mei_me_hw *hw = to_me_hw(dev);
113 	u32 hcsr = mei_hcsr_read(to_me_hw(dev));
114 	/* Doesn't change in runtime */
115 	dev->hbuf_depth = (hcsr & H_CBD) >> 24;
116 
117 	hw->pg_state = MEI_PG_OFF;
118 }
119 
120 /**
121  * mei_me_pg_state  - translate internal pg state
122  *   to the mei power gating state
123  *
124  * @hw -  me hardware
125  * returns: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise
126  */
127 static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
128 {
129 	struct mei_me_hw *hw = to_me_hw(dev);
130 	return hw->pg_state;
131 }
132 
133 /**
134  * mei_clear_interrupts - clear and stop interrupts
135  *
136  * @dev: the device structure
137  */
138 static void mei_me_intr_clear(struct mei_device *dev)
139 {
140 	struct mei_me_hw *hw = to_me_hw(dev);
141 	u32 hcsr = mei_hcsr_read(hw);
142 	if ((hcsr & H_IS) == H_IS)
143 		mei_me_reg_write(hw, H_CSR, hcsr);
144 }
145 /**
146  * mei_me_intr_enable - enables mei device interrupts
147  *
148  * @dev: the device structure
149  */
150 static void mei_me_intr_enable(struct mei_device *dev)
151 {
152 	struct mei_me_hw *hw = to_me_hw(dev);
153 	u32 hcsr = mei_hcsr_read(hw);
154 	hcsr |= H_IE;
155 	mei_hcsr_set(hw, hcsr);
156 }
157 
158 /**
159  * mei_disable_interrupts - disables mei device interrupts
160  *
161  * @dev: the device structure
162  */
163 static void mei_me_intr_disable(struct mei_device *dev)
164 {
165 	struct mei_me_hw *hw = to_me_hw(dev);
166 	u32 hcsr = mei_hcsr_read(hw);
167 	hcsr  &= ~H_IE;
168 	mei_hcsr_set(hw, hcsr);
169 }
170 
171 /**
172  * mei_me_hw_reset_release - release device from the reset
173  *
174  * @dev: the device structure
175  */
176 static void mei_me_hw_reset_release(struct mei_device *dev)
177 {
178 	struct mei_me_hw *hw = to_me_hw(dev);
179 	u32 hcsr = mei_hcsr_read(hw);
180 
181 	hcsr |= H_IG;
182 	hcsr &= ~H_RST;
183 	mei_hcsr_set(hw, hcsr);
184 
185 	/* complete this write before we set host ready on another CPU */
186 	mmiowb();
187 }
188 /**
189  * mei_me_hw_reset - resets fw via mei csr register.
190  *
191  * @dev: the device structure
192  * @intr_enable: if interrupt should be enabled after reset.
193  */
194 static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
195 {
196 	struct mei_me_hw *hw = to_me_hw(dev);
197 	u32 hcsr = mei_hcsr_read(hw);
198 
199 	hcsr |= H_RST | H_IG | H_IS;
200 
201 	if (intr_enable)
202 		hcsr |= H_IE;
203 	else
204 		hcsr &= ~H_IE;
205 
206 	dev->recvd_hw_ready = false;
207 	mei_me_reg_write(hw, H_CSR, hcsr);
208 
209 	/*
210 	 * Host reads the H_CSR once to ensure that the
211 	 * posted write to H_CSR completes.
212 	 */
213 	hcsr = mei_hcsr_read(hw);
214 
215 	if ((hcsr & H_RST) == 0)
216 		dev_warn(&dev->pdev->dev, "H_RST is not set = 0x%08X", hcsr);
217 
218 	if ((hcsr & H_RDY) == H_RDY)
219 		dev_warn(&dev->pdev->dev, "H_RDY is not cleared 0x%08X", hcsr);
220 
221 	if (intr_enable == false)
222 		mei_me_hw_reset_release(dev);
223 
224 	return 0;
225 }
226 
227 /**
228  * mei_me_host_set_ready - enable device
229  *
230  * @dev - mei device
231  * returns bool
232  */
233 
234 static void mei_me_host_set_ready(struct mei_device *dev)
235 {
236 	struct mei_me_hw *hw = to_me_hw(dev);
237 	hw->host_hw_state = mei_hcsr_read(hw);
238 	hw->host_hw_state |= H_IE | H_IG | H_RDY;
239 	mei_hcsr_set(hw, hw->host_hw_state);
240 }
241 /**
242  * mei_me_host_is_ready - check whether the host has turned ready
243  *
244  * @dev - mei device
245  * returns bool
246  */
247 static bool mei_me_host_is_ready(struct mei_device *dev)
248 {
249 	struct mei_me_hw *hw = to_me_hw(dev);
250 	hw->host_hw_state = mei_hcsr_read(hw);
251 	return (hw->host_hw_state & H_RDY) == H_RDY;
252 }
253 
254 /**
255  * mei_me_hw_is_ready - check whether the me(hw) has turned ready
256  *
257  * @dev - mei device
258  * returns bool
259  */
260 static bool mei_me_hw_is_ready(struct mei_device *dev)
261 {
262 	struct mei_me_hw *hw = to_me_hw(dev);
263 	hw->me_hw_state = mei_me_mecsr_read(hw);
264 	return (hw->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA;
265 }
266 
267 static int mei_me_hw_ready_wait(struct mei_device *dev)
268 {
269 	int err;
270 
271 	mutex_unlock(&dev->device_lock);
272 	err = wait_event_interruptible_timeout(dev->wait_hw_ready,
273 			dev->recvd_hw_ready,
274 			mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
275 	mutex_lock(&dev->device_lock);
276 	if (!err && !dev->recvd_hw_ready) {
277 		if (!err)
278 			err = -ETIME;
279 		dev_err(&dev->pdev->dev,
280 			"wait hw ready failed. status = %d\n", err);
281 		return err;
282 	}
283 
284 	dev->recvd_hw_ready = false;
285 	return 0;
286 }
287 
288 static int mei_me_hw_start(struct mei_device *dev)
289 {
290 	int ret = mei_me_hw_ready_wait(dev);
291 	if (ret)
292 		return ret;
293 	dev_dbg(&dev->pdev->dev, "hw is ready\n");
294 
295 	mei_me_host_set_ready(dev);
296 	return ret;
297 }
298 
299 
300 /**
301  * mei_hbuf_filled_slots - gets number of device filled buffer slots
302  *
303  * @dev: the device structure
304  *
305  * returns number of filled slots
306  */
307 static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
308 {
309 	struct mei_me_hw *hw = to_me_hw(dev);
310 	char read_ptr, write_ptr;
311 
312 	hw->host_hw_state = mei_hcsr_read(hw);
313 
314 	read_ptr = (char) ((hw->host_hw_state & H_CBRP) >> 8);
315 	write_ptr = (char) ((hw->host_hw_state & H_CBWP) >> 16);
316 
317 	return (unsigned char) (write_ptr - read_ptr);
318 }
319 
320 /**
321  * mei_me_hbuf_is_empty - checks if host buffer is empty.
322  *
323  * @dev: the device structure
324  *
325  * returns true if empty, false - otherwise.
326  */
327 static bool mei_me_hbuf_is_empty(struct mei_device *dev)
328 {
329 	return mei_hbuf_filled_slots(dev) == 0;
330 }
331 
332 /**
333  * mei_me_hbuf_empty_slots - counts write empty slots.
334  *
335  * @dev: the device structure
336  *
337  * returns -EOVERFLOW if overflow, otherwise empty slots count
338  */
339 static int mei_me_hbuf_empty_slots(struct mei_device *dev)
340 {
341 	unsigned char filled_slots, empty_slots;
342 
343 	filled_slots = mei_hbuf_filled_slots(dev);
344 	empty_slots = dev->hbuf_depth - filled_slots;
345 
346 	/* check for overflow */
347 	if (filled_slots > dev->hbuf_depth)
348 		return -EOVERFLOW;
349 
350 	return empty_slots;
351 }
352 
353 static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
354 {
355 	return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr);
356 }
357 
358 
359 /**
360  * mei_me_write_message - writes a message to mei device.
361  *
362  * @dev: the device structure
363  * @header: mei HECI header of message
364  * @buf: message payload will be written
365  *
366  * This function returns -EIO if write has failed
367  */
368 static int mei_me_write_message(struct mei_device *dev,
369 			struct mei_msg_hdr *header,
370 			unsigned char *buf)
371 {
372 	struct mei_me_hw *hw = to_me_hw(dev);
373 	unsigned long rem;
374 	unsigned long length = header->length;
375 	u32 *reg_buf = (u32 *)buf;
376 	u32 hcsr;
377 	u32 dw_cnt;
378 	int i;
379 	int empty_slots;
380 
381 	dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header));
382 
383 	empty_slots = mei_hbuf_empty_slots(dev);
384 	dev_dbg(&dev->pdev->dev, "empty slots = %hu.\n", empty_slots);
385 
386 	dw_cnt = mei_data2slots(length);
387 	if (empty_slots < 0 || dw_cnt > empty_slots)
388 		return -EMSGSIZE;
389 
390 	mei_me_reg_write(hw, H_CB_WW, *((u32 *) header));
391 
392 	for (i = 0; i < length / 4; i++)
393 		mei_me_reg_write(hw, H_CB_WW, reg_buf[i]);
394 
395 	rem = length & 0x3;
396 	if (rem > 0) {
397 		u32 reg = 0;
398 		memcpy(&reg, &buf[length - rem], rem);
399 		mei_me_reg_write(hw, H_CB_WW, reg);
400 	}
401 
402 	hcsr = mei_hcsr_read(hw) | H_IG;
403 	mei_hcsr_set(hw, hcsr);
404 	if (!mei_me_hw_is_ready(dev))
405 		return -EIO;
406 
407 	return 0;
408 }
409 
410 /**
411  * mei_me_count_full_read_slots - counts read full slots.
412  *
413  * @dev: the device structure
414  *
415  * returns -EOVERFLOW if overflow, otherwise filled slots count
416  */
417 static int mei_me_count_full_read_slots(struct mei_device *dev)
418 {
419 	struct mei_me_hw *hw = to_me_hw(dev);
420 	char read_ptr, write_ptr;
421 	unsigned char buffer_depth, filled_slots;
422 
423 	hw->me_hw_state = mei_me_mecsr_read(hw);
424 	buffer_depth = (unsigned char)((hw->me_hw_state & ME_CBD_HRA) >> 24);
425 	read_ptr = (char) ((hw->me_hw_state & ME_CBRP_HRA) >> 8);
426 	write_ptr = (char) ((hw->me_hw_state & ME_CBWP_HRA) >> 16);
427 	filled_slots = (unsigned char) (write_ptr - read_ptr);
428 
429 	/* check for overflow */
430 	if (filled_slots > buffer_depth)
431 		return -EOVERFLOW;
432 
433 	dev_dbg(&dev->pdev->dev, "filled_slots =%08x\n", filled_slots);
434 	return (int)filled_slots;
435 }
436 
437 /**
438  * mei_me_read_slots - reads a message from mei device.
439  *
440  * @dev: the device structure
441  * @buffer: message buffer will be written
442  * @buffer_length: message size will be read
443  */
444 static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
445 		    unsigned long buffer_length)
446 {
447 	struct mei_me_hw *hw = to_me_hw(dev);
448 	u32 *reg_buf = (u32 *)buffer;
449 	u32 hcsr;
450 
451 	for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
452 		*reg_buf++ = mei_me_mecbrw_read(dev);
453 
454 	if (buffer_length > 0) {
455 		u32 reg = mei_me_mecbrw_read(dev);
456 		memcpy(reg_buf, &reg, buffer_length);
457 	}
458 
459 	hcsr = mei_hcsr_read(hw) | H_IG;
460 	mei_hcsr_set(hw, hcsr);
461 	return 0;
462 }
463 
464 /**
465  * mei_me_pg_enter - write pg enter register to mei device.
466  *
467  * @dev: the device structure
468  */
469 static void mei_me_pg_enter(struct mei_device *dev)
470 {
471 	struct mei_me_hw *hw = to_me_hw(dev);
472 	u32 reg = mei_me_reg_read(hw, H_HPG_CSR);
473 	reg |= H_HPG_CSR_PGI;
474 	mei_me_reg_write(hw, H_HPG_CSR, reg);
475 }
476 
477 /**
478  * mei_me_pg_enter - write pg enter register to mei device.
479  *
480  * @dev: the device structure
481  */
482 static void mei_me_pg_exit(struct mei_device *dev)
483 {
484 	struct mei_me_hw *hw = to_me_hw(dev);
485 	u32 reg = mei_me_reg_read(hw, H_HPG_CSR);
486 
487 	WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n");
488 
489 	reg |= H_HPG_CSR_PGIHEXR;
490 	mei_me_reg_write(hw, H_HPG_CSR, reg);
491 }
492 
493 /**
494  * mei_me_pg_set_sync - perform pg entry procedure
495  *
496  * @dev: the device structure
497  *
498  * returns 0 on success an error code otherwise
499  */
500 int mei_me_pg_set_sync(struct mei_device *dev)
501 {
502 	struct mei_me_hw *hw = to_me_hw(dev);
503 	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
504 	int ret;
505 
506 	dev->pg_event = MEI_PG_EVENT_WAIT;
507 
508 	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
509 	if (ret)
510 		return ret;
511 
512 	mutex_unlock(&dev->device_lock);
513 	wait_event_timeout(dev->wait_pg,
514 		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
515 	mutex_lock(&dev->device_lock);
516 
517 	if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
518 		mei_me_pg_enter(dev);
519 		ret = 0;
520 	} else {
521 		ret = -ETIME;
522 	}
523 
524 	dev->pg_event = MEI_PG_EVENT_IDLE;
525 	hw->pg_state = MEI_PG_ON;
526 
527 	return ret;
528 }
529 
530 /**
531  * mei_me_pg_unset_sync - perform pg exit procedure
532  *
533  * @dev: the device structure
534  *
535  * returns 0 on success an error code otherwise
536  */
537 int mei_me_pg_unset_sync(struct mei_device *dev)
538 {
539 	struct mei_me_hw *hw = to_me_hw(dev);
540 	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
541 	int ret;
542 
543 	if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
544 		goto reply;
545 
546 	dev->pg_event = MEI_PG_EVENT_WAIT;
547 
548 	mei_me_pg_exit(dev);
549 
550 	mutex_unlock(&dev->device_lock);
551 	wait_event_timeout(dev->wait_pg,
552 		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
553 	mutex_lock(&dev->device_lock);
554 
555 reply:
556 	if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
557 		ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
558 	else
559 		ret = -ETIME;
560 
561 	dev->pg_event = MEI_PG_EVENT_IDLE;
562 	hw->pg_state = MEI_PG_OFF;
563 
564 	return ret;
565 }
566 
567 /**
568  * mei_me_pg_is_enabled - detect if PG is supported by HW
569  *
570  * @dev: the device structure
571  *
572  * returns: true is pg supported, false otherwise
573  */
574 static bool mei_me_pg_is_enabled(struct mei_device *dev)
575 {
576 	struct mei_me_hw *hw = to_me_hw(dev);
577 	u32 reg = mei_me_reg_read(hw, ME_CSR_HA);
578 
579 	if ((reg & ME_PGIC_HRA) == 0)
580 		goto notsupported;
581 
582 	if (dev->version.major_version < HBM_MAJOR_VERSION_PGI)
583 		goto notsupported;
584 
585 	if (dev->version.major_version == HBM_MAJOR_VERSION_PGI &&
586 	    dev->version.minor_version < HBM_MINOR_VERSION_PGI)
587 		goto notsupported;
588 
589 	return true;
590 
591 notsupported:
592 	dev_dbg(&dev->pdev->dev, "pg: not supported: HGP = %d hbm version %d.%d ?= %d.%d\n",
593 		!!(reg & ME_PGIC_HRA),
594 		dev->version.major_version,
595 		dev->version.minor_version,
596 		HBM_MAJOR_VERSION_PGI,
597 		HBM_MINOR_VERSION_PGI);
598 
599 	return false;
600 }
601 
602 /**
603  * mei_me_irq_quick_handler - The ISR of the MEI device
604  *
605  * @irq: The irq number
606  * @dev_id: pointer to the device structure
607  *
608  * returns irqreturn_t
609  */
610 
611 irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
612 {
613 	struct mei_device *dev = (struct mei_device *) dev_id;
614 	struct mei_me_hw *hw = to_me_hw(dev);
615 	u32 csr_reg = mei_hcsr_read(hw);
616 
617 	if ((csr_reg & H_IS) != H_IS)
618 		return IRQ_NONE;
619 
620 	/* clear H_IS bit in H_CSR */
621 	mei_me_reg_write(hw, H_CSR, csr_reg);
622 
623 	return IRQ_WAKE_THREAD;
624 }
625 
626 /**
627  * mei_me_irq_thread_handler - function called after ISR to handle the interrupt
628  * processing.
629  *
630  * @irq: The irq number
631  * @dev_id: pointer to the device structure
632  *
633  * returns irqreturn_t
634  *
635  */
636 irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
637 {
638 	struct mei_device *dev = (struct mei_device *) dev_id;
639 	struct mei_cl_cb complete_list;
640 	s32 slots;
641 	int rets = 0;
642 
643 	dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n");
644 	/* initialize our complete list */
645 	mutex_lock(&dev->device_lock);
646 	mei_io_list_init(&complete_list);
647 
648 	/* Ack the interrupt here
649 	 * In case of MSI we don't go through the quick handler */
650 	if (pci_dev_msi_enabled(dev->pdev))
651 		mei_clear_interrupts(dev);
652 
653 	/* check if ME wants a reset */
654 	if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
655 		dev_warn(&dev->pdev->dev, "FW not ready: resetting.\n");
656 		schedule_work(&dev->reset_work);
657 		goto end;
658 	}
659 
660 	/*  check if we need to start the dev */
661 	if (!mei_host_is_ready(dev)) {
662 		if (mei_hw_is_ready(dev)) {
663 			mei_me_hw_reset_release(dev);
664 			dev_dbg(&dev->pdev->dev, "we need to start the dev.\n");
665 
666 			dev->recvd_hw_ready = true;
667 			wake_up_interruptible(&dev->wait_hw_ready);
668 		} else {
669 			dev_dbg(&dev->pdev->dev, "Spurious Interrupt\n");
670 		}
671 		goto end;
672 	}
673 	/* check slots available for reading */
674 	slots = mei_count_full_read_slots(dev);
675 	while (slots > 0) {
676 		dev_dbg(&dev->pdev->dev, "slots to read = %08x\n", slots);
677 		rets = mei_irq_read_handler(dev, &complete_list, &slots);
678 		/* There is a race between ME write and interrupt delivery:
679 		 * Not all data is always available immediately after the
680 		 * interrupt, so try to read again on the next interrupt.
681 		 */
682 		if (rets == -ENODATA)
683 			break;
684 
685 		if (rets && dev->dev_state != MEI_DEV_RESETTING) {
686 			dev_err(&dev->pdev->dev, "mei_irq_read_handler ret = %d.\n",
687 						rets);
688 			schedule_work(&dev->reset_work);
689 			goto end;
690 		}
691 	}
692 
693 	dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
694 
695 	/*
696 	 * During PG handshake only allowed write is the replay to the
697 	 * PG exit message, so block calling write function
698 	 * if the pg state is not idle
699 	 */
700 	if (dev->pg_event == MEI_PG_EVENT_IDLE) {
701 		rets = mei_irq_write_handler(dev, &complete_list);
702 		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
703 	}
704 
705 	mei_irq_compl_handler(dev, &complete_list);
706 
707 end:
708 	dev_dbg(&dev->pdev->dev, "interrupt thread end ret = %d\n", rets);
709 	mutex_unlock(&dev->device_lock);
710 	return IRQ_HANDLED;
711 }
712 
/* me hw specific implementation of the mei hw operations interface */
static const struct mei_hw_ops mei_me_hw_ops = {

	.pg_state  = mei_me_pg_state,

	.host_is_ready = mei_me_host_is_ready,

	.hw_is_ready = mei_me_hw_is_ready,
	.hw_reset = mei_me_hw_reset,
	.hw_config = mei_me_hw_config,
	.hw_start = mei_me_hw_start,

	.pg_is_enabled = mei_me_pg_is_enabled,

	.intr_clear = mei_me_intr_clear,
	.intr_enable = mei_me_intr_enable,
	.intr_disable = mei_me_intr_disable,

	.hbuf_free_slots = mei_me_hbuf_empty_slots,
	.hbuf_is_ready = mei_me_hbuf_is_empty,
	.hbuf_max_len = mei_me_hbuf_max_len,

	.write = mei_me_write_message,

	.rdbuf_full_slots = mei_me_count_full_read_slots,
	.read_hdr = mei_me_mecbrw_read,
	.read = mei_me_read_slots
};
740 
741 static bool mei_me_fw_type_nm(struct pci_dev *pdev)
742 {
743 	u32 reg;
744 	pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg);
745 	/* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
746 	return (reg & 0x600) == 0x200;
747 }
748 
749 #define MEI_CFG_FW_NM                           \
750 	.quirk_probe = mei_me_fw_type_nm
751 
752 static bool mei_me_fw_type_sps(struct pci_dev *pdev)
753 {
754 	u32 reg;
755 	/* Read ME FW Status check for SPS Firmware */
756 	pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
757 	/* if bits [19:16] = 15, running SPS Firmware */
758 	return (reg & 0xf0000) == 0xf0000;
759 }
760 
761 #define MEI_CFG_FW_SPS                           \
762 	.quirk_probe = mei_me_fw_type_sps
763 
764 
/* no fw status registers (legacy devices) */
#define MEI_CFG_LEGACY_HFS                      \
	.fw_status.count = 0

/* one fw status register in pci config space */
#define MEI_CFG_ICH_HFS                        \
	.fw_status.count = 1,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1

/* two fw status registers in pci config space */
#define MEI_CFG_PCH_HFS                         \
	.fw_status.count = 2,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1,   \
	.fw_status.status[1] = PCI_CFG_HFS_2
776 
777 
/* ICH Legacy devices: no fw status registers */
const struct mei_cfg mei_me_legacy_cfg = {
	MEI_CFG_LEGACY_HFS,
};

/* ICH devices: one fw status register */
const struct mei_cfg mei_me_ich_cfg = {
	MEI_CFG_ICH_HFS,
};

/* PCH devices: two fw status registers */
const struct mei_cfg mei_me_pch_cfg = {
	MEI_CFG_PCH_HFS,
};


/* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
	MEI_CFG_PCH_HFS,
	MEI_CFG_FW_NM,
};

/* PCH Lynx Point with quirk for SPS Firmware exclusion */
const struct mei_cfg mei_me_lpt_cfg = {
	MEI_CFG_PCH_HFS,
	MEI_CFG_FW_SPS,
};
805 
806 /**
807  * mei_me_dev_init - allocates and initializes the mei device structure
808  *
809  * @pdev: The pci device structure
810  * @cfg: per device generation config
811  *
812  * returns The mei_device_device pointer on success, NULL on failure.
813  */
814 struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
815 				   const struct mei_cfg *cfg)
816 {
817 	struct mei_device *dev;
818 
819 	dev = kzalloc(sizeof(struct mei_device) +
820 			 sizeof(struct mei_me_hw), GFP_KERNEL);
821 	if (!dev)
822 		return NULL;
823 
824 	mei_device_init(dev, cfg);
825 
826 	dev->ops = &mei_me_hw_ops;
827 
828 	dev->pdev = pdev;
829 	return dev;
830 }
831 
832