// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

#include <linux/pci.h>

#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/sizes.h>

#include "mei_dev.h"
#include "hbm.h"

#include "hw-me.h"
#include "hw-me-regs.h"

#include "mei-trace.h"

/**
 * mei_me_reg_read - Reads 32bit data from the mei device
 *
 * @hw: the me hardware structure
 * @offset: offset from which to read the data
 *
 * Return: register value (u32)
 */
static inline u32 mei_me_reg_read(const struct mei_me_hw *hw,
				  unsigned long offset)
{
	return ioread32(hw->mem_addr + offset);
}

/**
 * mei_me_reg_write - Writes 32bit data to the mei device
 *
 * @hw: the me hardware structure
 * @offset: offset to which to write the data
 * @value: register value to write (u32)
 */
static inline void mei_me_reg_write(const struct mei_me_hw *hw,
				    unsigned long offset, u32 value)
{
	iowrite32(value, hw->mem_addr + offset);
}

/**
 * mei_me_mecbrw_read - Reads 32bit data from ME circular buffer
 *  read window register
 *
 * @dev: the device structure
 *
 * Return: ME_CB_RW register value (u32)
 */
static inline u32 mei_me_mecbrw_read(const struct mei_device *dev)
{
	return mei_me_reg_read(to_me_hw(dev), ME_CB_RW);
}

/**
 * mei_me_hcbww_write - write 32bit data to the host circular buffer
 *
 * @dev: the device structure
 * @data: 32bit data to be written to the host circular buffer
 */
static inline void mei_me_hcbww_write(struct mei_device *dev, u32 data)
{
	mei_me_reg_write(to_me_hw(dev), H_CB_WW, data);
}

/**
 * mei_me_mecsr_read - Reads 32bit data from the ME CSR
 *
 * @dev: the device structure
 *
 * Return: ME_CSR_HA register value (u32)
 */
static inline u32 mei_me_mecsr_read(const struct mei_device *dev)
{
	u32 reg;

	reg = mei_me_reg_read(to_me_hw(dev), ME_CSR_HA);
	trace_mei_reg_read(dev->dev, "ME_CSR_HA", ME_CSR_HA, reg);

	return reg;
}

/**
 * mei_hcsr_read - Reads 32bit data from the host CSR
 *
 * @dev: the device structure
 *
 * Return: H_CSR register value (u32)
 */
static inline u32 mei_hcsr_read(const struct mei_device *dev)
{
	u32 reg;

	reg = mei_me_reg_read(to_me_hw(dev), H_CSR);
	trace_mei_reg_read(dev->dev, "H_CSR", H_CSR, reg);

	return reg;
}

/**
 * mei_hcsr_write - writes H_CSR register to the mei device
 *
 * @dev: the device structure
 * @reg: new register value
 */
static inline void mei_hcsr_write(struct mei_device *dev, u32 reg)
{
	trace_mei_reg_write(dev->dev, "H_CSR", H_CSR, reg);
	mei_me_reg_write(to_me_hw(dev), H_CSR, reg);
}

/**
 * mei_hcsr_set - writes H_CSR register to the mei device,
 * and ignores the H_IS bit for it is write-one-to-zero.
 *
 * @dev: the device structure
 * @reg: new register value
 */
static inline void mei_hcsr_set(struct mei_device *dev, u32 reg)
{
	reg &= ~H_CSR_IS_MASK;
	mei_hcsr_write(dev, reg);
}
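/*
 * Illustrative note (not a behavioural change): H_CSR mixes control bits
 * with the H_IS interrupt status bit, which is write-one-to-clear. A naive
 * read-modify-write such as
 *
 *	hcsr = mei_hcsr_read(dev);
 *	mei_hcsr_write(dev, hcsr | H_IE);
 *
 * would also ack a pending interrupt whenever H_IS happened to be set.
 * mei_hcsr_set() therefore masks H_CSR_IS_MASK out first; only paths that
 * intend to ack the interrupt (see me_intr_clear() below) go through
 * mei_hcsr_write() with H_IS left in place.
 */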
/**
 * mei_hcsr_set_hig - set host interrupt (set H_IG)
 *
 * @dev: the device structure
 */
static inline void mei_hcsr_set_hig(struct mei_device *dev)
{
	u32 hcsr;

	hcsr = mei_hcsr_read(dev) | H_IG;
	mei_hcsr_set(dev, hcsr);
}

/**
 * mei_me_d0i3c_read - Reads 32bit data from the D0I3C register
 *
 * @dev: the device structure
 *
 * Return: H_D0I3C register value (u32)
 */
static inline u32 mei_me_d0i3c_read(const struct mei_device *dev)
{
	u32 reg;

	reg = mei_me_reg_read(to_me_hw(dev), H_D0I3C);
	trace_mei_reg_read(dev->dev, "H_D0I3C", H_D0I3C, reg);

	return reg;
}

/**
 * mei_me_d0i3c_write - writes H_D0I3C register to device
 *
 * @dev: the device structure
 * @reg: new register value
 */
static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg)
{
	trace_mei_reg_write(dev->dev, "H_D0I3C", H_D0I3C, reg);
	mei_me_reg_write(to_me_hw(dev), H_D0I3C, reg);
}

/**
 * mei_me_trc_status - read trc status register
 *
 * @dev: mei device
 * @trc: trc status register value
 *
 * Return: 0 on success, error otherwise
 */
static int mei_me_trc_status(struct mei_device *dev, u32 *trc)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	if (!hw->cfg->hw_trc_supported)
		return -EOPNOTSUPP;

	*trc = mei_me_reg_read(hw, ME_TRC);
	trace_mei_reg_read(dev->dev, "ME_TRC", ME_TRC, *trc);

	return 0;
}

/**
 * mei_me_fw_status - read fw status register from pci config space
 *
 * @dev: mei device
 * @fw_status: fw status register values
 *
 * Return: 0 on success, error otherwise
 */
static int mei_me_fw_status(struct mei_device *dev,
			    struct mei_fw_status *fw_status)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	const struct mei_fw_status *fw_src = &hw->cfg->fw_status;
	int ret;
	int i;

	if (!fw_status || !hw->read_fws)
		return -EINVAL;

	fw_status->count = fw_src->count;
	for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
		ret = hw->read_fws(dev, fw_src->status[i],
				   &fw_status->status[i]);
		trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_X",
				       fw_src->status[i],
				       fw_status->status[i]);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * mei_me_hw_config - configure hw dependent settings
 *
 * @dev: mei device
 *
 * Return:
 *  * -EINVAL when read_fws is not set
 *  * 0 on success
 *
 */
static int mei_me_hw_config(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 hcsr, reg;

	if (WARN_ON(!hw->read_fws))
		return -EINVAL;

	/* Doesn't change in runtime */
	hcsr = mei_hcsr_read(dev);
	hw->hbuf_depth = (hcsr & H_CBD) >> 24;

	reg = 0;
	hw->read_fws(dev, PCI_CFG_HFS_1, &reg);
	trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
	hw->d0i3_supported =
		((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK);

	hw->pg_state = MEI_PG_OFF;
	if (hw->d0i3_supported) {
		reg = mei_me_d0i3c_read(dev);
		if (reg & H_D0I3C_I3)
			hw->pg_state = MEI_PG_ON;
	}

	return 0;
}
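/*
 * Worked example (hypothetical register value): hbuf_depth is taken from
 * the H_CBD field in the top byte of H_CSR, so an H_CSR reading of
 * 0x20000000 gives (0x20000000 & H_CBD) >> 24 == 0x20, i.e. a host
 * circular buffer of 32 slots = 128 bytes.
 */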
/**
 * mei_me_pg_state - translate internal pg state
 *   to the mei power gating state
 *
 * @dev: mei device
 *
 * Return: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise
 */
static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	return hw->pg_state;
}

static inline u32 me_intr_src(u32 hcsr)
{
	return hcsr & H_CSR_IS_MASK;
}

/**
 * me_intr_disable - disables mei device interrupts
 *      using supplied hcsr register value.
 *
 * @dev: the device structure
 * @hcsr: supplied hcsr register value
 */
static inline void me_intr_disable(struct mei_device *dev, u32 hcsr)
{
	hcsr &= ~H_CSR_IE_MASK;
	mei_hcsr_set(dev, hcsr);
}

/**
 * me_intr_clear - clear and stop interrupts
 *
 * @dev: the device structure
 * @hcsr: supplied hcsr register value
 */
static inline void me_intr_clear(struct mei_device *dev, u32 hcsr)
{
	if (me_intr_src(hcsr))
		mei_hcsr_write(dev, hcsr);
}

/**
 * mei_me_intr_clear - clear and stop interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_clear(struct mei_device *dev)
{
	u32 hcsr = mei_hcsr_read(dev);

	me_intr_clear(dev, hcsr);
}

/**
 * mei_me_intr_enable - enables mei device interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_enable(struct mei_device *dev)
{
	u32 hcsr = mei_hcsr_read(dev);

	hcsr |= H_CSR_IE_MASK;
	mei_hcsr_set(dev, hcsr);
}

/**
 * mei_me_intr_disable - disables mei device interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_disable(struct mei_device *dev)
{
	u32 hcsr = mei_hcsr_read(dev);

	me_intr_disable(dev, hcsr);
}

/**
 * mei_me_synchronize_irq - wait for pending IRQ handlers
 *
 * @dev: the device structure
 */
static void mei_me_synchronize_irq(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	synchronize_irq(hw->irq);
}

/**
 * mei_me_hw_reset_release - release device from the reset
 *
 * @dev: the device structure
 */
static void mei_me_hw_reset_release(struct mei_device *dev)
{
	u32 hcsr = mei_hcsr_read(dev);

	hcsr |= H_IG;
	hcsr &= ~H_RST;
	mei_hcsr_set(dev, hcsr);
}

/**
 * mei_me_host_set_ready - enable device
 *
 * @dev: mei device
 */
static void mei_me_host_set_ready(struct mei_device *dev)
{
	u32 hcsr = mei_hcsr_read(dev);

	hcsr |= H_CSR_IE_MASK | H_IG | H_RDY;
	mei_hcsr_set(dev, hcsr);
}

/**
 * mei_me_host_is_ready - check whether the host has turned ready
 *
 * @dev: mei device
 * Return: bool
 */
static bool mei_me_host_is_ready(struct mei_device *dev)
{
	u32 hcsr = mei_hcsr_read(dev);

	return (hcsr & H_RDY) == H_RDY;
}

/**
 * mei_me_hw_is_ready - check whether the me(hw) has turned ready
 *
 * @dev: mei device
 * Return: bool
 */
static bool mei_me_hw_is_ready(struct mei_device *dev)
{
	u32 mecsr = mei_me_mecsr_read(dev);

	return (mecsr & ME_RDY_HRA) == ME_RDY_HRA;
}

/**
 * mei_me_hw_is_resetting - check whether the me(hw) is in reset
 *
 * @dev: mei device
 * Return: bool
 */
static bool mei_me_hw_is_resetting(struct mei_device *dev)
{
	u32 mecsr = mei_me_mecsr_read(dev);

	return (mecsr & ME_RST_HRA) == ME_RST_HRA;
}
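/*
 * Recap of the ready handshake driven by the helpers above: after a reset
 * the host waits for ME_RDY_HRA in ME_CSR_HA (mei_me_hw_is_ready()), then
 * announces itself with H_RDY plus interrupt enables
 * (mei_me_host_set_ready()). HBM traffic starts only once both sides
 * report ready; mei_me_hw_is_resetting() lets the irq thread detect a
 * firmware-initiated reset in between.
 */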
/**
 * mei_me_hw_ready_wait - wait until the me(hw) has turned ready
 *  or timeout is reached
 *
 * @dev: mei device
 * Return: 0 on success, error otherwise
 */
static int mei_me_hw_ready_wait(struct mei_device *dev)
{
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_hw_ready,
			   dev->recvd_hw_ready,
			   mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
	mutex_lock(&dev->device_lock);
	if (!dev->recvd_hw_ready) {
		dev_err(dev->dev, "wait hw ready failed\n");
		return -ETIME;
	}

	mei_me_hw_reset_release(dev);
	dev->recvd_hw_ready = false;
	return 0;
}

/**
 * mei_me_hw_start - hw start routine
 *
 * @dev: mei device
 * Return: 0 on success, error otherwise
 */
static int mei_me_hw_start(struct mei_device *dev)
{
	int ret = mei_me_hw_ready_wait(dev);

	if (ret)
		return ret;
	dev_dbg(dev->dev, "hw is ready\n");

	mei_me_host_set_ready(dev);
	return ret;
}

/**
 * mei_hbuf_filled_slots - gets number of device filled buffer slots
 *
 * @dev: the device structure
 *
 * Return: number of filled slots
 */
static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
{
	u32 hcsr;
	char read_ptr, write_ptr;

	hcsr = mei_hcsr_read(dev);

	read_ptr = (char) ((hcsr & H_CBRP) >> 8);
	write_ptr = (char) ((hcsr & H_CBWP) >> 16);

	return (unsigned char) (write_ptr - read_ptr);
}

/**
 * mei_me_hbuf_is_empty - checks if host buffer is empty.
 *
 * @dev: the device structure
 *
 * Return: true if empty, false - otherwise.
 */
static bool mei_me_hbuf_is_empty(struct mei_device *dev)
{
	return mei_hbuf_filled_slots(dev) == 0;
}

/**
 * mei_me_hbuf_empty_slots - counts write empty slots.
 *
 * @dev: the device structure
 *
 * Return: -EOVERFLOW if overflow, otherwise empty slots count
 */
static int mei_me_hbuf_empty_slots(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned char filled_slots, empty_slots;

	filled_slots = mei_hbuf_filled_slots(dev);
	empty_slots = hw->hbuf_depth - filled_slots;

	/* check for overflow */
	if (filled_slots > hw->hbuf_depth)
		return -EOVERFLOW;

	return empty_slots;
}

/**
 * mei_me_hbuf_depth - returns depth of the hw buffer.
 *
 * @dev: the device structure
 *
 * Return: size of hw buffer in slots
 */
static u32 mei_me_hbuf_depth(const struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	return hw->hbuf_depth;
}
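/*
 * Worked example of the slot accounting above, for a hypothetical 8-slot
 * buffer: with write_ptr == 0x02 and read_ptr == 0xfe the difference is
 * taken in 8-bit arithmetic, so
 * filled = (unsigned char)(0x02 - 0xfe) = 4 and empty = 8 - 4 = 4.
 * The filled > depth test in mei_me_hbuf_empty_slots() turns a corrupted
 * pointer pair into -EOVERFLOW instead of a bogus count.
 */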
/**
 * mei_me_hbuf_write - writes a message to host hw buffer.
 *
 * @dev: the device structure
 * @hdr: header of message
 * @hdr_len: header length in bytes: must be multiplication of a slot (4bytes)
 * @data: payload
 * @data_len: payload length in bytes
 *
 * Return: 0 if success, < 0 - otherwise.
 */
static int mei_me_hbuf_write(struct mei_device *dev,
			     const void *hdr, size_t hdr_len,
			     const void *data, size_t data_len)
{
	unsigned long rem;
	unsigned long i;
	const u32 *reg_buf;
	u32 dw_cnt;
	int empty_slots;

	if (WARN_ON(!hdr || !data || hdr_len & 0x3))
		return -EINVAL;

	dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr));

	empty_slots = mei_hbuf_empty_slots(dev);
	dev_dbg(dev->dev, "empty slots = %d.\n", empty_slots);

	if (empty_slots < 0)
		return -EOVERFLOW;

	dw_cnt = mei_data2slots(hdr_len + data_len);
	if (dw_cnt > (u32)empty_slots)
		return -EMSGSIZE;

	reg_buf = hdr;
	for (i = 0; i < hdr_len / MEI_SLOT_SIZE; i++)
		mei_me_hcbww_write(dev, reg_buf[i]);

	reg_buf = data;
	for (i = 0; i < data_len / MEI_SLOT_SIZE; i++)
		mei_me_hcbww_write(dev, reg_buf[i]);

	rem = data_len & 0x3;
	if (rem > 0) {
		u32 reg = 0;

		memcpy(&reg, (const u8 *)data + data_len - rem, rem);
		mei_me_hcbww_write(dev, reg);
	}

	mei_hcsr_set_hig(dev);
	if (!mei_me_hw_is_ready(dev))
		return -EIO;

	return 0;
}

/**
 * mei_me_count_full_read_slots - counts read full slots.
 *
 * @dev: the device structure
 *
 * Return: -EOVERFLOW if overflow, otherwise filled slots count
 */
static int mei_me_count_full_read_slots(struct mei_device *dev)
{
	u32 me_csr;
	char read_ptr, write_ptr;
	unsigned char buffer_depth, filled_slots;

	me_csr = mei_me_mecsr_read(dev);
	buffer_depth = (unsigned char)((me_csr & ME_CBD_HRA) >> 24);
	read_ptr = (char) ((me_csr & ME_CBRP_HRA) >> 8);
	write_ptr = (char) ((me_csr & ME_CBWP_HRA) >> 16);
	filled_slots = (unsigned char) (write_ptr - read_ptr);

	/* check for overflow */
	if (filled_slots > buffer_depth)
		return -EOVERFLOW;

	dev_dbg(dev->dev, "filled_slots =%08x\n", filled_slots);
	return (int)filled_slots;
}
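/*
 * Framing sketch for mei_me_hbuf_write() above: a message is a
 * 4-byte-aligned header followed by the payload, both pushed through
 * H_CB_WW one 4-byte slot at a time. For a hypothetical 4-byte header and
 * 6-byte payload, mei_data2slots(4 + 6) rounds up to 3 slots and the
 * trailing 2 payload bytes travel zero-padded in a temporary u32.
 */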
625 * 626 * @dev: the device structure 627 * @buffer: message buffer will be written 628 * @buffer_length: message size will be read 629 * 630 * Return: always 0 631 */ 632 static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer, 633 unsigned long buffer_length) 634 { 635 u32 *reg_buf = (u32 *)buffer; 636 637 for (; buffer_length >= MEI_SLOT_SIZE; buffer_length -= MEI_SLOT_SIZE) 638 *reg_buf++ = mei_me_mecbrw_read(dev); 639 640 if (buffer_length > 0) { 641 u32 reg = mei_me_mecbrw_read(dev); 642 643 memcpy(reg_buf, ®, buffer_length); 644 } 645 646 mei_hcsr_set_hig(dev); 647 return 0; 648 } 649 650 /** 651 * mei_me_pg_set - write pg enter register 652 * 653 * @dev: the device structure 654 */ 655 static void mei_me_pg_set(struct mei_device *dev) 656 { 657 struct mei_me_hw *hw = to_me_hw(dev); 658 u32 reg; 659 660 reg = mei_me_reg_read(hw, H_HPG_CSR); 661 trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg); 662 663 reg |= H_HPG_CSR_PGI; 664 665 trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg); 666 mei_me_reg_write(hw, H_HPG_CSR, reg); 667 } 668 669 /** 670 * mei_me_pg_unset - write pg exit register 671 * 672 * @dev: the device structure 673 */ 674 static void mei_me_pg_unset(struct mei_device *dev) 675 { 676 struct mei_me_hw *hw = to_me_hw(dev); 677 u32 reg; 678 679 reg = mei_me_reg_read(hw, H_HPG_CSR); 680 trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg); 681 682 WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n"); 683 684 reg |= H_HPG_CSR_PGIHEXR; 685 686 trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg); 687 mei_me_reg_write(hw, H_HPG_CSR, reg); 688 } 689 690 /** 691 * mei_me_pg_legacy_enter_sync - perform legacy pg entry procedure 692 * 693 * @dev: the device structure 694 * 695 * Return: 0 on success an error code otherwise 696 */ 697 static int mei_me_pg_legacy_enter_sync(struct mei_device *dev) 698 { 699 struct mei_me_hw *hw = to_me_hw(dev); 700 unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); 701 int ret; 702 703 dev->pg_event = MEI_PG_EVENT_WAIT; 704 705 ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD); 706 if (ret) 707 return ret; 708 709 mutex_unlock(&dev->device_lock); 710 wait_event_timeout(dev->wait_pg, 711 dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout); 712 mutex_lock(&dev->device_lock); 713 714 if (dev->pg_event == MEI_PG_EVENT_RECEIVED) { 715 mei_me_pg_set(dev); 716 ret = 0; 717 } else { 718 ret = -ETIME; 719 } 720 721 dev->pg_event = MEI_PG_EVENT_IDLE; 722 hw->pg_state = MEI_PG_ON; 723 724 return ret; 725 } 726 727 /** 728 * mei_me_pg_legacy_exit_sync - perform legacy pg exit procedure 729 * 730 * @dev: the device structure 731 * 732 * Return: 0 on success an error code otherwise 733 */ 734 static int mei_me_pg_legacy_exit_sync(struct mei_device *dev) 735 { 736 struct mei_me_hw *hw = to_me_hw(dev); 737 unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); 738 int ret; 739 740 if (dev->pg_event == MEI_PG_EVENT_RECEIVED) 741 goto reply; 742 743 dev->pg_event = MEI_PG_EVENT_WAIT; 744 745 mei_me_pg_unset(dev); 746 747 mutex_unlock(&dev->device_lock); 748 wait_event_timeout(dev->wait_pg, 749 dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout); 750 mutex_lock(&dev->device_lock); 751 752 reply: 753 if (dev->pg_event != MEI_PG_EVENT_RECEIVED) { 754 ret = -ETIME; 755 goto out; 756 } 757 758 dev->pg_event = MEI_PG_EVENT_INTR_WAIT; 759 ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD); 760 if (ret) 761 return ret; 762 763 mutex_unlock(&dev->device_lock); 764 wait_event_timeout(dev->wait_pg, 765 dev->pg_event 
/**
 * mei_me_pg_legacy_exit_sync - perform legacy pg exit procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success, an error code otherwise
 */
static int mei_me_pg_legacy_exit_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;

	if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
		goto reply;

	dev->pg_event = MEI_PG_EVENT_WAIT;

	mei_me_pg_unset(dev);

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
			   dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

reply:
	if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		ret = -ETIME;
		goto out;
	}

	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
	if (ret)
		return ret;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
			   dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED,
			   timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED)
		ret = 0;
	else
		ret = -ETIME;

out:
	dev->pg_event = MEI_PG_EVENT_IDLE;
	hw->pg_state = MEI_PG_OFF;

	return ret;
}

/**
 * mei_me_pg_in_transition - is device now in pg transition
 *
 * @dev: the device structure
 *
 * Return: true if in pg transition, false otherwise
 */
static bool mei_me_pg_in_transition(struct mei_device *dev)
{
	return dev->pg_event >= MEI_PG_EVENT_WAIT &&
	       dev->pg_event <= MEI_PG_EVENT_INTR_WAIT;
}

/**
 * mei_me_pg_is_enabled - detect if PG is supported by HW
 *
 * @dev: the device structure
 *
 * Return: true if pg supported, false otherwise
 */
static bool mei_me_pg_is_enabled(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg = mei_me_mecsr_read(dev);

	if (hw->d0i3_supported)
		return true;

	if ((reg & ME_PGIC_HRA) == 0)
		goto notsupported;

	if (!dev->hbm_f_pg_supported)
		goto notsupported;

	return true;

notsupported:
	dev_dbg(dev->dev, "pg: not supported: d0i3 = %d HGP = %d hbm version %d.%d ?= %d.%d\n",
		hw->d0i3_supported,
		!!(reg & ME_PGIC_HRA),
		dev->version.major_version,
		dev->version.minor_version,
		HBM_MAJOR_VERSION_PGI,
		HBM_MINOR_VERSION_PGI);

	return false;
}

/**
 * mei_me_d0i3_set - write d0i3 register bit on mei device.
 *
 * @dev: the device structure
 * @intr: ask for interrupt
 *
 * Return: D0I3C register value
 */
static u32 mei_me_d0i3_set(struct mei_device *dev, bool intr)
{
	u32 reg = mei_me_d0i3c_read(dev);

	reg |= H_D0I3C_I3;
	if (intr)
		reg |= H_D0I3C_IR;
	else
		reg &= ~H_D0I3C_IR;
	mei_me_d0i3c_write(dev, reg);
	/* read it to ensure HW consistency */
	reg = mei_me_d0i3c_read(dev);
	return reg;
}

/**
 * mei_me_d0i3_unset - clean d0i3 register bit on mei device.
 *
 * @dev: the device structure
 *
 * Return: D0I3C register value
 */
static u32 mei_me_d0i3_unset(struct mei_device *dev)
{
	u32 reg = mei_me_d0i3c_read(dev);

	reg &= ~H_D0I3C_I3;
	reg |= H_D0I3C_IR;
	mei_me_d0i3c_write(dev, reg);
	/* read it to ensure HW consistency */
	reg = mei_me_d0i3c_read(dev);
	return reg;
}
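/*
 * H_D0I3C bits as exercised by the two helpers above (semantics per
 * hw-me-regs.h): H_D0I3C_I3 requests/reflects the D0i3 state, H_D0I3C_IR
 * asks for an interrupt when the transition completes, and H_D0I3C_CIP
 * ("command in progress") tells the caller whether it still has to wait.
 * The read-back after each write flushes the posted write and samples CIP
 * coherently.
 */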
/**
 * mei_me_d0i3_enter_sync - perform d0i3 entry procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success, an error code otherwise
 */
static int mei_me_d0i3_enter_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long d0i3_timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
	unsigned long pgi_timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;
	u32 reg;

	reg = mei_me_d0i3c_read(dev);
	if (reg & H_D0I3C_I3) {
		/* we are in d0i3, nothing to do */
		dev_dbg(dev->dev, "d0i3 set not needed\n");
		ret = 0;
		goto on;
	}

	/* PGI entry procedure */
	dev->pg_event = MEI_PG_EVENT_WAIT;

	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
	if (ret)
		/* FIXME: should we reset here? */
		goto out;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
			   dev->pg_event == MEI_PG_EVENT_RECEIVED,
			   pgi_timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		ret = -ETIME;
		goto out;
	}
	/* end PGI entry procedure */

	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;

	reg = mei_me_d0i3_set(dev, true);
	if (!(reg & H_D0I3C_CIP)) {
		dev_dbg(dev->dev, "d0i3 enter wait not needed\n");
		ret = 0;
		goto on;
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
			   dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED,
			   d0i3_timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
		reg = mei_me_d0i3c_read(dev);
		if (!(reg & H_D0I3C_I3)) {
			ret = -ETIME;
			goto out;
		}
	}

	ret = 0;
on:
	hw->pg_state = MEI_PG_ON;
out:
	dev->pg_event = MEI_PG_EVENT_IDLE;
	dev_dbg(dev->dev, "d0i3 enter ret = %d\n", ret);
	return ret;
}

/**
 * mei_me_d0i3_enter - perform d0i3 entry procedure
 *   no hbm PG handshake
 *   no waiting for confirmation; runs with interrupts
 *   disabled
 *
 * @dev: the device structure
 *
 * Return: 0 on success, an error code otherwise
 */
static int mei_me_d0i3_enter(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg;

	reg = mei_me_d0i3c_read(dev);
	if (reg & H_D0I3C_I3) {
		/* we are in d0i3, nothing to do */
		dev_dbg(dev->dev, "already d0i3 : set not needed\n");
		goto on;
	}

	mei_me_d0i3_set(dev, false);
on:
	hw->pg_state = MEI_PG_ON;
	dev->pg_event = MEI_PG_EVENT_IDLE;
	dev_dbg(dev->dev, "d0i3 enter\n");
	return 0;
}

/**
 * mei_me_d0i3_exit_sync - perform d0i3 exit procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success, an error code otherwise
 */
static int mei_me_d0i3_exit_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
	int ret;
	u32 reg;

	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;

	reg = mei_me_d0i3c_read(dev);
	if (!(reg & H_D0I3C_I3)) {
		/* we are not in d0i3, nothing to do */
		dev_dbg(dev->dev, "d0i3 exit not needed\n");
		ret = 0;
		goto off;
	}

	reg = mei_me_d0i3_unset(dev);
	if (!(reg & H_D0I3C_CIP)) {
		dev_dbg(dev->dev, "d0i3 exit wait not needed\n");
		ret = 0;
		goto off;
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
			   dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED,
			   timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
		reg = mei_me_d0i3c_read(dev);
		if (reg & H_D0I3C_I3) {
			ret = -ETIME;
			goto out;
		}
	}

	ret = 0;
off:
	hw->pg_state = MEI_PG_OFF;
out:
	dev->pg_event = MEI_PG_EVENT_IDLE;

	dev_dbg(dev->dev, "d0i3 exit ret = %d\n", ret);
	return ret;
}
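/*
 * pg_event lifecycle shared by the sync paths above, roughly:
 *
 *	IDLE -> WAIT           (HBM PG request sent)
 *	WAIT -> RECEIVED       (HBM response arrived, wait_pg woken)
 *	     -> INTR_WAIT      (D0I3C written, waiting for the irq)
 *	INTR_WAIT -> INTR_RECEIVED -> IDLE
 *
 * mei_me_pg_in_transition() treats everything from WAIT to INTR_WAIT as
 * "in transition", which is what holds writes off during the handshake
 * (see the irq thread handler below).
 */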
/**
 * mei_me_pg_legacy_intr - perform legacy pg processing
 *			   in interrupt thread handler
 *
 * @dev: the device structure
 */
static void mei_me_pg_legacy_intr(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	if (dev->pg_event != MEI_PG_EVENT_INTR_WAIT)
		return;

	dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
	hw->pg_state = MEI_PG_OFF;
	if (waitqueue_active(&dev->wait_pg))
		wake_up(&dev->wait_pg);
}

/**
 * mei_me_d0i3_intr - perform d0i3 processing in interrupt thread handler
 *
 * @dev: the device structure
 * @intr_source: interrupt source
 */
static void mei_me_d0i3_intr(struct mei_device *dev, u32 intr_source)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	if (dev->pg_event == MEI_PG_EVENT_INTR_WAIT &&
	    (intr_source & H_D0I3C_IS)) {
		dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
		if (hw->pg_state == MEI_PG_ON) {
			hw->pg_state = MEI_PG_OFF;
			if (dev->hbm_state != MEI_HBM_IDLE) {
				/*
				 * force H_RDY because it could be
				 * wiped off during PG
				 */
				dev_dbg(dev->dev, "d0i3 set host ready\n");
				mei_me_host_set_ready(dev);
			}
		} else {
			hw->pg_state = MEI_PG_ON;
		}

		wake_up(&dev->wait_pg);
	}

	if (hw->pg_state == MEI_PG_ON && (intr_source & H_IS)) {
		/*
		 * HW sent some data and we are in D0i3, so
		 * we got here because of HW initiated exit from D0i3.
		 * Start runtime pm resume sequence to exit low power state.
		 */
		dev_dbg(dev->dev, "d0i3 want resume\n");
		mei_hbm_pg_resume(dev);
	}
}

/**
 * mei_me_pg_intr - perform pg processing in interrupt thread handler
 *
 * @dev: the device structure
 * @intr_source: interrupt source
 */
static void mei_me_pg_intr(struct mei_device *dev, u32 intr_source)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	if (hw->d0i3_supported)
		mei_me_d0i3_intr(dev, intr_source);
	else
		mei_me_pg_legacy_intr(dev);
}

/**
 * mei_me_pg_enter_sync - perform runtime pm entry procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success, an error code otherwise
 */
int mei_me_pg_enter_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	if (hw->d0i3_supported)
		return mei_me_d0i3_enter_sync(dev);
	else
		return mei_me_pg_legacy_enter_sync(dev);
}

/**
 * mei_me_pg_exit_sync - perform runtime pm exit procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success, an error code otherwise
 */
int mei_me_pg_exit_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	if (hw->d0i3_supported)
		return mei_me_d0i3_exit_sync(dev);
	else
		return mei_me_pg_legacy_exit_sync(dev);
}
/**
 * mei_me_hw_reset - resets fw via mei csr register.
 *
 * @dev: the device structure
 * @intr_enable: if interrupt should be enabled after reset.
 *
 * Return: 0 on success, an error code otherwise
 */
static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	int ret;
	u32 hcsr;

	if (intr_enable) {
		mei_me_intr_enable(dev);
		if (hw->d0i3_supported) {
			ret = mei_me_d0i3_exit_sync(dev);
			if (ret)
				return ret;
		} else {
			hw->pg_state = MEI_PG_OFF;
		}
	}

	pm_runtime_set_active(dev->dev);

	hcsr = mei_hcsr_read(dev);
	/* H_RST may be found lit before reset is started,
	 * for example if preceding reset flow hasn't completed.
	 * In that case asserting H_RST will be ignored, therefore
	 * we need to clean H_RST bit to start a successful reset sequence.
	 */
	if ((hcsr & H_RST) == H_RST) {
		dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
		hcsr &= ~H_RST;
		mei_hcsr_set(dev, hcsr);
		hcsr = mei_hcsr_read(dev);
	}

	hcsr |= H_RST | H_IG | H_CSR_IS_MASK;

	if (!intr_enable)
		hcsr &= ~H_CSR_IE_MASK;

	dev->recvd_hw_ready = false;
	mei_hcsr_write(dev, hcsr);

	/*
	 * Host reads the H_CSR once to ensure that the
	 * posted write to H_CSR completes.
	 */
	hcsr = mei_hcsr_read(dev);

	if ((hcsr & H_RST) == 0)
		dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);

	if ((hcsr & H_RDY) == H_RDY)
		dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr);

	if (!intr_enable) {
		mei_me_hw_reset_release(dev);
		if (hw->d0i3_supported) {
			ret = mei_me_d0i3_enter(dev);
			if (ret)
				return ret;
		}
	}
	return 0;
}

/**
 * mei_me_irq_quick_handler - The ISR of the MEI device
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * Return: irqreturn_t
 */
irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *)dev_id;
	u32 hcsr;

	hcsr = mei_hcsr_read(dev);
	if (!me_intr_src(hcsr))
		return IRQ_NONE;

	dev_dbg(dev->dev, "interrupt source 0x%08X\n", me_intr_src(hcsr));

	/* disable interrupts on device */
	me_intr_disable(dev, hcsr);
	return IRQ_WAKE_THREAD;
}
EXPORT_SYMBOL_GPL(mei_me_irq_quick_handler);
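/*
 * Interrupt handling follows the usual threaded-IRQ split: the quick
 * handler above runs in hard-irq context and only claims the interrupt
 * (me_intr_src()) and masks H_CSR_IE_MASK, while the thread handler below
 * does the real work under device_lock and re-enables interrupts on its
 * way out. The status bits themselves are acked in the thread via
 * me_intr_clear().
 */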
/**
 * mei_me_irq_thread_handler - function called after ISR to handle the interrupt
 * processing.
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * Return: irqreturn_t
 *
 */
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *) dev_id;
	struct list_head cmpl_list;
	s32 slots;
	u32 hcsr;
	int rets = 0;

	dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n");
	/* initialize our complete list */
	mutex_lock(&dev->device_lock);

	hcsr = mei_hcsr_read(dev);
	me_intr_clear(dev, hcsr);

	INIT_LIST_HEAD(&cmpl_list);

	/* check if ME wants a reset */
	if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
		dev_warn(dev->dev, "FW not ready: resetting.\n");
		if (dev->dev_state == MEI_DEV_POWERING_DOWN ||
		    dev->dev_state == MEI_DEV_POWER_DOWN)
			mei_cl_all_disconnect(dev);
		else if (dev->dev_state != MEI_DEV_DISABLED)
			schedule_work(&dev->reset_work);
		goto end;
	}

	if (mei_me_hw_is_resetting(dev))
		mei_hcsr_set_hig(dev);

	mei_me_pg_intr(dev, me_intr_src(hcsr));

	/* check if we need to start the dev */
	if (!mei_host_is_ready(dev)) {
		if (mei_hw_is_ready(dev)) {
			dev_dbg(dev->dev, "we need to start the dev.\n");
			dev->recvd_hw_ready = true;
			wake_up(&dev->wait_hw_ready);
		} else {
			dev_dbg(dev->dev, "Spurious Interrupt\n");
		}
		goto end;
	}
	/* check slots available for reading */
	slots = mei_count_full_read_slots(dev);
	while (slots > 0) {
		dev_dbg(dev->dev, "slots to read = %08x\n", slots);
		rets = mei_irq_read_handler(dev, &cmpl_list, &slots);
		/* There is a race between ME write and interrupt delivery:
		 * Not all data is always available immediately after the
		 * interrupt, so try to read again on the next interrupt.
		 */
		if (rets == -ENODATA)
			break;

		if (rets) {
			dev_err(dev->dev, "mei_irq_read_handler ret = %d, state = %d.\n",
				rets, dev->dev_state);
			if (dev->dev_state != MEI_DEV_RESETTING &&
			    dev->dev_state != MEI_DEV_DISABLED &&
			    dev->dev_state != MEI_DEV_POWERING_DOWN &&
			    dev->dev_state != MEI_DEV_POWER_DOWN)
				schedule_work(&dev->reset_work);
			goto end;
		}
	}

	dev->hbuf_is_ready = mei_hbuf_is_ready(dev);

	/*
	 * During PG handshake only allowed write is the reply to the
	 * PG exit message, so block calling write function
	 * if the pg event is in PG handshake
	 */
	if (dev->pg_event != MEI_PG_EVENT_WAIT &&
	    dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		rets = mei_irq_write_handler(dev, &cmpl_list);
		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
	}

	mei_irq_compl_handler(dev, &cmpl_list);

end:
	dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
	mei_me_intr_enable(dev);
	mutex_unlock(&dev->device_lock);
	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(mei_me_irq_thread_handler);

static const struct mei_hw_ops mei_me_hw_ops = {

	.trc_status = mei_me_trc_status,
	.fw_status = mei_me_fw_status,
	.pg_state = mei_me_pg_state,

	.host_is_ready = mei_me_host_is_ready,

	.hw_is_ready = mei_me_hw_is_ready,
	.hw_reset = mei_me_hw_reset,
	.hw_config = mei_me_hw_config,
	.hw_start = mei_me_hw_start,

	.pg_in_transition = mei_me_pg_in_transition,
	.pg_is_enabled = mei_me_pg_is_enabled,

	.intr_clear = mei_me_intr_clear,
	.intr_enable = mei_me_intr_enable,
	.intr_disable = mei_me_intr_disable,
	.synchronize_irq = mei_me_synchronize_irq,

	.hbuf_free_slots = mei_me_hbuf_empty_slots,
	.hbuf_is_ready = mei_me_hbuf_is_empty,
	.hbuf_depth = mei_me_hbuf_depth,

	.write = mei_me_hbuf_write,

	.rdbuf_full_slots = mei_me_count_full_read_slots,
	.read_hdr = mei_me_mecbrw_read,
	.read = mei_me_read_slots
};

/**
 * mei_me_fw_type_nm() - check for nm sku
 *
 * Read ME FW Status register to check for the Node Manager (NM) Firmware.
 * The NM FW is only signaled in PCI function 0.
 * __Note__: Deprecated by PCH8 and newer.
 *
 * @pdev: pci device
 *
 * Return: true in case of NM firmware
 */
static bool mei_me_fw_type_nm(const struct pci_dev *pdev)
{
	u32 reg;
	unsigned int devfn;

	devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
	pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_2, &reg);
	trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_2", PCI_CFG_HFS_2, reg);
	/* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
	return (reg & 0x600) == 0x200;
}

#define MEI_CFG_FW_NM			\
	.quirk_probe = mei_me_fw_type_nm
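/*
 * Note on quirk_probe (consumed by the bus glue, e.g. the PCI probe path,
 * not in this file): a hook returning true marks a SKU whose MEI interface
 * is owned by other firmware (NM here, SPS/IGN below), and the probe is
 * expected to bail out instead of driving the device.
 */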
1398 * 1399 * @pdev: pci device 1400 * 1401 * Return: true in case of SPS firmware 1402 */ 1403 static bool mei_me_fw_type_sps_4(const struct pci_dev *pdev) 1404 { 1405 u32 reg; 1406 unsigned int devfn; 1407 1408 devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0); 1409 pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_1, ®); 1410 trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg); 1411 return (reg & PCI_CFG_HFS_1_OPMODE_MSK) == PCI_CFG_HFS_1_OPMODE_SPS; 1412 } 1413 1414 #define MEI_CFG_FW_SPS_4 \ 1415 .quirk_probe = mei_me_fw_type_sps_4 1416 1417 /** 1418 * mei_me_fw_type_sps_ign() - check for sps or ign sku 1419 * 1420 * Read ME FW Status register to check for SPS or IGN Firmware. 1421 * The SPS/IGN FW is only signaled in pci function 0 1422 * 1423 * @pdev: pci device 1424 * 1425 * Return: true in case of SPS/IGN firmware 1426 */ 1427 static bool mei_me_fw_type_sps_ign(const struct pci_dev *pdev) 1428 { 1429 u32 reg; 1430 u32 fw_type; 1431 unsigned int devfn; 1432 1433 devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0); 1434 pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_3, ®); 1435 trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_3", PCI_CFG_HFS_3, reg); 1436 fw_type = (reg & PCI_CFG_HFS_3_FW_SKU_MSK); 1437 1438 dev_dbg(&pdev->dev, "fw type is %d\n", fw_type); 1439 1440 return fw_type == PCI_CFG_HFS_3_FW_SKU_IGN || 1441 fw_type == PCI_CFG_HFS_3_FW_SKU_SPS; 1442 } 1443 1444 #define MEI_CFG_KIND_ITOUCH \ 1445 .kind = "itouch" 1446 1447 #define MEI_CFG_TYPE_GSC \ 1448 .kind = "gsc" 1449 1450 #define MEI_CFG_TYPE_GSCFI \ 1451 .kind = "gscfi" 1452 1453 #define MEI_CFG_FW_SPS_IGN \ 1454 .quirk_probe = mei_me_fw_type_sps_ign 1455 1456 #define MEI_CFG_FW_VER_SUPP \ 1457 .fw_ver_supported = 1 1458 1459 #define MEI_CFG_ICH_HFS \ 1460 .fw_status.count = 0 1461 1462 #define MEI_CFG_ICH10_HFS \ 1463 .fw_status.count = 1, \ 1464 .fw_status.status[0] = PCI_CFG_HFS_1 1465 1466 #define MEI_CFG_PCH_HFS \ 1467 .fw_status.count = 2, \ 1468 .fw_status.status[0] = PCI_CFG_HFS_1, \ 1469 .fw_status.status[1] = PCI_CFG_HFS_2 1470 1471 #define MEI_CFG_PCH8_HFS \ 1472 .fw_status.count = 6, \ 1473 .fw_status.status[0] = PCI_CFG_HFS_1, \ 1474 .fw_status.status[1] = PCI_CFG_HFS_2, \ 1475 .fw_status.status[2] = PCI_CFG_HFS_3, \ 1476 .fw_status.status[3] = PCI_CFG_HFS_4, \ 1477 .fw_status.status[4] = PCI_CFG_HFS_5, \ 1478 .fw_status.status[5] = PCI_CFG_HFS_6 1479 1480 #define MEI_CFG_DMA_128 \ 1481 .dma_size[DMA_DSCR_HOST] = SZ_128K, \ 1482 .dma_size[DMA_DSCR_DEVICE] = SZ_128K, \ 1483 .dma_size[DMA_DSCR_CTRL] = PAGE_SIZE 1484 1485 #define MEI_CFG_TRC \ 1486 .hw_trc_supported = 1 1487 1488 /* ICH Legacy devices */ 1489 static const struct mei_cfg mei_me_ich_cfg = { 1490 MEI_CFG_ICH_HFS, 1491 }; 1492 1493 /* ICH devices */ 1494 static const struct mei_cfg mei_me_ich10_cfg = { 1495 MEI_CFG_ICH10_HFS, 1496 }; 1497 1498 /* PCH6 devices */ 1499 static const struct mei_cfg mei_me_pch6_cfg = { 1500 MEI_CFG_PCH_HFS, 1501 }; 1502 1503 /* PCH7 devices */ 1504 static const struct mei_cfg mei_me_pch7_cfg = { 1505 MEI_CFG_PCH_HFS, 1506 MEI_CFG_FW_VER_SUPP, 1507 }; 1508 1509 /* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */ 1510 static const struct mei_cfg mei_me_pch_cpt_pbg_cfg = { 1511 MEI_CFG_PCH_HFS, 1512 MEI_CFG_FW_VER_SUPP, 1513 MEI_CFG_FW_NM, 1514 }; 1515 1516 /* PCH8 Lynx Point and newer devices */ 1517 static const struct mei_cfg mei_me_pch8_cfg = { 1518 MEI_CFG_PCH8_HFS, 1519 MEI_CFG_FW_VER_SUPP, 1520 }; 1521 1522 /* PCH8 Lynx Point and newer devices - iTouch */ 1523 static const 
/* PCH8 Lynx Point and newer devices - iTouch */
static const struct mei_cfg mei_me_pch8_itouch_cfg = {
	MEI_CFG_KIND_ITOUCH,
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
};

/* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
static const struct mei_cfg mei_me_pch8_sps_4_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_FW_SPS_4,
};

/* LBG with quirk for SPS (4.0) Firmware exclusion */
static const struct mei_cfg mei_me_pch12_sps_4_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_FW_SPS_4,
};

/* Cannon Lake and newer devices */
static const struct mei_cfg mei_me_pch12_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_DMA_128,
};

/* Cannon Lake with quirk for SPS 5.0 and newer Firmware exclusion */
static const struct mei_cfg mei_me_pch12_sps_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_DMA_128,
	MEI_CFG_FW_SPS_IGN,
};

/* Cannon Lake itouch with quirk for SPS 5.0 and newer Firmware exclusion
 * w/o DMA support.
 */
static const struct mei_cfg mei_me_pch12_itouch_sps_cfg = {
	MEI_CFG_KIND_ITOUCH,
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_FW_SPS_IGN,
};

/* Tiger Lake and newer devices */
static const struct mei_cfg mei_me_pch15_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_DMA_128,
	MEI_CFG_TRC,
};

/* Tiger Lake with quirk for SPS 5.0 and newer Firmware exclusion */
static const struct mei_cfg mei_me_pch15_sps_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_DMA_128,
	MEI_CFG_TRC,
	MEI_CFG_FW_SPS_IGN,
};

/* Graphics System Controller */
static const struct mei_cfg mei_me_gsc_cfg = {
	MEI_CFG_TYPE_GSC,
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
};

/* Graphics System Controller Firmware Interface */
static const struct mei_cfg mei_me_gscfi_cfg = {
	MEI_CFG_TYPE_GSCFI,
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
};
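/*
 * For reference, the MEI_CFG_* building blocks are plain designated
 * initializers, so e.g. mei_me_pch15_sps_cfg above expands to:
 *
 *	static const struct mei_cfg mei_me_pch15_sps_cfg = {
 *		.fw_status.count = 6,
 *		.fw_status.status[0] = PCI_CFG_HFS_1,
 *		...
 *		.fw_ver_supported = 1,
 *		.dma_size[DMA_DSCR_HOST] = SZ_128K,
 *		.dma_size[DMA_DSCR_DEVICE] = SZ_128K,
 *		.dma_size[DMA_DSCR_CTRL] = PAGE_SIZE,
 *		.hw_trc_supported = 1,
 *		.quirk_probe = mei_me_fw_type_sps_ign,
 *	};
 */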
/*
 * mei_cfg_list - A list of platform specific configurations.
 * Note: has to be synchronized with enum mei_cfg_idx.
 */
static const struct mei_cfg *const mei_cfg_list[] = {
	[MEI_ME_UNDEF_CFG] = NULL,
	[MEI_ME_ICH_CFG] = &mei_me_ich_cfg,
	[MEI_ME_ICH10_CFG] = &mei_me_ich10_cfg,
	[MEI_ME_PCH6_CFG] = &mei_me_pch6_cfg,
	[MEI_ME_PCH7_CFG] = &mei_me_pch7_cfg,
	[MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg,
	[MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
	[MEI_ME_PCH8_ITOUCH_CFG] = &mei_me_pch8_itouch_cfg,
	[MEI_ME_PCH8_SPS_4_CFG] = &mei_me_pch8_sps_4_cfg,
	[MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg,
	[MEI_ME_PCH12_SPS_4_CFG] = &mei_me_pch12_sps_4_cfg,
	[MEI_ME_PCH12_SPS_CFG] = &mei_me_pch12_sps_cfg,
	[MEI_ME_PCH12_SPS_ITOUCH_CFG] = &mei_me_pch12_itouch_sps_cfg,
	[MEI_ME_PCH15_CFG] = &mei_me_pch15_cfg,
	[MEI_ME_PCH15_SPS_CFG] = &mei_me_pch15_sps_cfg,
	[MEI_ME_GSC_CFG] = &mei_me_gsc_cfg,
	[MEI_ME_GSCFI_CFG] = &mei_me_gscfi_cfg,
};

const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx)
{
	BUILD_BUG_ON(ARRAY_SIZE(mei_cfg_list) != MEI_ME_NUM_CFG);

	if (idx >= MEI_ME_NUM_CFG)
		return NULL;

	return mei_cfg_list[idx];
}
EXPORT_SYMBOL_GPL(mei_me_get_cfg);

/**
 * mei_me_dev_init - allocates and initializes the mei device structure
 *
 * @parent: device associated with physical device (pci/platform)
 * @cfg: per device generation config
 *
 * Return: The mei_device pointer on success, NULL on failure.
 */
struct mei_device *mei_me_dev_init(struct device *parent,
				   const struct mei_cfg *cfg)
{
	struct mei_device *dev;
	struct mei_me_hw *hw;
	int i;

	dev = devm_kzalloc(parent, sizeof(*dev) + sizeof(*hw), GFP_KERNEL);
	if (!dev)
		return NULL;

	hw = to_me_hw(dev);

	for (i = 0; i < DMA_DSCR_NUM; i++)
		dev->dr_dscr[i].size = cfg->dma_size[i];

	mei_device_init(dev, parent, &mei_me_hw_ops);
	hw->cfg = cfg;

	dev->fw_f_fw_ver_supported = cfg->fw_ver_supported;

	dev->kind = cfg->kind;

	return dev;
}
EXPORT_SYMBOL_GPL(mei_me_dev_init);
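/*
 * Usage sketch (the real callers live in the bus glue, e.g. pci-me.c;
 * the local names here are illustrative only):
 *
 *	const struct mei_cfg *cfg = mei_me_get_cfg(ent->driver_data);
 *	struct mei_device *dev;
 *
 *	if (!cfg)
 *		return -ENODEV;
 *	dev = mei_me_dev_init(&pdev->dev, cfg);
 *	if (!dev)
 *		return -ENOMEM;
 *	// then map the MMIO bar into to_me_hw(dev)->mem_addr, request the
 *	// irq with mei_me_irq_quick_handler()/mei_me_irq_thread_handler()
 *	// and start the device
 */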