Lines Matching +full:pcie +full:- +full:phy +full:- +full:0 (tokenized full-text query; the tokens concatenate to "pcie-phy-0")

1 // SPDX-License-Identifier: GPL-2.0
3 * MediaTek PCIe host controller driver.
19 #include <linux/phy/phy.h>
27 #define PCIE_SETTING_REG 0x80
28 #define PCIE_PCI_IDS_1 0x9c
30 #define PCIE_RC_MODE BIT(0)
32 #define PCIE_CFGNUM_REG 0x140
33 #define PCIE_CFG_DEVFN(devfn) ((devfn) & GENMASK(7, 0))
37 #define PCIE_CFG_OFFSET_ADDR 0x1000
41 #define PCIE_RST_CTRL_REG 0x148
42 #define PCIE_MAC_RSTB BIT(0)
47 #define PCIE_LTSSM_STATUS_REG 0x150
50 #define PCIE_LTSSM_STATE_L2_IDLE 0x14
52 #define PCIE_LINK_STATUS_REG 0x154
60 #define PCIE_INT_ENABLE_REG 0x180
61 #define PCIE_MSI_ENABLE GENMASK(PCIE_MSI_SET_NUM + 8 - 1, 8)
65 GENMASK(PCIE_INTX_SHIFT + PCI_NUM_INTX - 1, PCIE_INTX_SHIFT)
67 #define PCIE_INT_STATUS_REG 0x184
68 #define PCIE_MSI_SET_ENABLE_REG 0x190
69 #define PCIE_MSI_SET_ENABLE GENMASK(PCIE_MSI_SET_NUM - 1, 0)
71 #define PCIE_MSI_SET_BASE_REG 0xc00
72 #define PCIE_MSI_SET_OFFSET 0x10
73 #define PCIE_MSI_SET_STATUS_OFFSET 0x04
74 #define PCIE_MSI_SET_ENABLE_OFFSET 0x08
76 #define PCIE_MSI_SET_ADDR_HI_BASE 0xc80
77 #define PCIE_MSI_SET_ADDR_HI_OFFSET 0x04
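The PCIE_MSI_SET_* constants above describe one small register group per MSI set (message address, status, enable) plus a separate array of upper-address registers. A standalone illustration of the resulting layout follows; it is not driver code, and it assumes the PCIE_MSI_SET_NUM value of 8 that the GENMASK() users elsewhere in the file imply but this excerpt does not show.

#include <stdio.h>

/* Mirror of the offsets defined above; illustration only, not driver code. */
#define MSI_SET_BASE            0xc00
#define MSI_SET_OFFSET          0x10
#define MSI_SET_STATUS_OFFSET   0x04
#define MSI_SET_ENABLE_OFFSET   0x08
#define MSI_SET_ADDR_HI_BASE    0xc80
#define MSI_SET_ADDR_HI_OFFSET  0x04

int main(void)
{
        for (int i = 0; i < 8; i++)     /* assumed PCIE_MSI_SET_NUM */
                printf("set %d: msg_addr %#x status %#x enable %#x addr_hi %#x\n",
                       i,
                       MSI_SET_BASE + i * MSI_SET_OFFSET,
                       MSI_SET_BASE + i * MSI_SET_OFFSET + MSI_SET_STATUS_OFFSET,
                       MSI_SET_BASE + i * MSI_SET_OFFSET + MSI_SET_ENABLE_OFFSET,
                       MSI_SET_ADDR_HI_BASE + i * MSI_SET_ADDR_HI_OFFSET);
        return 0;
}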
79 #define PCIE_ICMD_PM_REG 0x198
82 #define PCIE_MISC_CTRL_REG 0x348
85 #define PCIE_TRANS_TABLE_BASE_REG 0x800
86 #define PCIE_ATR_SRC_ADDR_MSB_OFFSET 0x4
87 #define PCIE_ATR_TRSL_ADDR_LSB_OFFSET 0x8
88 #define PCIE_ATR_TRSL_ADDR_MSB_OFFSET 0xc
89 #define PCIE_ATR_TRSL_PARAM_OFFSET 0x10
90 #define PCIE_ATR_TLB_SET_OFFSET 0x20
93 #define PCIE_ATR_EN BIT(0)
95 (((((size) - 1) << 1) & GENMASK(6, 1)) | PCIE_ATR_EN)
96 #define PCIE_ATR_ID(id) ((id) & GENMASK(3, 0))
97 #define PCIE_ATR_TYPE_MEM PCIE_ATR_ID(0)
100 #define PCIE_ATR_TLP_TYPE_MEM PCIE_ATR_TLP_TYPE(0)
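Each translation (ATR) table entry programmed at PCIE_TRANS_TABLE_BASE_REG covers a power-of-two window whose size is encoded as log2(size) - 1 in bits [6:1] of the first dword, with PCIE_ATR_EN in bit 0. A standalone sketch of that encoding (illustration only; atr_size_field() is not a driver function):

#include <stdio.h>

/* Encode a power-of-two window the way PCIE_ATR_SIZE() is used: the driver
 * passes fls(table_size) - 1, i.e. log2 of the window size. */
static unsigned int atr_size_field(unsigned long long window)
{
        unsigned int order = 0;

        while ((1ULL << (order + 1)) <= window)
                order++;                        /* order = log2(window) */

        return (((order - 1) << 1) & 0x7e) | 0x1;   /* GENMASK(6, 1) | PCIE_ATR_EN */
}

int main(void)
{
        printf("64 MB window -> %#x\n", atr_size_field(0x4000000ULL));
        return 0;
}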
104 * struct mtk_msi_set - MSI information for each set
116 * struct mtk_gen3_pcie - PCIe port information
117 * @dev: pointer to PCIe device
121 * @phy_reset: PHY reset control
122 * @phy: PHY controller block
123 * @clks: PCIe clocks
124 * @num_clks: PCIe clocks count for this port
125 * @irq: PCIe controller interrupt number
141 struct phy *phy; member
158 "detect.quiet", /* 0x00 */
159 "detect.active", /* 0x01 */
160 "polling.active", /* 0x02 */
161 "polling.compliance", /* 0x03 */
162 "polling.configuration", /* 0x04 */
163 "config.linkwidthstart", /* 0x05 */
164 "config.linkwidthaccept", /* 0x06 */
165 "config.lanenumwait", /* 0x07 */
166 "config.lanenumaccept", /* 0x08 */
167 "config.complete", /* 0x09 */
168 "config.idle", /* 0x0A */
169 "recovery.receiverlock", /* 0x0B */
170 "recovery.equalization", /* 0x0C */
171 "recovery.speed", /* 0x0D */
172 "recovery.receiverconfig", /* 0x0E */
173 "recovery.idle", /* 0x0F */
174 "L0", /* 0x10 */
175 "L0s", /* 0x11 */
176 "L1.entry", /* 0x12 */
177 "L1.idle", /* 0x13 */
178 "L2.idle", /* 0x14 */
179 "L2.transmitwake", /* 0x15 */
180 "disable", /* 0x16 */
181 "loopback.entry", /* 0x17 */
182 "loopback.active", /* 0x18 */
183 "loopback.exit", /* 0x19 */
184 "hotreset", /* 0x1A */
188 * mtk_pcie_config_tlp_header() - Configure a configuration TLP header
199 struct mtk_gen3_pcie *pcie = bus->sysdata; in mtk_pcie_config_tlp_header() local
203 bytes = (GENMASK(size - 1, 0) & 0xf) << (where & 0x3); in mtk_pcie_config_tlp_header()
206 PCIE_CFG_HEADER(bus->number, devfn); in mtk_pcie_config_tlp_header()
208 writel_relaxed(val, pcie->base + PCIE_CFGNUM_REG); in mtk_pcie_config_tlp_header()
214 struct mtk_gen3_pcie *pcie = bus->sysdata; in mtk_pcie_map_bus() local
216 return pcie->base + PCIE_CFG_OFFSET_ADDR + where; in mtk_pcie_map_bus()
233 val <<= (where & 0x3) * 8; in mtk_pcie_config_write()
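Taken together, the fragments above give the config-space access path: mtk_pcie_config_tlp_header() latches bus number, devfn and byte enables into PCIE_CFGNUM_REG, mtk_pcie_map_bus() exposes a fixed window at PCIE_CFG_OFFSET_ADDR, and sub-word writes are shifted onto the byte lane selected by the register offset. An illustrative write helper built from those pieces (mtk_cfg_write_sketch() is not the driver's function, and the tlp_header parameter list is inferred from the fragments):

#include <linux/io.h>
#include <linux/pci.h>

static void mtk_cfg_write_sketch(struct pci_bus *bus, unsigned int devfn,
                                 int where, int size, u32 val)
{
        struct mtk_gen3_pcie *pcie = bus->sysdata;

        /* latch bus number, devfn and byte enables into PCIE_CFGNUM_REG */
        mtk_pcie_config_tlp_header(bus, devfn, where, size);

        /* 1- and 2-byte writes go out on the byte lane selected by 'where' */
        if (size <= 2)
                val <<= (where & 0x3) * 8;

        /* dword-aligned access through the fixed configuration window */
        writel_relaxed(val, pcie->base + PCIE_CFG_OFFSET_ADDR + (where & ~0x3));
}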
244 static int mtk_pcie_set_trans_table(struct mtk_gen3_pcie *pcie, in mtk_pcie_set_trans_table() argument
259 table_size = BIT(fls(remaining) - 1); in mtk_pcie_set_trans_table()
261 if (cpu_addr > 0) { in mtk_pcie_set_trans_table()
262 addr_align = BIT(ffs(cpu_addr) - 1); in mtk_pcie_set_trans_table()
267 if (table_size < 0x1000) { in mtk_pcie_set_trans_table()
268 dev_err(pcie->dev, "illegal table size %#llx\n", in mtk_pcie_set_trans_table()
270 return -EINVAL; in mtk_pcie_set_trans_table()
273 table = pcie->base + PCIE_TRANS_TABLE_BASE_REG + *num * PCIE_ATR_TLB_SET_OFFSET; in mtk_pcie_set_trans_table()
274 writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(table_size) - 1), table); in mtk_pcie_set_trans_table()
289 dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n", in mtk_pcie_set_trans_table()
295 remaining -= table_size; in mtk_pcie_set_trans_table()
300 dev_warn(pcie->dev, "not enough translate table for addr: %#llx, limited to [%d]\n", in mtk_pcie_set_trans_table()
303 return 0; in mtk_pcie_set_trans_table()
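mtk_pcie_set_trans_table() carves a bridge window into translation tables: each table covers the largest power of two that neither exceeds the remaining length nor breaks the natural alignment of the current CPU address, and anything below 4 KB is rejected. A standalone sketch of that carving rule (illustration only):

#include <stdio.h>

/* Largest power-of-two chunk allowed at cpu_addr for 'remaining' bytes. */
static unsigned long long next_table_size(unsigned long long cpu_addr,
                                          unsigned long long remaining)
{
        unsigned long long size = 1ULL << (63 - __builtin_clzll(remaining));

        if (cpu_addr) {
                unsigned long long align = cpu_addr & -cpu_addr; /* lowest set bit */

                if (align < size)
                        size = align;
        }
        return size;    /* the driver additionally rejects sizes below 0x1000 */
}

int main(void)
{
        unsigned long long addr = 0x20000000ULL, left = 0x30000000ULL;

        while (left) {
                unsigned long long size = next_table_size(addr, left);

                printf("table: cpu_addr %#llx size %#llx\n", addr, size);
                addr += size;
                left -= size;
        }
        return 0;
}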
306 static void mtk_pcie_enable_msi(struct mtk_gen3_pcie *pcie) in mtk_pcie_enable_msi() argument
311 for (i = 0; i < PCIE_MSI_SET_NUM; i++) { in mtk_pcie_enable_msi()
312 struct mtk_msi_set *msi_set = &pcie->msi_sets[i]; in mtk_pcie_enable_msi()
314 msi_set->base = pcie->base + PCIE_MSI_SET_BASE_REG + in mtk_pcie_enable_msi()
316 msi_set->msg_addr = pcie->reg_base + PCIE_MSI_SET_BASE_REG + in mtk_pcie_enable_msi()
320 writel_relaxed(lower_32_bits(msi_set->msg_addr), msi_set->base); in mtk_pcie_enable_msi()
321 writel_relaxed(upper_32_bits(msi_set->msg_addr), in mtk_pcie_enable_msi()
322 pcie->base + PCIE_MSI_SET_ADDR_HI_BASE + in mtk_pcie_enable_msi()
326 val = readl_relaxed(pcie->base + PCIE_MSI_SET_ENABLE_REG); in mtk_pcie_enable_msi()
328 writel_relaxed(val, pcie->base + PCIE_MSI_SET_ENABLE_REG); in mtk_pcie_enable_msi()
330 val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG); in mtk_pcie_enable_msi()
332 writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG); in mtk_pcie_enable_msi()
335 static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie) in mtk_pcie_startup_port() argument
338 struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); in mtk_pcie_startup_port()
339 unsigned int table_index = 0; in mtk_pcie_startup_port()
344 val = readl_relaxed(pcie->base + PCIE_SETTING_REG); in mtk_pcie_startup_port()
346 writel_relaxed(val, pcie->base + PCIE_SETTING_REG); in mtk_pcie_startup_port()
349 val = readl_relaxed(pcie->base + PCIE_PCI_IDS_1); in mtk_pcie_startup_port()
352 writel_relaxed(val, pcie->base + PCIE_PCI_IDS_1); in mtk_pcie_startup_port()
355 val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG); in mtk_pcie_startup_port()
357 writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG); in mtk_pcie_startup_port()
360 val = readl_relaxed(pcie->base + PCIE_MISC_CTRL_REG); in mtk_pcie_startup_port()
362 writel_relaxed(val, pcie->base + PCIE_MISC_CTRL_REG); in mtk_pcie_startup_port()
365 val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG); in mtk_pcie_startup_port()
367 writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG); in mtk_pcie_startup_port()
370 * Described in PCIe CEM specification sections 2.2 (PERST# Signal) in mtk_pcie_startup_port()
371 * and 2.2.1 (Initial Power-Up (G3 to S0)). in mtk_pcie_startup_port()
377 /* De-assert reset signals */ in mtk_pcie_startup_port()
379 writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG); in mtk_pcie_startup_port()
382 err = readl_poll_timeout(pcie->base + PCIE_LINK_STATUS_REG, val, in mtk_pcie_startup_port()
389 val = readl_relaxed(pcie->base + PCIE_LTSSM_STATUS_REG); in mtk_pcie_startup_port()
393 dev_err(pcie->dev, in mtk_pcie_startup_port()
394 "PCIe link down, current LTSSM state: %s (%#x)\n", in mtk_pcie_startup_port()
399 mtk_pcie_enable_msi(pcie); in mtk_pcie_startup_port()
401 /* Set PCIe translation windows */ in mtk_pcie_startup_port()
402 resource_list_for_each_entry(entry, &host->windows) { in mtk_pcie_startup_port()
403 struct resource *res = entry->res; in mtk_pcie_startup_port()
410 cpu_addr = pci_pio_to_address(res->start); in mtk_pcie_startup_port()
412 cpu_addr = res->start; in mtk_pcie_startup_port()
416 pci_addr = res->start - entry->offset; in mtk_pcie_startup_port()
418 err = mtk_pcie_set_trans_table(pcie, cpu_addr, pci_addr, size, in mtk_pcie_startup_port()
424 return 0; in mtk_pcie_startup_port()
430 return -EINVAL; in mtk_pcie_set_affinity()
461 struct mtk_gen3_pcie *pcie = data->domain->host_data; in mtk_compose_msi_msg() local
464 hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET; in mtk_compose_msi_msg()
466 msg->address_hi = upper_32_bits(msi_set->msg_addr); in mtk_compose_msi_msg()
467 msg->address_lo = lower_32_bits(msi_set->msg_addr); in mtk_compose_msi_msg()
468 msg->data = hwirq; in mtk_compose_msi_msg()
469 dev_dbg(pcie->dev, "msi#%#lx address_hi %#x address_lo %#x data %d\n", in mtk_compose_msi_msg()
470 hwirq, msg->address_hi, msg->address_lo, msg->data); in mtk_compose_msi_msg()
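In mtk_compose_msi_msg() the hardware interrupt number is split across the MSI sets: the quotient selects which set's msg_addr the endpoint writes to, and the remainder becomes the MSI payload. A standalone sketch of the split, assuming the customary 32 vectors per set (PCIE_MSI_IRQS_PER_SET itself is not defined in this excerpt):

#include <stdio.h>

#define MSI_IRQS_PER_SET        32      /* assumed value of PCIE_MSI_IRQS_PER_SET */

int main(void)
{
        unsigned int hwirq = 75;                          /* example vector */
        unsigned int set_idx = hwirq / MSI_IRQS_PER_SET;  /* selects msi_sets[] entry */
        unsigned int data = hwirq % MSI_IRQS_PER_SET;     /* becomes msg->data */

        printf("hwirq %u -> set %u, data %u\n", hwirq, set_idx, data);
        return 0;
}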
478 hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET; in mtk_msi_bottom_irq_ack()
480 writel_relaxed(BIT(hwirq), msi_set->base + PCIE_MSI_SET_STATUS_OFFSET); in mtk_msi_bottom_irq_ack()
486 struct mtk_gen3_pcie *pcie = data->domain->host_data; in mtk_msi_bottom_irq_mask() local
490 hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET; in mtk_msi_bottom_irq_mask()
492 raw_spin_lock_irqsave(&pcie->irq_lock, flags); in mtk_msi_bottom_irq_mask()
493 val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET); in mtk_msi_bottom_irq_mask()
495 writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET); in mtk_msi_bottom_irq_mask()
496 raw_spin_unlock_irqrestore(&pcie->irq_lock, flags); in mtk_msi_bottom_irq_mask()
502 struct mtk_gen3_pcie *pcie = data->domain->host_data; in mtk_msi_bottom_irq_unmask() local
506 hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET; in mtk_msi_bottom_irq_unmask()
508 raw_spin_lock_irqsave(&pcie->irq_lock, flags); in mtk_msi_bottom_irq_unmask()
509 val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET); in mtk_msi_bottom_irq_unmask()
511 writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET); in mtk_msi_bottom_irq_unmask()
512 raw_spin_unlock_irqrestore(&pcie->irq_lock, flags); in mtk_msi_bottom_irq_unmask()
528 struct mtk_gen3_pcie *pcie = domain->host_data; in mtk_msi_bottom_domain_alloc() local
532 mutex_lock(&pcie->lock); in mtk_msi_bottom_domain_alloc()
534 hwirq = bitmap_find_free_region(pcie->msi_irq_in_use, PCIE_MSI_IRQS_NUM, in mtk_msi_bottom_domain_alloc()
537 mutex_unlock(&pcie->lock); in mtk_msi_bottom_domain_alloc()
539 if (hwirq < 0) in mtk_msi_bottom_domain_alloc()
540 return -ENOSPC; in mtk_msi_bottom_domain_alloc()
543 msi_set = &pcie->msi_sets[set_idx]; in mtk_msi_bottom_domain_alloc()
545 for (i = 0; i < nr_irqs; i++) in mtk_msi_bottom_domain_alloc()
550 return 0; in mtk_msi_bottom_domain_alloc()
556 struct mtk_gen3_pcie *pcie = domain->host_data; in mtk_msi_bottom_domain_free() local
559 mutex_lock(&pcie->lock); in mtk_msi_bottom_domain_free()
561 bitmap_release_region(pcie->msi_irq_in_use, data->hwirq, in mtk_msi_bottom_domain_free()
564 mutex_unlock(&pcie->lock); in mtk_msi_bottom_domain_free()
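mtk_msi_bottom_domain_alloc() hands out vectors with bitmap_find_free_region(), which returns a naturally aligned power-of-two block, exactly what multi-MSI requires; the matching bitmap_release_region() in the free path undoes it. A minimal sketch of the allocation step (msi_alloc_block_sketch() is illustrative, not the driver's function):

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/log2.h>

static int msi_alloc_block_sketch(unsigned long *in_use, unsigned int nbits,
                                  unsigned int nr_irqs)
{
        /* order_base_2() keeps the block naturally aligned for multi-MSI */
        int hwirq = bitmap_find_free_region(in_use, nbits, order_base_2(nr_irqs));

        if (hwirq < 0)
                return -ENOSPC;

        /* released later with bitmap_release_region(in_use, hwirq, order) */
        return hwirq;
}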
576 struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data); in mtk_intx_mask() local
580 raw_spin_lock_irqsave(&pcie->irq_lock, flags); in mtk_intx_mask()
581 val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG); in mtk_intx_mask()
582 val &= ~BIT(data->hwirq + PCIE_INTX_SHIFT); in mtk_intx_mask()
583 writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG); in mtk_intx_mask()
584 raw_spin_unlock_irqrestore(&pcie->irq_lock, flags); in mtk_intx_mask()
589 struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data); in mtk_intx_unmask() local
593 raw_spin_lock_irqsave(&pcie->irq_lock, flags); in mtk_intx_unmask()
594 val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG); in mtk_intx_unmask()
595 val |= BIT(data->hwirq + PCIE_INTX_SHIFT); in mtk_intx_unmask()
596 writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG); in mtk_intx_unmask()
597 raw_spin_unlock_irqrestore(&pcie->irq_lock, flags); in mtk_intx_unmask()
601 * mtk_intx_eoi() - Clear INTx IRQ status at the end of interrupt
605 * until the corresponding de-assert message is received; hence that
610 struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data); in mtk_intx_eoi() local
613 hwirq = data->hwirq + PCIE_INTX_SHIFT; in mtk_intx_eoi()
614 writel_relaxed(BIT(hwirq), pcie->base + PCIE_INT_STATUS_REG); in mtk_intx_eoi()
628 irq_set_chip_data(irq, domain->host_data); in mtk_pcie_intx_map()
631 return 0; in mtk_pcie_intx_map()
638 static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie) in mtk_pcie_init_irq_domains() argument
640 struct device *dev = pcie->dev; in mtk_pcie_init_irq_domains()
641 struct device_node *intc_node, *node = dev->of_node; in mtk_pcie_init_irq_domains()
644 raw_spin_lock_init(&pcie->irq_lock); in mtk_pcie_init_irq_domains()
647 intc_node = of_get_child_by_name(node, "interrupt-controller"); in mtk_pcie_init_irq_domains()
649 dev_err(dev, "missing interrupt-controller node\n"); in mtk_pcie_init_irq_domains()
650 return -ENODEV; in mtk_pcie_init_irq_domains()
653 pcie->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX, in mtk_pcie_init_irq_domains()
654 &intx_domain_ops, pcie); in mtk_pcie_init_irq_domains()
655 if (!pcie->intx_domain) { in mtk_pcie_init_irq_domains()
657 ret = -ENODEV; in mtk_pcie_init_irq_domains()
662 mutex_init(&pcie->lock); in mtk_pcie_init_irq_domains()
664 pcie->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM, in mtk_pcie_init_irq_domains()
665 &mtk_msi_bottom_domain_ops, pcie); in mtk_pcie_init_irq_domains()
666 if (!pcie->msi_bottom_domain) { in mtk_pcie_init_irq_domains()
668 ret = -ENODEV; in mtk_pcie_init_irq_domains()
672 pcie->msi_domain = pci_msi_create_irq_domain(dev->fwnode, in mtk_pcie_init_irq_domains()
674 pcie->msi_bottom_domain); in mtk_pcie_init_irq_domains()
675 if (!pcie->msi_domain) { in mtk_pcie_init_irq_domains()
677 ret = -ENODEV; in mtk_pcie_init_irq_domains()
682 return 0; in mtk_pcie_init_irq_domains()
685 irq_domain_remove(pcie->msi_bottom_domain); in mtk_pcie_init_irq_domains()
687 irq_domain_remove(pcie->intx_domain); in mtk_pcie_init_irq_domains()
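The msi_domain_info handed to pci_msi_create_irq_domain() is not among the matched lines. For a controller that supports MSI-X and multi-MSI on top of a bottom domain it would typically look like the following; the flag set and the irq_chip name are assumptions, not taken from this excerpt:

#include <linux/msi.h>

static struct msi_domain_info mtk_msi_domain_info_sketch = {
        .flags  = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
                  MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI,
        .chip   = &mtk_msi_irq_chip,    /* assumed top-level irq_chip name */
};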
693 static void mtk_pcie_irq_teardown(struct mtk_gen3_pcie *pcie) in mtk_pcie_irq_teardown() argument
695 irq_set_chained_handler_and_data(pcie->irq, NULL, NULL); in mtk_pcie_irq_teardown()
697 if (pcie->intx_domain) in mtk_pcie_irq_teardown()
698 irq_domain_remove(pcie->intx_domain); in mtk_pcie_irq_teardown()
700 if (pcie->msi_domain) in mtk_pcie_irq_teardown()
701 irq_domain_remove(pcie->msi_domain); in mtk_pcie_irq_teardown()
703 if (pcie->msi_bottom_domain) in mtk_pcie_irq_teardown()
704 irq_domain_remove(pcie->msi_bottom_domain); in mtk_pcie_irq_teardown()
706 irq_dispose_mapping(pcie->irq); in mtk_pcie_irq_teardown()
709 static void mtk_pcie_msi_handler(struct mtk_gen3_pcie *pcie, int set_idx) in mtk_pcie_msi_handler() argument
711 struct mtk_msi_set *msi_set = &pcie->msi_sets[set_idx]; in mtk_pcie_msi_handler()
715 msi_enable = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET); in mtk_pcie_msi_handler()
718 msi_status = readl_relaxed(msi_set->base + in mtk_pcie_msi_handler()
726 generic_handle_domain_irq(pcie->msi_bottom_domain, hwirq); in mtk_pcie_msi_handler()
733 struct mtk_gen3_pcie *pcie = irq_desc_get_handler_data(desc); in mtk_pcie_irq_handler() local
740 status = readl_relaxed(pcie->base + PCIE_INT_STATUS_REG); in mtk_pcie_irq_handler()
743 generic_handle_domain_irq(pcie->intx_domain, in mtk_pcie_irq_handler()
744 irq_bit - PCIE_INTX_SHIFT); in mtk_pcie_irq_handler()
749 mtk_pcie_msi_handler(pcie, irq_bit - PCIE_MSI_SHIFT); in mtk_pcie_irq_handler()
751 writel_relaxed(BIT(irq_bit), pcie->base + PCIE_INT_STATUS_REG); in mtk_pcie_irq_handler()
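mtk_pcie_irq_handler() reads PCIE_INT_STATUS_REG once and walks the set bits, routing INTx bits into the INTx domain (their status is cleared later by mtk_intx_eoi()) and MSI-set bits into mtk_pcie_msi_handler(), acknowledging the latter in place. A condensed sketch of that dispatch; the exact loop bounds used by the driver are not shown in this excerpt:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irqdesc.h>
#include <linux/pci.h>

static void irq_dispatch_sketch(struct mtk_gen3_pcie *pcie)
{
        unsigned long status;
        unsigned int bit;

        status = readl_relaxed(pcie->base + PCIE_INT_STATUS_REG);

        for_each_set_bit(bit, &status, BITS_PER_LONG) {
                if (bit >= PCIE_INTX_SHIFT &&
                    bit < PCIE_INTX_SHIFT + PCI_NUM_INTX) {
                        /* status bit is cleared later, in mtk_intx_eoi() */
                        generic_handle_domain_irq(pcie->intx_domain,
                                                  bit - PCIE_INTX_SHIFT);
                } else if (bit >= PCIE_MSI_SHIFT) {
                        mtk_pcie_msi_handler(pcie, bit - PCIE_MSI_SHIFT);
                        writel_relaxed(BIT(bit),
                                       pcie->base + PCIE_INT_STATUS_REG);
                }
        }
}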
757 static int mtk_pcie_setup_irq(struct mtk_gen3_pcie *pcie) in mtk_pcie_setup_irq() argument
759 struct device *dev = pcie->dev; in mtk_pcie_setup_irq()
763 err = mtk_pcie_init_irq_domains(pcie); in mtk_pcie_setup_irq()
767 pcie->irq = platform_get_irq(pdev, 0); in mtk_pcie_setup_irq()
768 if (pcie->irq < 0) in mtk_pcie_setup_irq()
769 return pcie->irq; in mtk_pcie_setup_irq()
771 irq_set_chained_handler_and_data(pcie->irq, mtk_pcie_irq_handler, pcie); in mtk_pcie_setup_irq()
773 return 0; in mtk_pcie_setup_irq()
776 static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie) in mtk_pcie_parse_port() argument
778 struct device *dev = pcie->dev; in mtk_pcie_parse_port()
783 regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac"); in mtk_pcie_parse_port()
785 return -EINVAL; in mtk_pcie_parse_port()
786 pcie->base = devm_ioremap_resource(dev, regs); in mtk_pcie_parse_port()
787 if (IS_ERR(pcie->base)) { in mtk_pcie_parse_port()
789 return PTR_ERR(pcie->base); in mtk_pcie_parse_port()
792 pcie->reg_base = regs->start; in mtk_pcie_parse_port()
794 pcie->phy_reset = devm_reset_control_get_optional_exclusive(dev, "phy"); in mtk_pcie_parse_port()
795 if (IS_ERR(pcie->phy_reset)) { in mtk_pcie_parse_port()
796 ret = PTR_ERR(pcie->phy_reset); in mtk_pcie_parse_port()
797 if (ret != -EPROBE_DEFER) in mtk_pcie_parse_port()
798 dev_err(dev, "failed to get PHY reset\n"); in mtk_pcie_parse_port()
803 pcie->mac_reset = devm_reset_control_get_optional_exclusive(dev, "mac"); in mtk_pcie_parse_port()
804 if (IS_ERR(pcie->mac_reset)) { in mtk_pcie_parse_port()
805 ret = PTR_ERR(pcie->mac_reset); in mtk_pcie_parse_port()
806 if (ret != -EPROBE_DEFER) in mtk_pcie_parse_port()
812 pcie->phy = devm_phy_optional_get(dev, "pcie-phy"); in mtk_pcie_parse_port()
813 if (IS_ERR(pcie->phy)) { in mtk_pcie_parse_port()
814 ret = PTR_ERR(pcie->phy); in mtk_pcie_parse_port()
815 if (ret != -EPROBE_DEFER) in mtk_pcie_parse_port()
816 dev_err(dev, "failed to get PHY\n"); in mtk_pcie_parse_port()
821 pcie->num_clks = devm_clk_bulk_get_all(dev, &pcie->clks); in mtk_pcie_parse_port()
822 if (pcie->num_clks < 0) { in mtk_pcie_parse_port()
824 return pcie->num_clks; in mtk_pcie_parse_port()
827 return 0; in mtk_pcie_parse_port()
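The repeated pattern above, printing an error only when the cause is not -EPROBE_DEFER, is what dev_err_probe() encapsulates on kernels that provide it. An equivalent, purely illustrative form of the PHY lookup:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/phy/phy.h>

static int mtk_pcie_get_phy_sketch(struct mtk_gen3_pcie *pcie,
                                   struct device *dev)
{
        pcie->phy = devm_phy_optional_get(dev, "pcie-phy");
        if (IS_ERR(pcie->phy))
                return dev_err_probe(dev, PTR_ERR(pcie->phy),
                                     "failed to get PHY\n");

        return 0;
}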
830 static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie) in mtk_pcie_power_up() argument
832 struct device *dev = pcie->dev; in mtk_pcie_power_up()
835 /* PHY power on and enable pipe clock */ in mtk_pcie_power_up()
836 reset_control_deassert(pcie->phy_reset); in mtk_pcie_power_up()
838 err = phy_init(pcie->phy); in mtk_pcie_power_up()
840 dev_err(dev, "failed to initialize PHY\n"); in mtk_pcie_power_up()
844 err = phy_power_on(pcie->phy); in mtk_pcie_power_up()
846 dev_err(dev, "failed to power on PHY\n"); in mtk_pcie_power_up()
851 reset_control_deassert(pcie->mac_reset); in mtk_pcie_power_up()
856 err = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks); in mtk_pcie_power_up()
862 return 0; in mtk_pcie_power_up()
867 reset_control_assert(pcie->mac_reset); in mtk_pcie_power_up()
868 phy_power_off(pcie->phy); in mtk_pcie_power_up()
870 phy_exit(pcie->phy); in mtk_pcie_power_up()
872 reset_control_assert(pcie->phy_reset); in mtk_pcie_power_up()
877 static void mtk_pcie_power_down(struct mtk_gen3_pcie *pcie) in mtk_pcie_power_down() argument
879 clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks); in mtk_pcie_power_down()
881 pm_runtime_put_sync(pcie->dev); in mtk_pcie_power_down()
882 pm_runtime_disable(pcie->dev); in mtk_pcie_power_down()
883 reset_control_assert(pcie->mac_reset); in mtk_pcie_power_down()
885 phy_power_off(pcie->phy); in mtk_pcie_power_down()
886 phy_exit(pcie->phy); in mtk_pcie_power_down()
887 reset_control_assert(pcie->phy_reset); in mtk_pcie_power_down()
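mtk_pcie_power_down() drops a runtime PM reference and then disables runtime PM, so the lines elided from mtk_pcie_power_up() presumably take that reference before the clocks are enabled. A sketch of the expected pairing (assumption; the matching calls are simply not among the matched lines):

#include <linux/pm_runtime.h>

static void power_up_pm_sketch(struct device *dev)
{
        pm_runtime_enable(dev);         /* undone by pm_runtime_disable() */
        pm_runtime_get_sync(dev);       /* undone by pm_runtime_put_sync() */
}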
890 static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie) in mtk_pcie_setup() argument
894 err = mtk_pcie_parse_port(pcie); in mtk_pcie_setup()
902 reset_control_assert(pcie->phy_reset); in mtk_pcie_setup()
903 reset_control_assert(pcie->mac_reset); in mtk_pcie_setup()
907 err = mtk_pcie_power_up(pcie); in mtk_pcie_setup()
912 err = mtk_pcie_startup_port(pcie); in mtk_pcie_setup()
916 err = mtk_pcie_setup_irq(pcie); in mtk_pcie_setup()
920 return 0; in mtk_pcie_setup()
923 mtk_pcie_power_down(pcie); in mtk_pcie_setup()
930 struct device *dev = &pdev->dev; in mtk_pcie_probe()
931 struct mtk_gen3_pcie *pcie; in mtk_pcie_probe() local
935 host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); in mtk_pcie_probe()
937 return -ENOMEM; in mtk_pcie_probe()
939 pcie = pci_host_bridge_priv(host); in mtk_pcie_probe()
941 pcie->dev = dev; in mtk_pcie_probe()
942 platform_set_drvdata(pdev, pcie); in mtk_pcie_probe()
944 err = mtk_pcie_setup(pcie); in mtk_pcie_probe()
948 host->ops = &mtk_pcie_ops; in mtk_pcie_probe()
949 host->sysdata = pcie; in mtk_pcie_probe()
953 mtk_pcie_irq_teardown(pcie); in mtk_pcie_probe()
954 mtk_pcie_power_down(pcie); in mtk_pcie_probe()
958 return 0; in mtk_pcie_probe()
963 struct mtk_gen3_pcie *pcie = platform_get_drvdata(pdev); in mtk_pcie_remove() local
964 struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); in mtk_pcie_remove()
967 pci_stop_root_bus(host->bus); in mtk_pcie_remove()
968 pci_remove_root_bus(host->bus); in mtk_pcie_remove()
971 mtk_pcie_irq_teardown(pcie); in mtk_pcie_remove()
972 mtk_pcie_power_down(pcie); in mtk_pcie_remove()
975 static void mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie) in mtk_pcie_irq_save() argument
979 raw_spin_lock(&pcie->irq_lock); in mtk_pcie_irq_save()
981 pcie->saved_irq_state = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG); in mtk_pcie_irq_save()
983 for (i = 0; i < PCIE_MSI_SET_NUM; i++) { in mtk_pcie_irq_save()
984 struct mtk_msi_set *msi_set = &pcie->msi_sets[i]; in mtk_pcie_irq_save()
986 msi_set->saved_irq_state = readl_relaxed(msi_set->base + in mtk_pcie_irq_save()
990 raw_spin_unlock(&pcie->irq_lock); in mtk_pcie_irq_save()
993 static void mtk_pcie_irq_restore(struct mtk_gen3_pcie *pcie) in mtk_pcie_irq_restore() argument
997 raw_spin_lock(&pcie->irq_lock); in mtk_pcie_irq_restore()
999 writel_relaxed(pcie->saved_irq_state, pcie->base + PCIE_INT_ENABLE_REG); in mtk_pcie_irq_restore()
1001 for (i = 0; i < PCIE_MSI_SET_NUM; i++) { in mtk_pcie_irq_restore()
1002 struct mtk_msi_set *msi_set = &pcie->msi_sets[i]; in mtk_pcie_irq_restore()
1004 writel_relaxed(msi_set->saved_irq_state, in mtk_pcie_irq_restore()
1005 msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET); in mtk_pcie_irq_restore()
1008 raw_spin_unlock(&pcie->irq_lock); in mtk_pcie_irq_restore()
1011 static int mtk_pcie_turn_off_link(struct mtk_gen3_pcie *pcie) in mtk_pcie_turn_off_link() argument
1015 val = readl_relaxed(pcie->base + PCIE_ICMD_PM_REG); in mtk_pcie_turn_off_link()
1017 writel_relaxed(val, pcie->base + PCIE_ICMD_PM_REG); in mtk_pcie_turn_off_link()
1020 return readl_poll_timeout(pcie->base + PCIE_LTSSM_STATUS_REG, val, in mtk_pcie_turn_off_link()
1028 struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev); in mtk_pcie_suspend_noirq() local
1033 err = mtk_pcie_turn_off_link(pcie); in mtk_pcie_suspend_noirq()
1035 dev_err(pcie->dev, "cannot enter L2 state\n"); in mtk_pcie_suspend_noirq()
1040 val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG); in mtk_pcie_suspend_noirq()
1042 writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG); in mtk_pcie_suspend_noirq()
1044 dev_dbg(pcie->dev, "entered L2 states successfully"); in mtk_pcie_suspend_noirq()
1046 mtk_pcie_irq_save(pcie); in mtk_pcie_suspend_noirq()
1047 mtk_pcie_power_down(pcie); in mtk_pcie_suspend_noirq()
1049 return 0; in mtk_pcie_suspend_noirq()
1054 struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev); in mtk_pcie_resume_noirq() local
1057 err = mtk_pcie_power_up(pcie); in mtk_pcie_resume_noirq()
1061 err = mtk_pcie_startup_port(pcie); in mtk_pcie_resume_noirq()
1063 mtk_pcie_power_down(pcie); in mtk_pcie_resume_noirq()
1067 mtk_pcie_irq_restore(pcie); in mtk_pcie_resume_noirq()
1069 return 0; in mtk_pcie_resume_noirq()
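The suspend/resume pair above would normally be wired into the driver through noirq sleep ops; the ops structure itself is not among the matched lines, but a conventional form is sketched below:

#include <linux/pm.h>

static const struct dev_pm_ops mtk_pcie_pm_ops_sketch = {
        SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
                                      mtk_pcie_resume_noirq)
};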
1078 { .compatible = "mediatek,mt8192-pcie" },
1087 .name = "mtk-pcie-gen3",