1a1b36958SXiangliang Yu /*
2a1b36958SXiangliang Yu * This file is provided under a dual BSD/GPLv2 license. When using or
3a1b36958SXiangliang Yu * redistributing this file, you may do so under either license.
4a1b36958SXiangliang Yu *
5a1b36958SXiangliang Yu * GPL LICENSE SUMMARY
6a1b36958SXiangliang Yu *
7a1b36958SXiangliang Yu * Copyright (C) 2016 Advanced Micro Devices, Inc. All Rights Reserved.
8443b9a14SSerge Semin * Copyright (C) 2016 T-Platforms. All Rights Reserved.
9a1b36958SXiangliang Yu *
10a1b36958SXiangliang Yu * This program is free software; you can redistribute it and/or modify
11a1b36958SXiangliang Yu * it under the terms of version 2 of the GNU General Public License as
12a1b36958SXiangliang Yu * published by the Free Software Foundation.
13a1b36958SXiangliang Yu *
14a1b36958SXiangliang Yu * BSD LICENSE
15a1b36958SXiangliang Yu *
16a1b36958SXiangliang Yu * Copyright (C) 2016 Advanced Micro Devices, Inc. All Rights Reserved.
17443b9a14SSerge Semin * Copyright (C) 2016 T-Platforms. All Rights Reserved.
18a1b36958SXiangliang Yu *
19a1b36958SXiangliang Yu * Redistribution and use in source and binary forms, with or without
20a1b36958SXiangliang Yu * modification, are permitted provided that the following conditions
21a1b36958SXiangliang Yu * are met:
22a1b36958SXiangliang Yu *
23a1b36958SXiangliang Yu * * Redistributions of source code must retain the above copyright
24a1b36958SXiangliang Yu * notice, this list of conditions and the following disclaimer.
25a1b36958SXiangliang Yu * * Redistributions in binary form must reproduce the above copy
26a1b36958SXiangliang Yu * notice, this list of conditions and the following disclaimer in
27a1b36958SXiangliang Yu * the documentation and/or other materials provided with the
28a1b36958SXiangliang Yu * distribution.
29a1b36958SXiangliang Yu * * Neither the name of AMD Corporation nor the names of its
30a1b36958SXiangliang Yu * contributors may be used to endorse or promote products derived
31a1b36958SXiangliang Yu * from this software without specific prior written permission.
32a1b36958SXiangliang Yu *
33a1b36958SXiangliang Yu * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
34a1b36958SXiangliang Yu * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
35a1b36958SXiangliang Yu * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
36a1b36958SXiangliang Yu * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
37a1b36958SXiangliang Yu * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
38a1b36958SXiangliang Yu * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
39a1b36958SXiangliang Yu * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
40a1b36958SXiangliang Yu * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
41a1b36958SXiangliang Yu * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
42a1b36958SXiangliang Yu * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
43a1b36958SXiangliang Yu * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
44a1b36958SXiangliang Yu *
45a1b36958SXiangliang Yu * AMD PCIe NTB Linux driver
46a1b36958SXiangliang Yu *
47a1b36958SXiangliang Yu * Contact Information:
48a1b36958SXiangliang Yu * Xiangliang Yu <Xiangliang.Yu@amd.com>
49a1b36958SXiangliang Yu */
50a1b36958SXiangliang Yu
51a1b36958SXiangliang Yu #include <linux/debugfs.h>
52a1b36958SXiangliang Yu #include <linux/delay.h>
53a1b36958SXiangliang Yu #include <linux/init.h>
54a1b36958SXiangliang Yu #include <linux/interrupt.h>
55a1b36958SXiangliang Yu #include <linux/module.h>
56a1b36958SXiangliang Yu #include <linux/acpi.h>
57a1b36958SXiangliang Yu #include <linux/pci.h>
58a1b36958SXiangliang Yu #include <linux/random.h>
59a1b36958SXiangliang Yu #include <linux/slab.h>
60a1b36958SXiangliang Yu #include <linux/ntb.h>
61a1b36958SXiangliang Yu
62a1b36958SXiangliang Yu #include "ntb_hw_amd.h"
63a1b36958SXiangliang Yu
64a1b36958SXiangliang Yu #define NTB_NAME "ntb_hw_amd"
65a1b36958SXiangliang Yu #define NTB_DESC "AMD(R) PCI-E Non-Transparent Bridge Driver"
66a1b36958SXiangliang Yu #define NTB_VER "1.0"
67a1b36958SXiangliang Yu
68a1b36958SXiangliang Yu MODULE_DESCRIPTION(NTB_DESC);
69a1b36958SXiangliang Yu MODULE_VERSION(NTB_VER);
70a1b36958SXiangliang Yu MODULE_LICENSE("Dual BSD/GPL");
71a1b36958SXiangliang Yu MODULE_AUTHOR("AMD Inc.");
72a1b36958SXiangliang Yu
73a1b36958SXiangliang Yu static const struct file_operations amd_ntb_debugfs_info;
74a1b36958SXiangliang Yu static struct dentry *debugfs_dir;
75a1b36958SXiangliang Yu
ndev_mw_to_bar(struct amd_ntb_dev * ndev,int idx)76a1b36958SXiangliang Yu static int ndev_mw_to_bar(struct amd_ntb_dev *ndev, int idx)
77a1b36958SXiangliang Yu {
78a1b36958SXiangliang Yu if (idx < 0 || idx > ndev->mw_count)
79a1b36958SXiangliang Yu return -EINVAL;
80a1b36958SXiangliang Yu
81a1472e73SSanjay R Mehta return ndev->dev_data->mw_idx << idx;
82a1b36958SXiangliang Yu }
83a1b36958SXiangliang Yu
amd_ntb_mw_count(struct ntb_dev * ntb,int pidx)84443b9a14SSerge Semin static int amd_ntb_mw_count(struct ntb_dev *ntb, int pidx)
85a1b36958SXiangliang Yu {
86443b9a14SSerge Semin if (pidx != NTB_DEF_PEER_IDX)
87443b9a14SSerge Semin return -EINVAL;
88443b9a14SSerge Semin
89a1b36958SXiangliang Yu return ntb_ndev(ntb)->mw_count;
90a1b36958SXiangliang Yu }
91a1b36958SXiangliang Yu
/*
 * Report alignment constraints and the maximum size for memory window
 * @idx of peer @pidx.  Each out-parameter is optional (may be NULL).
 */
static int amd_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int idx,
				resource_size_t *addr_align,
				resource_size_t *size_align,
				resource_size_t *size_max)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	int bar_num;

	/* Only the single default peer is supported. */
	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	bar_num = ndev_mw_to_bar(ndev, idx);
	if (bar_num < 0)
		return bar_num;

	/* Translation addresses must be 4K aligned. */
	if (addr_align)
		*addr_align = SZ_4K;

	/* Window sizes carry no extra alignment restriction. */
	if (size_align)
		*size_align = 1;

	/* A window can map at most the full length of its BAR. */
	if (size_max)
		*size_max = pci_resource_len(ndev->ntb.pdev, bar_num);

	return 0;
}
118a1b36958SXiangliang Yu
/*
 * amd_ntb_mw_set_trans() - program an inbound memory window translation
 * @ntb:	NTB device context
 * @pidx:	peer index; only NTB_DEF_PEER_IDX is supported
 * @idx:	memory window index
 * @addr:	DMA address that peer accesses through the window reach
 * @size:	length of the mapping; must fit within the backing BAR
 *
 * Writes the translation (XLAT) and limit (LMT) registers on the peer
 * side, reading each back to verify the value stuck.  On a readback
 * mismatch the translation is torn down and -EIO is returned.
 *
 * Return: 0 on success, -EINVAL for a bad peer/window index or an
 * oversized @size, -EIO when a register readback disagrees with the
 * value written.
 */
static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
				dma_addr_t addr, resource_size_t size)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	unsigned long xlat_reg, limit_reg = 0;
	resource_size_t mw_size;
	void __iomem *mmio, *peer_mmio;
	u64 base_addr, limit, reg_val;
	int bar;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	mw_size = pci_resource_len(ntb->pdev, bar);

	/* make sure the range fits in the usable mw size */
	if (size > mw_size)
		return -EINVAL;

	mmio = ndev->self_mmio;
	peer_mmio = ndev->peer_mmio;

	base_addr = pci_resource_start(ntb->pdev, bar);

	if (bar != 1) {
		/* BARs 2+ use 64-bit XLAT/LMT register pairs spaced 4 bytes apart */
		xlat_reg = AMD_BAR23XLAT_OFFSET + ((bar - 2) << 2);
		limit_reg = AMD_BAR23LMT_OFFSET + ((bar - 2) << 2);

		/* Set the limit if supported */
		limit = size;

		/* set and verify setting the translation address */
		write64(addr, peer_mmio + xlat_reg);
		reg_val = read64(peer_mmio + xlat_reg);
		if (reg_val != addr) {
			/* readback mismatch: undo the translation */
			write64(0, peer_mmio + xlat_reg);
			return -EIO;
		}

		/* set and verify setting the limit */
		write64(limit, peer_mmio + limit_reg);
		reg_val = read64(peer_mmio + limit_reg);
		if (reg_val != limit) {
			/*
			 * NOTE(review): on failure this writes base_addr to
			 * the *local* limit register while clearing the peer
			 * xlat -- presumably intentional per HW spec, but
			 * worth confirming.
			 */
			write64(base_addr, mmio + limit_reg);
			write64(0, peer_mmio + xlat_reg);
			return -EIO;
		}
	} else {
		/* BAR 1 has dedicated registers and a 32-bit limit */
		xlat_reg = AMD_BAR1XLAT_OFFSET;
		limit_reg = AMD_BAR1LMT_OFFSET;

		/* Set the limit if supported */
		limit = size;

		/* set and verify setting the translation address */
		write64(addr, peer_mmio + xlat_reg);
		reg_val = read64(peer_mmio + xlat_reg);
		if (reg_val != addr) {
			/* readback mismatch: undo the translation */
			write64(0, peer_mmio + xlat_reg);
			return -EIO;
		}

		/* set and verify setting the limit */
		writel(limit, peer_mmio + limit_reg);
		reg_val = readl(peer_mmio + limit_reg);
		if (reg_val != limit) {
			writel(base_addr, mmio + limit_reg);
			writel(0, peer_mmio + xlat_reg);
			return -EIO;
		}
	}

	return 0;
}
197a1b36958SXiangliang Yu
amd_ntb_get_link_status(struct amd_ntb_dev * ndev)1985c6404d5SArindam Nath static int amd_ntb_get_link_status(struct amd_ntb_dev *ndev)
1995c6404d5SArindam Nath {
2005c6404d5SArindam Nath struct pci_dev *pdev = NULL;
2015c6404d5SArindam Nath struct pci_dev *pci_swds = NULL;
2025c6404d5SArindam Nath struct pci_dev *pci_swus = NULL;
2035c6404d5SArindam Nath u32 stat;
2045c6404d5SArindam Nath int rc;
2055c6404d5SArindam Nath
2065c6404d5SArindam Nath if (ndev->ntb.topo == NTB_TOPO_SEC) {
2075c6404d5SArindam Nath /* Locate the pointer to Downstream Switch for this device */
2085c6404d5SArindam Nath pci_swds = pci_upstream_bridge(ndev->ntb.pdev);
2095c6404d5SArindam Nath if (pci_swds) {
2105c6404d5SArindam Nath /*
2115c6404d5SArindam Nath * Locate the pointer to Upstream Switch for
2125c6404d5SArindam Nath * the Downstream Switch.
2135c6404d5SArindam Nath */
2145c6404d5SArindam Nath pci_swus = pci_upstream_bridge(pci_swds);
2155c6404d5SArindam Nath if (pci_swus) {
2165c6404d5SArindam Nath rc = pcie_capability_read_dword(pci_swus,
2175c6404d5SArindam Nath PCI_EXP_LNKCTL,
2185c6404d5SArindam Nath &stat);
2195c6404d5SArindam Nath if (rc)
2205c6404d5SArindam Nath return 0;
2215c6404d5SArindam Nath } else {
2225c6404d5SArindam Nath return 0;
2235c6404d5SArindam Nath }
2245c6404d5SArindam Nath } else {
2255c6404d5SArindam Nath return 0;
2265c6404d5SArindam Nath }
2275c6404d5SArindam Nath } else if (ndev->ntb.topo == NTB_TOPO_PRI) {
2285c6404d5SArindam Nath /*
2295c6404d5SArindam Nath * For NTB primary, we simply read the Link Status and control
2305c6404d5SArindam Nath * register of the NTB device itself.
2315c6404d5SArindam Nath */
2325c6404d5SArindam Nath pdev = ndev->ntb.pdev;
2335c6404d5SArindam Nath rc = pcie_capability_read_dword(pdev, PCI_EXP_LNKCTL, &stat);
2345c6404d5SArindam Nath if (rc)
2355c6404d5SArindam Nath return 0;
2365c6404d5SArindam Nath } else {
2375c6404d5SArindam Nath /* Catch all for everything else */
2385c6404d5SArindam Nath return 0;
2395c6404d5SArindam Nath }
2405c6404d5SArindam Nath
2415c6404d5SArindam Nath ndev->lnk_sta = stat;
2425c6404d5SArindam Nath
2435c6404d5SArindam Nath return 1;
2445c6404d5SArindam Nath }
2455c6404d5SArindam Nath
amd_link_is_up(struct amd_ntb_dev * ndev)246a1b36958SXiangliang Yu static int amd_link_is_up(struct amd_ntb_dev *ndev)
247a1b36958SXiangliang Yu {
2485f0856beSArindam Nath int ret;
249a1b36958SXiangliang Yu
2505f0856beSArindam Nath /*
2515f0856beSArindam Nath * We consider the link to be up under two conditions:
2525f0856beSArindam Nath *
2535f0856beSArindam Nath * - When a link-up event is received. This is indicated by
2545f0856beSArindam Nath * AMD_LINK_UP_EVENT set in peer_sta.
2555f0856beSArindam Nath * - When driver on both sides of the link have been loaded.
2565f0856beSArindam Nath * This is indicated by bit 1 being set in the peer
2575f0856beSArindam Nath * SIDEINFO register.
2585f0856beSArindam Nath *
2595f0856beSArindam Nath * This function should return 1 when the latter of the above
2605f0856beSArindam Nath * two conditions is true.
2615f0856beSArindam Nath *
2625f0856beSArindam Nath * Now consider the sequence of events - Link-Up event occurs,
2635f0856beSArindam Nath * then the peer side driver loads. In this case, we would have
2645f0856beSArindam Nath * received LINK_UP event and bit 1 of peer SIDEINFO is also
2655f0856beSArindam Nath * set. What happens now if the link goes down? Bit 1 of
2665f0856beSArindam Nath * peer SIDEINFO remains set, but LINK_DOWN bit is set in
2675f0856beSArindam Nath * peer_sta. So we should return 0 from this function. Not only
2685f0856beSArindam Nath * that, we clear bit 1 of peer SIDEINFO to 0, since the peer
2695f0856beSArindam Nath * side driver did not even get a chance to clear it before
2705f0856beSArindam Nath * the link went down. This can be the case of surprise link
2715f0856beSArindam Nath * removal.
2725f0856beSArindam Nath *
2735f0856beSArindam Nath * LINK_UP event will always occur before the peer side driver
2745f0856beSArindam Nath * gets loaded the very first time. So there can be a case when
2755f0856beSArindam Nath * the LINK_UP event has occurred, but the peer side driver hasn't
2765f0856beSArindam Nath * yet loaded. We return 0 in that case.
2775f0856beSArindam Nath *
2785f0856beSArindam Nath * There is also a special case when the primary side driver is
2795f0856beSArindam Nath * unloaded and then loaded again. Since there is no change in
2805f0856beSArindam Nath * the status of NTB secondary in this case, there is no Link-Up
2815f0856beSArindam Nath * or Link-Down notification received. We recognize this condition
2825f0856beSArindam Nath * with peer_sta being set to 0.
2835f0856beSArindam Nath *
2845f0856beSArindam Nath * If bit 1 of peer SIDEINFO register is not set, then we
2855f0856beSArindam Nath * simply return 0 irrespective of the link up or down status
2865f0856beSArindam Nath * set in peer_sta.
2875f0856beSArindam Nath */
2885f0856beSArindam Nath ret = amd_poll_link(ndev);
2895f0856beSArindam Nath if (ret) {
2905f0856beSArindam Nath /*
2915f0856beSArindam Nath * We need to check the below only for NTB primary. For NTB
2925f0856beSArindam Nath * secondary, simply checking the result of PSIDE_INFO
2935f0856beSArindam Nath * register will suffice.
2945f0856beSArindam Nath */
2955f0856beSArindam Nath if (ndev->ntb.topo == NTB_TOPO_PRI) {
2965f0856beSArindam Nath if ((ndev->peer_sta & AMD_LINK_UP_EVENT) ||
2975f0856beSArindam Nath (ndev->peer_sta == 0))
2985f0856beSArindam Nath return ret;
2995f0856beSArindam Nath else if (ndev->peer_sta & AMD_LINK_DOWN_EVENT) {
3005f0856beSArindam Nath /* Clear peer sideinfo register */
3015f0856beSArindam Nath amd_clear_side_info_reg(ndev, true);
3025f0856beSArindam Nath
3035f0856beSArindam Nath return 0;
3045f0856beSArindam Nath }
3055f0856beSArindam Nath } else { /* NTB_TOPO_SEC */
3065f0856beSArindam Nath return ret;
3075f0856beSArindam Nath }
308e5b0d2d1SXiangliang Yu }
309e5b0d2d1SXiangliang Yu
310a1b36958SXiangliang Yu return 0;
311a1b36958SXiangliang Yu }
312a1b36958SXiangliang Yu
amd_ntb_link_is_up(struct ntb_dev * ntb,enum ntb_speed * speed,enum ntb_width * width)3134e8c11b7SSerge Semin static u64 amd_ntb_link_is_up(struct ntb_dev *ntb,
314a1b36958SXiangliang Yu enum ntb_speed *speed,
315a1b36958SXiangliang Yu enum ntb_width *width)
316a1b36958SXiangliang Yu {
317a1b36958SXiangliang Yu struct amd_ntb_dev *ndev = ntb_ndev(ntb);
318a1b36958SXiangliang Yu int ret = 0;
319a1b36958SXiangliang Yu
320a1b36958SXiangliang Yu if (amd_link_is_up(ndev)) {
321a1b36958SXiangliang Yu if (speed)
322a1b36958SXiangliang Yu *speed = NTB_LNK_STA_SPEED(ndev->lnk_sta);
323a1b36958SXiangliang Yu if (width)
324a1b36958SXiangliang Yu *width = NTB_LNK_STA_WIDTH(ndev->lnk_sta);
325a1b36958SXiangliang Yu
3260f9bfb97SLogan Gunthorpe dev_dbg(&ntb->pdev->dev, "link is up.\n");
327a1b36958SXiangliang Yu
328a1b36958SXiangliang Yu ret = 1;
329a1b36958SXiangliang Yu } else {
330a1b36958SXiangliang Yu if (speed)
331a1b36958SXiangliang Yu *speed = NTB_SPEED_NONE;
332a1b36958SXiangliang Yu if (width)
333a1b36958SXiangliang Yu *width = NTB_WIDTH_NONE;
334a1b36958SXiangliang Yu
3350f9bfb97SLogan Gunthorpe dev_dbg(&ntb->pdev->dev, "link is down.\n");
336a1b36958SXiangliang Yu }
337a1b36958SXiangliang Yu
338a1b36958SXiangliang Yu return ret;
339a1b36958SXiangliang Yu }
340a1b36958SXiangliang Yu
amd_ntb_link_enable(struct ntb_dev * ntb,enum ntb_speed max_speed,enum ntb_width max_width)341a1b36958SXiangliang Yu static int amd_ntb_link_enable(struct ntb_dev *ntb,
342a1b36958SXiangliang Yu enum ntb_speed max_speed,
343a1b36958SXiangliang Yu enum ntb_width max_width)
344a1b36958SXiangliang Yu {
345a1b36958SXiangliang Yu struct amd_ntb_dev *ndev = ntb_ndev(ntb);
346a1b36958SXiangliang Yu void __iomem *mmio = ndev->self_mmio;
347a1b36958SXiangliang Yu
348a1b36958SXiangliang Yu /* Enable event interrupt */
349a1b36958SXiangliang Yu ndev->int_mask &= ~AMD_EVENT_INTMASK;
350a1b36958SXiangliang Yu writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);
351a1b36958SXiangliang Yu
352a1b36958SXiangliang Yu if (ndev->ntb.topo == NTB_TOPO_SEC)
353a1b36958SXiangliang Yu return -EINVAL;
3540f9bfb97SLogan Gunthorpe dev_dbg(&ntb->pdev->dev, "Enabling Link.\n");
355a1b36958SXiangliang Yu
356a1b36958SXiangliang Yu return 0;
357a1b36958SXiangliang Yu }
358a1b36958SXiangliang Yu
amd_ntb_link_disable(struct ntb_dev * ntb)359a1b36958SXiangliang Yu static int amd_ntb_link_disable(struct ntb_dev *ntb)
360a1b36958SXiangliang Yu {
361a1b36958SXiangliang Yu struct amd_ntb_dev *ndev = ntb_ndev(ntb);
362a1b36958SXiangliang Yu void __iomem *mmio = ndev->self_mmio;
363a1b36958SXiangliang Yu
364a1b36958SXiangliang Yu /* Disable event interrupt */
365a1b36958SXiangliang Yu ndev->int_mask |= AMD_EVENT_INTMASK;
366a1b36958SXiangliang Yu writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);
367a1b36958SXiangliang Yu
368a1b36958SXiangliang Yu if (ndev->ntb.topo == NTB_TOPO_SEC)
369a1b36958SXiangliang Yu return -EINVAL;
3700f9bfb97SLogan Gunthorpe dev_dbg(&ntb->pdev->dev, "Enabling Link.\n");
371a1b36958SXiangliang Yu
372a1b36958SXiangliang Yu return 0;
373a1b36958SXiangliang Yu }
374a1b36958SXiangliang Yu
amd_ntb_peer_mw_count(struct ntb_dev * ntb)375443b9a14SSerge Semin static int amd_ntb_peer_mw_count(struct ntb_dev *ntb)
376443b9a14SSerge Semin {
377443b9a14SSerge Semin /* The same as for inbound MWs */
378443b9a14SSerge Semin return ntb_ndev(ntb)->mw_count;
379443b9a14SSerge Semin }
380443b9a14SSerge Semin
/*
 * Report the physical address and length of the BAR backing peer
 * memory window @idx.  @base and @size are each optional.
 */
static int amd_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
				    phys_addr_t *base, resource_size_t *size)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	struct pci_dev *pdev = ndev->ntb.pdev;
	int bar_num;

	bar_num = ndev_mw_to_bar(ndev, idx);
	if (bar_num < 0)
		return bar_num;

	if (base)
		*base = pci_resource_start(pdev, bar_num);

	if (size)
		*size = pci_resource_len(pdev, bar_num);

	return 0;
}
399443b9a14SSerge Semin
amd_ntb_db_valid_mask(struct ntb_dev * ntb)400a1b36958SXiangliang Yu static u64 amd_ntb_db_valid_mask(struct ntb_dev *ntb)
401a1b36958SXiangliang Yu {
402a1b36958SXiangliang Yu return ntb_ndev(ntb)->db_valid_mask;
403a1b36958SXiangliang Yu }
404a1b36958SXiangliang Yu
amd_ntb_db_vector_count(struct ntb_dev * ntb)405a1b36958SXiangliang Yu static int amd_ntb_db_vector_count(struct ntb_dev *ntb)
406a1b36958SXiangliang Yu {
407a1b36958SXiangliang Yu return ntb_ndev(ntb)->db_count;
408a1b36958SXiangliang Yu }
409a1b36958SXiangliang Yu
amd_ntb_db_vector_mask(struct ntb_dev * ntb,int db_vector)410a1b36958SXiangliang Yu static u64 amd_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
411a1b36958SXiangliang Yu {
412a1b36958SXiangliang Yu struct amd_ntb_dev *ndev = ntb_ndev(ntb);
413a1b36958SXiangliang Yu
414a1b36958SXiangliang Yu if (db_vector < 0 || db_vector > ndev->db_count)
415a1b36958SXiangliang Yu return 0;
416a1b36958SXiangliang Yu
4171e590decSDan Carpenter return ntb_ndev(ntb)->db_valid_mask & (1ULL << db_vector);
418a1b36958SXiangliang Yu }
419a1b36958SXiangliang Yu
amd_ntb_db_read(struct ntb_dev * ntb)420a1b36958SXiangliang Yu static u64 amd_ntb_db_read(struct ntb_dev *ntb)
421a1b36958SXiangliang Yu {
422a1b36958SXiangliang Yu struct amd_ntb_dev *ndev = ntb_ndev(ntb);
423a1b36958SXiangliang Yu void __iomem *mmio = ndev->self_mmio;
424a1b36958SXiangliang Yu
425a1b36958SXiangliang Yu return (u64)readw(mmio + AMD_DBSTAT_OFFSET);
426a1b36958SXiangliang Yu }
427a1b36958SXiangliang Yu
/* Clear the given doorbell bits by writing them back to DBSTAT. */
static int amd_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);

	writew((u16)db_bits, ndev->self_mmio + AMD_DBSTAT_OFFSET);

	return 0;
}
437a1b36958SXiangliang Yu
/*
 * Mask (disable) the given doorbell interrupt bits.  The cached mask
 * and the hardware register are updated together under db_mask_lock.
 */
static int amd_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	unsigned long irqflags;

	/* Refuse bits outside the implemented doorbell range. */
	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
	ndev->db_mask |= db_bits;
	writew((u16)ndev->db_mask, mmio + AMD_DBMASK_OFFSET);
	spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);

	return 0;
}
454a1b36958SXiangliang Yu
/*
 * Unmask (enable) the given doorbell interrupt bits.  The cached mask
 * and the hardware register are updated together under db_mask_lock.
 */
static int amd_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	unsigned long irqflags;

	/* Refuse bits outside the implemented doorbell range. */
	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
	ndev->db_mask &= ~db_bits;
	writew((u16)ndev->db_mask, mmio + AMD_DBMASK_OFFSET);
	spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);

	return 0;
}
471a1b36958SXiangliang Yu
/* Ring the peer's doorbell(s) by writing the request register. */
static int amd_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);

	writew((u16)db_bits, ndev->self_mmio + AMD_DBREQ_OFFSET);

	return 0;
}
481a1b36958SXiangliang Yu
amd_ntb_spad_count(struct ntb_dev * ntb)482a1b36958SXiangliang Yu static int amd_ntb_spad_count(struct ntb_dev *ntb)
483a1b36958SXiangliang Yu {
484a1b36958SXiangliang Yu return ntb_ndev(ntb)->spad_count;
485a1b36958SXiangliang Yu }
486a1b36958SXiangliang Yu
amd_ntb_spad_read(struct ntb_dev * ntb,int idx)487a1b36958SXiangliang Yu static u32 amd_ntb_spad_read(struct ntb_dev *ntb, int idx)
488a1b36958SXiangliang Yu {
489a1b36958SXiangliang Yu struct amd_ntb_dev *ndev = ntb_ndev(ntb);
490a1b36958SXiangliang Yu void __iomem *mmio = ndev->self_mmio;
491a1b36958SXiangliang Yu u32 offset;
492a1b36958SXiangliang Yu
493a1b36958SXiangliang Yu if (idx < 0 || idx >= ndev->spad_count)
494a1b36958SXiangliang Yu return 0;
495a1b36958SXiangliang Yu
496a1b36958SXiangliang Yu offset = ndev->self_spad + (idx << 2);
497a1b36958SXiangliang Yu return readl(mmio + AMD_SPAD_OFFSET + offset);
498a1b36958SXiangliang Yu }
499a1b36958SXiangliang Yu
/* Write @val to local scratchpad register @idx. */
static int amd_ntb_spad_write(struct ntb_dev *ntb,
			      int idx, u32 val)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	u32 reg_off;

	if (idx < 0 || idx >= ndev->spad_count)
		return -EINVAL;

	/* Scratchpads are 32-bit registers, 4 bytes apart. */
	reg_off = ndev->self_spad + (idx << 2);
	writel(val, ndev->self_mmio + AMD_SPAD_OFFSET + reg_off);

	return 0;
}
515a1b36958SXiangliang Yu
amd_ntb_peer_spad_read(struct ntb_dev * ntb,int pidx,int sidx)516d67288a3SSerge Semin static u32 amd_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx)
517a1b36958SXiangliang Yu {
518a1b36958SXiangliang Yu struct amd_ntb_dev *ndev = ntb_ndev(ntb);
519a1b36958SXiangliang Yu void __iomem *mmio = ndev->self_mmio;
520a1b36958SXiangliang Yu u32 offset;
521a1b36958SXiangliang Yu
522d67288a3SSerge Semin if (sidx < 0 || sidx >= ndev->spad_count)
523a1b36958SXiangliang Yu return -EINVAL;
524a1b36958SXiangliang Yu
525d67288a3SSerge Semin offset = ndev->peer_spad + (sidx << 2);
526a1b36958SXiangliang Yu return readl(mmio + AMD_SPAD_OFFSET + offset);
527a1b36958SXiangliang Yu }
528a1b36958SXiangliang Yu
/* Write @val to peer scratchpad register @sidx. */
static int amd_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
				   int sidx, u32 val)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	u32 reg_off;

	if (sidx < 0 || sidx >= ndev->spad_count)
		return -EINVAL;

	/* Peer scratchpads live at peer_spad within the SPAD window. */
	reg_off = ndev->peer_spad + (sidx << 2);
	writel(val, ndev->self_mmio + AMD_SPAD_OFFSET + reg_off);

	return 0;
}
544a1b36958SXiangliang Yu
/* Callback table registered with the common NTB core for this device. */
static const struct ntb_dev_ops amd_ntb_ops = {
	.mw_count = amd_ntb_mw_count,
	.mw_get_align = amd_ntb_mw_get_align,
	.mw_set_trans = amd_ntb_mw_set_trans,
	.peer_mw_count = amd_ntb_peer_mw_count,
	.peer_mw_get_addr = amd_ntb_peer_mw_get_addr,
	.link_is_up = amd_ntb_link_is_up,
	.link_enable = amd_ntb_link_enable,
	.link_disable = amd_ntb_link_disable,
	.db_valid_mask = amd_ntb_db_valid_mask,
	.db_vector_count = amd_ntb_db_vector_count,
	.db_vector_mask = amd_ntb_db_vector_mask,
	.db_read = amd_ntb_db_read,
	.db_clear = amd_ntb_db_clear,
	.db_set_mask = amd_ntb_db_set_mask,
	.db_clear_mask = amd_ntb_db_clear_mask,
	.peer_db_set = amd_ntb_peer_db_set,
	.spad_count = amd_ntb_spad_count,
	.spad_read = amd_ntb_spad_read,
	.spad_write = amd_ntb_spad_write,
	.peer_spad_read = amd_ntb_peer_spad_read,
	.peer_spad_write = amd_ntb_peer_spad_write,
};
568a1b36958SXiangliang Yu
/* Acknowledge an SMU event by setting @bit in the SMU ack register. */
static void amd_ack_smu(struct amd_ntb_dev *ndev, u32 bit)
{
	void __iomem *mmio = ndev->self_mmio;
	int val;

	/* Read-modify-write so already-pending acks are preserved. */
	val = readl(mmio + AMD_SMUACK_OFFSET);
	val |= bit;
	writel(val, mmio + AMD_SMUACK_OFFSET);
}
578a1b36958SXiangliang Yu
/*
 * amd_handle_event() - dispatch a side-channel (SMU) event interrupt.
 * @ndev: the AMD NTB device
 * @vec:  interrupt vector the event arrived on (debug only)
 *
 * Reads the event bits from INTSTAT, records them in ndev->peer_sta,
 * acknowledges them towards the SMU and notifies the NTB core of any
 * resulting link change.
 */
static void amd_handle_event(struct amd_ntb_dev *ndev, int vec)
{
	void __iomem *mmio = ndev->self_mmio;
	struct device *dev = &ndev->ntb.pdev->dev;
	u32 status;

	status = readl(mmio + AMD_INTSTAT_OFFSET);
	if (!(status & AMD_EVENT_INTMASK))
		return;

	dev_dbg(dev, "status = 0x%x and vec = %d\n", status, vec);

	status &= AMD_EVENT_INTMASK;
	switch (status) {
	case AMD_PEER_FLUSH_EVENT:
		ndev->peer_sta |= AMD_PEER_FLUSH_EVENT;
		dev_info(dev, "Flush is done.\n");
		break;
	case AMD_PEER_RESET_EVENT:
	case AMD_LINK_DOWN_EVENT:
		/* a link-down event supersedes any recorded link-up */
		ndev->peer_sta |= status;
		if (status == AMD_LINK_DOWN_EVENT)
			ndev->peer_sta &= ~AMD_LINK_UP_EVENT;

		amd_ack_smu(ndev, status);

		/* link down first */
		ntb_link_event(&ndev->ntb);
		/* polling peer status */
		schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT);

		break;
	case AMD_PEER_D3_EVENT:
	case AMD_PEER_PMETO_EVENT:
	case AMD_LINK_UP_EVENT:
		/* link-up clears link-down; D3 clears a recorded D0 */
		ndev->peer_sta |= status;
		if (status == AMD_LINK_UP_EVENT)
			ndev->peer_sta &= ~AMD_LINK_DOWN_EVENT;
		else if (status == AMD_PEER_D3_EVENT)
			ndev->peer_sta &= ~AMD_PEER_D0_EVENT;

		amd_ack_smu(ndev, status);

		/* link down */
		ntb_link_event(&ndev->ntb);

		break;
	case AMD_PEER_D0_EVENT:
		mmio = ndev->peer_mmio;
		status = readl(mmio + AMD_PMESTAT_OFFSET);
		/* check if this is WAKEUP event */
		if (status & 0x1)
			dev_info(dev, "Wakeup is done.\n");

		ndev->peer_sta |= AMD_PEER_D0_EVENT;
		ndev->peer_sta &= ~AMD_PEER_D3_EVENT;
		amd_ack_smu(ndev, AMD_PEER_D0_EVENT);

		/* start a timer to poll link status */
		schedule_delayed_work(&ndev->hb_timer,
				      AMD_LINK_HB_TIMEOUT);
		break;
	default:
		dev_info(dev, "event status = 0x%x.\n", status);
		break;
	}

	/*
	 * Clear the interrupt status.
	 * NOTE(review): in the AMD_PEER_D0_EVENT case both 'mmio' and
	 * 'status' were re-assigned above, so this write targets the peer's
	 * register space with the PMESTAT value -- confirm this is the
	 * intended behaviour.
	 */
	writel(status, mmio + AMD_INTSTAT_OFFSET);
}
649a1b36958SXiangliang Yu
amd_handle_db_event(struct amd_ntb_dev * ndev,int vec)650ac10d4f6SArindam Nath static void amd_handle_db_event(struct amd_ntb_dev *ndev, int vec)
651ac10d4f6SArindam Nath {
652ac10d4f6SArindam Nath struct device *dev = &ndev->ntb.pdev->dev;
653ac10d4f6SArindam Nath u64 status;
654ac10d4f6SArindam Nath
655ac10d4f6SArindam Nath status = amd_ntb_db_read(&ndev->ntb);
656ac10d4f6SArindam Nath
657ac10d4f6SArindam Nath dev_dbg(dev, "status = 0x%llx and vec = %d\n", status, vec);
658ac10d4f6SArindam Nath
659ac10d4f6SArindam Nath /*
660ac10d4f6SArindam Nath * Since we had reserved highest order bit of DB for signaling peer of
661ac10d4f6SArindam Nath * a special event, this is the only status bit we should be concerned
662ac10d4f6SArindam Nath * here now.
663ac10d4f6SArindam Nath */
664ac10d4f6SArindam Nath if (status & BIT(ndev->db_last_bit)) {
665ac10d4f6SArindam Nath ntb_db_clear(&ndev->ntb, BIT(ndev->db_last_bit));
666ac10d4f6SArindam Nath /* send link down event notification */
667ac10d4f6SArindam Nath ntb_link_event(&ndev->ntb);
668ac10d4f6SArindam Nath
669ac10d4f6SArindam Nath /*
670ac10d4f6SArindam Nath * If we are here, that means the peer has signalled a special
671ac10d4f6SArindam Nath * event which notifies that the peer driver has been
672ac10d4f6SArindam Nath * un-loaded for some reason. Since there is a chance that the
673ac10d4f6SArindam Nath * peer will load its driver again sometime, we schedule link
674ac10d4f6SArindam Nath * polling routine.
675ac10d4f6SArindam Nath */
676ac10d4f6SArindam Nath schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT);
677ac10d4f6SArindam Nath }
678ac10d4f6SArindam Nath }
679ac10d4f6SArindam Nath
ndev_interrupt(struct amd_ntb_dev * ndev,int vec)680a1b36958SXiangliang Yu static irqreturn_t ndev_interrupt(struct amd_ntb_dev *ndev, int vec)
681a1b36958SXiangliang Yu {
6820f9bfb97SLogan Gunthorpe dev_dbg(&ndev->ntb.pdev->dev, "vec %d\n", vec);
683a1b36958SXiangliang Yu
684a1b36958SXiangliang Yu if (vec > (AMD_DB_CNT - 1) || (ndev->msix_vec_count == 1))
685a1b36958SXiangliang Yu amd_handle_event(ndev, vec);
686a1b36958SXiangliang Yu
687ac10d4f6SArindam Nath if (vec < AMD_DB_CNT) {
688ac10d4f6SArindam Nath amd_handle_db_event(ndev, vec);
689a1b36958SXiangliang Yu ntb_db_event(&ndev->ntb, vec);
690ac10d4f6SArindam Nath }
691a1b36958SXiangliang Yu
692a1b36958SXiangliang Yu return IRQ_HANDLED;
693a1b36958SXiangliang Yu }
694a1b36958SXiangliang Yu
ndev_vec_isr(int irq,void * dev)695a1b36958SXiangliang Yu static irqreturn_t ndev_vec_isr(int irq, void *dev)
696a1b36958SXiangliang Yu {
697a1b36958SXiangliang Yu struct amd_ntb_vec *nvec = dev;
698a1b36958SXiangliang Yu
699a1b36958SXiangliang Yu return ndev_interrupt(nvec->ndev, nvec->num);
700a1b36958SXiangliang Yu }
701a1b36958SXiangliang Yu
ndev_irq_isr(int irq,void * dev)702a1b36958SXiangliang Yu static irqreturn_t ndev_irq_isr(int irq, void *dev)
703a1b36958SXiangliang Yu {
704a1b36958SXiangliang Yu struct amd_ntb_dev *ndev = dev;
705a1b36958SXiangliang Yu
7060f9bfb97SLogan Gunthorpe return ndev_interrupt(ndev, irq - ndev->ntb.pdev->irq);
707a1b36958SXiangliang Yu }
708a1b36958SXiangliang Yu
/*
 * ndev_init_isr() - set up interrupt delivery for the NTB device.
 * @ndev:     the AMD NTB device
 * @msix_min: minimum acceptable number of MSI-X vectors
 * @msix_max: number of MSI-X vectors to request
 *
 * Tries MSI-X first; on any failure falls through to MSI, and from there
 * to legacy INTx.  Returns 0 on success or a negative errno if no
 * interrupt mode could be established.
 */
static int ndev_init_isr(struct amd_ntb_dev *ndev,
			 int msix_min, int msix_max)
{
	struct pci_dev *pdev;
	int rc, i, msix_count, node;

	pdev = ndev->ntb.pdev;

	node = dev_to_node(&pdev->dev);

	/* mask all doorbells until a client unmasks them */
	ndev->db_mask = ndev->db_valid_mask;

	/* Try to set up msix irq */
	ndev->vec = kcalloc_node(msix_max, sizeof(*ndev->vec),
				 GFP_KERNEL, node);
	if (!ndev->vec)
		goto err_msix_vec_alloc;

	ndev->msix = kcalloc_node(msix_max, sizeof(*ndev->msix),
				  GFP_KERNEL, node);
	if (!ndev->msix)
		goto err_msix_alloc;

	for (i = 0; i < msix_max; ++i)
		ndev->msix[i].entry = i;

	msix_count = pci_enable_msix_range(pdev, ndev->msix,
					   msix_min, msix_max);
	if (msix_count < 0)
		goto err_msix_enable;

	/* NOTE: Disable MSIX if msix count is less than 16 because of
	 * hardware limitation.
	 */
	if (msix_count < msix_min) {
		pci_disable_msix(pdev);
		goto err_msix_enable;
	}

	for (i = 0; i < msix_count; ++i) {
		ndev->vec[i].ndev = ndev;
		ndev->vec[i].num = i;
		rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,
				 "ndev_vec_isr", &ndev->vec[i]);
		if (rc)
			goto err_msix_request;
	}

	dev_dbg(&pdev->dev, "Using msix interrupts\n");
	ndev->db_count = msix_min;
	ndev->msix_vec_count = msix_max;
	return 0;

err_msix_request:
	/* unwind only the vectors already requested */
	while (i-- > 0)
		free_irq(ndev->msix[i].vector, &ndev->vec[i]);
	pci_disable_msix(pdev);
err_msix_enable:
	kfree(ndev->msix);
err_msix_alloc:
	kfree(ndev->vec);
err_msix_vec_alloc:
	ndev->msix = NULL;
	ndev->vec = NULL;

	/* Try to set up msi irq */
	rc = pci_enable_msi(pdev);
	if (rc)
		goto err_msi_enable;

	rc = request_irq(pdev->irq, ndev_irq_isr, 0,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_msi_request;

	dev_dbg(&pdev->dev, "Using msi interrupts\n");
	ndev->db_count = 1;
	ndev->msix_vec_count = 1;
	return 0;

err_msi_request:
	pci_disable_msi(pdev);
err_msi_enable:

	/* Try to set up intx irq */
	pci_intx(pdev, 1);

	/* shared line, so other devices may sit on the same irq */
	rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_intx_request;

	dev_dbg(&pdev->dev, "Using intx interrupts\n");
	ndev->db_count = 1;
	ndev->msix_vec_count = 1;
	return 0;

err_intx_request:
	return rc;
}
809a1b36958SXiangliang Yu
ndev_deinit_isr(struct amd_ntb_dev * ndev)810a1b36958SXiangliang Yu static void ndev_deinit_isr(struct amd_ntb_dev *ndev)
811a1b36958SXiangliang Yu {
812a1b36958SXiangliang Yu struct pci_dev *pdev;
813a1b36958SXiangliang Yu void __iomem *mmio = ndev->self_mmio;
814a1b36958SXiangliang Yu int i;
815a1b36958SXiangliang Yu
8160f9bfb97SLogan Gunthorpe pdev = ndev->ntb.pdev;
817a1b36958SXiangliang Yu
818a1b36958SXiangliang Yu /* Mask all doorbell interrupts */
819a1b36958SXiangliang Yu ndev->db_mask = ndev->db_valid_mask;
820a1b36958SXiangliang Yu writel(ndev->db_mask, mmio + AMD_DBMASK_OFFSET);
821a1b36958SXiangliang Yu
822a1b36958SXiangliang Yu if (ndev->msix) {
823a1b36958SXiangliang Yu i = ndev->msix_vec_count;
824a1b36958SXiangliang Yu while (i--)
825a1b36958SXiangliang Yu free_irq(ndev->msix[i].vector, &ndev->vec[i]);
826a1b36958SXiangliang Yu pci_disable_msix(pdev);
827a1b36958SXiangliang Yu kfree(ndev->msix);
828a1b36958SXiangliang Yu kfree(ndev->vec);
829a1b36958SXiangliang Yu } else {
830a1b36958SXiangliang Yu free_irq(pdev->irq, ndev);
831a1b36958SXiangliang Yu if (pci_dev_msi_enabled(pdev))
832a1b36958SXiangliang Yu pci_disable_msi(pdev);
833a1b36958SXiangliang Yu else
834a1b36958SXiangliang Yu pci_intx(pdev, 0);
835a1b36958SXiangliang Yu }
836a1b36958SXiangliang Yu }
837a1b36958SXiangliang Yu
/*
 * ndev_debugfs_read() - read handler for the debugfs "info" file.
 *
 * Formats a snapshot of device topology, link state and the raw
 * doorbell/translation/limit registers into a bounded kernel buffer and
 * copies the requested slice to userspace.  Returns the number of bytes
 * copied or a negative errno.
 */
static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
				 size_t count, loff_t *offp)
{
	struct amd_ntb_dev *ndev;
	void __iomem *mmio;
	char *buf;
	size_t buf_size;
	ssize_t ret, off;
	union { u64 v64; u32 v32; u16 v16; } u;

	ndev = filp->private_data;
	mmio = ndev->self_mmio;

	/* cap the scratch buffer at 2 KiB regardless of request size */
	buf_size = min(count, 0x800ul);

	buf = kmalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	off = 0;

	off += scnprintf(buf + off, buf_size - off,
			 "NTB Device Information:\n");

	off += scnprintf(buf + off, buf_size - off,
			 "Connection Topology -\t%s\n",
			 ntb_topo_string(ndev->ntb.topo));

	off += scnprintf(buf + off, buf_size - off,
			 "LNK STA -\t\t%#06x\n", ndev->lnk_sta);

	if (!amd_link_is_up(ndev)) {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tDown\n");
	} else {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tUp\n");
		off += scnprintf(buf + off, buf_size - off,
				 "Link Speed -\t\tPCI-E Gen %u\n",
				 NTB_LNK_STA_SPEED(ndev->lnk_sta));
		off += scnprintf(buf + off, buf_size - off,
				 "Link Width -\t\tx%u\n",
				 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
	}

	off += scnprintf(buf + off, buf_size - off,
			 "Memory Window Count -\t%u\n", ndev->mw_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Scratchpad Count -\t%u\n", ndev->spad_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Count -\t%u\n", ndev->db_count);
	off += scnprintf(buf + off, buf_size - off,
			 "MSIX Vector Count -\t%u\n", ndev->msix_vec_count);

	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);

	u.v32 = readl(ndev->self_mmio + AMD_DBMASK_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask -\t\t\t%#06x\n", u.v32);

	u.v32 = readl(mmio + AMD_DBSTAT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Bell -\t\t\t%#06x\n", u.v32);

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Incoming XLAT:\n");

	u.v64 = read64(mmio + AMD_BAR1XLAT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "XLAT1 -\t\t%#018llx\n", u.v64);

	u.v64 = read64(ndev->self_mmio + AMD_BAR23XLAT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "XLAT23 -\t\t%#018llx\n", u.v64);

	u.v64 = read64(ndev->self_mmio + AMD_BAR45XLAT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "XLAT45 -\t\t%#018llx\n", u.v64);

	u.v32 = readl(mmio + AMD_BAR1LMT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "LMT1 -\t\t\t%#06x\n", u.v32);

	u.v64 = read64(ndev->self_mmio + AMD_BAR23LMT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "LMT23 -\t\t\t%#018llx\n", u.v64);

	u.v64 = read64(ndev->self_mmio + AMD_BAR45LMT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "LMT45 -\t\t\t%#018llx\n", u.v64);

	ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
	kfree(buf);
	return ret;
}
934a1b36958SXiangliang Yu
ndev_init_debugfs(struct amd_ntb_dev * ndev)935a1b36958SXiangliang Yu static void ndev_init_debugfs(struct amd_ntb_dev *ndev)
936a1b36958SXiangliang Yu {
937a1b36958SXiangliang Yu if (!debugfs_dir) {
938a1b36958SXiangliang Yu ndev->debugfs_dir = NULL;
939a1b36958SXiangliang Yu ndev->debugfs_info = NULL;
940a1b36958SXiangliang Yu } else {
941a1b36958SXiangliang Yu ndev->debugfs_dir =
9420f9bfb97SLogan Gunthorpe debugfs_create_dir(pci_name(ndev->ntb.pdev),
9430f9bfb97SLogan Gunthorpe debugfs_dir);
944a1b36958SXiangliang Yu ndev->debugfs_info =
945a1b36958SXiangliang Yu debugfs_create_file("info", S_IRUSR,
946a1b36958SXiangliang Yu ndev->debugfs_dir, ndev,
947a1b36958SXiangliang Yu &amd_ntb_debugfs_info);
948a1b36958SXiangliang Yu }
949a1b36958SXiangliang Yu }
950a1b36958SXiangliang Yu
/* Remove the per-device debugfs tree; safe on a NULL debugfs_dir. */
static void ndev_deinit_debugfs(struct amd_ntb_dev *ndev)
{
	debugfs_remove_recursive(ndev->debugfs_dir);
}
955a1b36958SXiangliang Yu
ndev_init_struct(struct amd_ntb_dev * ndev,struct pci_dev * pdev)956a1b36958SXiangliang Yu static inline void ndev_init_struct(struct amd_ntb_dev *ndev,
957a1b36958SXiangliang Yu struct pci_dev *pdev)
958a1b36958SXiangliang Yu {
959a1b36958SXiangliang Yu ndev->ntb.pdev = pdev;
960a1b36958SXiangliang Yu ndev->ntb.topo = NTB_TOPO_NONE;
961a1b36958SXiangliang Yu ndev->ntb.ops = &amd_ntb_ops;
962a1b36958SXiangliang Yu ndev->int_mask = AMD_EVENT_INTMASK;
963a1b36958SXiangliang Yu spin_lock_init(&ndev->db_mask_lock);
964a1b36958SXiangliang Yu }
965a1b36958SXiangliang Yu
/*
 * amd_poll_link() - sample the peer's readiness and refresh link status.
 *
 * Reads the READY bit from the peer's SIDEINFO register, caches it in
 * ndev->cntl_sta and refreshes the cached PCIe link status.  Returns
 * non-zero when the peer side reports ready.
 */
static int amd_poll_link(struct amd_ntb_dev *ndev)
{
	void __iomem *mmio = ndev->peer_mmio;
	u32 reg;

	reg = readl(mmio + AMD_SIDEINFO_OFFSET);
	reg &= AMD_SIDE_READY;

	dev_dbg(&ndev->ntb.pdev->dev, "%s: reg_val = 0x%x.\n", __func__, reg);

	ndev->cntl_sta = reg;

	/* also update the cached PCIe speed/width for amd_link_is_up() */
	amd_ntb_get_link_status(ndev);

	return ndev->cntl_sta;
}
982a1b36958SXiangliang Yu
/*
 * amd_link_hb() - heartbeat work item that polls the peer link.
 *
 * Reports a link event when the peer side is ready, and re-arms itself
 * for as long as the link remains down.
 */
static void amd_link_hb(struct work_struct *work)
{
	struct amd_ntb_dev *ndev = hb_ndev(work);

	if (amd_poll_link(ndev))
		ntb_link_event(&ndev->ntb);

	/* keep polling until the link comes up */
	if (!amd_link_is_up(ndev))
		schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT);
}
993a1b36958SXiangliang Yu
/* Set up interrupts with the hardware's doorbell and MSI-X vector counts. */
static int amd_init_isr(struct amd_ntb_dev *ndev)
{
	return ndev_init_isr(ndev, AMD_DB_CNT, AMD_MSIX_VECTOR_CNT);
}
998a1b36958SXiangliang Yu
/*
 * Set the READY bit in the SIDEINFO register of either the local
 * (@peer == false) or the remote (@peer == true) side, if not already set.
 */
static void amd_set_side_info_reg(struct amd_ntb_dev *ndev, bool peer)
{
	void __iomem *mmio = peer ? ndev->peer_mmio : ndev->self_mmio;
	unsigned int reg;

	reg = readl(mmio + AMD_SIDEINFO_OFFSET);
	if (reg & AMD_SIDE_READY)
		return;

	writel(reg | AMD_SIDE_READY, mmio + AMD_SIDEINFO_OFFSET);
}
1015ae5f4bdcSArindam Nath
/*
 * Clear the READY bit in the SIDEINFO register of either the local
 * (@peer == false) or the remote (@peer == true) side, if currently set.
 */
static void amd_clear_side_info_reg(struct amd_ntb_dev *ndev, bool peer)
{
	void __iomem *mmio = peer ? ndev->peer_mmio : ndev->self_mmio;
	unsigned int reg;

	reg = readl(mmio + AMD_SIDEINFO_OFFSET);
	if (!(reg & AMD_SIDE_READY))
		return;

	writel(reg & ~AMD_SIDE_READY, mmio + AMD_SIDEINFO_OFFSET);
	/* read back to flush the posted write */
	readl(mmio + AMD_SIDEINFO_OFFSET);
}
1033ae5f4bdcSArindam Nath
amd_init_side_info(struct amd_ntb_dev * ndev)1034ae5f4bdcSArindam Nath static void amd_init_side_info(struct amd_ntb_dev *ndev)
1035ae5f4bdcSArindam Nath {
1036ae5f4bdcSArindam Nath void __iomem *mmio = ndev->self_mmio;
1037ae5f4bdcSArindam Nath u32 ntb_ctl;
1038ae5f4bdcSArindam Nath
1039ae5f4bdcSArindam Nath amd_set_side_info_reg(ndev, false);
104092abf4cbSArindam Nath
104192abf4cbSArindam Nath ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
104292abf4cbSArindam Nath ntb_ctl |= (PMM_REG_CTL | SMM_REG_CTL);
104392abf4cbSArindam Nath writel(ntb_ctl, mmio + AMD_CNTL_OFFSET);
1044a1b36958SXiangliang Yu }
1045a1b36958SXiangliang Yu
amd_deinit_side_info(struct amd_ntb_dev * ndev)1046a1b36958SXiangliang Yu static void amd_deinit_side_info(struct amd_ntb_dev *ndev)
1047a1b36958SXiangliang Yu {
1048a1b36958SXiangliang Yu void __iomem *mmio = ndev->self_mmio;
104992abf4cbSArindam Nath u32 ntb_ctl;
1050a1b36958SXiangliang Yu
1051ae5f4bdcSArindam Nath amd_clear_side_info_reg(ndev, false);
105292abf4cbSArindam Nath
105392abf4cbSArindam Nath ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
105492abf4cbSArindam Nath ntb_ctl &= ~(PMM_REG_CTL | SMM_REG_CTL);
105592abf4cbSArindam Nath writel(ntb_ctl, mmio + AMD_CNTL_OFFSET);
1056a1b36958SXiangliang Yu }
1057a1b36958SXiangliang Yu
/*
 * amd_init_ntb() - initialize counts and per-topology resources.
 *
 * Splits the scratchpad range between the two sides, starts the link
 * heartbeat and masks all event interrupts.  Returns -EINVAL for any
 * topology other than PRI/SEC (B2B is not supported by this hardware).
 */
static int amd_init_ntb(struct amd_ntb_dev *ndev)
{
	void __iomem *mmio = ndev->self_mmio;

	ndev->mw_count = ndev->dev_data->mw_count;
	ndev->spad_count = AMD_SPADS_CNT;
	ndev->db_count = AMD_DB_CNT;

	switch (ndev->ntb.topo) {
	case NTB_TOPO_PRI:
	case NTB_TOPO_SEC:
		/* each side gets half the scratchpads */
		ndev->spad_count >>= 1;
		if (ndev->ntb.topo == NTB_TOPO_PRI) {
			ndev->self_spad = 0;
			ndev->peer_spad = 0x20;
		} else {
			ndev->self_spad = 0x20;
			ndev->peer_spad = 0;
		}

		INIT_DELAYED_WORK(&ndev->hb_timer, amd_link_hb);
		schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT);

		break;
	default:
		dev_err(&ndev->ntb.pdev->dev,
			"AMD NTB does not support B2B mode.\n");
		return -EINVAL;
	}

	/* Mask event interrupts */
	writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);

	return 0;
}
1093a1b36958SXiangliang Yu
amd_get_topo(struct amd_ntb_dev * ndev)1094a1b36958SXiangliang Yu static enum ntb_topo amd_get_topo(struct amd_ntb_dev *ndev)
1095a1b36958SXiangliang Yu {
1096a1b36958SXiangliang Yu void __iomem *mmio = ndev->self_mmio;
1097a1b36958SXiangliang Yu u32 info;
1098a1b36958SXiangliang Yu
1099a1b36958SXiangliang Yu info = readl(mmio + AMD_SIDEINFO_OFFSET);
1100a1b36958SXiangliang Yu if (info & AMD_SIDE_MASK)
1101a1b36958SXiangliang Yu return NTB_TOPO_SEC;
1102a1b36958SXiangliang Yu else
1103a1b36958SXiangliang Yu return NTB_TOPO_PRI;
1104a1b36958SXiangliang Yu }
1105a1b36958SXiangliang Yu
/*
 * amd_init_dev() - device-level initialization.
 *
 * Determines the topology, initializes NTB resources and interrupts,
 * reserves the highest doorbell bit for the "driver unloading" signal,
 * and unmasks the link-up/link-down event interrupts.  Returns 0 on
 * success or a negative errno.
 */
static int amd_init_dev(struct amd_ntb_dev *ndev)
{
	void __iomem *mmio = ndev->self_mmio;
	struct pci_dev *pdev;
	int rc = 0;

	pdev = ndev->ntb.pdev;

	ndev->ntb.topo = amd_get_topo(ndev);
	dev_dbg(&pdev->dev, "AMD NTB topo is %s\n",
		ntb_topo_string(ndev->ntb.topo));

	rc = amd_init_ntb(ndev);
	if (rc)
		return rc;

	rc = amd_init_isr(ndev);
	if (rc) {
		dev_err(&pdev->dev, "fail to init isr.\n");
		return rc;
	}

	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
	/*
	 * We reserve the highest order bit of the DB register which will
	 * be used to notify peer when the driver on this side is being
	 * un-loaded.
	 */
	ndev->db_last_bit =
			find_last_bit((unsigned long *)&ndev->db_valid_mask,
				      hweight64(ndev->db_valid_mask));
	/* unmask only the reserved bit so the peer's unload signal gets in */
	writew((u16)~BIT(ndev->db_last_bit), mmio + AMD_DBMASK_OFFSET);
	/*
	 * Since now there is one less bit to account for, the DB count
	 * and DB mask should be adjusted accordingly.
	 */
	ndev->db_count -= 1;
	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;

	/* Enable Link-Up and Link-Down event interrupts */
	ndev->int_mask &= ~(AMD_LINK_UP_EVENT | AMD_LINK_DOWN_EVENT);
	writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);

	return 0;
}
1151a1b36958SXiangliang Yu
/* Stop the heartbeat (before freeing irqs it might race with) and tear
 * down interrupt handling.
 */
static void amd_deinit_dev(struct amd_ntb_dev *ndev)
{
	cancel_delayed_work_sync(&ndev->hb_timer);

	ndev_deinit_isr(ndev);
}
1158a1b36958SXiangliang Yu
/*
 * amd_ntb_init_pci() - enable the PCI device and map BAR 0.
 *
 * Enables the device, claims its regions, sets a 64-bit DMA mask with a
 * 32-bit fallback, and maps the register BAR.  The peer's registers live
 * at a fixed offset within the same BAR.  On failure all acquired
 * resources are released via the goto ladder.  Returns 0 or -errno.
 */
static int amd_ntb_init_pci(struct amd_ntb_dev *ndev,
			    struct pci_dev *pdev)
{
	int rc;

	pci_set_drvdata(pdev, ndev);

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_pci_enable;

	rc = pci_request_regions(pdev, NTB_NAME);
	if (rc)
		goto err_pci_regions;

	pci_set_master(pdev);

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		/* fall back to 32-bit DMA addressing */
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(&pdev->dev, "Cannot DMA highmem\n");
	}

	ndev->self_mmio = pci_iomap(pdev, 0, 0);
	if (!ndev->self_mmio) {
		rc = -EIO;
		goto err_dma_mask;
	}
	/* peer registers are a fixed offset inside the same BAR */
	ndev->peer_mmio = ndev->self_mmio + AMD_PEER_OFFSET;

	return 0;

err_dma_mask:
	pci_release_regions(pdev);
err_pci_regions:
	pci_disable_device(pdev);
err_pci_enable:
	pci_set_drvdata(pdev, NULL);
	return rc;
}
1201a1b36958SXiangliang Yu
/* Undo amd_ntb_init_pci(): unmap BAR 0 and release the PCI device. */
static void amd_ntb_deinit_pci(struct amd_ntb_dev *ndev)
{
	struct pci_dev *pdev = ndev->ntb.pdev;

	pci_iounmap(pdev, ndev->self_mmio);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
1212a1b36958SXiangliang Yu
/*
 * amd_ntb_pci_probe() - PCI probe entry point.
 *
 * Allocates the device structure on the PCI device's NUMA node, brings
 * up PCI resources, hardware state, side info and debugfs, then
 * registers with the NTB core.  Unwinds in reverse order on failure.
 */
static int amd_ntb_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	struct amd_ntb_dev *ndev;
	int rc, node;

	node = dev_to_node(&pdev->dev);

	ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
	if (!ndev) {
		rc = -ENOMEM;
		goto err_ndev;
	}

	/* per-chip parameters stashed in the id table entry */
	ndev->dev_data = (struct ntb_dev_data *)id->driver_data;

	ndev_init_struct(ndev, pdev);

	rc = amd_ntb_init_pci(ndev, pdev);
	if (rc)
		goto err_init_pci;

	rc = amd_init_dev(ndev);
	if (rc)
		goto err_init_dev;

	/* write side info */
	amd_init_side_info(ndev);

	amd_poll_link(ndev);

	ndev_init_debugfs(ndev);

	rc = ntb_register_device(&ndev->ntb);
	if (rc)
		goto err_register;

	dev_info(&pdev->dev, "NTB device registered.\n");

	return 0;

err_register:
	ndev_deinit_debugfs(ndev);
	amd_deinit_dev(ndev);
err_init_dev:
	amd_ntb_deinit_pci(ndev);
err_init_pci:
	kfree(ndev);
err_ndev:
	return rc;
}
1264a1b36958SXiangliang Yu
/**
 * amd_ntb_pci_remove() - PCI remove callback
 * @pdev: the PCI device being removed
 *
 * Notifies the peer (clear side info, then ring its doorbell), then tears
 * down everything amd_ntb_pci_probe() set up, in reverse order.
 */
static void amd_ntb_pci_remove(struct pci_dev *pdev)
{
	struct amd_ntb_dev *ndev = pci_get_drvdata(pdev);

	/*
	 * Clear the READY bit in SIDEINFO register before sending DB event
	 * to the peer. This will make sure that when the peer handles the
	 * DB event, it correctly reads this bit as being 0.
	 */
	amd_deinit_side_info(ndev);
	ntb_peer_db_set(&ndev->ntb, BIT_ULL(ndev->db_last_bit));
	ntb_unregister_device(&ndev->ntb);
	ndev_deinit_debugfs(ndev);
	amd_deinit_dev(ndev);
	amd_ntb_deinit_pci(ndev);
	kfree(ndev);
}
1282a1b36958SXiangliang Yu
/**
 * amd_ntb_pci_shutdown() - PCI shutdown callback
 * @pdev: the PCI device being shut down
 *
 * Same teardown as amd_ntb_pci_remove(), but additionally raises a link
 * event first so local clients see the link go down before the device
 * disappears.
 */
static void amd_ntb_pci_shutdown(struct pci_dev *pdev)
{
	struct amd_ntb_dev *ndev = pci_get_drvdata(pdev);

	/* Send link down notification */
	ntb_link_event(&ndev->ntb);

	/*
	 * Clear the READY side info before ringing the peer doorbell so the
	 * peer's DB handler observes the bit as 0 (mirrors the remove path).
	 */
	amd_deinit_side_info(ndev);
	ntb_peer_db_set(&ndev->ntb, BIT_ULL(ndev->db_last_bit));
	ntb_unregister_device(&ndev->ntb);
	ndev_deinit_debugfs(ndev);
	amd_deinit_dev(ndev);
	amd_ntb_deinit_pci(ndev);
	kfree(ndev);
}
1298b350f0a3SArindam Nath
1299a1b36958SXiangliang Yu static const struct file_operations amd_ntb_debugfs_info = {
1300a1b36958SXiangliang Yu .owner = THIS_MODULE,
1301a1b36958SXiangliang Yu .open = simple_open,
1302a1b36958SXiangliang Yu .read = ndev_debugfs_read,
1303a1b36958SXiangliang Yu };
1304a1b36958SXiangliang Yu
1305a1472e73SSanjay R Mehta static const struct ntb_dev_data dev_data[] = {
1306a1472e73SSanjay R Mehta { /* for device 145b */
1307a1472e73SSanjay R Mehta .mw_count = 3,
1308a1472e73SSanjay R Mehta .mw_idx = 1,
1309a1472e73SSanjay R Mehta },
1310a1472e73SSanjay R Mehta { /* for device 148b */
1311a1472e73SSanjay R Mehta .mw_count = 2,
1312a1472e73SSanjay R Mehta .mw_idx = 2,
1313a1472e73SSanjay R Mehta },
1314a1472e73SSanjay R Mehta };
1315a1472e73SSanjay R Mehta
1316a1b36958SXiangliang Yu static const struct pci_device_id amd_ntb_pci_tbl[] = {
1317a1472e73SSanjay R Mehta { PCI_VDEVICE(AMD, 0x145b), (kernel_ulong_t)&dev_data[0] },
1318a1472e73SSanjay R Mehta { PCI_VDEVICE(AMD, 0x148b), (kernel_ulong_t)&dev_data[1] },
13190d5924ecSSanjay R Mehta { PCI_VDEVICE(AMD, 0x14c0), (kernel_ulong_t)&dev_data[1] },
13200d5924ecSSanjay R Mehta { PCI_VDEVICE(AMD, 0x14c3), (kernel_ulong_t)&dev_data[1] },
13219b5b99a8SJiasen Lin { PCI_VDEVICE(HYGON, 0x145b), (kernel_ulong_t)&dev_data[0] },
1322a1472e73SSanjay R Mehta { 0, }
1323a1b36958SXiangliang Yu };
1324a1b36958SXiangliang Yu MODULE_DEVICE_TABLE(pci, amd_ntb_pci_tbl);
1325a1b36958SXiangliang Yu
1326a1b36958SXiangliang Yu static struct pci_driver amd_ntb_pci_driver = {
1327a1b36958SXiangliang Yu .name = KBUILD_MODNAME,
1328a1b36958SXiangliang Yu .id_table = amd_ntb_pci_tbl,
1329a1b36958SXiangliang Yu .probe = amd_ntb_pci_probe,
1330a1b36958SXiangliang Yu .remove = amd_ntb_pci_remove,
1331b350f0a3SArindam Nath .shutdown = amd_ntb_pci_shutdown,
1332a1b36958SXiangliang Yu };
1333a1b36958SXiangliang Yu
amd_ntb_pci_driver_init(void)1334a1b36958SXiangliang Yu static int __init amd_ntb_pci_driver_init(void)
1335a1b36958SXiangliang Yu {
1336*98af0a33SYuan Can int ret;
1337a1b36958SXiangliang Yu pr_info("%s %s\n", NTB_DESC, NTB_VER);
1338a1b36958SXiangliang Yu
1339a1b36958SXiangliang Yu if (debugfs_initialized())
1340a1b36958SXiangliang Yu debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
1341a1b36958SXiangliang Yu
1342*98af0a33SYuan Can ret = pci_register_driver(&amd_ntb_pci_driver);
1343*98af0a33SYuan Can if (ret)
1344*98af0a33SYuan Can debugfs_remove_recursive(debugfs_dir);
1345*98af0a33SYuan Can
1346*98af0a33SYuan Can return ret;
1347a1b36958SXiangliang Yu }
1348a1b36958SXiangliang Yu module_init(amd_ntb_pci_driver_init);
1349a1b36958SXiangliang Yu
/* Module exit: unregister the driver, then drop the debugfs directory. */
static void __exit amd_ntb_pci_driver_exit(void)
{
	pci_unregister_driver(&amd_ntb_pci_driver);
	debugfs_remove_recursive(debugfs_dir);
}
module_exit(amd_ntb_pci_driver_exit);
1356