1e9f08b65SZhou Wang // SPDX-License-Identifier: GPL-2.0-only 2*fd5273faSJie Hai /* Copyright(c) 2019-2022 HiSilicon Limited. */ 3*fd5273faSJie Hai 4e9f08b65SZhou Wang #include <linux/bitfield.h> 5e9f08b65SZhou Wang #include <linux/dmaengine.h> 6e9f08b65SZhou Wang #include <linux/init.h> 7e9f08b65SZhou Wang #include <linux/iopoll.h> 8e9f08b65SZhou Wang #include <linux/module.h> 9e9f08b65SZhou Wang #include <linux/pci.h> 10e9f08b65SZhou Wang #include <linux/spinlock.h> 11e9f08b65SZhou Wang #include "virt-dma.h" 12e9f08b65SZhou Wang 13*fd5273faSJie Hai /* HiSilicon DMA register common field define */ 14*fd5273faSJie Hai #define HISI_DMA_Q_SQ_BASE_L 0x0 15*fd5273faSJie Hai #define HISI_DMA_Q_SQ_BASE_H 0x4 16*fd5273faSJie Hai #define HISI_DMA_Q_SQ_DEPTH 0x8 17*fd5273faSJie Hai #define HISI_DMA_Q_SQ_TAIL_PTR 0xc 18*fd5273faSJie Hai #define HISI_DMA_Q_CQ_BASE_L 0x10 19*fd5273faSJie Hai #define HISI_DMA_Q_CQ_BASE_H 0x14 20*fd5273faSJie Hai #define HISI_DMA_Q_CQ_DEPTH 0x18 21*fd5273faSJie Hai #define HISI_DMA_Q_CQ_HEAD_PTR 0x1c 22*fd5273faSJie Hai #define HISI_DMA_Q_CTRL0 0x20 23*fd5273faSJie Hai #define HISI_DMA_Q_CTRL0_QUEUE_EN BIT(0) 24*fd5273faSJie Hai #define HISI_DMA_Q_CTRL0_QUEUE_PAUSE BIT(4) 25*fd5273faSJie Hai #define HISI_DMA_Q_CTRL1 0x24 26*fd5273faSJie Hai #define HISI_DMA_Q_CTRL1_QUEUE_RESET BIT(0) 27e9f08b65SZhou Wang #define HISI_DMA_Q_FSM_STS 0x30 28*fd5273faSJie Hai #define HISI_DMA_Q_FSM_STS_MASK GENMASK(3, 0) 29*fd5273faSJie Hai #define HISI_DMA_Q_ERR_INT_NUM0 0x84 30*fd5273faSJie Hai #define HISI_DMA_Q_ERR_INT_NUM1 0x88 31*fd5273faSJie Hai #define HISI_DMA_Q_ERR_INT_NUM2 0x8c 32e9f08b65SZhou Wang 33*fd5273faSJie Hai /* HiSilicon IP08 DMA register and field define */ 34*fd5273faSJie Hai #define HISI_DMA_HIP08_MODE 0x217C 35*fd5273faSJie Hai #define HISI_DMA_HIP08_Q_BASE 0x0 36*fd5273faSJie Hai #define HISI_DMA_HIP08_Q_CTRL0_ERR_ABORT_EN BIT(2) 37*fd5273faSJie Hai #define HISI_DMA_HIP08_Q_INT_STS 0x40 38*fd5273faSJie Hai #define HISI_DMA_HIP08_Q_INT_MSK 
0x44 39*fd5273faSJie Hai #define HISI_DMA_HIP08_Q_INT_STS_MASK GENMASK(14, 0) 40*fd5273faSJie Hai #define HISI_DMA_HIP08_Q_ERR_INT_NUM3 0x90 41*fd5273faSJie Hai #define HISI_DMA_HIP08_Q_ERR_INT_NUM4 0x94 42*fd5273faSJie Hai #define HISI_DMA_HIP08_Q_ERR_INT_NUM5 0x98 43*fd5273faSJie Hai #define HISI_DMA_HIP08_Q_ERR_INT_NUM6 0x48 44*fd5273faSJie Hai #define HISI_DMA_HIP08_Q_CTRL0_SQCQ_DRCT BIT(24) 45*fd5273faSJie Hai 46*fd5273faSJie Hai /* HiSilicon IP09 DMA register and field define */ 47*fd5273faSJie Hai #define HISI_DMA_HIP09_DMA_FLR_DISABLE 0xA00 48*fd5273faSJie Hai #define HISI_DMA_HIP09_DMA_FLR_DISABLE_B BIT(0) 49*fd5273faSJie Hai #define HISI_DMA_HIP09_Q_BASE 0x2000 50*fd5273faSJie Hai #define HISI_DMA_HIP09_Q_CTRL0_ERR_ABORT_EN GENMASK(31, 28) 51*fd5273faSJie Hai #define HISI_DMA_HIP09_Q_CTRL0_SQ_DRCT BIT(26) 52*fd5273faSJie Hai #define HISI_DMA_HIP09_Q_CTRL0_CQ_DRCT BIT(27) 53*fd5273faSJie Hai #define HISI_DMA_HIP09_Q_CTRL1_VA_ENABLE BIT(2) 54*fd5273faSJie Hai #define HISI_DMA_HIP09_Q_INT_STS 0x40 55*fd5273faSJie Hai #define HISI_DMA_HIP09_Q_INT_MSK 0x44 56*fd5273faSJie Hai #define HISI_DMA_HIP09_Q_INT_STS_MASK 0x1 57*fd5273faSJie Hai #define HISI_DMA_HIP09_Q_ERR_INT_STS 0x48 58*fd5273faSJie Hai #define HISI_DMA_HIP09_Q_ERR_INT_MSK 0x4C 59*fd5273faSJie Hai #define HISI_DMA_HIP09_Q_ERR_INT_STS_MASK GENMASK(18, 1) 60*fd5273faSJie Hai #define HISI_DMA_HIP09_PORT_CFG_REG(port_id) (0x800 + \ 61*fd5273faSJie Hai (port_id) * 0x20) 62*fd5273faSJie Hai #define HISI_DMA_HIP09_PORT_CFG_LINK_DOWN_MASK_B BIT(16) 63*fd5273faSJie Hai 64*fd5273faSJie Hai #define HISI_DMA_HIP09_MAX_PORT_NUM 16 65*fd5273faSJie Hai 66*fd5273faSJie Hai #define HISI_DMA_HIP08_MSI_NUM 32 67*fd5273faSJie Hai #define HISI_DMA_HIP08_CHAN_NUM 30 68*fd5273faSJie Hai #define HISI_DMA_HIP09_MSI_NUM 4 69*fd5273faSJie Hai #define HISI_DMA_HIP09_CHAN_NUM 4 70*fd5273faSJie Hai #define HISI_DMA_REVISION_HIP08B 0x21 71*fd5273faSJie Hai #define HISI_DMA_REVISION_HIP09A 0x30 72*fd5273faSJie Hai 73*fd5273faSJie 
Hai #define HISI_DMA_Q_OFFSET 0x100 74e9f08b65SZhou Wang #define HISI_DMA_Q_DEPTH_VAL 1024 75e9f08b65SZhou Wang 76e9f08b65SZhou Wang #define PCI_BAR_2 2 77e9f08b65SZhou Wang 784aa69cf7SJie Hai #define HISI_DMA_POLL_Q_STS_DELAY_US 10 794aa69cf7SJie Hai #define HISI_DMA_POLL_Q_STS_TIME_OUT_US 1000 804aa69cf7SJie Hai 81*fd5273faSJie Hai /* 82*fd5273faSJie Hai * The HIP08B(HiSilicon IP08) and HIP09A(HiSilicon IP09) are DMA iEPs, they 83*fd5273faSJie Hai * have the same pci device id but different pci revision. 84*fd5273faSJie Hai * Unfortunately, they have different register layouts, so two layout 85*fd5273faSJie Hai * enumerations are defined. 86*fd5273faSJie Hai */ 87*fd5273faSJie Hai enum hisi_dma_reg_layout { 88*fd5273faSJie Hai HISI_DMA_REG_LAYOUT_INVALID = 0, 89*fd5273faSJie Hai HISI_DMA_REG_LAYOUT_HIP08, 90*fd5273faSJie Hai HISI_DMA_REG_LAYOUT_HIP09 91*fd5273faSJie Hai }; 92*fd5273faSJie Hai 93e9f08b65SZhou Wang enum hisi_dma_mode { 94e9f08b65SZhou Wang EP = 0, 95e9f08b65SZhou Wang RC, 96e9f08b65SZhou Wang }; 97e9f08b65SZhou Wang 98e9f08b65SZhou Wang enum hisi_dma_chan_status { 99e9f08b65SZhou Wang DISABLE = -1, 100e9f08b65SZhou Wang IDLE = 0, 101e9f08b65SZhou Wang RUN, 102e9f08b65SZhou Wang CPL, 103e9f08b65SZhou Wang PAUSE, 104e9f08b65SZhou Wang HALT, 105e9f08b65SZhou Wang ABORT, 106e9f08b65SZhou Wang WAIT, 107e9f08b65SZhou Wang BUFFCLR, 108e9f08b65SZhou Wang }; 109e9f08b65SZhou Wang 110e9f08b65SZhou Wang struct hisi_dma_sqe { 111e9f08b65SZhou Wang __le32 dw0; 112e9f08b65SZhou Wang #define OPCODE_MASK GENMASK(3, 0) 113e9f08b65SZhou Wang #define OPCODE_SMALL_PACKAGE 0x1 114e9f08b65SZhou Wang #define OPCODE_M2M 0x4 115e9f08b65SZhou Wang #define LOCAL_IRQ_EN BIT(8) 116e9f08b65SZhou Wang #define ATTR_SRC_MASK GENMASK(14, 12) 117e9f08b65SZhou Wang __le32 dw1; 118e9f08b65SZhou Wang __le32 dw2; 119e9f08b65SZhou Wang #define ATTR_DST_MASK GENMASK(26, 24) 120e9f08b65SZhou Wang __le32 length; 121e9f08b65SZhou Wang __le64 src_addr; 122e9f08b65SZhou Wang __le64 dst_addr; 
123e9f08b65SZhou Wang }; 124e9f08b65SZhou Wang 125e9f08b65SZhou Wang struct hisi_dma_cqe { 126e9f08b65SZhou Wang __le32 rsv0; 127e9f08b65SZhou Wang __le32 rsv1; 128e9f08b65SZhou Wang __le16 sq_head; 129e9f08b65SZhou Wang __le16 rsv2; 130e9f08b65SZhou Wang __le16 rsv3; 131e9f08b65SZhou Wang __le16 w0; 132e9f08b65SZhou Wang #define STATUS_MASK GENMASK(15, 1) 133e9f08b65SZhou Wang #define STATUS_SUCC 0x0 134e9f08b65SZhou Wang #define VALID_BIT BIT(0) 135e9f08b65SZhou Wang }; 136e9f08b65SZhou Wang 137e9f08b65SZhou Wang struct hisi_dma_desc { 138e9f08b65SZhou Wang struct virt_dma_desc vd; 139e9f08b65SZhou Wang struct hisi_dma_sqe sqe; 140e9f08b65SZhou Wang }; 141e9f08b65SZhou Wang 142e9f08b65SZhou Wang struct hisi_dma_chan { 143e9f08b65SZhou Wang struct virt_dma_chan vc; 144e9f08b65SZhou Wang struct hisi_dma_dev *hdma_dev; 145e9f08b65SZhou Wang struct hisi_dma_sqe *sq; 146e9f08b65SZhou Wang struct hisi_dma_cqe *cq; 147e9f08b65SZhou Wang dma_addr_t sq_dma; 148e9f08b65SZhou Wang dma_addr_t cq_dma; 149e9f08b65SZhou Wang u32 sq_tail; 150e9f08b65SZhou Wang u32 cq_head; 151e9f08b65SZhou Wang u32 qp_num; 152e9f08b65SZhou Wang enum hisi_dma_chan_status status; 153e9f08b65SZhou Wang struct hisi_dma_desc *desc; 154e9f08b65SZhou Wang }; 155e9f08b65SZhou Wang 156e9f08b65SZhou Wang struct hisi_dma_dev { 157e9f08b65SZhou Wang struct pci_dev *pdev; 158e9f08b65SZhou Wang void __iomem *base; 159e9f08b65SZhou Wang struct dma_device dma_dev; 160e9f08b65SZhou Wang u32 chan_num; 161e9f08b65SZhou Wang u32 chan_depth; 162*fd5273faSJie Hai enum hisi_dma_reg_layout reg_layout; 163*fd5273faSJie Hai void __iomem *queue_base; /* queue region start of register */ 164e9f08b65SZhou Wang struct hisi_dma_chan chan[]; 165e9f08b65SZhou Wang }; 166e9f08b65SZhou Wang 167*fd5273faSJie Hai static enum hisi_dma_reg_layout hisi_dma_get_reg_layout(struct pci_dev *pdev) 168*fd5273faSJie Hai { 169*fd5273faSJie Hai if (pdev->revision == HISI_DMA_REVISION_HIP08B) 170*fd5273faSJie Hai return 
HISI_DMA_REG_LAYOUT_HIP08; 171*fd5273faSJie Hai else if (pdev->revision >= HISI_DMA_REVISION_HIP09A) 172*fd5273faSJie Hai return HISI_DMA_REG_LAYOUT_HIP09; 173*fd5273faSJie Hai 174*fd5273faSJie Hai return HISI_DMA_REG_LAYOUT_INVALID; 175*fd5273faSJie Hai } 176*fd5273faSJie Hai 177*fd5273faSJie Hai static u32 hisi_dma_get_chan_num(struct pci_dev *pdev) 178*fd5273faSJie Hai { 179*fd5273faSJie Hai if (pdev->revision == HISI_DMA_REVISION_HIP08B) 180*fd5273faSJie Hai return HISI_DMA_HIP08_CHAN_NUM; 181*fd5273faSJie Hai 182*fd5273faSJie Hai return HISI_DMA_HIP09_CHAN_NUM; 183*fd5273faSJie Hai } 184*fd5273faSJie Hai 185*fd5273faSJie Hai static u32 hisi_dma_get_msi_num(struct pci_dev *pdev) 186*fd5273faSJie Hai { 187*fd5273faSJie Hai if (pdev->revision == HISI_DMA_REVISION_HIP08B) 188*fd5273faSJie Hai return HISI_DMA_HIP08_MSI_NUM; 189*fd5273faSJie Hai 190*fd5273faSJie Hai return HISI_DMA_HIP09_MSI_NUM; 191*fd5273faSJie Hai } 192*fd5273faSJie Hai 193*fd5273faSJie Hai static u32 hisi_dma_get_queue_base(struct pci_dev *pdev) 194*fd5273faSJie Hai { 195*fd5273faSJie Hai if (pdev->revision == HISI_DMA_REVISION_HIP08B) 196*fd5273faSJie Hai return HISI_DMA_HIP08_Q_BASE; 197*fd5273faSJie Hai 198*fd5273faSJie Hai return HISI_DMA_HIP09_Q_BASE; 199*fd5273faSJie Hai } 200*fd5273faSJie Hai 201e9f08b65SZhou Wang static inline struct hisi_dma_chan *to_hisi_dma_chan(struct dma_chan *c) 202e9f08b65SZhou Wang { 203e9f08b65SZhou Wang return container_of(c, struct hisi_dma_chan, vc.chan); 204e9f08b65SZhou Wang } 205e9f08b65SZhou Wang 206e9f08b65SZhou Wang static inline struct hisi_dma_desc *to_hisi_dma_desc(struct virt_dma_desc *vd) 207e9f08b65SZhou Wang { 208e9f08b65SZhou Wang return container_of(vd, struct hisi_dma_desc, vd); 209e9f08b65SZhou Wang } 210e9f08b65SZhou Wang 211e9f08b65SZhou Wang static inline void hisi_dma_chan_write(void __iomem *base, u32 reg, u32 index, 212e9f08b65SZhou Wang u32 val) 213e9f08b65SZhou Wang { 214*fd5273faSJie Hai writel_relaxed(val, base + reg + index * 
HISI_DMA_Q_OFFSET); 215e9f08b65SZhou Wang } 216e9f08b65SZhou Wang 217e9f08b65SZhou Wang static inline void hisi_dma_update_bit(void __iomem *addr, u32 pos, bool val) 218e9f08b65SZhou Wang { 219e9f08b65SZhou Wang u32 tmp; 220e9f08b65SZhou Wang 221e9f08b65SZhou Wang tmp = readl_relaxed(addr); 222*fd5273faSJie Hai tmp = val ? tmp | pos : tmp & ~pos; 223e9f08b65SZhou Wang writel_relaxed(tmp, addr); 224e9f08b65SZhou Wang } 225e9f08b65SZhou Wang 226e9f08b65SZhou Wang static void hisi_dma_pause_dma(struct hisi_dma_dev *hdma_dev, u32 index, 227e9f08b65SZhou Wang bool pause) 228e9f08b65SZhou Wang { 229*fd5273faSJie Hai void __iomem *addr; 230e9f08b65SZhou Wang 231*fd5273faSJie Hai addr = hdma_dev->queue_base + HISI_DMA_Q_CTRL0 + 232*fd5273faSJie Hai index * HISI_DMA_Q_OFFSET; 233*fd5273faSJie Hai hisi_dma_update_bit(addr, HISI_DMA_Q_CTRL0_QUEUE_PAUSE, pause); 234e9f08b65SZhou Wang } 235e9f08b65SZhou Wang 236e9f08b65SZhou Wang static void hisi_dma_enable_dma(struct hisi_dma_dev *hdma_dev, u32 index, 237e9f08b65SZhou Wang bool enable) 238e9f08b65SZhou Wang { 239*fd5273faSJie Hai void __iomem *addr; 240e9f08b65SZhou Wang 241*fd5273faSJie Hai addr = hdma_dev->queue_base + HISI_DMA_Q_CTRL0 + 242*fd5273faSJie Hai index * HISI_DMA_Q_OFFSET; 243*fd5273faSJie Hai hisi_dma_update_bit(addr, HISI_DMA_Q_CTRL0_QUEUE_EN, enable); 244e9f08b65SZhou Wang } 245e9f08b65SZhou Wang 246e9f08b65SZhou Wang static void hisi_dma_mask_irq(struct hisi_dma_dev *hdma_dev, u32 qp_index) 247e9f08b65SZhou Wang { 248*fd5273faSJie Hai void __iomem *q_base = hdma_dev->queue_base; 249*fd5273faSJie Hai 250*fd5273faSJie Hai if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP08) 251*fd5273faSJie Hai hisi_dma_chan_write(q_base, HISI_DMA_HIP08_Q_INT_MSK, 252*fd5273faSJie Hai qp_index, HISI_DMA_HIP08_Q_INT_STS_MASK); 253*fd5273faSJie Hai else { 254*fd5273faSJie Hai hisi_dma_chan_write(q_base, HISI_DMA_HIP09_Q_INT_MSK, 255*fd5273faSJie Hai qp_index, HISI_DMA_HIP09_Q_INT_STS_MASK); 256*fd5273faSJie Hai 
hisi_dma_chan_write(q_base, HISI_DMA_HIP09_Q_ERR_INT_MSK, 257*fd5273faSJie Hai qp_index, 258*fd5273faSJie Hai HISI_DMA_HIP09_Q_ERR_INT_STS_MASK); 259*fd5273faSJie Hai } 260e9f08b65SZhou Wang } 261e9f08b65SZhou Wang 262e9f08b65SZhou Wang static void hisi_dma_unmask_irq(struct hisi_dma_dev *hdma_dev, u32 qp_index) 263e9f08b65SZhou Wang { 264*fd5273faSJie Hai void __iomem *q_base = hdma_dev->queue_base; 265e9f08b65SZhou Wang 266*fd5273faSJie Hai if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP08) { 267*fd5273faSJie Hai hisi_dma_chan_write(q_base, HISI_DMA_HIP08_Q_INT_STS, 268*fd5273faSJie Hai qp_index, HISI_DMA_HIP08_Q_INT_STS_MASK); 269*fd5273faSJie Hai hisi_dma_chan_write(q_base, HISI_DMA_HIP08_Q_INT_MSK, 270*fd5273faSJie Hai qp_index, 0); 271*fd5273faSJie Hai } else { 272*fd5273faSJie Hai hisi_dma_chan_write(q_base, HISI_DMA_HIP09_Q_INT_STS, 273*fd5273faSJie Hai qp_index, HISI_DMA_HIP09_Q_INT_STS_MASK); 274*fd5273faSJie Hai hisi_dma_chan_write(q_base, HISI_DMA_HIP09_Q_ERR_INT_STS, 275*fd5273faSJie Hai qp_index, 276*fd5273faSJie Hai HISI_DMA_HIP09_Q_ERR_INT_STS_MASK); 277*fd5273faSJie Hai hisi_dma_chan_write(q_base, HISI_DMA_HIP09_Q_INT_MSK, 278*fd5273faSJie Hai qp_index, 0); 279*fd5273faSJie Hai hisi_dma_chan_write(q_base, HISI_DMA_HIP09_Q_ERR_INT_MSK, 280*fd5273faSJie Hai qp_index, 0); 281*fd5273faSJie Hai } 282e9f08b65SZhou Wang } 283e9f08b65SZhou Wang 284e9f08b65SZhou Wang static void hisi_dma_do_reset(struct hisi_dma_dev *hdma_dev, u32 index) 285e9f08b65SZhou Wang { 286*fd5273faSJie Hai void __iomem *addr; 287e9f08b65SZhou Wang 288*fd5273faSJie Hai addr = hdma_dev->queue_base + 289*fd5273faSJie Hai HISI_DMA_Q_CTRL1 + index * HISI_DMA_Q_OFFSET; 290*fd5273faSJie Hai hisi_dma_update_bit(addr, HISI_DMA_Q_CTRL1_QUEUE_RESET, 1); 291e9f08b65SZhou Wang } 292e9f08b65SZhou Wang 293e9f08b65SZhou Wang static void hisi_dma_reset_qp_point(struct hisi_dma_dev *hdma_dev, u32 index) 294e9f08b65SZhou Wang { 295*fd5273faSJie Hai void __iomem *q_base = hdma_dev->queue_base; 
296*fd5273faSJie Hai 297*fd5273faSJie Hai hisi_dma_chan_write(q_base, HISI_DMA_Q_SQ_TAIL_PTR, index, 0); 298*fd5273faSJie Hai hisi_dma_chan_write(q_base, HISI_DMA_Q_CQ_HEAD_PTR, index, 0); 299e9f08b65SZhou Wang } 300e9f08b65SZhou Wang 301e3bdaa04SJie Hai static void hisi_dma_reset_or_disable_hw_chan(struct hisi_dma_chan *chan, 302e3bdaa04SJie Hai bool disable) 303e9f08b65SZhou Wang { 304e9f08b65SZhou Wang struct hisi_dma_dev *hdma_dev = chan->hdma_dev; 305e9f08b65SZhou Wang u32 index = chan->qp_num, tmp; 3064aa69cf7SJie Hai void __iomem *addr; 307e9f08b65SZhou Wang int ret; 308e9f08b65SZhou Wang 309e9f08b65SZhou Wang hisi_dma_pause_dma(hdma_dev, index, true); 310e9f08b65SZhou Wang hisi_dma_enable_dma(hdma_dev, index, false); 311e9f08b65SZhou Wang hisi_dma_mask_irq(hdma_dev, index); 312e9f08b65SZhou Wang 313*fd5273faSJie Hai addr = hdma_dev->queue_base + 314*fd5273faSJie Hai HISI_DMA_Q_FSM_STS + index * HISI_DMA_Q_OFFSET; 3154aa69cf7SJie Hai 3164aa69cf7SJie Hai ret = readl_relaxed_poll_timeout(addr, tmp, 317*fd5273faSJie Hai FIELD_GET(HISI_DMA_Q_FSM_STS_MASK, tmp) != RUN, 3184aa69cf7SJie Hai HISI_DMA_POLL_Q_STS_DELAY_US, HISI_DMA_POLL_Q_STS_TIME_OUT_US); 319e9f08b65SZhou Wang if (ret) { 320e9f08b65SZhou Wang dev_err(&hdma_dev->pdev->dev, "disable channel timeout!\n"); 321e9f08b65SZhou Wang WARN_ON(1); 322e9f08b65SZhou Wang } 323e9f08b65SZhou Wang 324e9f08b65SZhou Wang hisi_dma_do_reset(hdma_dev, index); 325e9f08b65SZhou Wang hisi_dma_reset_qp_point(hdma_dev, index); 326e9f08b65SZhou Wang hisi_dma_pause_dma(hdma_dev, index, false); 327e3bdaa04SJie Hai 328e3bdaa04SJie Hai if (!disable) { 329e9f08b65SZhou Wang hisi_dma_enable_dma(hdma_dev, index, true); 330e9f08b65SZhou Wang hisi_dma_unmask_irq(hdma_dev, index); 331e3bdaa04SJie Hai } 332e9f08b65SZhou Wang 3334aa69cf7SJie Hai ret = readl_relaxed_poll_timeout(addr, tmp, 334*fd5273faSJie Hai FIELD_GET(HISI_DMA_Q_FSM_STS_MASK, tmp) == IDLE, 3354aa69cf7SJie Hai HISI_DMA_POLL_Q_STS_DELAY_US, HISI_DMA_POLL_Q_STS_TIME_OUT_US); 
336e9f08b65SZhou Wang if (ret) { 337e9f08b65SZhou Wang dev_err(&hdma_dev->pdev->dev, "reset channel timeout!\n"); 338e9f08b65SZhou Wang WARN_ON(1); 339e9f08b65SZhou Wang } 340e9f08b65SZhou Wang } 341e9f08b65SZhou Wang 342e9f08b65SZhou Wang static void hisi_dma_free_chan_resources(struct dma_chan *c) 343e9f08b65SZhou Wang { 344e9f08b65SZhou Wang struct hisi_dma_chan *chan = to_hisi_dma_chan(c); 345e9f08b65SZhou Wang struct hisi_dma_dev *hdma_dev = chan->hdma_dev; 346e9f08b65SZhou Wang 347e3bdaa04SJie Hai hisi_dma_reset_or_disable_hw_chan(chan, false); 348e9f08b65SZhou Wang vchan_free_chan_resources(&chan->vc); 349e9f08b65SZhou Wang 350e9f08b65SZhou Wang memset(chan->sq, 0, sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth); 351e9f08b65SZhou Wang memset(chan->cq, 0, sizeof(struct hisi_dma_cqe) * hdma_dev->chan_depth); 352e9f08b65SZhou Wang chan->sq_tail = 0; 353e9f08b65SZhou Wang chan->cq_head = 0; 354e9f08b65SZhou Wang chan->status = DISABLE; 355e9f08b65SZhou Wang } 356e9f08b65SZhou Wang 357e9f08b65SZhou Wang static void hisi_dma_desc_free(struct virt_dma_desc *vd) 358e9f08b65SZhou Wang { 359e9f08b65SZhou Wang kfree(to_hisi_dma_desc(vd)); 360e9f08b65SZhou Wang } 361e9f08b65SZhou Wang 362e9f08b65SZhou Wang static struct dma_async_tx_descriptor * 363e9f08b65SZhou Wang hisi_dma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dst, dma_addr_t src, 364e9f08b65SZhou Wang size_t len, unsigned long flags) 365e9f08b65SZhou Wang { 366e9f08b65SZhou Wang struct hisi_dma_chan *chan = to_hisi_dma_chan(c); 367e9f08b65SZhou Wang struct hisi_dma_desc *desc; 368e9f08b65SZhou Wang 369e9f08b65SZhou Wang desc = kzalloc(sizeof(*desc), GFP_NOWAIT); 370e9f08b65SZhou Wang if (!desc) 371e9f08b65SZhou Wang return NULL; 372e9f08b65SZhou Wang 373e9f08b65SZhou Wang desc->sqe.length = cpu_to_le32(len); 374e9f08b65SZhou Wang desc->sqe.src_addr = cpu_to_le64(src); 375e9f08b65SZhou Wang desc->sqe.dst_addr = cpu_to_le64(dst); 376e9f08b65SZhou Wang 377e9f08b65SZhou Wang return vchan_tx_prep(&chan->vc, 
&desc->vd, flags); 378e9f08b65SZhou Wang } 379e9f08b65SZhou Wang 380e9f08b65SZhou Wang static enum dma_status 381e9f08b65SZhou Wang hisi_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, 382e9f08b65SZhou Wang struct dma_tx_state *txstate) 383e9f08b65SZhou Wang { 384e9f08b65SZhou Wang return dma_cookie_status(c, cookie, txstate); 385e9f08b65SZhou Wang } 386e9f08b65SZhou Wang 387e9f08b65SZhou Wang static void hisi_dma_start_transfer(struct hisi_dma_chan *chan) 388e9f08b65SZhou Wang { 389e9f08b65SZhou Wang struct hisi_dma_sqe *sqe = chan->sq + chan->sq_tail; 390e9f08b65SZhou Wang struct hisi_dma_dev *hdma_dev = chan->hdma_dev; 391e9f08b65SZhou Wang struct hisi_dma_desc *desc; 392e9f08b65SZhou Wang struct virt_dma_desc *vd; 393e9f08b65SZhou Wang 394e9f08b65SZhou Wang vd = vchan_next_desc(&chan->vc); 395e9f08b65SZhou Wang if (!vd) { 396e9f08b65SZhou Wang chan->desc = NULL; 397e9f08b65SZhou Wang return; 398e9f08b65SZhou Wang } 399e9f08b65SZhou Wang list_del(&vd->node); 400e9f08b65SZhou Wang desc = to_hisi_dma_desc(vd); 401e9f08b65SZhou Wang chan->desc = desc; 402e9f08b65SZhou Wang 403e9f08b65SZhou Wang memcpy(sqe, &desc->sqe, sizeof(struct hisi_dma_sqe)); 404e9f08b65SZhou Wang 405e9f08b65SZhou Wang /* update other field in sqe */ 406e9f08b65SZhou Wang sqe->dw0 = cpu_to_le32(FIELD_PREP(OPCODE_MASK, OPCODE_M2M)); 407e9f08b65SZhou Wang sqe->dw0 |= cpu_to_le32(LOCAL_IRQ_EN); 408e9f08b65SZhou Wang 409e9f08b65SZhou Wang /* make sure data has been updated in sqe */ 410e9f08b65SZhou Wang wmb(); 411e9f08b65SZhou Wang 412e9f08b65SZhou Wang /* update sq tail, point to new sqe position */ 413e9f08b65SZhou Wang chan->sq_tail = (chan->sq_tail + 1) % hdma_dev->chan_depth; 414e9f08b65SZhou Wang 415e9f08b65SZhou Wang /* update sq_tail to trigger a new task */ 416*fd5273faSJie Hai hisi_dma_chan_write(hdma_dev->queue_base, HISI_DMA_Q_SQ_TAIL_PTR, 417*fd5273faSJie Hai chan->qp_num, chan->sq_tail); 418e9f08b65SZhou Wang } 419e9f08b65SZhou Wang 420e9f08b65SZhou Wang static void 
hisi_dma_issue_pending(struct dma_chan *c) 421e9f08b65SZhou Wang { 422e9f08b65SZhou Wang struct hisi_dma_chan *chan = to_hisi_dma_chan(c); 423e9f08b65SZhou Wang unsigned long flags; 424e9f08b65SZhou Wang 425e9f08b65SZhou Wang spin_lock_irqsave(&chan->vc.lock, flags); 426e9f08b65SZhou Wang 4272cbb9588SJie Hai if (vchan_issue_pending(&chan->vc) && !chan->desc) 428e9f08b65SZhou Wang hisi_dma_start_transfer(chan); 429e9f08b65SZhou Wang 430e9f08b65SZhou Wang spin_unlock_irqrestore(&chan->vc.lock, flags); 431e9f08b65SZhou Wang } 432e9f08b65SZhou Wang 433e9f08b65SZhou Wang static int hisi_dma_terminate_all(struct dma_chan *c) 434e9f08b65SZhou Wang { 435e9f08b65SZhou Wang struct hisi_dma_chan *chan = to_hisi_dma_chan(c); 436e9f08b65SZhou Wang unsigned long flags; 437e9f08b65SZhou Wang LIST_HEAD(head); 438e9f08b65SZhou Wang 439e9f08b65SZhou Wang spin_lock_irqsave(&chan->vc.lock, flags); 440e9f08b65SZhou Wang 441e9f08b65SZhou Wang hisi_dma_pause_dma(chan->hdma_dev, chan->qp_num, true); 442e9f08b65SZhou Wang if (chan->desc) { 443e9f08b65SZhou Wang vchan_terminate_vdesc(&chan->desc->vd); 444e9f08b65SZhou Wang chan->desc = NULL; 445e9f08b65SZhou Wang } 446e9f08b65SZhou Wang 447e9f08b65SZhou Wang vchan_get_all_descriptors(&chan->vc, &head); 448e9f08b65SZhou Wang 449e9f08b65SZhou Wang spin_unlock_irqrestore(&chan->vc.lock, flags); 450e9f08b65SZhou Wang 451e9f08b65SZhou Wang vchan_dma_desc_free_list(&chan->vc, &head); 452e9f08b65SZhou Wang hisi_dma_pause_dma(chan->hdma_dev, chan->qp_num, false); 453e9f08b65SZhou Wang 454e9f08b65SZhou Wang return 0; 455e9f08b65SZhou Wang } 456e9f08b65SZhou Wang 457e9f08b65SZhou Wang static void hisi_dma_synchronize(struct dma_chan *c) 458e9f08b65SZhou Wang { 459e9f08b65SZhou Wang struct hisi_dma_chan *chan = to_hisi_dma_chan(c); 460e9f08b65SZhou Wang 461e9f08b65SZhou Wang vchan_synchronize(&chan->vc); 462e9f08b65SZhou Wang } 463e9f08b65SZhou Wang 464e9f08b65SZhou Wang static int hisi_dma_alloc_qps_mem(struct hisi_dma_dev *hdma_dev) 465e9f08b65SZhou 
Wang { 466e9f08b65SZhou Wang size_t sq_size = sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth; 467e9f08b65SZhou Wang size_t cq_size = sizeof(struct hisi_dma_cqe) * hdma_dev->chan_depth; 468e9f08b65SZhou Wang struct device *dev = &hdma_dev->pdev->dev; 469e9f08b65SZhou Wang struct hisi_dma_chan *chan; 470e9f08b65SZhou Wang int i; 471e9f08b65SZhou Wang 472e9f08b65SZhou Wang for (i = 0; i < hdma_dev->chan_num; i++) { 473e9f08b65SZhou Wang chan = &hdma_dev->chan[i]; 474e9f08b65SZhou Wang chan->sq = dmam_alloc_coherent(dev, sq_size, &chan->sq_dma, 475e9f08b65SZhou Wang GFP_KERNEL); 476e9f08b65SZhou Wang if (!chan->sq) 477e9f08b65SZhou Wang return -ENOMEM; 478e9f08b65SZhou Wang 479e9f08b65SZhou Wang chan->cq = dmam_alloc_coherent(dev, cq_size, &chan->cq_dma, 480e9f08b65SZhou Wang GFP_KERNEL); 481e9f08b65SZhou Wang if (!chan->cq) 482e9f08b65SZhou Wang return -ENOMEM; 483e9f08b65SZhou Wang } 484e9f08b65SZhou Wang 485e9f08b65SZhou Wang return 0; 486e9f08b65SZhou Wang } 487e9f08b65SZhou Wang 488e9f08b65SZhou Wang static void hisi_dma_init_hw_qp(struct hisi_dma_dev *hdma_dev, u32 index) 489e9f08b65SZhou Wang { 490e9f08b65SZhou Wang struct hisi_dma_chan *chan = &hdma_dev->chan[index]; 491*fd5273faSJie Hai void __iomem *q_base = hdma_dev->queue_base; 492e9f08b65SZhou Wang u32 hw_depth = hdma_dev->chan_depth - 1; 493*fd5273faSJie Hai void __iomem *addr; 494*fd5273faSJie Hai u32 tmp; 495e9f08b65SZhou Wang 496e9f08b65SZhou Wang /* set sq, cq base */ 497*fd5273faSJie Hai hisi_dma_chan_write(q_base, HISI_DMA_Q_SQ_BASE_L, index, 498e9f08b65SZhou Wang lower_32_bits(chan->sq_dma)); 499*fd5273faSJie Hai hisi_dma_chan_write(q_base, HISI_DMA_Q_SQ_BASE_H, index, 500e9f08b65SZhou Wang upper_32_bits(chan->sq_dma)); 501*fd5273faSJie Hai hisi_dma_chan_write(q_base, HISI_DMA_Q_CQ_BASE_L, index, 502e9f08b65SZhou Wang lower_32_bits(chan->cq_dma)); 503*fd5273faSJie Hai hisi_dma_chan_write(q_base, HISI_DMA_Q_CQ_BASE_H, index, 504e9f08b65SZhou Wang upper_32_bits(chan->cq_dma)); 505e9f08b65SZhou 
Wang 506e9f08b65SZhou Wang /* set sq, cq depth */ 507*fd5273faSJie Hai hisi_dma_chan_write(q_base, HISI_DMA_Q_SQ_DEPTH, index, hw_depth); 508*fd5273faSJie Hai hisi_dma_chan_write(q_base, HISI_DMA_Q_CQ_DEPTH, index, hw_depth); 509e9f08b65SZhou Wang 510e9f08b65SZhou Wang /* init sq tail and cq head */ 511*fd5273faSJie Hai hisi_dma_chan_write(q_base, HISI_DMA_Q_SQ_TAIL_PTR, index, 0); 512*fd5273faSJie Hai hisi_dma_chan_write(q_base, HISI_DMA_Q_CQ_HEAD_PTR, index, 0); 513*fd5273faSJie Hai 514*fd5273faSJie Hai /* init error interrupt stats */ 515*fd5273faSJie Hai hisi_dma_chan_write(q_base, HISI_DMA_Q_ERR_INT_NUM0, index, 0); 516*fd5273faSJie Hai hisi_dma_chan_write(q_base, HISI_DMA_Q_ERR_INT_NUM1, index, 0); 517*fd5273faSJie Hai hisi_dma_chan_write(q_base, HISI_DMA_Q_ERR_INT_NUM2, index, 0); 518*fd5273faSJie Hai 519*fd5273faSJie Hai if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP08) { 520*fd5273faSJie Hai hisi_dma_chan_write(q_base, HISI_DMA_HIP08_Q_ERR_INT_NUM3, 521*fd5273faSJie Hai index, 0); 522*fd5273faSJie Hai hisi_dma_chan_write(q_base, HISI_DMA_HIP08_Q_ERR_INT_NUM4, 523*fd5273faSJie Hai index, 0); 524*fd5273faSJie Hai hisi_dma_chan_write(q_base, HISI_DMA_HIP08_Q_ERR_INT_NUM5, 525*fd5273faSJie Hai index, 0); 526*fd5273faSJie Hai hisi_dma_chan_write(q_base, HISI_DMA_HIP08_Q_ERR_INT_NUM6, 527*fd5273faSJie Hai index, 0); 528*fd5273faSJie Hai /* 529*fd5273faSJie Hai * init SQ/CQ direction selecting register. 530*fd5273faSJie Hai * "0" is to local side and "1" is to remote side. 531*fd5273faSJie Hai */ 532*fd5273faSJie Hai addr = q_base + HISI_DMA_Q_CTRL0 + index * HISI_DMA_Q_OFFSET; 533*fd5273faSJie Hai hisi_dma_update_bit(addr, HISI_DMA_HIP08_Q_CTRL0_SQCQ_DRCT, 0); 534*fd5273faSJie Hai 535*fd5273faSJie Hai /* 536*fd5273faSJie Hai * 0 - Continue to next descriptor if error occurs. 537*fd5273faSJie Hai * 1 - Abort the DMA queue if error occurs. 
538*fd5273faSJie Hai */ 539*fd5273faSJie Hai hisi_dma_update_bit(addr, 540*fd5273faSJie Hai HISI_DMA_HIP08_Q_CTRL0_ERR_ABORT_EN, 0); 541*fd5273faSJie Hai } else { 542*fd5273faSJie Hai addr = q_base + HISI_DMA_Q_CTRL0 + index * HISI_DMA_Q_OFFSET; 543*fd5273faSJie Hai 544*fd5273faSJie Hai /* 545*fd5273faSJie Hai * init SQ/CQ direction selecting register. 546*fd5273faSJie Hai * "0" is to local side and "1" is to remote side. 547*fd5273faSJie Hai */ 548*fd5273faSJie Hai hisi_dma_update_bit(addr, HISI_DMA_HIP09_Q_CTRL0_SQ_DRCT, 0); 549*fd5273faSJie Hai hisi_dma_update_bit(addr, HISI_DMA_HIP09_Q_CTRL0_CQ_DRCT, 0); 550*fd5273faSJie Hai 551*fd5273faSJie Hai /* 552*fd5273faSJie Hai * 0 - Continue to next descriptor if error occurs. 553*fd5273faSJie Hai * 1 - Abort the DMA queue if error occurs. 554*fd5273faSJie Hai */ 555*fd5273faSJie Hai 556*fd5273faSJie Hai tmp = readl_relaxed(addr); 557*fd5273faSJie Hai tmp &= ~HISI_DMA_HIP09_Q_CTRL0_ERR_ABORT_EN; 558*fd5273faSJie Hai writel_relaxed(tmp, addr); 559*fd5273faSJie Hai 560*fd5273faSJie Hai /* 561*fd5273faSJie Hai * 0 - dma should process FLR whith CPU. 562*fd5273faSJie Hai * 1 - dma not process FLR, only cpu process FLR. 
563*fd5273faSJie Hai */ 564*fd5273faSJie Hai addr = q_base + HISI_DMA_HIP09_DMA_FLR_DISABLE + 565*fd5273faSJie Hai index * HISI_DMA_Q_OFFSET; 566*fd5273faSJie Hai hisi_dma_update_bit(addr, HISI_DMA_HIP09_DMA_FLR_DISABLE_B, 0); 567*fd5273faSJie Hai 568*fd5273faSJie Hai addr = q_base + HISI_DMA_Q_CTRL1 + index * HISI_DMA_Q_OFFSET; 569*fd5273faSJie Hai hisi_dma_update_bit(addr, HISI_DMA_HIP09_Q_CTRL1_VA_ENABLE, 1); 570*fd5273faSJie Hai } 571e9f08b65SZhou Wang } 572e9f08b65SZhou Wang 573e9f08b65SZhou Wang static void hisi_dma_enable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index) 574e9f08b65SZhou Wang { 575e9f08b65SZhou Wang hisi_dma_init_hw_qp(hdma_dev, qp_index); 576e9f08b65SZhou Wang hisi_dma_unmask_irq(hdma_dev, qp_index); 577e9f08b65SZhou Wang hisi_dma_enable_dma(hdma_dev, qp_index, true); 578e9f08b65SZhou Wang } 579e9f08b65SZhou Wang 580e9f08b65SZhou Wang static void hisi_dma_disable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index) 581e9f08b65SZhou Wang { 582e3bdaa04SJie Hai hisi_dma_reset_or_disable_hw_chan(&hdma_dev->chan[qp_index], true); 583e9f08b65SZhou Wang } 584e9f08b65SZhou Wang 585e9f08b65SZhou Wang static void hisi_dma_enable_qps(struct hisi_dma_dev *hdma_dev) 586e9f08b65SZhou Wang { 587e9f08b65SZhou Wang int i; 588e9f08b65SZhou Wang 589e9f08b65SZhou Wang for (i = 0; i < hdma_dev->chan_num; i++) { 590e9f08b65SZhou Wang hdma_dev->chan[i].qp_num = i; 591e9f08b65SZhou Wang hdma_dev->chan[i].hdma_dev = hdma_dev; 592e9f08b65SZhou Wang hdma_dev->chan[i].vc.desc_free = hisi_dma_desc_free; 593e9f08b65SZhou Wang vchan_init(&hdma_dev->chan[i].vc, &hdma_dev->dma_dev); 594e9f08b65SZhou Wang hisi_dma_enable_qp(hdma_dev, i); 595e9f08b65SZhou Wang } 596e9f08b65SZhou Wang } 597e9f08b65SZhou Wang 598e9f08b65SZhou Wang static void hisi_dma_disable_qps(struct hisi_dma_dev *hdma_dev) 599e9f08b65SZhou Wang { 600e9f08b65SZhou Wang int i; 601e9f08b65SZhou Wang 602e9f08b65SZhou Wang for (i = 0; i < hdma_dev->chan_num; i++) { 603e9f08b65SZhou Wang hisi_dma_disable_qp(hdma_dev, 
i); 604e9f08b65SZhou Wang tasklet_kill(&hdma_dev->chan[i].vc.task); 605e9f08b65SZhou Wang } 606e9f08b65SZhou Wang } 607e9f08b65SZhou Wang 608e9f08b65SZhou Wang static irqreturn_t hisi_dma_irq(int irq, void *data) 609e9f08b65SZhou Wang { 610e9f08b65SZhou Wang struct hisi_dma_chan *chan = data; 611e9f08b65SZhou Wang struct hisi_dma_dev *hdma_dev = chan->hdma_dev; 612e9f08b65SZhou Wang struct hisi_dma_desc *desc; 613e9f08b65SZhou Wang struct hisi_dma_cqe *cqe; 614*fd5273faSJie Hai void __iomem *q_base; 615e9f08b65SZhou Wang 616d9c8d4b2SBarry Song spin_lock(&chan->vc.lock); 617e9f08b65SZhou Wang 618e9f08b65SZhou Wang desc = chan->desc; 619e9f08b65SZhou Wang cqe = chan->cq + chan->cq_head; 620*fd5273faSJie Hai q_base = hdma_dev->queue_base; 621e9f08b65SZhou Wang if (desc) { 62294477a79SJie Hai chan->cq_head = (chan->cq_head + 1) % hdma_dev->chan_depth; 623*fd5273faSJie Hai hisi_dma_chan_write(q_base, HISI_DMA_Q_CQ_HEAD_PTR, 62494477a79SJie Hai chan->qp_num, chan->cq_head); 625e9f08b65SZhou Wang if (FIELD_GET(STATUS_MASK, cqe->w0) == STATUS_SUCC) { 626e9f08b65SZhou Wang vchan_cookie_complete(&desc->vd); 6272cbb9588SJie Hai hisi_dma_start_transfer(chan); 628e9f08b65SZhou Wang } else { 629e9f08b65SZhou Wang dev_err(&hdma_dev->pdev->dev, "task error!\n"); 630e9f08b65SZhou Wang } 631e9f08b65SZhou Wang } 632e9f08b65SZhou Wang 633d9c8d4b2SBarry Song spin_unlock(&chan->vc.lock); 634e9f08b65SZhou Wang 635e9f08b65SZhou Wang return IRQ_HANDLED; 636e9f08b65SZhou Wang } 637e9f08b65SZhou Wang 638e9f08b65SZhou Wang static int hisi_dma_request_qps_irq(struct hisi_dma_dev *hdma_dev) 639e9f08b65SZhou Wang { 640e9f08b65SZhou Wang struct pci_dev *pdev = hdma_dev->pdev; 641e9f08b65SZhou Wang int i, ret; 642e9f08b65SZhou Wang 643e9f08b65SZhou Wang for (i = 0; i < hdma_dev->chan_num; i++) { 644e9f08b65SZhou Wang ret = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i), 645e9f08b65SZhou Wang hisi_dma_irq, IRQF_SHARED, "hisi_dma", 646e9f08b65SZhou Wang &hdma_dev->chan[i]); 647e9f08b65SZhou 
Wang if (ret) 648e9f08b65SZhou Wang return ret; 649e9f08b65SZhou Wang } 650e9f08b65SZhou Wang 651e9f08b65SZhou Wang return 0; 652e9f08b65SZhou Wang } 653e9f08b65SZhou Wang 654e9f08b65SZhou Wang /* This function enables all hw channels in a device */ 655e9f08b65SZhou Wang static int hisi_dma_enable_hw_channels(struct hisi_dma_dev *hdma_dev) 656e9f08b65SZhou Wang { 657e9f08b65SZhou Wang int ret; 658e9f08b65SZhou Wang 659e9f08b65SZhou Wang ret = hisi_dma_alloc_qps_mem(hdma_dev); 660e9f08b65SZhou Wang if (ret) { 661e9f08b65SZhou Wang dev_err(&hdma_dev->pdev->dev, "fail to allocate qp memory!\n"); 662e9f08b65SZhou Wang return ret; 663e9f08b65SZhou Wang } 664e9f08b65SZhou Wang 665e9f08b65SZhou Wang ret = hisi_dma_request_qps_irq(hdma_dev); 666e9f08b65SZhou Wang if (ret) { 667e9f08b65SZhou Wang dev_err(&hdma_dev->pdev->dev, "fail to request qp irq!\n"); 668e9f08b65SZhou Wang return ret; 669e9f08b65SZhou Wang } 670e9f08b65SZhou Wang 671e9f08b65SZhou Wang hisi_dma_enable_qps(hdma_dev); 672e9f08b65SZhou Wang 673e9f08b65SZhou Wang return 0; 674e9f08b65SZhou Wang } 675e9f08b65SZhou Wang 676e9f08b65SZhou Wang static void hisi_dma_disable_hw_channels(void *data) 677e9f08b65SZhou Wang { 678e9f08b65SZhou Wang hisi_dma_disable_qps(data); 679e9f08b65SZhou Wang } 680e9f08b65SZhou Wang 681e9f08b65SZhou Wang static void hisi_dma_set_mode(struct hisi_dma_dev *hdma_dev, 682e9f08b65SZhou Wang enum hisi_dma_mode mode) 683e9f08b65SZhou Wang { 684*fd5273faSJie Hai if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP08) 685*fd5273faSJie Hai writel_relaxed(mode == RC ? 
1 : 0, 686*fd5273faSJie Hai hdma_dev->base + HISI_DMA_HIP08_MODE); 687*fd5273faSJie Hai } 688*fd5273faSJie Hai 689*fd5273faSJie Hai static void hisi_dma_init_hw(struct hisi_dma_dev *hdma_dev) 690*fd5273faSJie Hai { 691*fd5273faSJie Hai void __iomem *addr; 692*fd5273faSJie Hai int i; 693*fd5273faSJie Hai 694*fd5273faSJie Hai if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP09) { 695*fd5273faSJie Hai for (i = 0; i < HISI_DMA_HIP09_MAX_PORT_NUM; i++) { 696*fd5273faSJie Hai addr = hdma_dev->base + HISI_DMA_HIP09_PORT_CFG_REG(i); 697*fd5273faSJie Hai hisi_dma_update_bit(addr, 698*fd5273faSJie Hai HISI_DMA_HIP09_PORT_CFG_LINK_DOWN_MASK_B, 1); 699*fd5273faSJie Hai } 700*fd5273faSJie Hai } 701*fd5273faSJie Hai } 702*fd5273faSJie Hai 703*fd5273faSJie Hai static void hisi_dma_init_dma_dev(struct hisi_dma_dev *hdma_dev) 704*fd5273faSJie Hai { 705*fd5273faSJie Hai struct dma_device *dma_dev; 706*fd5273faSJie Hai 707*fd5273faSJie Hai dma_dev = &hdma_dev->dma_dev; 708*fd5273faSJie Hai dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); 709*fd5273faSJie Hai dma_dev->device_free_chan_resources = hisi_dma_free_chan_resources; 710*fd5273faSJie Hai dma_dev->device_prep_dma_memcpy = hisi_dma_prep_dma_memcpy; 711*fd5273faSJie Hai dma_dev->device_tx_status = hisi_dma_tx_status; 712*fd5273faSJie Hai dma_dev->device_issue_pending = hisi_dma_issue_pending; 713*fd5273faSJie Hai dma_dev->device_terminate_all = hisi_dma_terminate_all; 714*fd5273faSJie Hai dma_dev->device_synchronize = hisi_dma_synchronize; 715*fd5273faSJie Hai dma_dev->directions = BIT(DMA_MEM_TO_MEM); 716*fd5273faSJie Hai dma_dev->dev = &hdma_dev->pdev->dev; 717*fd5273faSJie Hai INIT_LIST_HEAD(&dma_dev->channels); 718e9f08b65SZhou Wang } 719e9f08b65SZhou Wang 720e9f08b65SZhou Wang static int hisi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id) 721e9f08b65SZhou Wang { 722*fd5273faSJie Hai enum hisi_dma_reg_layout reg_layout; 723e9f08b65SZhou Wang struct device *dev = &pdev->dev; 724e9f08b65SZhou Wang struct 
hisi_dma_dev *hdma_dev; 725e9f08b65SZhou Wang struct dma_device *dma_dev; 726*fd5273faSJie Hai u32 chan_num; 727*fd5273faSJie Hai u32 msi_num; 728e9f08b65SZhou Wang int ret; 729e9f08b65SZhou Wang 730*fd5273faSJie Hai reg_layout = hisi_dma_get_reg_layout(pdev); 731*fd5273faSJie Hai if (reg_layout == HISI_DMA_REG_LAYOUT_INVALID) { 732*fd5273faSJie Hai dev_err(dev, "unsupported device!\n"); 733*fd5273faSJie Hai return -EINVAL; 734*fd5273faSJie Hai } 735*fd5273faSJie Hai 736e9f08b65SZhou Wang ret = pcim_enable_device(pdev); 737e9f08b65SZhou Wang if (ret) { 738e9f08b65SZhou Wang dev_err(dev, "failed to enable device mem!\n"); 739e9f08b65SZhou Wang return ret; 740e9f08b65SZhou Wang } 741e9f08b65SZhou Wang 742e9f08b65SZhou Wang ret = pcim_iomap_regions(pdev, 1 << PCI_BAR_2, pci_name(pdev)); 743e9f08b65SZhou Wang if (ret) { 744e9f08b65SZhou Wang dev_err(dev, "failed to remap I/O region!\n"); 745e9f08b65SZhou Wang return ret; 746e9f08b65SZhou Wang } 747e9f08b65SZhou Wang 748d77143ddSQing Wang ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 749e9f08b65SZhou Wang if (ret) 750e9f08b65SZhou Wang return ret; 751e9f08b65SZhou Wang 752*fd5273faSJie Hai chan_num = hisi_dma_get_chan_num(pdev); 753*fd5273faSJie Hai hdma_dev = devm_kzalloc(dev, struct_size(hdma_dev, chan, chan_num), 754*fd5273faSJie Hai GFP_KERNEL); 755e9f08b65SZhou Wang if (!hdma_dev) 756e9f08b65SZhou Wang return -EINVAL; 757e9f08b65SZhou Wang 758e9f08b65SZhou Wang hdma_dev->base = pcim_iomap_table(pdev)[PCI_BAR_2]; 759e9f08b65SZhou Wang hdma_dev->pdev = pdev; 760e9f08b65SZhou Wang hdma_dev->chan_depth = HISI_DMA_Q_DEPTH_VAL; 761*fd5273faSJie Hai hdma_dev->chan_num = chan_num; 762*fd5273faSJie Hai hdma_dev->reg_layout = reg_layout; 763*fd5273faSJie Hai hdma_dev->queue_base = hdma_dev->base + hisi_dma_get_queue_base(pdev); 764e9f08b65SZhou Wang 765e9f08b65SZhou Wang pci_set_drvdata(pdev, hdma_dev); 766e9f08b65SZhou Wang pci_set_master(pdev); 767e9f08b65SZhou Wang 768*fd5273faSJie Hai msi_num = 
hisi_dma_get_msi_num(pdev); 769*fd5273faSJie Hai 77026f1ca91SChristophe JAILLET /* This will be freed by 'pcim_release()'. See 'pcim_enable_device()' */ 771*fd5273faSJie Hai ret = pci_alloc_irq_vectors(pdev, msi_num, msi_num, PCI_IRQ_MSI); 772e9f08b65SZhou Wang if (ret < 0) { 773e9f08b65SZhou Wang dev_err(dev, "Failed to allocate MSI vectors!\n"); 774e9f08b65SZhou Wang return ret; 775e9f08b65SZhou Wang } 776e9f08b65SZhou Wang 777*fd5273faSJie Hai hisi_dma_init_dma_dev(hdma_dev); 778e9f08b65SZhou Wang 779e9f08b65SZhou Wang hisi_dma_set_mode(hdma_dev, RC); 780e9f08b65SZhou Wang 781*fd5273faSJie Hai hisi_dma_init_hw(hdma_dev); 782*fd5273faSJie Hai 783e9f08b65SZhou Wang ret = hisi_dma_enable_hw_channels(hdma_dev); 784e9f08b65SZhou Wang if (ret < 0) { 785e9f08b65SZhou Wang dev_err(dev, "failed to enable hw channel!\n"); 786e9f08b65SZhou Wang return ret; 787e9f08b65SZhou Wang } 788e9f08b65SZhou Wang 789e9f08b65SZhou Wang ret = devm_add_action_or_reset(dev, hisi_dma_disable_hw_channels, 790e9f08b65SZhou Wang hdma_dev); 791e9f08b65SZhou Wang if (ret) 792e9f08b65SZhou Wang return ret; 793e9f08b65SZhou Wang 794*fd5273faSJie Hai dma_dev = &hdma_dev->dma_dev; 795e9f08b65SZhou Wang ret = dmaenginem_async_device_register(dma_dev); 796e9f08b65SZhou Wang if (ret < 0) 797e9f08b65SZhou Wang dev_err(dev, "failed to register device!\n"); 798e9f08b65SZhou Wang 799e9f08b65SZhou Wang return ret; 800e9f08b65SZhou Wang } 801e9f08b65SZhou Wang 802e9f08b65SZhou Wang static const struct pci_device_id hisi_dma_pci_tbl[] = { 803e9f08b65SZhou Wang { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa122) }, 804e9f08b65SZhou Wang { 0, } 805e9f08b65SZhou Wang }; 806e9f08b65SZhou Wang 807e9f08b65SZhou Wang static struct pci_driver hisi_dma_pci_driver = { 808e9f08b65SZhou Wang .name = "hisi_dma", 809e9f08b65SZhou Wang .id_table = hisi_dma_pci_tbl, 810e9f08b65SZhou Wang .probe = hisi_dma_probe, 811e9f08b65SZhou Wang }; 812e9f08b65SZhou Wang 813e9f08b65SZhou Wang module_pci_driver(hisi_dma_pci_driver); 

/* Module metadata */
MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
MODULE_AUTHOR("Zhenfa Qiu <qiuzhenfa@hisilicon.com>");
MODULE_DESCRIPTION("HiSilicon Kunpeng DMA controller driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, hisi_dma_pci_tbl);