// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016 Chelsio Communications, Inc.
 */

#include "cxgbit.h"

/*
 * cxgbit_set_one_ppod - populate a single page-pod (ppod) entry.
 * @ppod:   destination pagepod to fill.
 * @ttinfo: task tag info; its ->hdr is copied verbatim into the ppod.
 * @sg_pp:  in/out cursor over the DMA-mapped scatterlist (NULL to emit an
 *          all-zero ppod after the header).
 * @sg_off: in/out byte offset within the current sg entry.
 *
 * Copies the ppod header, then fills PPOD_PAGES_MAX page addresses by
 * walking the scatterlist in PAGE_SIZE steps, advancing to the next sg
 * entry whenever the accumulated offset reaches that entry's mapped
 * length (+ its initial offset). The cursor (*sg_pp/*sg_off) is written
 * back *before* the final address below, so the last slot's address is
 * repeated as the first slot of the next ppod (see comment in the code).
 */
static void
cxgbit_set_one_ppod(struct cxgbi_pagepod *ppod,
		    struct cxgbi_task_tag_info *ttinfo,
		    struct scatterlist **sg_pp, unsigned int *sg_off)
{
	struct scatterlist *sg = sg_pp ? *sg_pp : NULL;
	unsigned int offset = sg_off ? *sg_off : 0;
	dma_addr_t addr = 0UL;
	unsigned int len = 0;
	int i;

	memcpy(ppod, &ttinfo->hdr, sizeof(struct cxgbi_pagepod_hdr));

	if (sg) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
	}

	for (i = 0; i < PPOD_PAGES_MAX; i++) {
		if (sg) {
			ppod->addr[i] = cpu_to_be64(addr + offset);
			offset += PAGE_SIZE;
			/* current sg entry exhausted -> move to the next one */
			if (offset == (len + sg->offset)) {
				offset = 0;
				sg = sg_next(sg);
				if (sg) {
					addr = sg_dma_address(sg);
					len = sg_dma_len(sg);
				}
			}
		} else {
			/* scatterlist exhausted: pad remaining slots with 0 */
			ppod->addr[i] = 0ULL;
		}
	}

	/*
	 * the fifth address needs to be repeated in the next ppod, so do
	 * not move sg
	 */
	if (sg_pp) {
		*sg_pp = sg;
		*sg_off = offset;
	}

	/* locally advance past a just-finished entry to compute the
	 * duplicated trailing address (cursor above is left untouched).
	 */
	if (offset == len) {
		offset = 0;
		if (sg) {
			sg = sg_next(sg);
			if (sg)
				addr = sg_dma_address(sg);
		}
	}
	ppod->addr[i] = sg ? cpu_to_be64(addr + offset) : 0ULL;
}

/*
 * cxgbit_ppod_init_idata - allocate an skb carrying a ULP_TX memory-write
 * work request with immediate data, sized for @npods page-pods.
 * @cdev:  owning cxgbit device (currently unused in the body).
 * @ppm:   page-pod manager; ->llimit is the base of the ppod region.
 * @idx:   first ppod index; converted to a byte address in adapter memory.
 * @npods: number of ppods the caller will write after the header.
 * @tid:   connection tid for the work-request header.
 *
 * Returns the skb with the ulp_mem_io + ulptx_idata headers filled in, or
 * NULL on allocation failure. The ppod payload area (dlen bytes, located
 * after the idata header) is left for the caller to fill.
 */
static struct sk_buff *
cxgbit_ppod_init_idata(struct cxgbit_device *cdev, struct cxgbi_ppm *ppm,
		       unsigned int idx, unsigned int npods, unsigned int tid)
{
	struct ulp_mem_io *req;
	struct ulptx_idata *idata;
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
	unsigned int dlen = npods << PPOD_SIZE_SHIFT;
	/* WR length must be a multiple of 16 bytes */
	unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
				sizeof(struct ulptx_idata) + dlen, 16);
	struct sk_buff *skb;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb)
		return NULL;

	req = __skb_put(skb, wr_len);
	INIT_ULPTX_WR(req, wr_len, 0, tid);
	req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |
		FW_WR_ATOMIC_V(0));
	req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
		ULP_MEMIO_ORDER_V(0) |
		T5_ULP_MEMIO_IMM_V(1));
	/* length and address are expressed in 32-byte units (>> 5) */
	req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
	req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));

	idata = (struct ulptx_idata *)(req + 1);
	idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
	idata->len = htonl(dlen);

	return skb;
}

/*
 * cxgbit_ppod_write_idata - build one memory-write WR covering @npods
 * ppods starting at @idx, fill the ppods from the scatterlist cursor,
 * and queue the skb on the socket's ppod queue for later transmission.
 *
 * Returns 0 on success or -ENOMEM if the skb allocation fails.
 * The sg cursor (@sg_pp/@sg_off) is advanced across the filled ppods.
 */
static int
cxgbit_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbit_sock *csk,
			struct cxgbi_task_tag_info *ttinfo, unsigned int idx,
			unsigned int npods, struct scatterlist **sg_pp,
			unsigned int *sg_off)
{
	struct cxgbit_device *cdev = csk->com.cdev;
	struct sk_buff *skb;
	struct ulp_mem_io *req;
	struct ulptx_idata *idata;
	struct cxgbi_pagepod *ppod;
	unsigned int i;

	skb = cxgbit_ppod_init_idata(cdev, ppm, idx, npods, csk->tid);
	if (!skb)
		return -ENOMEM;

	/* ppod payload starts right after the ulp_mem_io + idata headers */
	req = (struct ulp_mem_io *)skb->data;
	idata = (struct ulptx_idata *)(req + 1);
	ppod = (struct cxgbi_pagepod *)(idata + 1);

	for (i = 0; i < npods; i++, ppod++)
		cxgbit_set_one_ppod(ppod, ttinfo, sg_pp, sg_off);

	/* queued here; transmission happens elsewhere (not in this file) */
	__skb_queue_tail(&csk->ppodq, skb);

	return 0;
}

/*
 * cxgbit_ddp_set_map - write all ppods for @ttinfo to adapter memory.
 *
 * Splits the ttinfo->npods ppods into chunks of at most
 * ULPMEM_IDATA_MAX_NPPODS (the per-WR immediate-data limit) and emits one
 * work request per chunk via cxgbit_ppod_write_idata(). The sg/offset
 * cursor carries over between chunks so ppods are filled contiguously.
 *
 * Returns 0 on success or the first negative error from a chunk write.
 */
static int
cxgbit_ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbit_sock *csk,
		   struct cxgbi_task_tag_info *ttinfo)
{
	unsigned int pidx = ttinfo->idx;
	unsigned int npods = ttinfo->npods;
	unsigned int i, cnt;
	struct scatterlist *sg = ttinfo->sgl;
	unsigned int offset = 0;
	int ret = 0;

	for (i = 0; i < npods; i += cnt, pidx += cnt) {
		cnt = npods - i;

		if (cnt > ULPMEM_IDATA_MAX_NPPODS)
			cnt = ULPMEM_IDATA_MAX_NPPODS;

		ret = cxgbit_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt,
					      &sg, &offset);
		if (ret < 0)
			break;
	}

	return ret;
}

/*
 * cxgbit_ddp_sgl_check - verify a scatterlist is DDP-capable.
 *
 * Rejects the list (-EINVAL) unless:
 *  - every entry's offset is 4-byte aligned,
 *  - only the first entry may have a non-zero offset,
 *  - every entry except the last spans exactly PAGE_SIZE
 *    (offset + length == PAGE_SIZE).
 * Returns 0 when the list satisfies all constraints.
 */
static int cxgbit_ddp_sgl_check(struct scatterlist *sg,
				unsigned int nents)
{
	unsigned int last_sgidx = nents - 1;
	unsigned int i;

	for (i = 0; i < nents; i++, sg = sg_next(sg)) {
		unsigned int len = sg->length + sg->offset;

		if ((sg->offset & 0x3) || (i && sg->offset) ||
		    ((i != last_sgidx) && (len != PAGE_SIZE))) {
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * cxgbit_ddp_reserve - set up direct data placement for one command.
 * @csk:     connection socket.
 * @ttinfo:  task tag info; ->sgl/->nents must already point at the
 *           command's data scatterlist. On success ->tag, ->idx, ->npods,
 *           ->nr_pages and ->hdr are filled in.
 * @xferlen: total transfer length in bytes.
 *
 * Reserves ppods from the page-pod manager, DMA-maps the scatterlist and
 * queues the ppod-write WRs. Returns 0 on success; -EINVAL (or the ppod
 * reservation error) on any failure, with all partially-acquired
 * resources released via the goto-cleanup path.
 */
static int
cxgbit_ddp_reserve(struct cxgbit_sock *csk, struct cxgbi_task_tag_info *ttinfo,
		   unsigned int xferlen)
{
	struct cxgbit_device *cdev = csk->com.cdev;
	struct cxgbi_ppm *ppm = cdev2ppm(cdev);
	struct scatterlist *sgl = ttinfo->sgl;
	unsigned int sgcnt = ttinfo->nents;
	unsigned int sg_offset = sgl->offset;
	int ret;

	/* DDP is not worthwhile below the threshold or with no sg entries */
	if ((xferlen < DDP_THRESHOLD) || (!sgcnt)) {
		pr_debug("ppm 0x%p, pgidx %u, xfer %u, sgcnt %u, NO ddp.\n",
			 ppm, ppm->tformat.pgsz_idx_dflt,
			 xferlen, ttinfo->nents);
		return -EINVAL;
	}

	if (cxgbit_ddp_sgl_check(sgl, sgcnt) < 0)
		return -EINVAL;

	/* pages needed = ceil((xferlen + first-entry offset) / PAGE_SIZE) */
	ttinfo->nr_pages = (xferlen + sgl->offset +
			    (1 << PAGE_SHIFT) - 1) >> PAGE_SHIFT;

	/*
	 * the ddp tag will be used for the ttt in the outgoing r2t pdu
	 */
	ret = cxgbi_ppm_ppods_reserve(ppm, ttinfo->nr_pages, 0, &ttinfo->idx,
				      &ttinfo->tag, 0);
	if (ret < 0)
		return ret;
	ttinfo->npods = ret;

	/* temporarily clear the first-entry offset so dma_map_sg maps from
	 * the page boundary; restore it immediately afterwards.
	 */
	sgl->offset = 0;
	ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
	sgl->offset = sg_offset;
	if (!ret) {
		pr_debug("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n",
			 __func__, 0, xferlen, sgcnt);
		goto rel_ppods;
	}

	cxgbi_ppm_make_ppod_hdr(ppm, ttinfo->tag, csk->tid, sgl->offset,
				xferlen, &ttinfo->hdr);

	ret = cxgbit_ddp_set_map(ppm, csk, ttinfo);
	if (ret < 0) {
		/* drop any ppod WRs already queued and undo the mapping */
		__skb_queue_purge(&csk->ppodq);
		dma_unmap_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
		goto rel_ppods;
	}

	return 0;

rel_ppods:
	cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
	return -EINVAL;
}

/*
 * cxgbit_get_r2t_ttt - choose the target transfer tag for an outgoing R2T.
 *
 * If DDP is enabled on the connection and this command still needs DDP
 * setup, attempt to reserve DDP resources; on success the hardware DDP
 * tag becomes the ttt and ccmd->release marks that teardown is required
 * later. On failure the sgl bookkeeping is cleared and the (previous)
 * ttinfo->tag is used as-is. Always stores ttinfo->tag into
 * r2t->targ_xfer_tag.
 */
void
cxgbit_get_r2t_ttt(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
		   struct iscsi_r2t *r2t)
{
	struct cxgbit_sock *csk = conn->context;
	struct cxgbit_device *cdev = csk->com.cdev;
	struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
	struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
	int ret = -EINVAL;

	if ((!ccmd->setup_ddp) ||
	    (!test_bit(CSK_DDP_ENABLE, &csk->com.flags)))
		goto out;

	/* only attempt DDP setup once per command */
	ccmd->setup_ddp = false;

	ttinfo->sgl = cmd->se_cmd.t_data_sg;
	ttinfo->nents = cmd->se_cmd.t_data_nents;

	ret = cxgbit_ddp_reserve(csk, ttinfo, cmd->se_cmd.data_length);
	if (ret < 0) {
		pr_debug("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n",
			 csk, cmd, cmd->se_cmd.data_length, ttinfo->nents);

		ttinfo->sgl = NULL;
		ttinfo->nents = 0;
	} else {
		ccmd->release = true;
	}
out:
	pr_debug("cdev 0x%p, cmd 0x%p, tag 0x%x\n", cdev, cmd, ttinfo->tag);
	r2t->targ_xfer_tag = ttinfo->tag;
}

/*
 * cxgbit_unmap_cmd - release per-command DDP resources (if any were set
 * up, as recorded by ccmd->release).
 *
 * Two cases: commands using the passthrough-sg path just drop the page
 * reference held in ccmd->sg; DDP commands must first abort the TCP
 * connection if the write is incomplete (so the hardware cannot DDP into
 * freed memory), then DMA-unmap the scatterlist and release the ppods.
 */
void cxgbit_unmap_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);

	if (ccmd->release) {
		if (cmd->se_cmd.se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
			put_page(sg_page(&ccmd->sg));
		} else {
			struct cxgbit_sock *csk = conn->context;
			struct cxgbit_device *cdev = csk->com.cdev;
			struct cxgbi_ppm *ppm = cdev2ppm(cdev);
			struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;

			/* Abort the TCP conn if DDP is not complete to
			 * avoid any possibility of DDP after freeing
			 * the cmd.
			 */
			if (unlikely(cmd->write_data_done !=
				     cmd->se_cmd.data_length))
				cxgbit_abort_conn(csk);

			if (unlikely(ttinfo->sgl)) {
				dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
					     ttinfo->nents, DMA_FROM_DEVICE);
				ttinfo->nents = 0;
				ttinfo->sgl = NULL;
			}
			cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
		}
		ccmd->release = false;
	}
}

/*
 * cxgbit_ddp_init - initialize the page-pod manager for a device.
 *
 * Fails with -EACCES if the adapter has no iSCSI memory region. Otherwise
 * decodes the four page-size orders packed one-per-byte in
 * lldi->iscsi_pgsz_order, validates the tag mask, and initializes the ppm
 * over the iSCSI region (and, where present, the pod edram region).
 * CDEV_DDP_ENABLE is set only when the default page-size index is usable
 * and at least 1024 ppods are available. Returns 0 on success or a
 * negative error from cxgbi_ppm_init().
 */
int cxgbit_ddp_init(struct cxgbit_device *cdev)
{
	struct cxgb4_lld_info *lldi = &cdev->lldi;
	struct net_device *ndev = cdev->lldi.ports[0];
	struct cxgbi_tag_format tformat;
	int ret, i;

	if (!lldi->vr->iscsi.size) {
		pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name);
		return -EACCES;
	}

	memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
	/* one page-size order per byte, 4 entries total */
	for (i = 0; i < 4; i++)
		tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3))
					& 0xF;
	cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat);

	ret = cxgbi_ppm_init(lldi->iscsi_ppm, cdev->lldi.ports[0],
			     cdev->lldi.pdev, &cdev->lldi, &tformat,
			     lldi->vr->iscsi.size, lldi->iscsi_llimit,
			     lldi->vr->iscsi.start, 2,
			     lldi->vr->ppod_edram.start,
			     lldi->vr->ppod_edram.size);
	if (ret >= 0) {
		struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*lldi->iscsi_ppm);

		if ((ppm->tformat.pgsz_idx_dflt < DDP_PGIDX_MAX) &&
		    (ppm->ppmax >= 1024))
			set_bit(CDEV_DDP_ENABLE, &cdev->flags);
		ret = 0;
	}

	return ret;
}