// SPDX-License-Identifier: GPL-2.0
#include <linux/cpumask.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci_regs.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>

#include "nitrox_dev.h"
#include "nitrox_common.h"
#include "nitrox_req.h"
#include "nitrox_csr.h"

/* usable crypto context size; a struct ctx_hdr is prepended on allocation */
#define CRYPTO_CTX_SIZE	256

/* packet input ring alignment (bytes) */
#define PKTIN_Q_ALIGN_BYTES 16

/**
 * nitrox_cmdq_init - allocate and initialize one command queue
 * @cmdq: command queue to set up; caller must have filled in @cmdq->ndev
 *        and @cmdq->instr_size beforehand
 * @align_bytes: hardware-required alignment of the queue base
 *
 * Allocates zeroed coherent DMA memory for the ring, padded by
 * @align_bytes so that an aligned base always fits inside the
 * allocation, then initializes the queue's locks, lists, work item
 * and counters.
 *
 * Return: 0 on success, -ENOMEM if the DMA allocation fails.
 */
static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes)
{
	struct nitrox_device *ndev = cmdq->ndev;

	/* over-allocate by align_bytes so the aligned base fits */
	cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes;
	cmdq->unalign_base = dma_zalloc_coherent(DEV(ndev), cmdq->qsize,
						 &cmdq->unalign_dma,
						 GFP_KERNEL);
	if (!cmdq->unalign_base)
		return -ENOMEM;

	/* align the bus address, then offset the CPU base by the same amount */
	cmdq->dma = PTR_ALIGN(cmdq->unalign_dma, align_bytes);
	cmdq->base = cmdq->unalign_base + (cmdq->dma - cmdq->unalign_dma);
	cmdq->write_idx = 0;

	spin_lock_init(&cmdq->cmd_qlock);
	spin_lock_init(&cmdq->resp_qlock);
	spin_lock_init(&cmdq->backlog_qlock);

	INIT_LIST_HEAD(&cmdq->response_head);
	INIT_LIST_HEAD(&cmdq->backlog_head);
	INIT_WORK(&cmdq->backlog_qflush, backlog_qflush_work);

	atomic_set(&cmdq->pending_count, 0);
	atomic_set(&cmdq->backlog_count, 0);
	return 0;
}

/**
 * nitrox_cmdq_reset - reset a command queue's software counters
 * @cmdq: command queue to reset
 *
 * Clears the write index and the pending/backlog counts; does not
 * touch the DMA memory itself.
 */
static void nitrox_cmdq_reset(struct nitrox_cmdq *cmdq)
{
	cmdq->write_idx = 0;
	atomic_set(&cmdq->pending_count, 0);
	atomic_set(&cmdq->backlog_count, 0);
}

/**
 * nitrox_cmdq_cleanup - release a command queue's resources
 * @cmdq: command queue to tear down
 *
 * Safe to call on a queue that was never initialized (no-op when the
 * ring was not allocated). Cancels pending backlog work, frees the
 * coherent DMA ring and clears every field set by nitrox_cmdq_init().
 */
static void nitrox_cmdq_cleanup(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;

	/* nothing to do if the ring was never allocated */
	if (!cmdq->unalign_base)
		return;

	cancel_work_sync(&cmdq->backlog_qflush);

	/* free the original (unaligned) allocation, not the aligned view */
	dma_free_coherent(DEV(ndev), cmdq->qsize,
			  cmdq->unalign_base, cmdq->unalign_dma);
	nitrox_cmdq_reset(cmdq);

	cmdq->dbell_csr_addr = NULL;
	cmdq->compl_cnt_csr_addr = NULL;
	cmdq->unalign_base = NULL;
	cmdq->base = NULL;
	cmdq->unalign_dma = 0;
	cmdq->dma = 0;
	cmdq->qsize = 0;
	cmdq->instr_size = 0;
}

/**
 * nitrox_free_pktin_queues - tear down all packet input queues
 * @ndev: NITROX device
 *
 * Cleans up every per-queue command ring, then frees the queue array.
 * Safe to call on a partially initialized array (see the error path in
 * nitrox_alloc_pktin_queues()).
 */
static void nitrox_free_pktin_queues(struct nitrox_device *ndev)
{
	int i;

	for (i = 0; i < ndev->nr_queues; i++) {
		struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i];

		nitrox_cmdq_cleanup(cmdq);
	}
	kfree(ndev->pkt_inq);
	ndev->pkt_inq = NULL;
}

/**
 * nitrox_alloc_pktin_queues - allocate and set up the packet input queues
 * @ndev: NITROX device
 *
 * Allocates the per-device array of command queues on the device's NUMA
 * node, then for each queue records its doorbell and completion-count
 * CSR addresses and initializes its ring. On failure all previously
 * initialized queues are freed.
 *
 * Return: 0 on success, or a negative error code on error.
 */
static int nitrox_alloc_pktin_queues(struct nitrox_device *ndev)
{
	int i, err;

	ndev->pkt_inq = kcalloc_node(ndev->nr_queues,
				     sizeof(struct nitrox_cmdq),
				     GFP_KERNEL, ndev->node);
	if (!ndev->pkt_inq)
		return -ENOMEM;

	for (i = 0; i < ndev->nr_queues; i++) {
		struct nitrox_cmdq *cmdq;
		u64 offset;

		cmdq = &ndev->pkt_inq[i];
		cmdq->ndev = ndev;
		cmdq->qno = i;
		cmdq->instr_size = sizeof(struct nps_pkt_instr);

		/* packet input ring doorbell address */
		offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i);
		cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset);
		/* packet solicit port completion count address */
		offset = NPS_PKT_SLC_CNTSX(i);
		cmdq->compl_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);

		err = nitrox_cmdq_init(cmdq, PKTIN_Q_ALIGN_BYTES);
		if (err)
			goto pktq_fail;
	}
	return 0;

pktq_fail:
	nitrox_free_pktin_queues(ndev);
	return err;
}

/**
 * create_crypto_dma_pool - create the per-device crypto context DMA pool
 * @ndev: NITROX device
 *
 * Each pool element holds a struct ctx_hdr followed by CRYPTO_CTX_SIZE
 * bytes of context data, aligned to 16 bytes.
 *
 * Return: 0 on success, -ENOMEM if the pool cannot be created.
 */
static int create_crypto_dma_pool(struct nitrox_device *ndev)
{
	size_t size;

	/* Crypto context pool, 16 byte aligned */
	size = CRYPTO_CTX_SIZE + sizeof(struct ctx_hdr);
	ndev->ctx_pool = dma_pool_create("nitrox-context",
					 DEV(ndev), size, 16, 0);
	if (!ndev->ctx_pool)
		return -ENOMEM;

	return 0;
}

/**
 * destroy_crypto_dma_pool - destroy the crypto context DMA pool
 * @ndev: NITROX device
 *
 * No-op if the pool was never created.
 */
static void destroy_crypto_dma_pool(struct nitrox_device *ndev)
{
	if (!ndev->ctx_pool)
		return;

	dma_pool_destroy(ndev->ctx_pool);
	ndev->ctx_pool = NULL;
}

/**
 * crypto_alloc_context - Allocate crypto context from pool
 * @ndev: NITROX Device
 *
 * Allocates a zeroed element from the context pool, records the pool
 * and DMA addresses in the leading struct ctx_hdr, and hands the
 * caller the address just past the header. Free with
 * crypto_free_context().
 *
 * Return: pointer to the usable context area, or NULL on failure.
 */
void *crypto_alloc_context(struct nitrox_device *ndev)
{
	struct ctx_hdr *ctx;
	void *vaddr;
	dma_addr_t dma;

	vaddr = dma_pool_zalloc(ndev->ctx_pool, GFP_KERNEL, &dma);
	if (!vaddr)
		return NULL;

	/* fill meta data */
	ctx = vaddr;
	ctx->pool = ndev->ctx_pool;
	ctx->dma = dma;
	ctx->ctx_dma = dma + sizeof(struct ctx_hdr);

	/* caller gets the area after the header */
	return ((u8 *)vaddr + sizeof(struct ctx_hdr));
}

/**
 * crypto_free_context - Free crypto context to pool
 * @ctx: context to free (as returned by crypto_alloc_context());
 *       NULL is a no-op
 */
void crypto_free_context(void *ctx)
{
	struct ctx_hdr *ctxp;

	if (!ctx)
		return;

	/* step back to the header holding the pool and DMA address */
	ctxp = (struct ctx_hdr *)((u8 *)ctx - sizeof(struct ctx_hdr));
	dma_pool_free(ctxp->pool, ctxp, ctxp->dma);
}

/**
 * nitrox_common_sw_init - allocate software resources.
 * @ndev: NITROX device
 *
 * Allocates crypto context pools and command queues etc.
 *
 * Return: 0 on success, or a negative error code on error.
 */
int nitrox_common_sw_init(struct nitrox_device *ndev)
{
	int err = 0;

	/* per device crypto context pool */
	err = create_crypto_dma_pool(ndev);
	if (err)
		return err;

	err = nitrox_alloc_pktin_queues(ndev);
	if (err)
		destroy_crypto_dma_pool(ndev);

	return err;
}

/**
 * nitrox_common_sw_cleanup - free software resources.
 * @ndev: NITROX device
 *
 * Reverses nitrox_common_sw_init() in the opposite order of setup.
 */
void nitrox_common_sw_cleanup(struct nitrox_device *ndev)
{
	nitrox_free_pktin_queues(ndev);
	destroy_crypto_dma_pool(ndev);
}