// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 HGST, a Western Digital Company.
 */
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>

/* # of WCs to poll for with a single call to ib_poll_cq */
#define IB_POLL_BATCH			16
#define IB_POLL_BATCH_DIRECT		8

/* # of WCs to iterate over before yielding */
#define IB_POLL_BUDGET_IRQ		256
#define IB_POLL_BUDGET_WORKQUEUE	65536

#define IB_POLL_FLAGS \
	(IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)

static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs,
			   int batch)
{
	int i, n, completed = 0;

	/*
	 * budget might be (-1) if the caller does not
	 * want to bound this call, thus we need unsigned
	 * minimum here.
	 */
	while ((n = ib_poll_cq(cq, min_t(u32, batch,
					 budget - completed), wcs)) > 0) {
		for (i = 0; i < n; i++) {
			struct ib_wc *wc = &wcs[i];

			if (wc->wr_cqe)
				wc->wr_cqe->done(cq, wc);
			else
				WARN_ON_ONCE(wc->status == IB_WC_SUCCESS);
		}

		completed += n;

		if (n != batch || (budget != -1 && completed >= budget))
			break;
	}

	return completed;
}

/**
 * ib_process_cq_direct - process a CQ in caller context
 * @cq: CQ to process
 * @budget: number of CQEs to poll for
 *
 * This function is used to process all outstanding CQ entries.
 * It does not offload CQ processing to a different context and does
 * not ask for completion interrupts from the HCA.
 * Using direct processing on a CQ with a non-IB_POLL_DIRECT poll context
 * may trigger concurrent processing.
 *
 * Note: do not pass -1 as %budget unless it is guaranteed that the number
 * of completions that will be processed is small.
 */
int ib_process_cq_direct(struct ib_cq *cq, int budget)
{
	struct ib_wc wcs[IB_POLL_BATCH_DIRECT];

	return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT);
}
EXPORT_SYMBOL(ib_process_cq_direct);
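/*
 * Illustrative sketch only, not part of the core CQ API: how a ULP might
 * pair the wr_cqe abstraction with an IB_POLL_DIRECT CQ.  The request
 * embeds a struct ib_cqe, and __ib_process_cq() invokes its ->done
 * callback via wc->wr_cqe instead of a legacy wr_id lookup.  All
 * "example_*" names below are hypothetical and exist for documentation.
 */
struct example_request {
	struct ib_cqe	cqe;	/* wc->wr_cqe points back at this member */
	bool		done;
};

static void example_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct example_request *req =
		container_of(wc->wr_cqe, struct example_request, cqe);

	if (wc->status != IB_WC_SUCCESS)
		pr_err("example send failed: %s\n",
		       ib_wc_status_msg(wc->status));
	WRITE_ONCE(req->done, true);
}

static int __maybe_unused example_send_and_poll(struct ib_qp *qp,
						struct ib_cq *cq,
						struct example_request *req)
{
	struct ib_send_wr wr = {
		.wr_cqe	    = &req->cqe,	/* instead of wr_id */
		.opcode	    = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
		/* sg_list/num_sge omitted for brevity */
	};
	int ret;

	req->done = false;
	req->cqe.done = example_send_done;

	ret = ib_post_send(qp, &wr, NULL);
	if (ret)
		return ret;

	/*
	 * An IB_POLL_DIRECT CQ never requests completion interrupts, so
	 * the ULP must drive completion processing itself.
	 */
	while (!READ_ONCE(req->done)) {
		ib_process_cq_direct(cq, IB_POLL_BATCH_DIRECT);
		cpu_relax();
	}
	return 0;
}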
static void ib_cq_completion_direct(struct ib_cq *cq, void *private)
{
	WARN_ONCE(1, "got unsolicited completion for CQ 0x%p\n", cq);
}

static int ib_poll_handler(struct irq_poll *iop, int budget)
{
	struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
	int completed;

	completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH);
	if (completed < budget) {
		irq_poll_complete(&cq->iop);
		if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
			irq_poll_sched(&cq->iop);
	}

	return completed;
}

static void ib_cq_completion_softirq(struct ib_cq *cq, void *private)
{
	irq_poll_sched(&cq->iop);
}

static void ib_cq_poll_work(struct work_struct *work)
{
	struct ib_cq *cq = container_of(work, struct ib_cq, work);
	int completed;

	completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc,
				    IB_POLL_BATCH);
	if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
	    ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
		queue_work(cq->comp_wq, &cq->work);
}

static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
{
	queue_work(cq->comp_wq, &cq->work);
}

/**
 * __ib_alloc_cq_user - allocate a completion queue
 * @dev: device to allocate the CQ for
 * @private: driver private data, accessible from cq->cq_context
 * @nr_cqe: number of CQEs to allocate
 * @comp_vector: HCA completion vector for this CQ
 * @poll_ctx: context to poll the CQ from
 * @caller: module owner name
 * @udata: Valid user data or NULL for kernel objects
 *
 * This is the proper interface to allocate a CQ for in-kernel users. A
 * CQ allocated with this interface will automatically be polled from the
 * specified context. The ULP must use wr->wr_cqe instead of wr->wr_id
 * to use this CQ abstraction.
 */
struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
				 int nr_cqe, int comp_vector,
				 enum ib_poll_context poll_ctx,
				 const char *caller, struct ib_udata *udata)
{
	struct ib_cq_init_attr cq_attr = {
		.cqe		= nr_cqe,
		.comp_vector	= comp_vector,
	};
	struct ib_cq *cq;
	int ret = -ENOMEM;

	cq = dev->ops.create_cq(dev, &cq_attr, NULL);
	if (IS_ERR(cq))
		return cq;

	cq->device = dev;
	cq->uobject = NULL;
	cq->event_handler = NULL;
	cq->cq_context = private;
	cq->poll_ctx = poll_ctx;
	atomic_set(&cq->usecnt, 0);

	cq->wc = kmalloc_array(IB_POLL_BATCH, sizeof(*cq->wc), GFP_KERNEL);
	if (!cq->wc)
		goto out_destroy_cq;

	cq->res.type = RDMA_RESTRACK_CQ;
	rdma_restrack_set_task(&cq->res, caller);
	rdma_restrack_kadd(&cq->res);

	switch (cq->poll_ctx) {
	case IB_POLL_DIRECT:
		cq->comp_handler = ib_cq_completion_direct;
		break;
	case IB_POLL_SOFTIRQ:
		cq->comp_handler = ib_cq_completion_softirq;

		irq_poll_init(&cq->iop, IB_POLL_BUDGET_IRQ, ib_poll_handler);
		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
		break;
	case IB_POLL_WORKQUEUE:
	case IB_POLL_UNBOUND_WORKQUEUE:
		cq->comp_handler = ib_cq_completion_workqueue;
		INIT_WORK(&cq->work, ib_cq_poll_work);
		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
		cq->comp_wq = (cq->poll_ctx == IB_POLL_WORKQUEUE) ?
				ib_comp_wq : ib_comp_unbound_wq;
		break;
	default:
		ret = -EINVAL;
		goto out_free_wc;
	}

	return cq;

out_free_wc:
	kfree(cq->wc);
	rdma_restrack_del(&cq->res);
out_destroy_cq:
	cq->device->ops.destroy_cq(cq, udata);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(__ib_alloc_cq_user);

/**
 * ib_free_cq_user - free a completion queue
 * @cq: completion queue to free
 * @udata: User data or NULL for kernel object
 */
void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
{
	int ret;

	if (WARN_ON_ONCE(atomic_read(&cq->usecnt)))
		return;

	switch (cq->poll_ctx) {
	case IB_POLL_DIRECT:
		break;
	case IB_POLL_SOFTIRQ:
		irq_poll_disable(&cq->iop);
		break;
	case IB_POLL_WORKQUEUE:
	case IB_POLL_UNBOUND_WORKQUEUE:
		cancel_work_sync(&cq->work);
		break;
	default:
		WARN_ON_ONCE(1);
	}

	kfree(cq->wc);
	rdma_restrack_del(&cq->res);
	ret = cq->device->ops.destroy_cq(cq, udata);
	WARN_ON_ONCE(ret);
}
EXPORT_SYMBOL(ib_free_cq_user);
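/*
 * Illustrative sketch only (example_create_cq is a hypothetical name): a
 * kernel ULP would normally reach __ib_alloc_cq_user() through the
 * ib_alloc_cq() convenience wrapper, which supplies KBUILD_MODNAME as
 * @caller and a NULL @udata, and release the CQ with ib_free_cq().
 */
static int __maybe_unused example_create_cq(struct ib_device *dev, void *priv)
{
	struct ib_cq *cq;

	/* size the CQ for the number of WRs the ULP keeps outstanding */
	cq = ib_alloc_cq(dev, priv, 128, 0, IB_POLL_WORKQUEUE);
	if (IS_ERR(cq))
		return PTR_ERR(cq);

	/*
	 * ... post WRs with wr_cqe set; their ->done callbacks run from
	 * the shared completion workqueue (ib_comp_wq) ...
	 */

	ib_free_cq(cq);
	return 0;
}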