/*
 * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm/page.h>
#include <linux/io.h>
#include <linux/wait.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/uverbs_ioctl.h>

#include "pvrdma.h"

/**
 * pvrdma_req_notify_cq - request notification for a completion queue
 * @ibcq: the completion queue
 * @notify_flags: notification flags
 *
 * @return: 0 for success, or a positive value if IB_CQ_REPORT_MISSED_EVENTS
 * was requested and completions are already pending on the CQ.
 */
int pvrdma_req_notify_cq(struct ib_cq *ibcq,
                         enum ib_cq_notify_flags notify_flags)
{
        struct pvrdma_dev *dev = to_vdev(ibcq->device);
        struct pvrdma_cq *cq = to_vcq(ibcq);
        u32 val = cq->cq_handle;
        unsigned long flags;
        int has_data = 0;

        val |= (notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
                PVRDMA_UAR_CQ_ARM_SOL : PVRDMA_UAR_CQ_ARM;

        spin_lock_irqsave(&cq->cq_lock, flags);

        pvrdma_write_uar_cq(dev, val);

        if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
                unsigned int head;

                has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
                                                    cq->ibcq.cqe, &head);
                if (unlikely(has_data == PVRDMA_INVALID_IDX))
                        dev_err(&dev->pdev->dev, "CQ ring state invalid\n");
        }

        spin_unlock_irqrestore(&cq->cq_lock, flags);

        return has_data;
}
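
/*
 * Illustrative only, not part of this driver: a CQ consumer normally re-arms
 * through the core verbs wrapper ib_req_notify_cq(), which dispatches to
 * pvrdma_req_notify_cq() above. handler_cq and process_completions() are
 * hypothetical names used for the sketch.
 *
 *      if (ib_req_notify_cq(handler_cq,
 *                           IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS) > 0)
 *              process_completions(handler_cq);
 *
 * A positive return corresponds to the has_data value computed above, i.e.
 * completions were already pending when the CQ was re-armed.
 */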

/**
 * pvrdma_create_cq - create completion queue
 * @ibdev: the device
 * @attr: completion queue attributes
 * @udata: user data
 *
 * @return: ib_cq completion queue pointer on success,
 *          otherwise returns negative errno.
 */
struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
                               const struct ib_cq_init_attr *attr,
                               struct ib_udata *udata)
{
        int entries = attr->cqe;
        struct pvrdma_dev *dev = to_vdev(ibdev);
        struct pvrdma_cq *cq;
        int ret;
        int npages;
        unsigned long flags;
        union pvrdma_cmd_req req;
        union pvrdma_cmd_resp rsp;
        struct pvrdma_cmd_create_cq *cmd = &req.create_cq;
        struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp;
        struct pvrdma_create_cq_resp cq_resp = {0};
        struct pvrdma_create_cq ucmd;
        struct pvrdma_ucontext *context = rdma_udata_to_drv_context(
                udata, struct pvrdma_ucontext, ibucontext);

        BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64);

        entries = roundup_pow_of_two(entries);
        if (entries < 1 || entries > dev->dsr->caps.max_cqe)
                return ERR_PTR(-EINVAL);

        if (!atomic_add_unless(&dev->num_cqs, 1, dev->dsr->caps.max_cq))
                return ERR_PTR(-ENOMEM);

        cq = kzalloc(sizeof(*cq), GFP_KERNEL);
        if (!cq) {
                atomic_dec(&dev->num_cqs);
                return ERR_PTR(-ENOMEM);
        }

        cq->ibcq.cqe = entries;
        cq->is_kernel = !udata;

        if (!cq->is_kernel) {
                if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
                        ret = -EFAULT;
                        goto err_cq;
                }

                cq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size,
                                       IB_ACCESS_LOCAL_WRITE, 1);
                if (IS_ERR(cq->umem)) {
                        ret = PTR_ERR(cq->umem);
                        goto err_cq;
                }

                npages = ib_umem_page_count(cq->umem);
        } else {
                /* One extra page for shared ring state */
                npages = 1 + (entries * sizeof(struct pvrdma_cqe) +
                              PAGE_SIZE - 1) / PAGE_SIZE;

                /* Skip header page. */
                cq->offset = PAGE_SIZE;
        }

        if (npages < 0 || npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
                dev_warn(&dev->pdev->dev,
                         "overflow pages in completion queue\n");
                ret = -EINVAL;
                goto err_umem;
        }

        ret = pvrdma_page_dir_init(dev, &cq->pdir, npages, cq->is_kernel);
        if (ret) {
                dev_warn(&dev->pdev->dev,
                         "could not allocate page directory\n");
                goto err_umem;
        }

        /* Ring state is always the first page. Set in library for user cq. */
        if (cq->is_kernel)
                cq->ring_state = cq->pdir.pages[0];
        else
                pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0);

        refcount_set(&cq->refcnt, 1);
        init_completion(&cq->free);
        spin_lock_init(&cq->cq_lock);

        memset(cmd, 0, sizeof(*cmd));
        cmd->hdr.cmd = PVRDMA_CMD_CREATE_CQ;
        cmd->nchunks = npages;
        cmd->ctx_handle = context ? context->ctx_handle : 0;
        cmd->cqe = entries;
        cmd->pdir_dma = cq->pdir.dir_dma;
        ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_CQ_RESP);
        if (ret < 0) {
                dev_warn(&dev->pdev->dev,
                         "could not create completion queue, error: %d\n",
                         ret);
                goto err_page_dir;
        }

        cq->ibcq.cqe = resp->cqe;
        cq->cq_handle = resp->cq_handle;
        cq_resp.cqn = resp->cq_handle;
        spin_lock_irqsave(&dev->cq_tbl_lock, flags);
        dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq;
        spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

        if (!cq->is_kernel) {
                cq->uar = &context->uar;

                /* Copy udata back. */
                if (ib_copy_to_udata(udata, &cq_resp, sizeof(cq_resp))) {
                        dev_warn(&dev->pdev->dev,
                                 "failed to copy back udata\n");
                        pvrdma_destroy_cq(&cq->ibcq, udata);
                        return ERR_PTR(-EINVAL);
                }
        }

        return &cq->ibcq;

err_page_dir:
        pvrdma_page_dir_cleanup(dev, &cq->pdir);
err_umem:
        if (!cq->is_kernel)
                ib_umem_release(cq->umem);
err_cq:
        atomic_dec(&dev->num_cqs);
        kfree(cq);

        return ERR_PTR(ret);
}
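
/*
 * Free all resources backing a CQ. This drops the creation reference taken
 * in pvrdma_create_cq() and then waits for any remaining reference holders
 * (elsewhere in the driver the completion event path takes a reference while
 * it runs) to signal cq->free before releasing the user memory and the page
 * directory.
 */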
static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
{
        if (refcount_dec_and_test(&cq->refcnt))
                complete(&cq->free);
        wait_for_completion(&cq->free);

        if (!cq->is_kernel)
                ib_umem_release(cq->umem);

        pvrdma_page_dir_cleanup(dev, &cq->pdir);
        kfree(cq);
}

/**
 * pvrdma_destroy_cq - destroy completion queue
 * @cq: the completion queue to destroy.
 * @udata: user data or null for kernel object
 *
 * @return: 0 for success.
 */
int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
        struct pvrdma_cq *vcq = to_vcq(cq);
        union pvrdma_cmd_req req;
        struct pvrdma_cmd_destroy_cq *cmd = &req.destroy_cq;
        struct pvrdma_dev *dev = to_vdev(cq->device);
        unsigned long flags;
        int ret;

        memset(cmd, 0, sizeof(*cmd));
        cmd->hdr.cmd = PVRDMA_CMD_DESTROY_CQ;
        cmd->cq_handle = vcq->cq_handle;

        ret = pvrdma_cmd_post(dev, &req, NULL, 0);
        if (ret < 0)
                dev_warn(&dev->pdev->dev,
                         "could not destroy completion queue, error: %d\n",
                         ret);

        /* free cq's resources */
        spin_lock_irqsave(&dev->cq_tbl_lock, flags);
        dev->cq_tbl[vcq->cq_handle] = NULL;
        spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

        pvrdma_free_cq(dev, vcq);
        atomic_dec(&dev->num_cqs);

        return ret;
}

static inline struct pvrdma_cqe *get_cqe(struct pvrdma_cq *cq, int i)
{
        return (struct pvrdma_cqe *)pvrdma_page_dir_get_ptr(
                                        &cq->pdir,
                                        cq->offset +
                                        sizeof(struct pvrdma_cqe) * i);
}
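
/*
 * Drop every pending CQE that belongs to @qp from a kernel CQ; the caller
 * holds cq->cq_lock (typically while the QP is being destroyed). Entries for
 * other QPs are compacted toward the producer tail, and the consumer head is
 * advanced once for each discarded entry so the ring stays consistent.
 */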
void _pvrdma_flush_cqe(struct pvrdma_qp *qp, struct pvrdma_cq *cq)
{
        unsigned int head;
        int has_data;

        if (!cq->is_kernel)
                return;

        /* Lock held */
        has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
                                            cq->ibcq.cqe, &head);
        if (unlikely(has_data > 0)) {
                int items;
                int curr;
                int tail = pvrdma_idx(&cq->ring_state->rx.prod_tail,
                                      cq->ibcq.cqe);
                struct pvrdma_cqe *cqe;
                struct pvrdma_cqe *curr_cqe;

                items = (tail > head) ? (tail - head) :
                        (cq->ibcq.cqe - head + tail);
                curr = --tail;
                while (items-- > 0) {
                        if (curr < 0)
                                curr = cq->ibcq.cqe - 1;
                        if (tail < 0)
                                tail = cq->ibcq.cqe - 1;
                        curr_cqe = get_cqe(cq, curr);
                        if ((curr_cqe->qp & 0xFFFF) != qp->qp_handle) {
                                if (curr != tail) {
                                        cqe = get_cqe(cq, tail);
                                        *cqe = *curr_cqe;
                                }
                                tail--;
                        } else {
                                pvrdma_idx_ring_inc(
                                        &cq->ring_state->rx.cons_head,
                                        cq->ibcq.cqe);
                        }
                        curr--;
                }
        }
}

static int pvrdma_poll_one(struct pvrdma_cq *cq, struct pvrdma_qp **cur_qp,
                           struct ib_wc *wc)
{
        struct pvrdma_dev *dev = to_vdev(cq->ibcq.device);
        int has_data;
        unsigned int head;
        bool tried = false;
        struct pvrdma_cqe *cqe;

retry:
        has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
                                            cq->ibcq.cqe, &head);
        if (has_data == 0) {
                if (tried)
                        return -EAGAIN;

                pvrdma_write_uar_cq(dev, cq->cq_handle | PVRDMA_UAR_CQ_POLL);

                tried = true;
                goto retry;
        } else if (has_data == PVRDMA_INVALID_IDX) {
                dev_err(&dev->pdev->dev, "CQ ring state invalid\n");
                return -EAGAIN;
        }

        cqe = get_cqe(cq, head);

        /* Ensure cqe is valid. */
        rmb();
        if (dev->qp_tbl[cqe->qp & 0xffff])
                *cur_qp = (struct pvrdma_qp *)dev->qp_tbl[cqe->qp & 0xffff];
        else
                return -EAGAIN;

        wc->opcode = pvrdma_wc_opcode_to_ib(cqe->opcode);
        wc->status = pvrdma_wc_status_to_ib(cqe->status);
        wc->wr_id = cqe->wr_id;
        wc->qp = &(*cur_qp)->ibqp;
        wc->byte_len = cqe->byte_len;
        wc->ex.imm_data = cqe->imm_data;
        wc->src_qp = cqe->src_qp;
        wc->wc_flags = pvrdma_wc_flags_to_ib(cqe->wc_flags);
        wc->pkey_index = cqe->pkey_index;
        wc->slid = cqe->slid;
        wc->sl = cqe->sl;
        wc->dlid_path_bits = cqe->dlid_path_bits;
        wc->port_num = cqe->port_num;
        wc->vendor_err = cqe->vendor_err;
        wc->network_hdr_type = cqe->network_hdr_type;

        /* Update shared ring state */
        pvrdma_idx_ring_inc(&cq->ring_state->rx.cons_head, cq->ibcq.cqe);

        return 0;
}

/**
 * pvrdma_poll_cq - poll for work completion queue entries
 * @ibcq: completion queue
 * @num_entries: the maximum number of entries
 * @wc: pointer to the work completion array
 *
 * @return: number of polled completion entries
 */
int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        struct pvrdma_cq *cq = to_vcq(ibcq);
        struct pvrdma_qp *cur_qp = NULL;
        unsigned long flags;
        int npolled;

        if (num_entries < 1 || wc == NULL)
                return 0;

        spin_lock_irqsave(&cq->cq_lock, flags);
        for (npolled = 0; npolled < num_entries; ++npolled) {
                if (pvrdma_poll_one(cq, &cur_qp, wc + npolled))
                        break;
        }

        spin_unlock_irqrestore(&cq->cq_lock, flags);

        /* Ensure we do not return errors from poll_cq */
        return npolled;
}
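
/*
 * Illustrative only, not part of this driver: a consumer normally drains the
 * CQ through the core verbs wrapper ib_poll_cq(), which dispatches to
 * pvrdma_poll_cq() above. MAX_WC and handle_wc() are hypothetical names used
 * for the sketch.
 *
 *      struct ib_wc wc[MAX_WC];
 *      int i, n;
 *
 *      while ((n = ib_poll_cq(cq, MAX_WC, wc)) > 0) {
 *              for (i = 0; i < n; i++)
 *                      handle_wc(&wc[i]);
 *      }
 */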