/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
		     struct ib_srq_attr *attr, enum ib_srq_attr_mask mask)
{
	if (srq && srq->error) {
		pr_warn("srq in error state\n");
		goto err1;
	}

	if (mask & IB_SRQ_MAX_WR) {
		if (attr->max_wr > rxe->attr.max_srq_wr) {
			pr_warn("max_wr(%d) > max_srq_wr(%d)\n",
				attr->max_wr, rxe->attr.max_srq_wr);
			goto err1;
		}

		if (attr->max_wr <= 0) {
			pr_warn("max_wr(%d) <= 0\n", attr->max_wr);
			goto err1;
		}

		if (srq && srq->limit && (attr->max_wr < srq->limit)) {
			pr_warn("max_wr (%d) < srq->limit (%d)\n",
				attr->max_wr, srq->limit);
			goto err1;
		}

		if (attr->max_wr < RXE_MIN_SRQ_WR)
			attr->max_wr = RXE_MIN_SRQ_WR;
	}

	if (mask & IB_SRQ_LIMIT) {
		if (attr->srq_limit > rxe->attr.max_srq_wr) {
			pr_warn("srq_limit(%d) > max_srq_wr(%d)\n",
				attr->srq_limit, rxe->attr.max_srq_wr);
			goto err1;
		}

		if (srq && (attr->srq_limit > srq->rq.queue->buf->index_mask)) {
			pr_warn("srq_limit (%d) > cur limit(%d)\n",
				attr->srq_limit,
				srq->rq.queue->buf->index_mask);
			goto err1;
		}
	}

	if (mask == IB_SRQ_INIT_MASK) {
		if (attr->max_sge > rxe->attr.max_srq_sge) {
			pr_warn("max_sge(%d) > max_srq_sge(%d)\n",
				attr->max_sge, rxe->attr.max_srq_sge);
			goto err1;
		}

		if (attr->max_sge < RXE_MIN_SRQ_SGE)
			attr->max_sge = RXE_MIN_SRQ_SGE;
	}

	return 0;

err1:
	return -EINVAL;
}

int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_init_attr *init,
		      struct ib_ucontext *context,
		      struct rxe_create_srq_resp __user *uresp)
{
	int err;
	int srq_wqe_size;
	struct rxe_queue *q;

	srq->ibsrq.event_handler = init->event_handler;
	srq->ibsrq.srq_context = init->srq_context;
	srq->limit = init->attr.srq_limit;
	srq->srq_num = srq->pelem.index;
	srq->rq.max_wr = init->attr.max_wr;
	srq->rq.max_sge = init->attr.max_sge;

	srq_wqe_size = rcv_wqe_size(srq->rq.max_sge);

	spin_lock_init(&srq->rq.producer_lock);
	spin_lock_init(&srq->rq.consumer_lock);

	q = rxe_queue_init(rxe, &srq->rq.max_wr,
			   srq_wqe_size);
	if (!q) {
		pr_warn("unable to allocate queue for srq\n");
		return -ENOMEM;
	}

	srq->rq.queue = q;

	err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, context, q->buf,
			   q->buf_size, &q->ip);
	if (err) {
		vfree(q->buf);
		kfree(q);
		return err;
	}

	if (uresp) {
		if (copy_to_user(&uresp->srq_num, &srq->srq_num,
				 sizeof(uresp->srq_num))) {
			rxe_queue_cleanup(q);
			return -EFAULT;
		}
	}

	return 0;
}

int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
		      struct rxe_modify_srq_cmd *ucmd)
{
	int err;
	struct rxe_queue *q = srq->rq.queue;
	struct mminfo __user *mi = NULL;

	if (mask & IB_SRQ_MAX_WR) {
		/*
		 * This is completely screwed up, the response is supposed to
		 * be in the outbuf not like this.
		 */
		mi = u64_to_user_ptr(ucmd->mmap_info_addr);

		err = rxe_queue_resize(q, &attr->max_wr,
				       rcv_wqe_size(srq->rq.max_sge),
				       srq->rq.queue->ip ?
						srq->rq.queue->ip->context :
						NULL,
				       mi, &srq->rq.producer_lock,
				       &srq->rq.consumer_lock);
		if (err)
			goto err2;
	}

	if (mask & IB_SRQ_LIMIT)
		srq->limit = attr->srq_limit;

	return 0;

err2:
	rxe_queue_cleanup(q);
	srq->rq.queue = NULL;
	return err;
}