// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_queue.h"

int rxe_srq_chk_init(struct rxe_dev *rxe, struct ib_srq_init_attr *init)
{
	struct ib_srq_attr *attr = &init->attr;

	if (attr->max_wr > rxe->attr.max_srq_wr) {
		rxe_dbg(rxe, "max_wr(%d) > max_srq_wr(%d)\n",
			attr->max_wr, rxe->attr.max_srq_wr);
		goto err1;
	}

	if (attr->max_wr <= 0) {
		rxe_dbg(rxe, "max_wr(%d) <= 0\n", attr->max_wr);
		goto err1;
	}

	if (attr->max_wr < RXE_MIN_SRQ_WR)
		attr->max_wr = RXE_MIN_SRQ_WR;

	if (attr->max_sge > rxe->attr.max_srq_sge) {
		rxe_dbg(rxe, "max_sge(%d) > max_srq_sge(%d)\n",
			attr->max_sge, rxe->attr.max_srq_sge);
		goto err1;
	}

	if (attr->max_sge < RXE_MIN_SRQ_SGE)
		attr->max_sge = RXE_MIN_SRQ_SGE;

	return 0;

err1:
	return -EINVAL;
}

int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_init_attr *init, struct ib_udata *udata,
		      struct rxe_create_srq_resp __user *uresp)
{
	int err;
	int srq_wqe_size;
	struct rxe_queue *q;
	enum queue_type type;

	srq->ibsrq.event_handler = init->event_handler;
	srq->ibsrq.srq_context = init->srq_context;
	srq->limit = init->attr.srq_limit;
	srq->srq_num = srq->elem.index;
	srq->rq.max_wr = init->attr.max_wr;
	srq->rq.max_sge = init->attr.max_sge;

	srq_wqe_size = rcv_wqe_size(srq->rq.max_sge);

	spin_lock_init(&srq->rq.producer_lock);
	spin_lock_init(&srq->rq.consumer_lock);

	type = QUEUE_TYPE_FROM_CLIENT;
	q = rxe_queue_init(rxe, &srq->rq.max_wr, srq_wqe_size, type);
	if (!q) {
		rxe_dbg_srq(srq, "Unable to allocate queue\n");
		return -ENOMEM;
	}

	srq->rq.queue = q;

	err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata, q->buf,
			   q->buf_size, &q->ip);
	if (err) {
		vfree(q->buf);
		kfree(q);
		return err;
	}

	if (uresp) {
		if (copy_to_user(&uresp->srq_num, &srq->srq_num,
				 sizeof(uresp->srq_num))) {
			rxe_queue_cleanup(q);
			return -EFAULT;
		}
	}

	return 0;
}

int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
		     struct ib_srq_attr *attr, enum ib_srq_attr_mask mask)
{
	if (srq->error) {
		rxe_dbg_srq(srq, "in error state\n");
		goto err1;
	}

	if (mask & IB_SRQ_MAX_WR) {
		if (attr->max_wr > rxe->attr.max_srq_wr) {
			rxe_dbg_srq(srq, "max_wr(%d) > max_srq_wr(%d)\n",
				attr->max_wr, rxe->attr.max_srq_wr);
			goto err1;
		}

		if (attr->max_wr <= 0) {
			rxe_dbg_srq(srq, "max_wr(%d) <= 0\n", attr->max_wr);
			goto err1;
		}

		if (srq->limit && (attr->max_wr < srq->limit)) {
			rxe_dbg_srq(srq, "max_wr (%d) < srq->limit (%d)\n",
				attr->max_wr, srq->limit);
			goto err1;
		}

		if (attr->max_wr < RXE_MIN_SRQ_WR)
			attr->max_wr = RXE_MIN_SRQ_WR;
	}

	if (mask & IB_SRQ_LIMIT) {
		if (attr->srq_limit > rxe->attr.max_srq_wr) {
			rxe_dbg_srq(srq, "srq_limit(%d) > max_srq_wr(%d)\n",
				attr->srq_limit, rxe->attr.max_srq_wr);
			goto err1;
		}

		if (attr->srq_limit > srq->rq.queue->buf->index_mask) {
			rxe_dbg_srq(srq, "srq_limit (%d) > cur limit(%d)\n",
				attr->srq_limit,
				srq->rq.queue->buf->index_mask);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
		      struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata)
{
	int err;
	struct rxe_queue *q = srq->rq.queue;
	struct mminfo __user *mi = NULL;

	if (mask & IB_SRQ_MAX_WR) {
		/*
		 * This is completely screwed up, the response is supposed to
		 * be in the outbuf not like this.
		 */
		mi = u64_to_user_ptr(ucmd->mmap_info_addr);

		err = rxe_queue_resize(q, &attr->max_wr,
				       rcv_wqe_size(srq->rq.max_sge), udata, mi,
				       &srq->rq.producer_lock,
				       &srq->rq.consumer_lock);
		if (err)
			goto err2;
	}

	if (mask & IB_SRQ_LIMIT)
		srq->limit = attr->srq_limit;

	return 0;

err2:
	rxe_queue_cleanup(q);
	srq->rq.queue = NULL;
	return err;
}

void rxe_srq_cleanup(struct rxe_pool_elem *elem)
{
	struct rxe_srq *srq = container_of(elem, typeof(*srq), elem);

	if (srq->pd)
		rxe_put(srq->pd);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);
}