// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/qed/qede_rdma.h>
#include "qede.h"

static struct qedr_driver *qedr_drv;
static LIST_HEAD(qedr_dev_list);
static DEFINE_MUTEX(qedr_dev_list_lock);

bool qede_rdma_supported(struct qede_dev *dev)
{
	return dev->dev_info.common.rdma_supported;
}

static void _qede_rdma_dev_add(struct qede_dev *edev)
{
	if (!qedr_drv)
		return;

	/* Leftovers from previous error recovery */
	edev->rdma_info.exp_recovery = false;
	edev->rdma_info.qedr_dev = qedr_drv->add(edev->cdev, edev->pdev,
						 edev->ndev);
}

static int qede_rdma_create_wq(struct qede_dev *edev)
{
	INIT_LIST_HEAD(&edev->rdma_info.rdma_event_list);
	kref_init(&edev->rdma_info.refcnt);
	init_completion(&edev->rdma_info.event_comp);

	edev->rdma_info.rdma_wq = create_singlethread_workqueue("rdma_wq");
	if (!edev->rdma_info.rdma_wq) {
		DP_NOTICE(edev, "qedr: Could not create workqueue\n");
		return -ENOMEM;
	}

	return 0;
}

static void qede_rdma_cleanup_event(struct qede_dev *edev)
{
	struct list_head *head = &edev->rdma_info.rdma_event_list;
	struct qede_rdma_event_work *event_node;

	flush_workqueue(edev->rdma_info.rdma_wq);
	while (!list_empty(head)) {
		event_node = list_entry(head->next, struct qede_rdma_event_work,
					list);
		cancel_work_sync(&event_node->work);
		list_del(&event_node->list);
		kfree(event_node);
	}
}

static void qede_rdma_complete_event(struct kref *ref)
{
	struct qede_rdma_dev *rdma_dev =
		container_of(ref, struct qede_rdma_dev, refcnt);

	/* no more events will be added after this */
	complete(&rdma_dev->event_comp);
}

static void qede_rdma_destroy_wq(struct qede_dev *edev)
{
	/* Avoid race with add_event flow, make sure it finishes before
	 * we start accessing the list and cleaning up the work
	 */
	kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event);
	wait_for_completion(&edev->rdma_info.event_comp);

	qede_rdma_cleanup_event(edev);
	destroy_workqueue(edev->rdma_info.rdma_wq);
	edev->rdma_info.rdma_wq = NULL;
}

int qede_rdma_dev_add(struct qede_dev *edev, bool recovery)
{
	int rc;

	if (!qede_rdma_supported(edev))
		return 0;

	/* Cannot start qedr while recovering since it wasn't fully stopped */
	if (recovery)
		return 0;

	rc = qede_rdma_create_wq(edev);
	if (rc)
		return rc;

	INIT_LIST_HEAD(&edev->rdma_info.entry);
	mutex_lock(&qedr_dev_list_lock);
	list_add_tail(&edev->rdma_info.entry, &qedr_dev_list);
	_qede_rdma_dev_add(edev);
	mutex_unlock(&qedr_dev_list_lock);

	return rc;
}

static void _qede_rdma_dev_remove(struct qede_dev *edev)
{
	if (qedr_drv && qedr_drv->remove && edev->rdma_info.qedr_dev)
		qedr_drv->remove(edev->rdma_info.qedr_dev);
}

void qede_rdma_dev_remove(struct qede_dev *edev, bool recovery)
{
	if (!qede_rdma_supported(edev))
		return;

	/* Cannot remove qedr while recovering since it wasn't fully stopped */
	if (!recovery) {
		qede_rdma_destroy_wq(edev);
		mutex_lock(&qedr_dev_list_lock);
		if (!edev->rdma_info.exp_recovery)
			_qede_rdma_dev_remove(edev);
		edev->rdma_info.qedr_dev = NULL;
		list_del(&edev->rdma_info.entry);
		mutex_unlock(&qedr_dev_list_lock);
	} else {
		if (!edev->rdma_info.exp_recovery) {
			mutex_lock(&qedr_dev_list_lock);
			_qede_rdma_dev_remove(edev);
			mutex_unlock(&qedr_dev_list_lock);
		}
		edev->rdma_info.exp_recovery = true;
	}
}

static void _qede_rdma_dev_open(struct qede_dev *edev)
{
	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_UP);
}

static void qede_rdma_dev_open(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	mutex_lock(&qedr_dev_list_lock);
	_qede_rdma_dev_open(edev);
	mutex_unlock(&qedr_dev_list_lock);
}

static void _qede_rdma_dev_close(struct qede_dev *edev)
{
	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_DOWN);
}

static void qede_rdma_dev_close(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	mutex_lock(&qedr_dev_list_lock);
	_qede_rdma_dev_close(edev);
	mutex_unlock(&qedr_dev_list_lock);
}

static void qede_rdma_dev_shutdown(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	mutex_lock(&qedr_dev_list_lock);
	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CLOSE);
	mutex_unlock(&qedr_dev_list_lock);
}

int qede_rdma_register_driver(struct qedr_driver *drv)
{
	struct qede_dev *edev;
	u8 qedr_counter = 0;

	mutex_lock(&qedr_dev_list_lock);
	if (qedr_drv) {
		mutex_unlock(&qedr_dev_list_lock);
		return -EINVAL;
	}
	qedr_drv = drv;

	list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
		struct net_device *ndev;

		qedr_counter++;
		_qede_rdma_dev_add(edev);
		ndev = edev->ndev;
		if (netif_running(ndev) && netif_oper_up(ndev))
			_qede_rdma_dev_open(edev);
	}
	mutex_unlock(&qedr_dev_list_lock);

	pr_notice("qedr: discovered and registered %d RDMA funcs\n",
		  qedr_counter);

	return 0;
}
EXPORT_SYMBOL(qede_rdma_register_driver);

void qede_rdma_unregister_driver(struct qedr_driver *drv)
{
	struct qede_dev *edev;

	mutex_lock(&qedr_dev_list_lock);
	list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
		/* If device has experienced recovery it was already removed */
		if (edev->rdma_info.qedr_dev && !edev->rdma_info.exp_recovery)
			_qede_rdma_dev_remove(edev);
	}
	qedr_drv = NULL;
	mutex_unlock(&qedr_dev_list_lock);
}
EXPORT_SYMBOL(qede_rdma_unregister_driver);

static void qede_rdma_changeaddr(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CHANGE_ADDR);
}

static struct qede_rdma_event_work *
qede_rdma_get_free_event_node(struct qede_dev *edev)
{
	struct qede_rdma_event_work *event_node = NULL;
	struct list_head *list_node = NULL;
	bool found = false;

	list_for_each(list_node, &edev->rdma_info.rdma_event_list) {
		event_node = list_entry(list_node, struct qede_rdma_event_work,
					list);
		if (!work_pending(&event_node->work)) {
			found = true;
			break;
		}
	}

	if (!found) {
		event_node = kzalloc(sizeof(*event_node), GFP_ATOMIC);
		if (!event_node) {
			DP_NOTICE(edev,
				  "qedr: Could not allocate memory for rdma work\n");
			return NULL;
		}
		list_add_tail(&event_node->list,
			      &edev->rdma_info.rdma_event_list);
	}

	return event_node;
}

static void qede_rdma_handle_event(struct work_struct *work)
{
	struct qede_rdma_event_work *event_node;
	enum qede_rdma_event event;
	struct qede_dev *edev;

	event_node = container_of(work, struct qede_rdma_event_work, work);
	event = event_node->event;
	edev = event_node->ptr;

	switch (event) {
	case QEDE_UP:
		qede_rdma_dev_open(edev);
		break;
	case QEDE_DOWN:
		qede_rdma_dev_close(edev);
		break;
	case QEDE_CLOSE:
		qede_rdma_dev_shutdown(edev);
		break;
	case QEDE_CHANGE_ADDR:
		qede_rdma_changeaddr(edev);
		break;
	default:
		DP_NOTICE(edev, "Invalid rdma event %d", event);
	}
}

static void qede_rdma_add_event(struct qede_dev *edev,
				enum qede_rdma_event event)
{
	struct qede_rdma_event_work *event_node;

	/* If a recovery was experienced avoid adding the event */
	if (edev->rdma_info.exp_recovery)
		return;

	if (!edev->rdma_info.qedr_dev || !edev->rdma_info.rdma_wq)
		return;

	/* We don't want the cleanup flow to start while we're allocating and
	 * scheduling the work
	 */
	if (!kref_get_unless_zero(&edev->rdma_info.refcnt))
		return; /* already being destroyed */

	event_node = qede_rdma_get_free_event_node(edev);
	if (!event_node)
		goto out;

	event_node->event = event;
	event_node->ptr = edev;

	INIT_WORK(&event_node->work, qede_rdma_handle_event);
	queue_work(edev->rdma_info.rdma_wq, &event_node->work);

out:
	kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event);
}

void qede_rdma_dev_event_open(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_UP);
}

void qede_rdma_dev_event_close(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_DOWN);
}

void qede_rdma_event_changeaddr(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_CHANGE_ADDR);
}
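/* Illustrative sketch only -- not part of qede_rdma.c.  It shows how a
 * consumer RDMA driver (such as qedr) might plug into the registration
 * interface exported above.  Only the add()/remove()/notify() callback
 * shapes and the qede_rdma_register_driver()/qede_rdma_unregister_driver()
 * entry points are taken from the code above; every qedr_example_* name is
 * hypothetical.
 */
#include <linux/module.h>
#include <linux/qed/qede_rdma.h>

static struct qedr_dev *qedr_example_add(struct qed_dev *cdev,
					 struct pci_dev *pdev,
					 struct net_device *ndev)
{
	/* Allocate and initialize the per-function RDMA device here; the
	 * pointer returned is what qede later passes to remove()/notify().
	 */
	return NULL;	/* NULL means "no RDMA device" for this function */
}

static void qedr_example_remove(struct qedr_dev *qedr_dev)
{
	/* Tear down whatever add() created. */
}

static void qedr_example_notify(struct qedr_dev *qedr_dev,
				enum qede_rdma_event event)
{
	/* React to QEDE_UP, QEDE_DOWN, QEDE_CLOSE or QEDE_CHANGE_ADDR. */
}

static struct qedr_driver qedr_example_drv = {
	.add	= qedr_example_add,
	.remove	= qedr_example_remove,
	.notify	= qedr_example_notify,
};

static int __init qedr_example_init(void)
{
	/* Fails with -EINVAL if another qedr driver is already registered. */
	return qede_rdma_register_driver(&qedr_example_drv);
}

static void __exit qedr_example_exit(void)
{
	qede_rdma_unregister_driver(&qedr_example_drv);
}

module_init(qedr_example_init);
module_exit(qedr_example_exit);
MODULE_LICENSE("Dual BSD/GPL");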