/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/qed/qede_rdma.h>
#include "qede.h"

static struct qedr_driver *qedr_drv;
static LIST_HEAD(qedr_dev_list);
static DEFINE_MUTEX(qedr_dev_list_lock);

bool qede_rdma_supported(struct qede_dev *dev)
{
	return dev->dev_info.common.rdma_supported;
}

static void _qede_rdma_dev_add(struct qede_dev *edev)
{
	if (!qedr_drv)
		return;

	/* Leftovers from previous error recovery */
	edev->rdma_info.exp_recovery = false;
	edev->rdma_info.qedr_dev = qedr_drv->add(edev->cdev, edev->pdev,
						 edev->ndev);
}

static int qede_rdma_create_wq(struct qede_dev *edev)
{
	INIT_LIST_HEAD(&edev->rdma_info.rdma_event_list);
	kref_init(&edev->rdma_info.refcnt);
	init_completion(&edev->rdma_info.event_comp);

	edev->rdma_info.rdma_wq = create_singlethread_workqueue("rdma_wq");
	if (!edev->rdma_info.rdma_wq) {
		DP_NOTICE(edev, "qedr: Could not create workqueue\n");
		return -ENOMEM;
	}

	return 0;
}

static void qede_rdma_cleanup_event(struct qede_dev *edev)
{
	struct list_head *head = &edev->rdma_info.rdma_event_list;
	struct qede_rdma_event_work *event_node;

	flush_workqueue(edev->rdma_info.rdma_wq);
	while (!list_empty(head)) {
		event_node = list_entry(head->next, struct qede_rdma_event_work,
					list);
		cancel_work_sync(&event_node->work);
		list_del(&event_node->list);
		kfree(event_node);
	}
}

static void qede_rdma_complete_event(struct kref *ref)
{
	struct qede_rdma_dev *rdma_dev =
		container_of(ref, struct qede_rdma_dev, refcnt);

	/* no more events will be added after this */
	complete(&rdma_dev->event_comp);
}

static void qede_rdma_destroy_wq(struct qede_dev *edev)
{
	/* Avoid race with add_event flow, make sure it finishes before
	 * we start accessing the list and cleaning up the work
	 */
	kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event);
	wait_for_completion(&edev->rdma_info.event_comp);

	qede_rdma_cleanup_event(edev);
	destroy_workqueue(edev->rdma_info.rdma_wq);
	edev->rdma_info.rdma_wq = NULL;
}
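/* Sketch (illustrative only, not compiled): the refcnt/event_comp pair
 * used above implements a small producer/teardown handshake. kref_init()
 * in qede_rdma_create_wq() sets the count to 1, acting as the "device is
 * alive" reference. A producer (qede_rdma_add_event() below) does:
 *
 *	if (!kref_get_unless_zero(&edev->rdma_info.refcnt))
 *		return;		// teardown already started, don't queue
 *	// ...allocate and queue the event work...
 *	kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event);
 *
 * and teardown drops the initial reference, then blocks; whichever
 * kref_put() drops the count to zero fires complete(), so by the time
 * wait_for_completion() returns no producer can still touch the list:
 *
 *	kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event);
 *	wait_for_completion(&edev->rdma_info.event_comp);
 */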
int qede_rdma_dev_add(struct qede_dev *edev, bool recovery)
{
	int rc;

	if (!qede_rdma_supported(edev))
		return 0;

	/* Cannot start qedr while recovering since it wasn't fully stopped */
	if (recovery)
		return 0;

	rc = qede_rdma_create_wq(edev);
	if (rc)
		return rc;

	INIT_LIST_HEAD(&edev->rdma_info.entry);
	mutex_lock(&qedr_dev_list_lock);
	list_add_tail(&edev->rdma_info.entry, &qedr_dev_list);
	_qede_rdma_dev_add(edev);
	mutex_unlock(&qedr_dev_list_lock);

	return rc;
}

static void _qede_rdma_dev_remove(struct qede_dev *edev)
{
	if (qedr_drv && qedr_drv->remove && edev->rdma_info.qedr_dev)
		qedr_drv->remove(edev->rdma_info.qedr_dev);
}

void qede_rdma_dev_remove(struct qede_dev *edev, bool recovery)
{
	if (!qede_rdma_supported(edev))
		return;

	/* Cannot remove qedr while recovering since it wasn't fully stopped */
	if (!recovery) {
		qede_rdma_destroy_wq(edev);
		mutex_lock(&qedr_dev_list_lock);
		if (!edev->rdma_info.exp_recovery)
			_qede_rdma_dev_remove(edev);
		edev->rdma_info.qedr_dev = NULL;
		list_del(&edev->rdma_info.entry);
		mutex_unlock(&qedr_dev_list_lock);
	} else {
		if (!edev->rdma_info.exp_recovery) {
			mutex_lock(&qedr_dev_list_lock);
			_qede_rdma_dev_remove(edev);
			mutex_unlock(&qedr_dev_list_lock);
		}
		edev->rdma_info.exp_recovery = true;
	}
}

static void _qede_rdma_dev_open(struct qede_dev *edev)
{
	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_UP);
}

static void qede_rdma_dev_open(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	mutex_lock(&qedr_dev_list_lock);
	_qede_rdma_dev_open(edev);
	mutex_unlock(&qedr_dev_list_lock);
}

static void _qede_rdma_dev_close(struct qede_dev *edev)
{
	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_DOWN);
}

static void qede_rdma_dev_close(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	mutex_lock(&qedr_dev_list_lock);
	_qede_rdma_dev_close(edev);
	mutex_unlock(&qedr_dev_list_lock);
}

static void qede_rdma_dev_shutdown(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	mutex_lock(&qedr_dev_list_lock);
	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CLOSE);
	mutex_unlock(&qedr_dev_list_lock);
}

int qede_rdma_register_driver(struct qedr_driver *drv)
{
	struct qede_dev *edev;
	u8 qedr_counter = 0;

	mutex_lock(&qedr_dev_list_lock);
	if (qedr_drv) {
		mutex_unlock(&qedr_dev_list_lock);
		return -EINVAL;
	}
	qedr_drv = drv;

	list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
		struct net_device *ndev;

		qedr_counter++;
		_qede_rdma_dev_add(edev);
		ndev = edev->ndev;
		if (netif_running(ndev) && netif_oper_up(ndev))
			_qede_rdma_dev_open(edev);
	}
	mutex_unlock(&qedr_dev_list_lock);

	pr_notice("qedr: discovered and registered %d RDMA funcs\n",
		  qedr_counter);

	return 0;
}
EXPORT_SYMBOL(qede_rdma_register_driver);
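/* Caller-side sketch (illustrative): an upper-layer RDMA driver such as
 * qedr is expected to fill in the struct qedr_driver ops used above
 * (->add, ->remove, ->notify) and register at module init. The names
 * below are placeholders, not the actual qedr symbols:
 *
 *	static struct qedr_driver example_drv = {
 *		.add	= example_add,		// returns struct qedr_dev *
 *		.remove	= example_remove,
 *		.notify	= example_notify,	// takes enum qede_rdma_event
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return qede_rdma_register_driver(&example_drv);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		qede_rdma_unregister_driver(&example_drv);
 *	}
 *
 * Registration replays ->add() (and ->notify(QEDE_UP) for interfaces
 * already up) for every qede device probed before the RDMA driver
 * loaded, which is why qede_rdma_register_driver() walks qedr_dev_list.
 */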
void qede_rdma_unregister_driver(struct qedr_driver *drv)
{
	struct qede_dev *edev;

	mutex_lock(&qedr_dev_list_lock);
	list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
		/* If device has experienced recovery it was already removed */
		if (edev->rdma_info.qedr_dev && !edev->rdma_info.exp_recovery)
			_qede_rdma_dev_remove(edev);
	}
	qedr_drv = NULL;
	mutex_unlock(&qedr_dev_list_lock);
}
EXPORT_SYMBOL(qede_rdma_unregister_driver);

static void qede_rdma_changeaddr(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CHANGE_ADDR);
}

static struct qede_rdma_event_work *
qede_rdma_get_free_event_node(struct qede_dev *edev)
{
	struct qede_rdma_event_work *event_node = NULL;
	struct list_head *list_node = NULL;
	bool found = false;

	/* Reuse a previously allocated node whose work is no longer pending */
	list_for_each(list_node, &edev->rdma_info.rdma_event_list) {
		event_node = list_entry(list_node, struct qede_rdma_event_work,
					list);
		if (!work_pending(&event_node->work)) {
			found = true;
			break;
		}
	}

	if (!found) {
		/* No free node; allocate one (atomically, since callers may
		 * not be able to sleep) and keep it listed for reuse/cleanup
		 */
		event_node = kzalloc(sizeof(*event_node), GFP_ATOMIC);
		if (!event_node) {
			DP_NOTICE(edev,
				  "qedr: Could not allocate memory for rdma work\n");
			return NULL;
		}
		list_add_tail(&event_node->list,
			      &edev->rdma_info.rdma_event_list);
	}

	return event_node;
}

static void qede_rdma_handle_event(struct work_struct *work)
{
	struct qede_rdma_event_work *event_node;
	enum qede_rdma_event event;
	struct qede_dev *edev;

	event_node = container_of(work, struct qede_rdma_event_work, work);
	event = event_node->event;
	edev = event_node->ptr;

	switch (event) {
	case QEDE_UP:
		qede_rdma_dev_open(edev);
		break;
	case QEDE_DOWN:
		qede_rdma_dev_close(edev);
		break;
	case QEDE_CLOSE:
		qede_rdma_dev_shutdown(edev);
		break;
	case QEDE_CHANGE_ADDR:
		qede_rdma_changeaddr(edev);
		break;
	default:
		DP_NOTICE(edev, "Invalid rdma event %d", event);
	}
}

static void qede_rdma_add_event(struct qede_dev *edev,
				enum qede_rdma_event event)
{
	struct qede_rdma_event_work *event_node;

	/* If a recovery was experienced avoid adding the event */
	if (edev->rdma_info.exp_recovery)
		return;

	if (!edev->rdma_info.qedr_dev || !edev->rdma_info.rdma_wq)
		return;

	/* We don't want the cleanup flow to start while we're allocating and
	 * scheduling the work
	 */
	if (!kref_get_unless_zero(&edev->rdma_info.refcnt))
		return; /* already being destroyed */

	event_node = qede_rdma_get_free_event_node(edev);
	if (!event_node)
		goto out;

	event_node->event = event;
	event_node->ptr = edev;

	INIT_WORK(&event_node->work, qede_rdma_handle_event);
	queue_work(edev->rdma_info.rdma_wq, &event_node->work);

out:
	kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event);
}

void qede_rdma_dev_event_open(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_UP);
}

void qede_rdma_dev_event_close(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_DOWN);
}

void qede_rdma_event_changeaddr(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_CHANGE_ADDR);
}
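/* Event-producer sketch (illustrative): qede's own link and address
 * handling is the intended caller of the three exported helpers above.
 * A link handler would look roughly like this (function name
 * hypothetical, not a real qede symbol):
 *
 *	static void example_link_update(struct qede_dev *edev, bool link_up)
 *	{
 *		if (link_up)
 *			qede_rdma_dev_event_open(edev);
 *		else
 *			qede_rdma_dev_event_close(edev);
 *	}
 *
 * Because rdma_wq is a single-threaded workqueue, events queued this way
 * reach the RDMA driver one at a time and in order, and the ->notify()
 * callbacks always run in process context even when the producer runs in
 * atomic context (hence GFP_ATOMIC in the event-node allocation).
 */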