/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/qed/qede_rdma.h>
#include "qede.h"

static struct qedr_driver *qedr_drv;
static LIST_HEAD(qedr_dev_list);
static DEFINE_MUTEX(qedr_dev_list_lock);

bool qede_rdma_supported(struct qede_dev *dev)
{
	return dev->dev_info.common.rdma_supported;
}

static void _qede_rdma_dev_add(struct qede_dev *edev)
{
	if (!qedr_drv)
		return;

	/* Leftovers from previous error recovery */
	edev->rdma_info.exp_recovery = false;
	edev->rdma_info.qedr_dev = qedr_drv->add(edev->cdev, edev->pdev,
						 edev->ndev);
}

static int qede_rdma_create_wq(struct qede_dev *edev)
{
	INIT_LIST_HEAD(&edev->rdma_info.rdma_event_list);
	edev->rdma_info.rdma_wq = create_singlethread_workqueue("rdma_wq");
	if (!edev->rdma_info.rdma_wq) {
		DP_NOTICE(edev, "qedr: Could not create workqueue\n");
		return -ENOMEM;
	}

	return 0;
}

static void qede_rdma_cleanup_event(struct qede_dev *edev)
{
	struct list_head *head = &edev->rdma_info.rdma_event_list;
	struct qede_rdma_event_work *event_node;

	flush_workqueue(edev->rdma_info.rdma_wq);
	while (!list_empty(head)) {
		event_node = list_entry(head->next, struct qede_rdma_event_work,
					list);
		cancel_work_sync(&event_node->work);
		list_del(&event_node->list);
		kfree(event_node);
	}
}

static void qede_rdma_destroy_wq(struct qede_dev *edev)
{
	qede_rdma_cleanup_event(edev);
	destroy_workqueue(edev->rdma_info.rdma_wq);
}

int qede_rdma_dev_add(struct qede_dev *edev, bool recovery)
{
	int rc;

	if (!qede_rdma_supported(edev))
		return 0;

	/* Cannot start qedr while recovering since it wasn't fully stopped */
	if (recovery)
		return 0;

	rc = qede_rdma_create_wq(edev);
	if (rc)
		return rc;

	INIT_LIST_HEAD(&edev->rdma_info.entry);
	mutex_lock(&qedr_dev_list_lock);
	list_add_tail(&edev->rdma_info.entry, &qedr_dev_list);
	_qede_rdma_dev_add(edev);
	mutex_unlock(&qedr_dev_list_lock);

	return rc;
}
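
/* Locking convention throughout this file: helpers with a leading
 * underscore (_qede_rdma_dev_add() and friends) expect the caller to
 * already hold qedr_dev_list_lock; the unprefixed wrappers take the
 * mutex themselves before calling into qedr.
 */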

static void _qede_rdma_dev_remove(struct qede_dev *edev)
{
	if (qedr_drv && qedr_drv->remove && edev->rdma_info.qedr_dev)
		qedr_drv->remove(edev->rdma_info.qedr_dev);
}

void qede_rdma_dev_remove(struct qede_dev *edev, bool recovery)
{
	if (!qede_rdma_supported(edev))
		return;

	/* Cannot remove qedr while recovering since it wasn't fully stopped */
	if (!recovery) {
		qede_rdma_destroy_wq(edev);
		mutex_lock(&qedr_dev_list_lock);
		if (!edev->rdma_info.exp_recovery)
			_qede_rdma_dev_remove(edev);
		edev->rdma_info.qedr_dev = NULL;
		list_del(&edev->rdma_info.entry);
		mutex_unlock(&qedr_dev_list_lock);
	} else {
		if (!edev->rdma_info.exp_recovery) {
			mutex_lock(&qedr_dev_list_lock);
			_qede_rdma_dev_remove(edev);
			mutex_unlock(&qedr_dev_list_lock);
		}
		edev->rdma_info.exp_recovery = true;
	}
}

static void _qede_rdma_dev_open(struct qede_dev *edev)
{
	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_UP);
}

static void qede_rdma_dev_open(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	mutex_lock(&qedr_dev_list_lock);
	_qede_rdma_dev_open(edev);
	mutex_unlock(&qedr_dev_list_lock);
}

static void _qede_rdma_dev_close(struct qede_dev *edev)
{
	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_DOWN);
}

static void qede_rdma_dev_close(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	mutex_lock(&qedr_dev_list_lock);
	_qede_rdma_dev_close(edev);
	mutex_unlock(&qedr_dev_list_lock);
}

static void qede_rdma_dev_shutdown(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	mutex_lock(&qedr_dev_list_lock);
	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CLOSE);
	mutex_unlock(&qedr_dev_list_lock);
}

int qede_rdma_register_driver(struct qedr_driver *drv)
{
	struct qede_dev *edev;
	u8 qedr_counter = 0;

	mutex_lock(&qedr_dev_list_lock);
	if (qedr_drv) {
		mutex_unlock(&qedr_dev_list_lock);
		return -EINVAL;
	}
	qedr_drv = drv;

	list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
		struct net_device *ndev;

		qedr_counter++;
		_qede_rdma_dev_add(edev);
		ndev = edev->ndev;
		if (netif_running(ndev) && netif_oper_up(ndev))
			_qede_rdma_dev_open(edev);
	}
	mutex_unlock(&qedr_dev_list_lock);

	pr_notice("qedr: discovered and registered %d RDMA funcs\n",
		  qedr_counter);

	return 0;
}
EXPORT_SYMBOL(qede_rdma_register_driver);

void qede_rdma_unregister_driver(struct qedr_driver *drv)
{
	struct qede_dev *edev;

	mutex_lock(&qedr_dev_list_lock);
	list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
		/* If device has experienced recovery it was already removed */
		if (edev->rdma_info.qedr_dev && !edev->rdma_info.exp_recovery)
			_qede_rdma_dev_remove(edev);
	}
	qedr_drv = NULL;
	mutex_unlock(&qedr_dev_list_lock);
}
EXPORT_SYMBOL(qede_rdma_unregister_driver);

static void qede_rdma_changeaddr(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CHANGE_ADDR);
}
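
/* Events can be raised from contexts that must not sleep, so they are
 * not delivered to qedr directly: qede_rdma_add_event() below allocates
 * with GFP_ATOMIC, queues a work item on rdma_wq, and the ->notify()
 * callback then runs from process context, where taking
 * qedr_dev_list_lock is safe.
 */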

static struct qede_rdma_event_work *
qede_rdma_get_free_event_node(struct qede_dev *edev)
{
	struct qede_rdma_event_work *event_node = NULL;
	struct list_head *list_node = NULL;
	bool found = false;

	list_for_each(list_node, &edev->rdma_info.rdma_event_list) {
		event_node = list_entry(list_node, struct qede_rdma_event_work,
					list);
		if (!work_pending(&event_node->work)) {
			found = true;
			break;
		}
	}

	if (!found) {
		event_node = kzalloc(sizeof(*event_node), GFP_ATOMIC);
		if (!event_node) {
			DP_NOTICE(edev,
				  "qedr: Could not allocate memory for rdma work\n");
			return NULL;
		}
		list_add_tail(&event_node->list,
			      &edev->rdma_info.rdma_event_list);
	}

	return event_node;
}

static void qede_rdma_handle_event(struct work_struct *work)
{
	struct qede_rdma_event_work *event_node;
	enum qede_rdma_event event;
	struct qede_dev *edev;

	event_node = container_of(work, struct qede_rdma_event_work, work);
	event = event_node->event;
	edev = event_node->ptr;

	switch (event) {
	case QEDE_UP:
		qede_rdma_dev_open(edev);
		break;
	case QEDE_DOWN:
		qede_rdma_dev_close(edev);
		break;
	case QEDE_CLOSE:
		qede_rdma_dev_shutdown(edev);
		break;
	case QEDE_CHANGE_ADDR:
		qede_rdma_changeaddr(edev);
		break;
	default:
		DP_NOTICE(edev, "Invalid rdma event %d", event);
	}
}

static void qede_rdma_add_event(struct qede_dev *edev,
				enum qede_rdma_event event)
{
	struct qede_rdma_event_work *event_node;

	/* If a recovery was experienced avoid adding the event */
	if (edev->rdma_info.exp_recovery)
		return;

	if (!edev->rdma_info.qedr_dev)
		return;

	event_node = qede_rdma_get_free_event_node(edev);
	if (!event_node)
		return;

	event_node->event = event;
	event_node->ptr = edev;

	INIT_WORK(&event_node->work, qede_rdma_handle_event);
	queue_work(edev->rdma_info.rdma_wq, &event_node->work);
}

void qede_rdma_dev_event_open(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_UP);
}

void qede_rdma_dev_event_close(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_DOWN);
}

void qede_rdma_event_changeaddr(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_CHANGE_ADDR);
}
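
/*
 * Illustrative sketch (not part of this driver): how an upper-layer RDMA
 * driver would plug into this interface. The callback signatures mirror
 * the calls made above: ->add() receives the qed device, PCI device and
 * netdev and returns a qedr_dev, which qede later hands back through
 * ->remove() and ->notify(). All my_* names below are hypothetical
 * placeholders, and the sketch assumes the registering driver provides
 * its own definition of struct qedr_dev; the real consumer is the qedr
 * InfiniBand driver.
 *
 *	static struct qedr_dev *my_add(struct qed_dev *cdev,
 *				       struct pci_dev *pdev,
 *				       struct net_device *ndev)
 *	{
 *		return kzalloc(sizeof(struct qedr_dev), GFP_KERNEL);
 *	}
 *
 *	static void my_remove(struct qedr_dev *qedr_dev)
 *	{
 *		kfree(qedr_dev);
 *	}
 *
 *	static void my_notify(struct qedr_dev *qedr_dev,
 *			      enum qede_rdma_event event)
 *	{
 *		pr_info("my_rdma: event %d\n", event);
 *	}
 *
 *	static struct qedr_driver my_drv = {
 *		.add	= my_add,
 *		.remove	= my_remove,
 *		.notify	= my_notify,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return qede_rdma_register_driver(&my_drv);
 *	}
 *	module_init(my_init);
 *
 *	static void __exit my_exit(void)
 *	{
 *		qede_rdma_unregister_driver(&my_drv);
 *	}
 *	module_exit(my_exit);
 */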