/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

#include <linux/mlx4/cmd.h>
#include <linux/cpu_rmap.h>

#include "mlx4.h"
#include "fw.h"

enum {
	MLX4_IRQNAME_SIZE	= 32
};

enum {
	MLX4_NUM_ASYNC_EQE	= 0x100,
	MLX4_NUM_SPARE_EQE	= 0x80,
	MLX4_EQ_ENTRY_SIZE	= 0x20
};

#define MLX4_EQ_STATUS_OK	   ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MLX4_EQ_OWNER_SW	   ( 0 << 24)
#define MLX4_EQ_OWNER_HW	   ( 1 << 24)
#define MLX4_EQ_FLAG_EC		   ( 1 << 18)
#define MLX4_EQ_FLAG_OI		   ( 1 << 17)
#define MLX4_EQ_STATE_ARMED	   ( 9 <<  8)
#define MLX4_EQ_STATE_FIRED	   (10 <<  8)
#define MLX4_EQ_STATE_ALWAYS_ARMED (11 <<  8)

#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX4_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX4_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX4_EVENT_TYPE_ECC_DETECT)	    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)	    | \
			       (1ull << MLX4_EVENT_TYPE_CMD)		    | \
			       (1ull << MLX4_EVENT_TYPE_OP_REQUIRED)	    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL)	    | \
			       (1ull << MLX4_EVENT_TYPE_FLR_EVENT)	    | \
			       (1ull << MLX4_EVENT_TYPE_FATAL_WARNING))

static u64 get_async_ev_mask(struct mlx4_dev *dev)
{
	u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK;

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
		async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT);
	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
		async_ev_mask |= (1ull <<
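
/*
 * The EQ is a cyclic buffer of eq->nent entries.  Ownership of an entry
 * alternates between hardware and software on every pass over the ring:
 * an entry belongs to software when its owner bit (0x80) matches the
 * "pass parity" of the consumer index (cons_index & eq->nent).
 * next_eqe_sw() below implements exactly this test; the software slave
 * event queue further down reuses the same owner-bit handshake, with
 * the opposite polarity.
 */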
				  MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT);

	return async_ev_mask;
}

static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
	__raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
					       req_not << 31),
		     eq->doorbell);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}

static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor,
				u8 eqe_size)
{
	/* (entry & (eq->nent - 1)) gives us a cyclic array */
	unsigned long offset = (entry & (eq->nent - 1)) * eqe_size;

	/* CX3 is capable of extending the EQE from 32 to 64 bytes with
	 * strides of 64B, 128B and 256B.
	 * When 64B EQE is used, the first (in the lower addresses)
	 * 32 bytes in the 64 byte EQE are reserved and the next 32 bytes
	 * contain the legacy EQE information.
	 * In all other cases, the first 32B contains the legacy EQE info.
	 */
	return eq->page_list[offset / PAGE_SIZE].buf +
		(offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE;
}

static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor, u8 size)
{
	struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor, size);

	return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}

static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq)
{
	struct mlx4_eqe *eqe =
		&slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)];

	return (!!(eqe->owner & 0x80) ^
		!!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ?
		eqe : NULL;
}

void mlx4_gen_slave_eqe(struct work_struct *work)
{
	struct mlx4_mfunc_master_ctx *master =
		container_of(work, struct mlx4_mfunc_master_ctx,
			     slave_event_work);
	struct mlx4_mfunc *mfunc =
		container_of(master, struct mlx4_mfunc, master);
	struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc);
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq;
	struct mlx4_eqe *eqe;
	u8 slave;
	int i;

	for (eqe = next_slave_event_eqe(slave_eq); eqe;
	      eqe = next_slave_event_eqe(slave_eq)) {
		slave = eqe->slave_id;

		/* All active slaves need to receive the event */
		if (slave == ALL_SLAVES) {
			for (i = 0; i <= dev->persist->num_vfs; i++) {
				if (mlx4_GEN_EQE(dev, i, eqe))
					mlx4_warn(dev, "Failed to generate event for slave %d\n",
						  i);
			}
		} else {
			if (mlx4_GEN_EQE(dev, slave, eqe))
				mlx4_warn(dev, "Failed to generate event for slave %d\n",
					  slave);
		}
		++slave_eq->cons;
	}
}
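
/*
 * Producer side of the software slave event queue consumed by
 * mlx4_gen_slave_eqe() above.  The copied EQE payload must be fully
 * visible before the owner byte hands the entry over to the consumer,
 * hence the dma_wmb() before the final write to s_eqe->owner.
 */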
static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq;
	struct mlx4_eqe *s_eqe;
	unsigned long flags;

	spin_lock_irqsave(&slave_eq->event_lock, flags);
	s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
	if ((!!(s_eqe->owner & 0x80)) ^
	    (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
		mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. No free EQE on slave events queue\n",
			  slave);
		spin_unlock_irqrestore(&slave_eq->event_lock, flags);
		return;
	}

	memcpy(s_eqe, eqe, dev->caps.eqe_size - 1);
	s_eqe->slave_id = slave;
	/* ensure all information is written before setting the ownership bit */
	dma_wmb();
	s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
	++slave_eq->prod;

	queue_work(priv->mfunc.master.comm_wq,
		   &priv->mfunc.master.slave_event_work);
	spin_unlock_irqrestore(&slave_eq->event_lock, flags);
}

static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
			     struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (slave < 0 || slave > dev->persist->num_vfs ||
	    slave == dev->caps.function ||
	    !priv->mfunc.master.slave_state[slave].active)
		return;

	slave_event(dev, slave, eqe);
}

int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port)
{
	struct mlx4_eqe eqe;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave = &priv->mfunc.master.slave_state[slave];

	if (!s_slave->active)
		return 0;

	memset(&eqe, 0, sizeof eqe);

	eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
	eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE;
	eqe.event.port_mgmt_change.port = port;

	return mlx4_GEN_EQE(dev, slave, &eqe);
}
EXPORT_SYMBOL(mlx4_gen_pkey_eqe);

int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port)
{
	struct mlx4_eqe eqe;

	/* don't send if we don't have that slave */
	if (dev->persist->num_vfs < slave)
		return 0;
	memset(&eqe, 0, sizeof eqe);

	eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
	eqe.subtype = MLX4_DEV_PMC_SUBTYPE_GUID_INFO;
	eqe.event.port_mgmt_change.port = port;

	return mlx4_GEN_EQE(dev, slave, &eqe);
}
EXPORT_SYMBOL(mlx4_gen_guid_change_eqe);

int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port,
				   u8 port_subtype_change)
{
	struct mlx4_eqe eqe;

	/* don't send if we don't have that slave */
	if (dev->persist->num_vfs < slave)
		return 0;
	memset(&eqe, 0, sizeof eqe);

	eqe.type = MLX4_EVENT_TYPE_PORT_CHANGE;
	eqe.subtype = port_subtype_change;
	eqe.event.port_change.port = cpu_to_be32(port << 28);

	mlx4_dbg(dev, "%s: sending: %d to slave: %d on port: %d\n", __func__,
		 port_subtype_change, slave, port);
	return mlx4_GEN_EQE(dev, slave, &eqe);
}
EXPORT_SYMBOL(mlx4_gen_port_state_change_eqe);

enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave, u8 port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);

	if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
	    port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
		pr_err("%s: Error: asking for slave:%d, port:%d\n",
		       __func__, slave, port);
		return SLAVE_PORT_DOWN;
	}
	return s_state[slave].port_state[port];
}
EXPORT_SYMBOL(mlx4_get_slave_port_state);

static int mlx4_set_slave_port_state(struct mlx4_dev *dev, int slave, u8 port,
				     enum slave_port_state state)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);

	if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
	    port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
		pr_err("%s: Error: asking for slave:%d, port:%d\n",
		       __func__, slave, port);
		return -1;
	}
	s_state[slave].port_state[port] = state;

	return 0;
}

static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event)
{
	int i;
	enum slave_port_gen_event gen_event;
	struct mlx4_slaves_pport slaves_pport = mlx4_phys_to_slaves_pport(dev,
									  port);

	for (i = 0; i < dev->persist->num_vfs + 1; i++)
		if (test_bit(i, slaves_pport.slaves))
			set_and_calc_slave_port_state(dev, i, port,
						      event, &gen_event);
}

/**************************************************************************
	The function receives as input the new event for that port,
	and changes the slave's port state according to the previous state.
	The events are:
		MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
		MLX4_PORT_STATE_DEV_EVENT_PORT_UP
		MLX4_PORT_STATE_IB_EVENT_GID_VALID
		MLX4_PORT_STATE_IB_EVENT_GID_INVALID
***************************************************************************/
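/*
 * Summary of the transitions implemented by the switch below:
 *	SLAVE_PORT_DOWN  + PORT_UP     -> SLAVE_PENDING_UP
 *	SLAVE_PENDING_UP + PORT_DOWN   -> SLAVE_PORT_DOWN
 *	SLAVE_PENDING_UP + GID_VALID   -> SLAVE_PORT_UP    (gen_event = UP)
 *	SLAVE_PORT_UP    + PORT_DOWN   -> SLAVE_PORT_DOWN  (gen_event = DOWN)
 *	SLAVE_PORT_UP    + GID_INVALID -> SLAVE_PENDING_UP (gen_event = DOWN)
 */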
int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
				  u8 port, int event,
				  enum slave_port_gen_event *gen_event)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *ctx = NULL;
	unsigned long flags;
	int ret = -1;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
	enum slave_port_state cur_state =
		mlx4_get_slave_port_state(dev, slave, port);

	*gen_event = SLAVE_PORT_GEN_EVENT_NONE;

	if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
	    port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
		pr_err("%s: Error: asking for slave:%d, port:%d\n",
		       __func__, slave, port);
		return ret;
	}

	ctx = &priv->mfunc.master.slave_state[slave];
	spin_lock_irqsave(&ctx->lock, flags);

	switch (cur_state) {
	case SLAVE_PORT_DOWN:
		if (MLX4_PORT_STATE_DEV_EVENT_PORT_UP == event)
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PENDING_UP);
		break;
	case SLAVE_PENDING_UP:
		if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event)
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PORT_DOWN);
		else if (MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID == event) {
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PORT_UP);
			*gen_event = SLAVE_PORT_GEN_EVENT_UP;
		}
		break;
	case SLAVE_PORT_UP:
		if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event) {
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PORT_DOWN);
			*gen_event = SLAVE_PORT_GEN_EVENT_DOWN;
		} else if (MLX4_PORT_STATE_IB_EVENT_GID_INVALID ==
				event) {
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PENDING_UP);
			*gen_event = SLAVE_PORT_GEN_EVENT_DOWN;
		}
		break;
	default:
		pr_err("%s: BUG!!! UNKNOWN state: slave:%d, port:%d\n",
		       __func__, slave, port);
		goto out;
	}
	ret = mlx4_get_slave_port_state(dev, slave, port);

out:
	spin_unlock_irqrestore(&ctx->lock, flags);
	return ret;
}
EXPORT_SYMBOL(set_and_calc_slave_port_state);
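
/*
 * Broadcast a PORT_INFO management change event for the given port to
 * all slaves; attr describes which port attributes changed.
 */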
int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr)
{
	struct mlx4_eqe eqe;

	memset(&eqe, 0, sizeof eqe);

	eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
	eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PORT_INFO;
	eqe.event.port_mgmt_change.port = port;
	eqe.event.port_mgmt_change.params.port_info.changed_attr =
		cpu_to_be32((u32) attr);

	slave_event(dev, ALL_SLAVES, &eqe);
	return 0;
}
EXPORT_SYMBOL(mlx4_gen_slaves_port_mgt_ev);

void mlx4_master_handle_slave_flr(struct work_struct *work)
{
	struct mlx4_mfunc_master_ctx *master =
		container_of(work, struct mlx4_mfunc_master_ctx,
			     slave_flr_event_work);
	struct mlx4_mfunc *mfunc =
		container_of(master, struct mlx4_mfunc, master);
	struct mlx4_priv *priv =
		container_of(mfunc, struct mlx4_priv, mfunc);
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int i;
	int err;
	unsigned long flags;

	mlx4_dbg(dev, "mlx4_handle_slave_flr\n");

	for (i = 0 ; i < dev->num_slaves; i++) {

		if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
			mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n",
				 i);
			/* In case of 'Reset flow' FLR can be generated for
			 * a slave before mlx4_load_one is done.
			 * Make sure the interface is up before trying to
			 * delete slave resources which weren't allocated yet.
			 */
			if (dev->persist->interface_state &
			    MLX4_INTERFACE_STATE_UP)
				mlx4_delete_all_resources_for_slave(dev, i);
			/* return the slave to running mode */
			spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
			slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
			slave_state[i].is_slave_going_down = 0;
			spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
			/* notify the FW: */
			err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
			if (err)
				mlx4_warn(dev, "Failed to notify FW on FLR done (slave:%d)\n",
					  i);
		}
	}
}
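
/*
 * Main EQ polling loop: consume every software-owned EQE, dispatch it
 * by event type, and ring the consumer-index doorbell.  The EQ is
 * created with MLX4_NUM_SPARE_EQE extra entries, so the consumer index
 * must be pushed to the HCA at least that often while the loop runs
 * (see the eq_set_ci() call near the bottom of the loop).
 */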
static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_eqe *eqe;
	int cqn = -1;
	int eqes_found = 0;
	int set_ci = 0;
	int port;
	int slave = 0;
	int ret;
	u32 flr_slave;
	u8 update_slave_state;
	int i;
	enum slave_port_gen_event gen_event;
	unsigned long flags;
	struct mlx4_vport_state *s_info;
	int eqe_size = dev->caps.eqe_size;

	while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor, eqe_size))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		switch (eqe->type) {
		case MLX4_EVENT_TYPE_COMP:
			cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
			mlx4_cq_completion(dev, cqn);
			break;

		case MLX4_EVENT_TYPE_PATH_MIG:
		case MLX4_EVENT_TYPE_COMM_EST:
		case MLX4_EVENT_TYPE_SQ_DRAINED:
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			mlx4_dbg(dev, "event %d arrived\n", eqe->type);
			if (mlx4_is_master(dev)) {
				/* forward only to slave owning the QP */
				ret = mlx4_get_slave_from_resource_id(dev,
						RES_QP,
						be32_to_cpu(eqe->event.qp.qpn)
						& 0xffffff, &slave);
				if (ret && ret != -ENOENT) {
					mlx4_dbg(dev, "QP event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
						 eqe->type, eqe->subtype,
						 eq->eqn, eq->cons_index, ret);
					break;
				}

				if (!ret && slave != dev->caps.function) {
					mlx4_slave_event(dev, slave, eqe);
					break;
				}

			}
			mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) &
				      0xffffff, eqe->type);
			break;

		case MLX4_EVENT_TYPE_SRQ_LIMIT:
			mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
				 __func__);
			/* fall through */
		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
			if (mlx4_is_master(dev)) {
				/* forward only to slave owning the SRQ */
				ret = mlx4_get_slave_from_resource_id(dev,
						RES_SRQ,
						be32_to_cpu(eqe->event.srq.srqn)
						& 0xffffff,
						&slave);
				if (ret && ret != -ENOENT) {
					mlx4_warn(dev, "SRQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
						  eqe->type, eqe->subtype,
						  eq->eqn, eq->cons_index, ret);
					break;
				}
				mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
					  __func__, slave,
					  be32_to_cpu(eqe->event.srq.srqn),
					  eqe->type, eqe->subtype);

				if (!ret && slave != dev->caps.function) {
					mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
						  __func__, eqe->type,
						  eqe->subtype, slave);
					mlx4_slave_event(dev, slave, eqe);
					break;
				}
			}
			mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) &
				       0xffffff, eqe->type);
			break;

		case MLX4_EVENT_TYPE_CMD:
			mlx4_cmd_event(dev,
				       be16_to_cpu(eqe->event.cmd.token),
				       eqe->event.cmd.status,
				       be64_to_cpu(eqe->event.cmd.out_param));
			break;
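
		/*
		 * For port events forwarded to a VF, the physical port
		 * number in the high nibble of eqe->event.port_change.port
		 * is rewritten below to the port number as seen by that
		 * slave before the EQE is queued to it.
		 */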
		case MLX4_EVENT_TYPE_PORT_CHANGE: {
			struct mlx4_slaves_pport slaves_port;
			port = be32_to_cpu(eqe->event.port_change.port) >> 28;
			slaves_port = mlx4_phys_to_slaves_pport(dev, port);
			if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
						    port);
				mlx4_priv(dev)->sense.do_sense_port[port] = 1;
				if (!mlx4_is_master(dev))
					break;
				for (i = 0; i < dev->persist->num_vfs + 1;
				     i++) {
					if (!test_bit(i, slaves_port.slaves))
						continue;
					if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
						if (i == mlx4_master_func_num(dev))
							continue;
						mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
							 __func__, i, port);
						s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
						if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
							eqe->event.port_change.port =
								cpu_to_be32(
								(be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
								| (mlx4_phys_to_slave_port(dev, i, port) << 28));
							mlx4_slave_event(dev, i, eqe);
						}
					} else {  /* IB port */
						set_and_calc_slave_port_state(dev, i, port,
									      MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
									      &gen_event);
						/* we can be in pending state; in that case do
						 * not send a port_down event
						 */
						if (SLAVE_PORT_GEN_EVENT_DOWN == gen_event) {
							if (i == mlx4_master_func_num(dev))
								continue;
							mlx4_slave_event(dev, i, eqe);
						}
					}
				}
			} else {
				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP, port);

				mlx4_priv(dev)->sense.do_sense_port[port] = 0;

				if (!mlx4_is_master(dev))
					break;
				if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
					for (i = 0;
					     i < dev->persist->num_vfs + 1;
					     i++) {
						if (!test_bit(i, slaves_port.slaves))
							continue;
						if (i == mlx4_master_func_num(dev))
							continue;
						s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
						if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
							eqe->event.port_change.port =
								cpu_to_be32(
								(be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
								| (mlx4_phys_to_slave_port(dev, i, port) << 28));
							mlx4_slave_event(dev, i, eqe);
						}
					}
				else /* IB port */
					/* port-up event will be sent to a slave when the
					 * slave's alias-guid is set. This is done in alias_GUID.c
					 */
					set_all_slave_state(dev, port, MLX4_DEV_EVENT_PORT_UP);
			}
			break;
		}

		case MLX4_EVENT_TYPE_CQ_ERROR:
			mlx4_warn(dev, "CQ %s on CQN %06x\n",
				  eqe->event.cq_err.syndrome == 1 ?
				  "overrun" : "access violation",
				  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
			if (mlx4_is_master(dev)) {
				ret = mlx4_get_slave_from_resource_id(dev,
					RES_CQ,
					be32_to_cpu(eqe->event.cq_err.cqn)
					& 0xffffff, &slave);
				if (ret && ret != -ENOENT) {
					mlx4_dbg(dev, "CQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
						 eqe->type, eqe->subtype,
						 eq->eqn, eq->cons_index, ret);
					break;
				}

				if (!ret && slave != dev->caps.function) {
					mlx4_slave_event(dev, slave, eqe);
					break;
				}
			}
			mlx4_cq_event(dev,
				      be32_to_cpu(eqe->event.cq_err.cqn)
				      & 0xffffff,
				      eqe->type);
			break;

		case MLX4_EVENT_TYPE_EQ_OVERFLOW:
			mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
			break;

		case MLX4_EVENT_TYPE_OP_REQUIRED:
			atomic_inc(&priv->opreq_count);
			/* FW commands can't be executed from interrupt context;
			 * work in a deferred task
			 */
			queue_work(mlx4_wq, &priv->opreq_task);
			break;

		case MLX4_EVENT_TYPE_COMM_CHANNEL:
			if (!mlx4_is_master(dev)) {
				mlx4_warn(dev, "Received comm channel event for non master device\n");
				break;
			}
			memcpy(&priv->mfunc.master.comm_arm_bit_vector,
			       eqe->event.comm_channel_arm.bit_vec,
			       sizeof eqe->event.comm_channel_arm.bit_vec);
			queue_work(priv->mfunc.master.comm_wq,
				   &priv->mfunc.master.comm_work);
			break;
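
		/*
		 * FLR handling is split: this handler only marks the slave
		 * inactive under slave_state_lock and queues
		 * slave_flr_event_work; the actual resource cleanup and the
		 * INFORM_FLR_DONE command run later in
		 * mlx4_master_handle_slave_flr() above.
		 */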
		case MLX4_EVENT_TYPE_FLR_EVENT:
			flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
			if (!mlx4_is_master(dev)) {
				mlx4_warn(dev, "Non-master function received FLR event\n");
				break;
			}

			mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);

			if (flr_slave >= dev->num_slaves) {
				mlx4_warn(dev,
					  "Got FLR for unknown function: %d\n",
					  flr_slave);
				update_slave_state = 0;
			} else
				update_slave_state = 1;

			spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
			if (update_slave_state) {
				priv->mfunc.master.slave_state[flr_slave].active = false;
				priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
				priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
			}
			spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
			mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN,
					    flr_slave);
			queue_work(priv->mfunc.master.comm_wq,
				   &priv->mfunc.master.slave_flr_event_work);
			break;

		case MLX4_EVENT_TYPE_FATAL_WARNING:
			if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) {
				if (mlx4_is_master(dev))
					for (i = 0; i < dev->num_slaves; i++) {
						mlx4_dbg(dev, "%s: Sending MLX4_FATAL_WARNING_SUBTYPE_WARMING to slave: %d\n",
							 __func__, i);
						if (i == dev->caps.function)
							continue;
						mlx4_slave_event(dev, i, eqe);
					}
				mlx4_err(dev, "Temperature Threshold was reached! Threshold: %d celsius degrees; Current Temperature: %d\n",
					 be16_to_cpu(eqe->event.warming.warning_threshold),
					 be16_to_cpu(eqe->event.warming.current_temperature));
			} else
				mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), subtype %02x on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
					  eqe->type, eqe->subtype, eq->eqn,
					  eq->cons_index, eqe->owner, eq->nent,
					  eqe->slave_id,
					  !!(eqe->owner & 0x80) ^
					  !!(eq->cons_index & eq->nent) ? "HW" : "SW");

			break;

		case MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT:
			mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_MGMT_CHANGE,
					    (unsigned long) eqe);
			break;

		case MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT:
			switch (eqe->subtype) {
			case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE:
				mlx4_warn(dev, "Bad cable detected on port %u\n",
					  eqe->event.bad_cable.port);
				break;
			case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE:
				mlx4_warn(dev, "Unsupported cable detected\n");
				break;
			default:
				mlx4_dbg(dev,
					 "Unhandled recoverable error event detected: %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, ownership=%s\n",
					 eqe->type, eqe->subtype, eq->eqn,
					 eq->cons_index, eqe->owner, eq->nent,
					 !!(eqe->owner & 0x80) ^
					 !!(eq->cons_index & eq->nent) ? "HW" : "SW");
				break;
			}
			break;

		case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
		case MLX4_EVENT_TYPE_ECC_DETECT:
		default:
			mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
				  eqe->type, eqe->subtype, eq->eqn,
				  eq->cons_index, eqe->owner, eq->nent,
				  eqe->slave_id,
				  !!(eqe->owner & 0x80) ^
				  !!(eq->cons_index & eq->nent) ? "HW" : "SW");
			break;
		}

		++eq->cons_index;
		eqes_found = 1;
		++set_ci;

		/*
		 * The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX4_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
			eq_set_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_set_ci(eq, 1);

	/* cqn is 24bit wide but is initialized such that its higher bits
	 * are ones too. Thus, if we got any event, cqn's high bits should be off
	 * and we need to schedule the tasklet.
	 */
	if (!(cqn & ~0xffffff))
		tasklet_schedule(&eq->tasklet_ctx.task);

	return eqes_found;
}
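
/*
 * INTx handler: a single interrupt line is shared by all EQs, so clear
 * the interrupt and poll every EQ.  With MSI-X (mlx4_msi_x_interrupt
 * below), each vector is bound to exactly one EQ and no polling of the
 * others is needed.
 */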
static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
{
	struct mlx4_dev *dev = dev_ptr;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int work = 0;
	int i;

	writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);

	return IRQ_RETVAL(work);
}

static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
{
	struct mlx4_eq  *eq  = eq_ptr;
	struct mlx4_dev *dev = eq->dev;

	mlx4_eq_int(dev, eq);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}

int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq =
		priv->mfunc.master.slave_state[slave].event_eq;
	u32 in_modifier = vhcr->in_modifier;
	u32 eqn = in_modifier & 0x3FF;
	u64 in_param = vhcr->in_param;
	int err = 0;
	int i;

	if (slave == dev->caps.function)
		err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn,
			       0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
			       MLX4_CMD_NATIVE);
	if (!err)
		for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i)
			if (in_param & (1LL << i))
				event_eq[i].eqn = in_modifier >> 31 ? -1 : eqn;

	return err;
}

static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
			int eq_num)
{
	return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
			0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}

static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd(dev, mailbox->dma, eq_num, 0,
			MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, int eq_num)
{
	return mlx4_cmd(dev, 0, eq_num, 1, MLX4_CMD_HW2SW_EQ,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
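
/*
 * Worked example for mlx4_num_eq_uar() below: with 32 reserved EQs,
 * 4 completion vectors and an empty comp_pool, the EQNs in use run
 * from 32 to 36, so (4 + 1 + 32 + 0)/4 - 32/4 + 1 = 9 - 8 + 1 = 2
 * UAR pages get mapped (pages 8 and 9, i.e. eqn/4 for each EQN).
 */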
static int mlx4_num_eq_uar(struct mlx4_dev *dev)
{
	/*
	 * Each UAR holds 4 EQ doorbells.  To figure out how many UARs
	 * we need to map, take the difference of highest index and
	 * the lowest index we'll use and add 1.
	 */
	return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs +
		dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1;
}

static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int index;

	index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;

	if (!priv->eq_table.uar_map[index]) {
		priv->eq_table.uar_map[index] =
			ioremap(pci_resource_start(dev->persist->pdev, 2) +
				((eq->eqn / 4) << PAGE_SHIFT),
				PAGE_SIZE);
		if (!priv->eq_table.uar_map[index]) {
			mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
				 eq->eqn);
			return NULL;
		}
	}

	return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
}

static void mlx4_unmap_uar(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		if (priv->eq_table.uar_map[i]) {
			iounmap(priv->eq_table.uar_map[i]);
			priv->eq_table.uar_map[i] = NULL;
		}
}
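
/*
 * Bring up one EQ: allocate DMA-coherent pages for the ring, take an
 * EQN from the bitmap, map its doorbell, describe the pages to the
 * device through an MTT, then hand the queue to firmware with
 * SW2HW_EQ.  The error paths unwind the same steps backwards.
 */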
static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
			  u8 intr, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_eq_context *eq_context;
	int npages;
	u64 *dma_list = NULL;
	dma_addr_t t;
	u64 mtt_addr;
	int err = -ENOMEM;
	int i;

	eq->dev  = dev;
	eq->nent = roundup_pow_of_two(max(nent, 2));
	/* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with
	 * strides of 64B, 128B and 256B.
	 */
	npages = PAGE_ALIGN(eq->nent * dev->caps.eqe_size) / PAGE_SIZE;

	eq->page_list = kmalloc(npages * sizeof *eq->page_list,
				GFP_KERNEL);
	if (!eq->page_list)
		goto err_out;

	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
	if (!dma_list)
		goto err_out_free;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto err_out_free;
	eq_context = mailbox->buf;

	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = dma_alloc_coherent(&dev->persist->
							  pdev->dev,
							  PAGE_SIZE, &t,
							  GFP_KERNEL);
		if (!eq->page_list[i].buf)
			goto err_out_free_pages;

		dma_list[i] = t;
		eq->page_list[i].map = t;

		memset(eq->page_list[i].buf, 0, PAGE_SIZE);
	}

	eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
	if (eq->eqn == -1)
		goto err_out_free_pages;

	eq->doorbell = mlx4_get_eq_uar(dev, eq);
	if (!eq->doorbell) {
		err = -ENOMEM;
		goto err_out_free_eq;
	}

	err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
	if (err)
		goto err_out_free_eq;

	err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
	if (err)
		goto err_out_free_mtt;

	eq_context->flags	  = cpu_to_be32(MLX4_EQ_STATUS_OK |
						MLX4_EQ_STATE_ARMED);
	eq_context->log_eq_size	  = ilog2(eq->nent);
	eq_context->intr	  = intr;
	eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
	eq_context->mtt_base_addr_h = mtt_addr >> 32;
	eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
	if (err) {
		mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
		goto err_out_free_mtt;
	}

	kfree(dma_list);
	mlx4_free_cmd_mailbox(dev, mailbox);

	eq->cons_index = 0;

	INIT_LIST_HEAD(&eq->tasklet_ctx.list);
	INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
	spin_lock_init(&eq->tasklet_ctx.lock);
	tasklet_init(&eq->tasklet_ctx.task, mlx4_cq_tasklet_cb,
		     (unsigned long)&eq->tasklet_ctx);

	return err;

err_out_free_mtt:
	mlx4_mtt_cleanup(dev, &eq->mtt);

err_out_free_eq:
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);

err_out_free_pages:
	for (i = 0; i < npages; ++i)
		if (eq->page_list[i].buf)
			dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
					  eq->page_list[i].buf,
					  eq->page_list[i].map);

	mlx4_free_cmd_mailbox(dev, mailbox);

err_out_free:
	kfree(eq->page_list);
	kfree(dma_list);

err_out:
	return err;
}

static void mlx4_free_eq(struct mlx4_dev *dev,
			 struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int i;
	/* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with
	 * strides of 64B, 128B and 256B
	 */
	int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE;

	err = mlx4_HW2SW_EQ(dev, eq->eqn);
	if (err)
		mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);

	synchronize_irq(eq->irq);
	tasklet_disable(&eq->tasklet_ctx.task);

	mlx4_mtt_cleanup(dev, &eq->mtt);
	for (i = 0; i < npages; ++i)
		dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
				  eq->page_list[i].buf,
				  eq->page_list[i].map);

	kfree(eq->page_list);
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);
}

static void mlx4_free_irqs(struct mlx4_dev *dev)
{
	struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, vec;

	if (eq_table->have_irq)
		free_irq(dev->persist->pdev->irq, dev);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		if (eq_table->eq[i].have_irq) {
			free_irq(eq_table->eq[i].irq, eq_table->eq + i);
			eq_table->eq[i].have_irq = 0;
		}

	for (i = 0; i < dev->caps.comp_pool; i++) {
		/*
		 * Free the IRQs assigned from the pool.  All pool_bm bits
		 * should be 0 by now, but we validate to be safe.
		 */
		if (priv->msix_ctl.pool_bm & 1ULL << i) {
			/* no need for locking here */
			vec = dev->caps.num_comp_vectors + 1 + i;
			free_irq(priv->eq_table.eq[vec].irq,
				 &priv->eq_table.eq[vec]);
		}
	}

	kfree(eq_table->irq_names);
}

static int mlx4_map_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->clr_base = ioremap(pci_resource_start(dev->persist->pdev,
				 priv->fw.clr_int_bar) +
				 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
	if (!priv->clr_base) {
		mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n");
		return -ENOMEM;
	}

	return 0;
}

static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	iounmap(priv->clr_base);
}

int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs,
				    sizeof *priv->eq_table.eq, GFP_KERNEL);
	if (!priv->eq_table.eq)
		return -ENOMEM;

	return 0;
}

void mlx4_free_eq_table(struct mlx4_dev *dev)
{
	kfree(mlx4_priv(dev)->eq_table.eq);
}
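
/*
 * mlx4_init_eq_table() brings the whole table up in order: UAR map and
 * EQN bitmap, the INTA clear register (native functions only), IRQ name
 * storage, one EQ per completion vector plus one async EQ (plus any
 * pool vectors), the IRQ handlers themselves, and finally MAP_EQ for
 * async events before every EQ is armed with eq_set_ci().
 */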
int mlx4_init_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int i;

	priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev),
					 sizeof *priv->eq_table.uar_map,
					 GFP_KERNEL);
	if (!priv->eq_table.uar_map) {
		err = -ENOMEM;
		goto err_out_free;
	}

	err = mlx4_bitmap_init(&priv->eq_table.bitmap,
			       roundup_pow_of_two(dev->caps.num_eqs),
			       dev->caps.num_eqs - 1,
			       dev->caps.reserved_eqs,
			       roundup_pow_of_two(dev->caps.num_eqs) -
			       dev->caps.num_eqs);
	if (err)
		goto err_out_free;

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		priv->eq_table.uar_map[i] = NULL;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_map_clr_int(dev);
		if (err)
			goto err_out_bitmap;

		priv->eq_table.clr_mask =
			swab32(1 << (priv->eq_table.inta_pin & 31));
		priv->eq_table.clr_int  = priv->clr_base +
			(priv->eq_table.inta_pin < 32 ? 4 : 0);
	}

	priv->eq_table.irq_names =
		kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
					     dev->caps.comp_pool),
			GFP_KERNEL);
	if (!priv->eq_table.irq_names) {
		err = -ENOMEM;
		goto err_out_bitmap;
	}

	for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
		err = mlx4_create_eq(dev, dev->caps.num_cqs -
					  dev->caps.reserved_cqs +
					  MLX4_NUM_SPARE_EQE,
				     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
				     &priv->eq_table.eq[i]);
		if (err) {
			--i;
			goto err_out_unmap;
		}
	}

	err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
			     (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0,
			     &priv->eq_table.eq[dev->caps.num_comp_vectors]);
	if (err)
		goto err_out_comp;

	/* if the pool of additional completion vectors is empty,
	 * this loop will not run
	 */
	for (i = dev->caps.num_comp_vectors + 1;
	      i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) {

		err = mlx4_create_eq(dev, dev->caps.num_cqs -
					  dev->caps.reserved_cqs +
					  MLX4_NUM_SPARE_EQE,
				     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
				     &priv->eq_table.eq[i]);
		if (err) {
			--i;
			goto err_out_unmap;
		}
	}
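
	/*
	 * With MSI-X, every EQ gets its own named vector; otherwise a
	 * single shared INTx line (serviced by mlx4_interrupt above)
	 * covers all EQs.
	 */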
	if (dev->flags & MLX4_FLAG_MSI_X) {
		const char *eq_name;

		for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
			if (i < dev->caps.num_comp_vectors) {
				snprintf(priv->eq_table.irq_names +
					 i * MLX4_IRQNAME_SIZE,
					 MLX4_IRQNAME_SIZE,
					 "mlx4-comp-%d@pci:%s", i,
					 pci_name(dev->persist->pdev));
			} else {
				snprintf(priv->eq_table.irq_names +
					 i * MLX4_IRQNAME_SIZE,
					 MLX4_IRQNAME_SIZE,
					 "mlx4-async@pci:%s",
					 pci_name(dev->persist->pdev));
			}

			eq_name = priv->eq_table.irq_names +
				  i * MLX4_IRQNAME_SIZE;
			err = request_irq(priv->eq_table.eq[i].irq,
					  mlx4_msi_x_interrupt, 0, eq_name,
					  priv->eq_table.eq + i);
			if (err)
				goto err_out_async;

			priv->eq_table.eq[i].have_irq = 1;
		}
	} else {
		snprintf(priv->eq_table.irq_names,
			 MLX4_IRQNAME_SIZE,
			 DRV_NAME "@pci:%s",
			 pci_name(dev->persist->pdev));
		err = request_irq(dev->persist->pdev->irq, mlx4_interrupt,
				  IRQF_SHARED, priv->eq_table.irq_names, dev);
		if (err)
			goto err_out_async;

		priv->eq_table.have_irq = 1;
	}

	err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
			  priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
	if (err)
		mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
			  priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		eq_set_ci(&priv->eq_table.eq[i], 1);

	return 0;

err_out_async:
	mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);

err_out_comp:
	i = dev->caps.num_comp_vectors - 1;

err_out_unmap:
	while (i >= 0) {
		mlx4_free_eq(dev, &priv->eq_table.eq[i]);
		--i;
	}
	if (!mlx4_is_slave(dev))
		mlx4_unmap_clr_int(dev);
	mlx4_free_irqs(dev);

err_out_bitmap:
	mlx4_unmap_uar(dev);
	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

err_out_free:
	kfree(priv->eq_table.uar_map);

	return err;
}

void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1,
		    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);

	mlx4_free_irqs(dev);

	for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
		mlx4_free_eq(dev, &priv->eq_table.eq[i]);

	if (!mlx4_is_slave(dev))
		mlx4_unmap_clr_int(dev);

	mlx4_unmap_uar(dev);
	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

	kfree(priv->eq_table.uar_map);
}

/* A test that verifies that we can accept interrupts on all
 * the irq vectors of the device.
 * Interrupts are checked using the NOP command.
 */
int mlx4_test_interrupts(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int err;

	err = mlx4_NOP(dev);
	/* When not in MSI_X, there is only one irq to check */
	if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev))
		return err;

	/* A loop over all completion vectors: for each vector we check
	 * whether it works by mapping command completions to that vector
	 * and performing a NOP command
	 */
	for (i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
		/* Temporarily use polling for command completions */
		mlx4_cmd_use_polling(dev);

		/* Map the new eq to handle all asynchronous events */
		err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
				  priv->eq_table.eq[i].eqn);
		if (err) {
			mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
			mlx4_cmd_use_events(dev);
			break;
		}

		/* Go back to using events */
		mlx4_cmd_use_events(dev);
		err = mlx4_NOP(dev);
	}

	/* Return to default */
	mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
		    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
	return err;
}
EXPORT_SYMBOL(mlx4_test_interrupts);
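
/*
 * Typical usage from a consumer (sketch; the vector name is whatever
 * the caller owns, and "eth0-3" here is only illustrative):
 *
 *	int vec;
 *
 *	err = mlx4_assign_eq(dev, "eth0-3", NULL, &vec);
 *	if (err)
 *		return err;	(-ENOSPC when no pool vector is free)
 *	...
 *	mlx4_release_eq(dev, vec);	(returns the vector to the pool)
 */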
int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
		   int *vector)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int vec = 0, err = 0, i;

	mutex_lock(&priv->msix_ctl.pool_lock);
	for (i = 0; !vec && i < dev->caps.comp_pool; i++) {
		if (~priv->msix_ctl.pool_bm & 1ULL << i) {
			priv->msix_ctl.pool_bm |= 1ULL << i;
			vec = dev->caps.num_comp_vectors + 1 + i;
			snprintf(priv->eq_table.irq_names +
				 vec * MLX4_IRQNAME_SIZE,
				 MLX4_IRQNAME_SIZE, "%s", name);
#ifdef CONFIG_RFS_ACCEL
			if (rmap) {
				err = irq_cpu_rmap_add(rmap,
						       priv->eq_table.eq[vec].irq);
				if (err)
					mlx4_warn(dev, "Failed adding irq rmap\n");
			}
#endif
			err = request_irq(priv->eq_table.eq[vec].irq,
					  mlx4_msi_x_interrupt, 0,
					  &priv->eq_table.irq_names[vec << 5],
					  priv->eq_table.eq + vec);
			if (err) {
				/* zero out the bit by flipping it */
				priv->msix_ctl.pool_bm ^= 1 << i;
				vec = 0;
				continue;
				/* we don't want to break here */
			}

			eq_set_ci(&priv->eq_table.eq[vec], 1);
		}
	}
	mutex_unlock(&priv->msix_ctl.pool_lock);

	if (vec) {
		*vector = vec;
	} else {
		*vector = 0;
		err = (i == dev->caps.comp_pool) ? -ENOSPC : err;
	}
	return err;
}
EXPORT_SYMBOL(mlx4_assign_eq);

int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return priv->eq_table.eq[vec].irq;
}
EXPORT_SYMBOL(mlx4_eq_get_irq);

void mlx4_release_eq(struct mlx4_dev *dev, int vec)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	/* bitmap index */
	int i = vec - dev->caps.num_comp_vectors - 1;

	if (likely(i >= 0)) {
		/* sanity check, making sure we're not trying to free IRQs
		 * belonging to a legacy EQ
		 */
		mutex_lock(&priv->msix_ctl.pool_lock);
		if (priv->msix_ctl.pool_bm & 1ULL << i) {
			free_irq(priv->eq_table.eq[vec].irq,
				 &priv->eq_table.eq[vec]);
			priv->msix_ctl.pool_bm &= ~(1ULL << i);
		}
		mutex_unlock(&priv->msix_ctl.pool_lock);
	}
}
EXPORT_SYMBOL(mlx4_release_eq);