/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

#include <linux/mlx4/cmd.h>
#include <linux/cpu_rmap.h>

#include "mlx4.h"
#include "fw.h"

enum {
	MLX4_IRQNAME_SIZE	= 32
};

enum {
	MLX4_NUM_ASYNC_EQE	= 0x100,
	MLX4_NUM_SPARE_EQE	= 0x80,
	MLX4_EQ_ENTRY_SIZE	= 0x20
};

#define MLX4_EQ_STATUS_OK	   ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MLX4_EQ_OWNER_SW	   ( 0 << 24)
#define MLX4_EQ_OWNER_HW	   ( 1 << 24)
#define MLX4_EQ_FLAG_EC		   ( 1 << 18)
#define MLX4_EQ_FLAG_OI		   ( 1 << 17)
#define MLX4_EQ_STATE_ARMED	   ( 9 <<  8)
#define MLX4_EQ_STATE_FIRED	   (10 <<  8)
#define MLX4_EQ_STATE_ALWAYS_ARMED (11 <<  8)

#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX4_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX4_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX4_EVENT_TYPE_ECC_DETECT)	    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)	    | \
			       (1ull << MLX4_EVENT_TYPE_CMD)		    | \
			       (1ull << MLX4_EVENT_TYPE_OP_REQUIRED)	    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL)	    | \
			       (1ull << MLX4_EVENT_TYPE_FLR_EVENT)	    | \
			       (1ull << MLX4_EVENT_TYPE_FATAL_WARNING))

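/*
 * Build the event mask used when mapping the async EQ: the base
 * MLX4_ASYNC_EVENT_MASK plus the optional event types this device
 * advertises support for in its capability flags.
 */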
static u64 get_async_ev_mask(struct mlx4_dev *dev)
{
	u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK;
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
		async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT);
	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
		async_ev_mask |= (1ull << MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT);

	return async_ev_mask;
}

static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
	__raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
					       req_not << 31),
		     eq->doorbell);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}

static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor,
				u8 eqe_size)
{
	/* (entry & (eq->nent - 1)) gives us a cyclic array */
	unsigned long offset = (entry & (eq->nent - 1)) * eqe_size;
	/* CX3 is capable of extending the EQE from 32 to 64 bytes with
	 * strides of 64B,128B and 256B.
	 * When 64B EQE is used, the first (in the lower addresses)
	 * 32 bytes in the 64 byte EQE are reserved and the next 32 bytes
	 * contain the legacy EQE information.
	 * In all other cases, the first 32B contains the legacy EQE info.
	 */
	return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE;
}

static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor, u8 size)
{
	struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor, size);
	return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}

static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq)
{
	struct mlx4_eqe *eqe =
		&slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)];
	return (!!(eqe->owner & 0x80) ^
		!!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ?
		eqe : NULL;
}

void mlx4_gen_slave_eqe(struct work_struct *work)
{
	struct mlx4_mfunc_master_ctx *master =
		container_of(work, struct mlx4_mfunc_master_ctx,
			     slave_event_work);
	struct mlx4_mfunc *mfunc =
		container_of(master, struct mlx4_mfunc, master);
	struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc);
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq;
	struct mlx4_eqe *eqe;
	u8 slave;
	int i, phys_port, slave_port;

	for (eqe = next_slave_event_eqe(slave_eq); eqe;
	      eqe = next_slave_event_eqe(slave_eq)) {
		slave = eqe->slave_id;

		if (eqe->type == MLX4_EVENT_TYPE_PORT_CHANGE &&
		    eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN &&
		    mlx4_is_bonded(dev)) {
			struct mlx4_port_cap port_cap;

			if (!mlx4_QUERY_PORT(dev, 1, &port_cap) && port_cap.link_state)
				goto consume;

			if (!mlx4_QUERY_PORT(dev, 2, &port_cap) && port_cap.link_state)
				goto consume;
		}
		/* All active slaves need to receive the event */
		if (slave == ALL_SLAVES) {
			for (i = 0; i <= dev->persist->num_vfs; i++) {
				phys_port = 0;
				if (eqe->type == MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT &&
				    eqe->subtype == MLX4_DEV_PMC_SUBTYPE_PORT_INFO) {
					phys_port  = eqe->event.port_mgmt_change.port;
					slave_port = mlx4_phys_to_slave_port(dev, i, phys_port);
					if (slave_port < 0) /* VF doesn't have this port */
						continue;
					eqe->event.port_mgmt_change.port = slave_port;
				}
				if (mlx4_GEN_EQE(dev, i, eqe))
					mlx4_warn(dev, "Failed to generate event for slave %d\n",
						  i);
				if (phys_port)
					eqe->event.port_mgmt_change.port = phys_port;
			}
		} else {
			if (mlx4_GEN_EQE(dev, slave, eqe))
				mlx4_warn(dev, "Failed to generate event for slave %d\n",
					  slave);
		}
consume:
		++slave_eq->cons;
	}
}

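/*
 * Queue an event on the master's software slave event queue.  The owner
 * bit of each entry toggles every time the producer index wraps
 * SLAVE_EVENT_EQ_SIZE, which is how mlx4_gen_slave_eqe() (running from
 * the comm_wq work scheduled below) tells which entries are still valid
 * and need to be forwarded to slaves via GEN_EQE.
 */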
static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq;
	struct mlx4_eqe *s_eqe;
	unsigned long flags;

	spin_lock_irqsave(&slave_eq->event_lock, flags);
	s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
	if ((!!(s_eqe->owner & 0x80)) ^
	    (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
		mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. No free EQE on slave events queue\n",
			  slave);
		spin_unlock_irqrestore(&slave_eq->event_lock, flags);
		return;
	}

	memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
	s_eqe->slave_id = slave;
	/* ensure all information is written before setting the ownership bit */
	dma_wmb();
	s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
	++slave_eq->prod;

	queue_work(priv->mfunc.master.comm_wq,
		   &priv->mfunc.master.slave_event_work);
	spin_unlock_irqrestore(&slave_eq->event_lock, flags);
}

static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
			     struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (slave < 0 || slave > dev->persist->num_vfs ||
	    slave == dev->caps.function ||
	    !priv->mfunc.master.slave_state[slave].active)
		return;

	slave_event(dev, slave, eqe);
}

#if defined(CONFIG_SMP)
static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
{
	int hint_err;
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_eq *eq = &priv->eq_table.eq[vec];

	if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask))
		return;

	hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
	if (hint_err)
		mlx4_warn(dev, "irq_set_affinity_hint failed, err %d\n", hint_err);
}
#endif

int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port)
{
	struct mlx4_eqe eqe;

	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave = &priv->mfunc.master.slave_state[slave];

	if (!s_slave->active)
		return 0;

	memset(&eqe, 0, sizeof eqe);

	eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
	eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE;
	eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port);

	return mlx4_GEN_EQE(dev, slave, &eqe);
}
EXPORT_SYMBOL(mlx4_gen_pkey_eqe);

int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port)
{
	struct mlx4_eqe eqe;

	/* don't send if we don't have that slave */
	if (dev->persist->num_vfs < slave)
		return 0;
	memset(&eqe, 0, sizeof eqe);

	eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
	eqe.subtype = MLX4_DEV_PMC_SUBTYPE_GUID_INFO;
	eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port);

	return mlx4_GEN_EQE(dev, slave, &eqe);
}
EXPORT_SYMBOL(mlx4_gen_guid_change_eqe);

int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port,
				   u8 port_subtype_change)
{
	struct mlx4_eqe eqe;
	u8 slave_port = mlx4_phys_to_slave_port(dev, slave, port);

	/* don't send if we don't have that slave */
	if (dev->persist->num_vfs < slave)
		return 0;
	memset(&eqe, 0, sizeof eqe);

	eqe.type = MLX4_EVENT_TYPE_PORT_CHANGE;
	eqe.subtype = port_subtype_change;
	eqe.event.port_change.port = cpu_to_be32(slave_port << 28);

	mlx4_dbg(dev, "%s: sending: %d to slave: %d on port: %d\n", __func__,
		 port_subtype_change, slave, port);
	return mlx4_GEN_EQE(dev, slave, &eqe);
}
EXPORT_SYMBOL(mlx4_gen_port_state_change_eqe);

enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave, u8 port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);

	if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
	    port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
		pr_err("%s: Error: asking for slave:%d, port:%d\n",
		       __func__, slave, port);
		return SLAVE_PORT_DOWN;
	}
	return s_state[slave].port_state[port];
}
EXPORT_SYMBOL(mlx4_get_slave_port_state);

static int mlx4_set_slave_port_state(struct mlx4_dev *dev, int slave, u8 port,
				     enum slave_port_state state)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);

	if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
	    port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
		pr_err("%s: Error: asking for slave:%d, port:%d\n",
		       __func__, slave, port);
		return -1;
	}
	s_state[slave].port_state[port] = state;

	return 0;
}

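/*
 * Apply a physical-port event to every function (PF and VFs) that has
 * this port mapped, driving each slave's port state machine.
 */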
static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event)
{
	int i;
	enum slave_port_gen_event gen_event;
	struct mlx4_slaves_pport slaves_pport = mlx4_phys_to_slaves_pport(dev,
									  port);

	for (i = 0; i < dev->persist->num_vfs + 1; i++)
		if (test_bit(i, slaves_pport.slaves))
			set_and_calc_slave_port_state(dev, i, port,
						      event, &gen_event);
}

/**************************************************************************
	The function gets as input the new event for that port,
	and according to the previous state changes the slave's port state.
	The events are:
		MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
		MLX4_PORT_STATE_DEV_EVENT_PORT_UP
		MLX4_PORT_STATE_IB_EVENT_GID_VALID
		MLX4_PORT_STATE_IB_EVENT_GID_INVALID
***************************************************************************/
int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
				  u8 port, int event,
				  enum slave_port_gen_event *gen_event)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *ctx = NULL;
	unsigned long flags;
	int ret = -1;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
	enum slave_port_state cur_state =
		mlx4_get_slave_port_state(dev, slave, port);

	*gen_event = SLAVE_PORT_GEN_EVENT_NONE;

	if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
	    port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
		pr_err("%s: Error: asking for slave:%d, port:%d\n",
		       __func__, slave, port);
		return ret;
	}

	ctx = &priv->mfunc.master.slave_state[slave];
	spin_lock_irqsave(&ctx->lock, flags);

	switch (cur_state) {
	case SLAVE_PORT_DOWN:
		if (MLX4_PORT_STATE_DEV_EVENT_PORT_UP == event)
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PENDING_UP);
		break;
	case SLAVE_PENDING_UP:
		if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event)
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PORT_DOWN);
		else if (MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID == event) {
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PORT_UP);
			*gen_event = SLAVE_PORT_GEN_EVENT_UP;
		}
		break;
	case SLAVE_PORT_UP:
		if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event) {
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PORT_DOWN);
			*gen_event = SLAVE_PORT_GEN_EVENT_DOWN;
		} else if (MLX4_PORT_STATE_IB_EVENT_GID_INVALID ==
				event) {
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PENDING_UP);
			*gen_event = SLAVE_PORT_GEN_EVENT_DOWN;
		}
		break;
	default:
		pr_err("%s: BUG!!! UNKNOWN state: slave:%d, port:%d\n",
		       __func__, slave, port);
		goto out;
	}
	ret = mlx4_get_slave_port_state(dev, slave, port);

out:
	spin_unlock_irqrestore(&ctx->lock, flags);
	return ret;
}
EXPORT_SYMBOL(set_and_calc_slave_port_state);

int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr)
{
	struct mlx4_eqe eqe;

	memset(&eqe, 0, sizeof eqe);

	eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
	eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PORT_INFO;
	eqe.event.port_mgmt_change.port = port;
	eqe.event.port_mgmt_change.params.port_info.changed_attr =
		cpu_to_be32((u32) attr);

	slave_event(dev, ALL_SLAVES, &eqe);
	return 0;
}
EXPORT_SYMBOL(mlx4_gen_slaves_port_mgt_ev);

void mlx4_master_handle_slave_flr(struct work_struct *work)
{
	struct mlx4_mfunc_master_ctx *master =
		container_of(work, struct mlx4_mfunc_master_ctx,
			     slave_flr_event_work);
	struct mlx4_mfunc *mfunc =
		container_of(master, struct mlx4_mfunc, master);
	struct mlx4_priv *priv =
		container_of(mfunc, struct mlx4_priv, mfunc);
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int i;
	int err;
	unsigned long flags;

	mlx4_dbg(dev, "mlx4_handle_slave_flr\n");

	for (i = 0 ; i < dev->num_slaves; i++) {

		if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
			mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n",
				 i);
			/* In case of 'Reset flow' FLR can be generated for
			 * a slave before mlx4_load_one is done.
			 * make sure interface is up before trying to delete
			 * slave resources which weren't allocated yet.
			 */
			if (dev->persist->interface_state &
			    MLX4_INTERFACE_STATE_UP)
				mlx4_delete_all_resources_for_slave(dev, i);
			/* return the slave to running mode */
			spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
			slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
			slave_state[i].is_slave_going_down = 0;
			spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
			/* notify the FW */
			err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
			if (err)
				mlx4_warn(dev, "Failed to notify FW on FLR done (slave:%d)\n",
					  i);
		}
	}
}

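/*
 * Drain all events currently pending on the given EQ: dispatch completion
 * events to the CQ layer and handle/forward asynchronous events.  On the
 * master, resource-affiliated events (QP/SRQ/CQ errors, port changes, FLR,
 * etc.) are forwarded to the owning slave through the software slave event
 * queue.  Returns nonzero if any EQE was consumed.
 */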
static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_eqe *eqe;
	int cqn;
	int eqes_found = 0;
	int set_ci = 0;
	int port;
	int slave = 0;
	int ret;
	u32 flr_slave;
	u8 update_slave_state;
	int i;
	enum slave_port_gen_event gen_event;
	unsigned long flags;
	struct mlx4_vport_state *s_info;
	int eqe_size = dev->caps.eqe_size;

	while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor, eqe_size))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		switch (eqe->type) {
		case MLX4_EVENT_TYPE_COMP:
			cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
			mlx4_cq_completion(dev, cqn);
			break;

		case MLX4_EVENT_TYPE_PATH_MIG:
		case MLX4_EVENT_TYPE_COMM_EST:
		case MLX4_EVENT_TYPE_SQ_DRAINED:
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			mlx4_dbg(dev, "event %d arrived\n", eqe->type);
			if (mlx4_is_master(dev)) {
				/* forward only to slave owning the QP */
				ret = mlx4_get_slave_from_resource_id(dev,
						RES_QP,
						be32_to_cpu(eqe->event.qp.qpn)
						& 0xffffff, &slave);
				if (ret && ret != -ENOENT) {
					mlx4_dbg(dev, "QP event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
						 eqe->type, eqe->subtype,
						 eq->eqn, eq->cons_index, ret);
					break;
				}

				if (!ret && slave != dev->caps.function) {
					mlx4_slave_event(dev, slave, eqe);
					break;
				}

			}
			mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) &
				      0xffffff, eqe->type);
			break;

		case MLX4_EVENT_TYPE_SRQ_LIMIT:
			mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n",
				 __func__, be32_to_cpu(eqe->event.srq.srqn),
				 eq->eqn);
			/* fall through */
		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
			if (mlx4_is_master(dev)) {
				/* forward only to slave owning the SRQ */
				ret = mlx4_get_slave_from_resource_id(dev,
						RES_SRQ,
						be32_to_cpu(eqe->event.srq.srqn)
						& 0xffffff,
						&slave);
				if (ret && ret != -ENOENT) {
					mlx4_warn(dev, "SRQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
						  eqe->type, eqe->subtype,
						  eq->eqn, eq->cons_index, ret);
					break;
				}
				if (eqe->type ==
				    MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
					mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
						  __func__, slave,
						  be32_to_cpu(eqe->event.srq.srqn),
						  eqe->type, eqe->subtype);

				if (!ret && slave != dev->caps.function) {
					if (eqe->type ==
					    MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
						mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
							  __func__, eqe->type,
							  eqe->subtype, slave);
					mlx4_slave_event(dev, slave, eqe);
					break;
				}
			}
			mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) &
				       0xffffff, eqe->type);
			break;

		case MLX4_EVENT_TYPE_CMD:
			mlx4_cmd_event(dev,
				       be16_to_cpu(eqe->event.cmd.token),
				       eqe->event.cmd.status,
				       be64_to_cpu(eqe->event.cmd.out_param));
			break;

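		/* Port state changes are dispatched locally and, on the
		 * master, replayed to every relevant slave; the port number
		 * in the forwarded EQE is rewritten to the port number that
		 * slave sees before calling mlx4_slave_event().
		 */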
		case MLX4_EVENT_TYPE_PORT_CHANGE: {
			struct mlx4_slaves_pport slaves_port;
			port = be32_to_cpu(eqe->event.port_change.port) >> 28;
			slaves_port = mlx4_phys_to_slaves_pport(dev, port);
			if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
						    port);
				mlx4_priv(dev)->sense.do_sense_port[port] = 1;
				if (!mlx4_is_master(dev))
					break;
				for (i = 0; i < dev->persist->num_vfs + 1;
				     i++) {
					int reported_port = mlx4_is_bonded(dev) ? 1 : mlx4_phys_to_slave_port(dev, i, port);

					if (!test_bit(i, slaves_port.slaves) && !mlx4_is_bonded(dev))
						continue;
					if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
						if (i == mlx4_master_func_num(dev))
							continue;
						mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
							 __func__, i, port);
						s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
						if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
							eqe->event.port_change.port =
								cpu_to_be32(
								(be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
								| (reported_port << 28));
							mlx4_slave_event(dev, i, eqe);
						}
					} else {  /* IB port */
						set_and_calc_slave_port_state(dev, i, port,
									      MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
									      &gen_event);
						/* we can be in pending state, then do not send port_down event */
						if (SLAVE_PORT_GEN_EVENT_DOWN == gen_event) {
							if (i == mlx4_master_func_num(dev))
								continue;
							eqe->event.port_change.port =
								cpu_to_be32(
								(be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
								| (mlx4_phys_to_slave_port(dev, i, port) << 28));
							mlx4_slave_event(dev, i, eqe);
						}
					}
				}
			} else {
				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP, port);

				mlx4_priv(dev)->sense.do_sense_port[port] = 0;

				if (!mlx4_is_master(dev))
					break;
				if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
					for (i = 0;
					     i < dev->persist->num_vfs + 1;
					     i++) {
						int reported_port = mlx4_is_bonded(dev) ? 1 : mlx4_phys_to_slave_port(dev, i, port);

						if (!test_bit(i, slaves_port.slaves) && !mlx4_is_bonded(dev))
							continue;
						if (i == mlx4_master_func_num(dev))
							continue;
						s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
						if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
							eqe->event.port_change.port =
								cpu_to_be32(
								(be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
								| (reported_port << 28));
							mlx4_slave_event(dev, i, eqe);
						}
					}
				else
					/* port-up event will be sent to a slave when the
					 * slave's alias-guid is set. This is done in alias_GUID.c
					 */
					set_all_slave_state(dev, port, MLX4_DEV_EVENT_PORT_UP);
			}
			break;
		}

		case MLX4_EVENT_TYPE_CQ_ERROR:
			mlx4_warn(dev, "CQ %s on CQN %06x\n",
				  eqe->event.cq_err.syndrome == 1 ?
				  "overrun" : "access violation",
				  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
			if (mlx4_is_master(dev)) {
				ret = mlx4_get_slave_from_resource_id(dev,
					RES_CQ,
					be32_to_cpu(eqe->event.cq_err.cqn)
					& 0xffffff, &slave);
				if (ret && ret != -ENOENT) {
					mlx4_dbg(dev, "CQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
						 eqe->type, eqe->subtype,
						 eq->eqn, eq->cons_index, ret);
					break;
				}

				if (!ret && slave != dev->caps.function) {
					mlx4_slave_event(dev, slave, eqe);
					break;
				}
			}
			mlx4_cq_event(dev,
				      be32_to_cpu(eqe->event.cq_err.cqn)
				      & 0xffffff,
				      eqe->type);
			break;

		case MLX4_EVENT_TYPE_EQ_OVERFLOW:
			mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
			break;

		case MLX4_EVENT_TYPE_OP_REQUIRED:
			atomic_inc(&priv->opreq_count);
			/* FW commands can't be executed from interrupt context
			 * working in deferred task
			 */
			queue_work(mlx4_wq, &priv->opreq_task);
			break;

		case MLX4_EVENT_TYPE_COMM_CHANNEL:
			if (!mlx4_is_master(dev)) {
				mlx4_warn(dev, "Received comm channel event for non master device\n");
				break;
			}
			memcpy(&priv->mfunc.master.comm_arm_bit_vector,
			       eqe->event.comm_channel_arm.bit_vec,
			       sizeof eqe->event.comm_channel_arm.bit_vec);
			queue_work(priv->mfunc.master.comm_wq,
				   &priv->mfunc.master.comm_work);
			break;

		case MLX4_EVENT_TYPE_FLR_EVENT:
			flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
			if (!mlx4_is_master(dev)) {
				mlx4_warn(dev, "Non-master function received FLR event\n");
				break;
			}

			mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);

			if (flr_slave >= dev->num_slaves) {
				mlx4_warn(dev,
					  "Got FLR for unknown function: %d\n",
					  flr_slave);
				update_slave_state = 0;
			} else
				update_slave_state = 1;

			spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
			if (update_slave_state) {
				priv->mfunc.master.slave_state[flr_slave].active = false;
				priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
				priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
			}
			spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
			mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN,
					    flr_slave);
			queue_work(priv->mfunc.master.comm_wq,
				   &priv->mfunc.master.slave_flr_event_work);
			break;

		case MLX4_EVENT_TYPE_FATAL_WARNING:
			if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) {
				if (mlx4_is_master(dev))
					for (i = 0; i < dev->num_slaves; i++) {
						mlx4_dbg(dev, "%s: Sending MLX4_FATAL_WARNING_SUBTYPE_WARMING to slave: %d\n",
							 __func__, i);
						if (i == dev->caps.function)
							continue;
						mlx4_slave_event(dev, i, eqe);
					}
				mlx4_err(dev, "Temperature Threshold was reached! Threshold: %d celsius degrees; Current Temperature: %d\n",
					 be16_to_cpu(eqe->event.warming.warning_threshold),
					 be16_to_cpu(eqe->event.warming.current_temperature));
			} else
				mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), subtype %02x on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
					  eqe->type, eqe->subtype, eq->eqn,
					  eq->cons_index, eqe->owner, eq->nent,
					  eqe->slave_id,
					  !!(eqe->owner & 0x80) ^
					  !!(eq->cons_index & eq->nent) ? "HW" : "SW");

			break;

		case MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT:
			mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_MGMT_CHANGE,
					    (unsigned long) eqe);
			break;

		case MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT:
			switch (eqe->subtype) {
			case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE:
				mlx4_warn(dev, "Bad cable detected on port %u\n",
					  eqe->event.bad_cable.port);
				break;
			case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE:
				mlx4_warn(dev, "Unsupported cable detected\n");
				break;
			default:
				mlx4_dbg(dev,
					 "Unhandled recoverable error event detected: %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, ownership=%s\n",
					 eqe->type, eqe->subtype, eq->eqn,
					 eq->cons_index, eqe->owner, eq->nent,
					 !!(eqe->owner & 0x80) ^
					 !!(eq->cons_index & eq->nent) ? "HW" : "SW");
				break;
			}
			break;

		case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
		case MLX4_EVENT_TYPE_ECC_DETECT:
		default:
			mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
				  eqe->type, eqe->subtype, eq->eqn,
				  eq->cons_index, eqe->owner, eq->nent,
				  eqe->slave_id,
				  !!(eqe->owner & 0x80) ^
				  !!(eq->cons_index & eq->nent) ? "HW" : "SW");
			break;
		}

		++eq->cons_index;
		eqes_found = 1;
		++set_ci;

		/*
		 * The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX4_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
			eq_set_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_set_ci(eq, 1);

	return eqes_found;
}

static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
{
	struct mlx4_dev *dev = dev_ptr;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int work = 0;
	int i;

	writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);

	return IRQ_RETVAL(work);
}

static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
{
	struct mlx4_eq *eq  = eq_ptr;
	struct mlx4_dev *dev = eq->dev;

	mlx4_eq_int(dev, eq);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}

int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq =
		priv->mfunc.master.slave_state[slave].event_eq;
	u32 in_modifier = vhcr->in_modifier;
	u32 eqn = in_modifier & 0x3FF;
	u64 in_param = vhcr->in_param;
	int err = 0;
	int i;

	if (slave == dev->caps.function)
		err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn,
			       0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
			       MLX4_CMD_NATIVE);
	if (!err)
		for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i)
			if (in_param & (1LL << i))
				event_eq[i].eqn = in_modifier >> 31 ? -1 : eqn;

	return err;
}

static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
			int eq_num)
{
	return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
			0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}

static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd(dev, mailbox->dma, eq_num, 0,
			MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, int eq_num)
{
	return mlx4_cmd(dev, 0, eq_num, 1, MLX4_CMD_HW2SW_EQ,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

static int mlx4_num_eq_uar(struct mlx4_dev *dev)
{
	/*
	 * Each UAR holds 4 EQ doorbells.  To figure out how many UARs
	 * we need to map, take the difference of highest index and
	 * the lowest index we'll use and add 1.
	 */
	return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 -
		dev->caps.reserved_eqs / 4 + 1;
}

static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int index;

	index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;

	if (!priv->eq_table.uar_map[index]) {
		priv->eq_table.uar_map[index] =
			ioremap(
				pci_resource_start(dev->persist->pdev, 2) +
				((eq->eqn / 4) << (dev->uar_page_shift)),
				(1 << (dev->uar_page_shift)));
		if (!priv->eq_table.uar_map[index]) {
			mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
				 eq->eqn);
			return NULL;
		}
	}

	return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
}

static void mlx4_unmap_uar(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		if (priv->eq_table.uar_map[i]) {
			iounmap(priv->eq_table.uar_map[i]);
			priv->eq_table.uar_map[i] = NULL;
		}
}

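/*
 * Allocate and initialize one EQ: the queue buffer is built from
 * PAGE_SIZE DMA-coherent chunks referenced through an MTT, and the EQ
 * context is handed to firmware with SW2HW_EQ in the armed state.  The
 * requested size is padded by the callers with MLX4_NUM_SPARE_EQE and
 * rounded up here to a power of two.
 */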
static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
			  u8 intr, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_eq_context *eq_context;
	int npages;
	u64 *dma_list = NULL;
	dma_addr_t t;
	u64 mtt_addr;
	int err = -ENOMEM;
	int i;

	eq->dev   = dev;
	eq->nent  = roundup_pow_of_two(max(nent, 2));
	/* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with
	 * strides of 64B,128B and 256B.
	 */
	npages = PAGE_ALIGN(eq->nent * dev->caps.eqe_size) / PAGE_SIZE;

	eq->page_list = kmalloc(npages * sizeof *eq->page_list,
				GFP_KERNEL);
	if (!eq->page_list)
		goto err_out;

	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
	if (!dma_list)
		goto err_out_free;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto err_out_free;
	eq_context = mailbox->buf;

	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = dma_alloc_coherent(&dev->persist->
							  pdev->dev,
							  PAGE_SIZE, &t,
							  GFP_KERNEL);
		if (!eq->page_list[i].buf)
			goto err_out_free_pages;

		dma_list[i] = t;
		eq->page_list[i].map = t;

		memset(eq->page_list[i].buf, 0, PAGE_SIZE);
	}

	eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
	if (eq->eqn == -1)
		goto err_out_free_pages;

	eq->doorbell = mlx4_get_eq_uar(dev, eq);
	if (!eq->doorbell) {
		err = -ENOMEM;
		goto err_out_free_eq;
	}

	err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
	if (err)
		goto err_out_free_eq;

	err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
	if (err)
		goto err_out_free_mtt;

	eq_context->flags	  = cpu_to_be32(MLX4_EQ_STATUS_OK   |
						MLX4_EQ_STATE_ARMED);
	eq_context->log_eq_size	  = ilog2(eq->nent);
	eq_context->intr	  = intr;
	eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
	eq_context->mtt_base_addr_h = mtt_addr >> 32;
	eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
	if (err) {
		mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
		goto err_out_free_mtt;
	}

	kfree(dma_list);
	mlx4_free_cmd_mailbox(dev, mailbox);

	eq->cons_index = 0;

	INIT_LIST_HEAD(&eq->tasklet_ctx.list);
	INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
	spin_lock_init(&eq->tasklet_ctx.lock);
	tasklet_init(&eq->tasklet_ctx.task, mlx4_cq_tasklet_cb,
		     (unsigned long)&eq->tasklet_ctx);

	return err;

err_out_free_mtt:
	mlx4_mtt_cleanup(dev, &eq->mtt);

err_out_free_eq:
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);

err_out_free_pages:
	for (i = 0; i < npages; ++i)
		if (eq->page_list[i].buf)
			dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
					  eq->page_list[i].buf,
					  eq->page_list[i].map);

	mlx4_free_cmd_mailbox(dev, mailbox);

err_out_free:
	kfree(eq->page_list);
	kfree(dma_list);

err_out:
	return err;
}

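/*
 * Tear down an EQ in the reverse order of mlx4_create_eq(): return
 * ownership to software with HW2SW_EQ, quiesce the interrupt and the CQ
 * tasklet, then free the MTT, the DMA pages and the EQ number.
 */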
static void mlx4_free_eq(struct mlx4_dev *dev,
			 struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int i;
	/* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with
	 * strides of 64B,128B and 256B
	 */
	int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE;

	err = mlx4_HW2SW_EQ(dev, eq->eqn);
	if (err)
		mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);

	synchronize_irq(eq->irq);
	tasklet_disable(&eq->tasklet_ctx.task);

	mlx4_mtt_cleanup(dev, &eq->mtt);
	for (i = 0; i < npages; ++i)
		dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
				  eq->page_list[i].buf,
				  eq->page_list[i].map);

	kfree(eq->page_list);
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);
}

static void mlx4_free_irqs(struct mlx4_dev *dev)
{
	struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
	int i;

	if (eq_table->have_irq)
		free_irq(dev->persist->pdev->irq, dev);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		if (eq_table->eq[i].have_irq) {
			free_cpumask_var(eq_table->eq[i].affinity_mask);
#if defined(CONFIG_SMP)
			irq_set_affinity_hint(eq_table->eq[i].irq, NULL);
#endif
			free_irq(eq_table->eq[i].irq, eq_table->eq + i);
			eq_table->eq[i].have_irq = 0;
		}

	kfree(eq_table->irq_names);
}

static int mlx4_map_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->clr_base = ioremap(pci_resource_start(dev->persist->pdev,
				 priv->fw.clr_int_bar) +
				 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
	if (!priv->clr_base) {
		mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n");
		return -ENOMEM;
	}

	return 0;
}

static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	iounmap(priv->clr_base);
}

int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs,
				    sizeof *priv->eq_table.eq, GFP_KERNEL);
	if (!priv->eq_table.eq)
		return -ENOMEM;

	return 0;
}

void mlx4_free_eq_table(struct mlx4_dev *dev)
{
	kfree(mlx4_priv(dev)->eq_table.eq);
}

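/*
 * Bring up the whole EQ table: vector MLX4_EQ_ASYNC serves asynchronous
 * events, the remaining vectors serve completions.  With MSI-X only the
 * async vector gets its IRQ here (completion vectors request theirs
 * lazily in mlx4_assign_eq()); otherwise a single shared INTx handler
 * covers all EQs.
 */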
int mlx4_init_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int i;

	priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev),
					 sizeof *priv->eq_table.uar_map,
					 GFP_KERNEL);
	if (!priv->eq_table.uar_map) {
		err = -ENOMEM;
		goto err_out_free;
	}

	err = mlx4_bitmap_init(&priv->eq_table.bitmap,
			       roundup_pow_of_two(dev->caps.num_eqs),
			       dev->caps.num_eqs - 1,
			       dev->caps.reserved_eqs,
			       roundup_pow_of_two(dev->caps.num_eqs) -
			       dev->caps.num_eqs);
	if (err)
		goto err_out_free;

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		priv->eq_table.uar_map[i] = NULL;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_map_clr_int(dev);
		if (err)
			goto err_out_bitmap;

		priv->eq_table.clr_mask =
			swab32(1 << (priv->eq_table.inta_pin & 31));
		priv->eq_table.clr_int  = priv->clr_base +
			(priv->eq_table.inta_pin < 32 ? 4 : 0);
	}

	priv->eq_table.irq_names =
		kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1),
			GFP_KERNEL);
	if (!priv->eq_table.irq_names) {
		err = -ENOMEM;
		goto err_out_clr_int;
	}

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
		if (i == MLX4_EQ_ASYNC) {
			err = mlx4_create_eq(dev,
					     MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
					     0, &priv->eq_table.eq[MLX4_EQ_ASYNC]);
		} else {
			struct mlx4_eq *eq = &priv->eq_table.eq[i];
#ifdef CONFIG_RFS_ACCEL
			int port = find_first_bit(eq->actv_ports.ports,
						  dev->caps.num_ports) + 1;

			if (port <= dev->caps.num_ports) {
				struct mlx4_port_info *info =
					&mlx4_priv(dev)->port[port];

				if (!info->rmap) {
					info->rmap = alloc_irq_cpu_rmap(
						mlx4_get_eqs_per_port(dev, port));
					if (!info->rmap) {
						mlx4_warn(dev, "Failed to allocate cpu rmap\n");
						err = -ENOMEM;
						goto err_out_unmap;
					}
				}

				err = irq_cpu_rmap_add(
					info->rmap, eq->irq);
				if (err)
					mlx4_warn(dev, "Failed adding irq rmap\n");
			}
#endif
			err = mlx4_create_eq(dev, dev->quotas.cq +
						  MLX4_NUM_SPARE_EQE,
					     (dev->flags & MLX4_FLAG_MSI_X) ?
					     i + 1 - !!(i > MLX4_EQ_ASYNC) : 0,
					     eq);
		}
		if (err)
			goto err_out_unmap;
	}

	if (dev->flags & MLX4_FLAG_MSI_X) {
		const char *eq_name;

		snprintf(priv->eq_table.irq_names +
			 MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE,
			 MLX4_IRQNAME_SIZE,
			 "mlx4-async@pci:%s",
			 pci_name(dev->persist->pdev));
		eq_name = priv->eq_table.irq_names +
			  MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE;

		err = request_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq,
				  mlx4_msi_x_interrupt, 0, eq_name,
				  priv->eq_table.eq + MLX4_EQ_ASYNC);
		if (err)
			goto err_out_unmap;

		priv->eq_table.eq[MLX4_EQ_ASYNC].have_irq = 1;
	} else {
		snprintf(priv->eq_table.irq_names,
			 MLX4_IRQNAME_SIZE,
			 DRV_NAME "@pci:%s",
			 pci_name(dev->persist->pdev));
		err = request_irq(dev->persist->pdev->irq, mlx4_interrupt,
				  IRQF_SHARED, priv->eq_table.irq_names, dev);
		if (err)
			goto err_out_unmap;

		priv->eq_table.have_irq = 1;
	}

	err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
			  priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
	if (err)
		mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
			  priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);

	/* arm ASYNC eq */
	eq_set_ci(&priv->eq_table.eq[MLX4_EQ_ASYNC], 1);

	return 0;

err_out_unmap:
	while (i > 0)
		mlx4_free_eq(dev, &priv->eq_table.eq[--i]);
#ifdef CONFIG_RFS_ACCEL
	for (i = 1; i <= dev->caps.num_ports; i++) {
		if (mlx4_priv(dev)->port[i].rmap) {
			free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap);
			mlx4_priv(dev)->port[i].rmap = NULL;
		}
	}
#endif
	mlx4_free_irqs(dev);

err_out_clr_int:
	if (!mlx4_is_slave(dev))
		mlx4_unmap_clr_int(dev);

err_out_bitmap:
	mlx4_unmap_uar(dev);
	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

err_out_free:
	kfree(priv->eq_table.uar_map);

	return err;
}

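/*
 * Undo mlx4_init_eq_table(): unmap the async events, drop the cpu_rmaps
 * and IRQs, free every EQ and finally release the doorbell mappings and
 * the EQ number bitmap.
 */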
void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1,
		    priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);

#ifdef CONFIG_RFS_ACCEL
	for (i = 1; i <= dev->caps.num_ports; i++) {
		if (mlx4_priv(dev)->port[i].rmap) {
			free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap);
			mlx4_priv(dev)->port[i].rmap = NULL;
		}
	}
#endif
	mlx4_free_irqs(dev);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		mlx4_free_eq(dev, &priv->eq_table.eq[i]);

	if (!mlx4_is_slave(dev))
		mlx4_unmap_clr_int(dev);

	mlx4_unmap_uar(dev);
	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

	kfree(priv->eq_table.uar_map);
}

/* A test that verifies that we can accept interrupts
 * on the vector allocated for asynchronous events
 */
int mlx4_test_async(struct mlx4_dev *dev)
{
	return mlx4_NOP(dev);
}
EXPORT_SYMBOL(mlx4_test_async);

/* A test that verifies that we can accept interrupts
 * on the given irq vector of the tested port.
 * Interrupts are checked using the NOP command.
 */
int mlx4_test_interrupt(struct mlx4_dev *dev, int vector)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	/* Temporarily use polling for command completions */
	mlx4_cmd_use_polling(dev);

	/* Map the new eq to handle all asynchronous events */
	err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
			  priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn);
	if (err) {
		mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
		goto out;
	}

	/* Go back to using events */
	mlx4_cmd_use_events(dev);
	err = mlx4_NOP(dev);

	/* Return to default */
	mlx4_cmd_use_polling(dev);
out:
	mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
		    priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
	mlx4_cmd_use_events(dev);

	return err;
}
EXPORT_SYMBOL(mlx4_test_interrupt);

bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	vector = MLX4_CQ_TO_EQ_VECTOR(vector);
	if (vector < 0 || (vector >= dev->caps.num_comp_vectors + 1) ||
	    (vector == MLX4_EQ_ASYNC))
		return false;

	return test_bit(port - 1, priv->eq_table.eq[vector].actv_ports.ports);
}
EXPORT_SYMBOL(mlx4_is_eq_vector_valid);

u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	unsigned int i;
	unsigned int sum = 0;

	for (i = 0; i < dev->caps.num_comp_vectors + 1; i++)
		sum += !!test_bit(port - 1,
				  priv->eq_table.eq[i].actv_ports.ports);

	return sum;
}
EXPORT_SYMBOL(mlx4_get_eqs_per_port);

int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	vector = MLX4_CQ_TO_EQ_VECTOR(vector);
	if (vector <= 0 || (vector >= dev->caps.num_comp_vectors + 1))
		return -EINVAL;

	return !!(bitmap_weight(priv->eq_table.eq[vector].actv_ports.ports,
				dev->caps.num_ports) > 1);
}
EXPORT_SYMBOL(mlx4_is_eq_shared);

struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port)
{
	return mlx4_priv(dev)->port[port].rmap;
}
EXPORT_SYMBOL(mlx4_get_cpu_rmap);

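/*
 * Pick (and, if needed, activate) a completion vector for a CQ on the
 * given port.  An explicitly requested vector is honored when it serves
 * the port; otherwise the active EQ with the lowest reference count is
 * chosen.  The vector's IRQ is requested lazily on first use and kept
 * until driver teardown (see mlx4_release_eq()).
 */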
int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err = 0, i = 0;
	u32 min_ref_count_val = (u32)-1;
	int requested_vector = MLX4_CQ_TO_EQ_VECTOR(*vector);
	int *prequested_vector = NULL;


	mutex_lock(&priv->msix_ctl.pool_lock);
	if (requested_vector < (dev->caps.num_comp_vectors + 1) &&
	    (requested_vector >= 0) &&
	    (requested_vector != MLX4_EQ_ASYNC)) {
		if (test_bit(port - 1,
			     priv->eq_table.eq[requested_vector].actv_ports.ports)) {
			prequested_vector = &requested_vector;
		} else {
			struct mlx4_eq *eq;

			for (i = 1; i < port;
			     requested_vector += mlx4_get_eqs_per_port(dev, i++))
				;

			eq = &priv->eq_table.eq[requested_vector];
			if (requested_vector < dev->caps.num_comp_vectors + 1 &&
			    test_bit(port - 1, eq->actv_ports.ports)) {
				prequested_vector = &requested_vector;
			}
		}
	}

	if (!prequested_vector) {
		requested_vector = -1;
		for (i = 0; min_ref_count_val && i < dev->caps.num_comp_vectors + 1;
		     i++) {
			struct mlx4_eq *eq = &priv->eq_table.eq[i];

			if (min_ref_count_val > eq->ref_count &&
			    test_bit(port - 1, eq->actv_ports.ports)) {
				min_ref_count_val = eq->ref_count;
				requested_vector = i;
			}
		}

		if (requested_vector < 0) {
			err = -ENOSPC;
			goto err_unlock;
		}

		prequested_vector = &requested_vector;
	}

	if (!test_bit(*prequested_vector, priv->msix_ctl.pool_bm) &&
	    dev->flags & MLX4_FLAG_MSI_X) {
		set_bit(*prequested_vector, priv->msix_ctl.pool_bm);
		snprintf(priv->eq_table.irq_names +
			 *prequested_vector * MLX4_IRQNAME_SIZE,
			 MLX4_IRQNAME_SIZE, "mlx4-%d@%s",
			 *prequested_vector, dev_name(&dev->persist->pdev->dev));

		err = request_irq(priv->eq_table.eq[*prequested_vector].irq,
				  mlx4_msi_x_interrupt, 0,
				  &priv->eq_table.irq_names[*prequested_vector << 5],
				  priv->eq_table.eq + *prequested_vector);

		if (err) {
			clear_bit(*prequested_vector, priv->msix_ctl.pool_bm);
			*prequested_vector = -1;
		} else {
#if defined(CONFIG_SMP)
			mlx4_set_eq_affinity_hint(priv, *prequested_vector);
#endif
			eq_set_ci(&priv->eq_table.eq[*prequested_vector], 1);
			priv->eq_table.eq[*prequested_vector].have_irq = 1;
		}
	}

	if (!err && *prequested_vector >= 0)
		priv->eq_table.eq[*prequested_vector].ref_count++;

err_unlock:
	mutex_unlock(&priv->msix_ctl.pool_lock);

	if (!err && *prequested_vector >= 0)
		*vector = MLX4_EQ_TO_CQ_VECTOR(*prequested_vector);
	else
		*vector = 0;

	return err;
}
EXPORT_SYMBOL(mlx4_assign_eq);

int mlx4_eq_get_irq(struct mlx4_dev *dev, int cq_vec)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq_vec)].irq;
}
EXPORT_SYMBOL(mlx4_eq_get_irq);

void mlx4_release_eq(struct mlx4_dev *dev, int vec)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int eq_vec = MLX4_CQ_TO_EQ_VECTOR(vec);

	mutex_lock(&priv->msix_ctl.pool_lock);
	priv->eq_table.eq[eq_vec].ref_count--;

	/* once we allocated EQ, we don't release it because it might be bound
	 * to cpu_rmap.
	 */
	mutex_unlock(&priv->msix_ctl.pool_lock);
}
EXPORT_SYMBOL(mlx4_release_eq);