Lines matching "mailbox" in the mlx4 multicast/steering code (mcg.c)

45 return 1 << dev->oper_log_mgm_entry_size; in mlx4_get_mgm_entry_size()
50 return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2); in mlx4_get_qp_per_mgm()
54 struct mlx4_cmd_mailbox *mailbox, in mlx4_QP_FLOW_STEERING_ATTACH() argument
61 err = mlx4_cmd_imm(dev, mailbox->dma, &imm, size, 0, in mlx4_QP_FLOW_STEERING_ATTACH()
83 struct mlx4_cmd_mailbox *mailbox) in mlx4_READ_ENTRY() argument
85 return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG, in mlx4_READ_ENTRY()
90 struct mlx4_cmd_mailbox *mailbox) in mlx4_WRITE_ENTRY() argument
92 return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG, in mlx4_WRITE_ENTRY()
97 struct mlx4_cmd_mailbox *mailbox) in mlx4_WRITE_PROMISC() argument
102 return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1, in mlx4_WRITE_PROMISC()
107 static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, in mlx4_GID_HASH() argument
113 err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod, in mlx4_GID_HASH()
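
Every firmware command wrapper above follows the same mailbox pattern: allocate a DMA-capable command mailbox, pass mailbox->dma to the command, build or read the payload through mailbox->buf, and free the mailbox when done. Below is a minimal sketch of that read-modify-write cycle, assuming it lives in mcg.c where the static mlx4_READ_ENTRY/mlx4_WRITE_ENTRY wrappers are visible; the helper name is made up for illustration.

/* Sketch only: the mailbox lifecycle used by the wrappers above.
 * Assumes mcg.c context so the static READ/WRITE wrappers are in scope. */
static int read_modify_write_mgm(struct mlx4_dev *dev, u32 index,
                                 void (*edit)(struct mlx4_mgm *mgm))
{
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_mgm *mgm;
        int err;

        mailbox = mlx4_alloc_cmd_mailbox(dev);  /* DMA buffer + bus address */
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        mgm = mailbox->buf;                     /* CPU view of the mailbox */

        err = mlx4_READ_ENTRY(dev, index, mailbox);     /* MLX4_CMD_READ_MCG */
        if (err)
                goto out;

        edit(mgm);                              /* modify the entry in place */

        err = mlx4_WRITE_ENTRY(dev, index, mailbox);    /* MLX4_CMD_WRITE_MCG */
out:
        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
}
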
130 if (port < 1 || port > dev->caps.num_ports) in get_promisc_qp()
133 s_steer = &mlx4_priv(dev)->steer[port - 1]; in get_promisc_qp()
135 list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) { in get_promisc_qp()
136 if (pqp->qpn == qpn) in get_promisc_qp()
152 struct mlx4_cmd_mailbox *mailbox; in new_steering_entry() local
161 if (port < 1 || port > dev->caps.num_ports) in new_steering_entry()
162 return -EINVAL; in new_steering_entry()
164 s_steer = &mlx4_priv(dev)->steer[port - 1]; in new_steering_entry()
167 return -ENOMEM; in new_steering_entry()
169 INIT_LIST_HEAD(&new_entry->duplicates); in new_steering_entry()
170 new_entry->index = index; in new_steering_entry()
171 list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]); in new_steering_entry()
180 err = -ENOMEM; in new_steering_entry()
183 dqp->qpn = qpn; in new_steering_entry()
184 list_add_tail(&dqp->list, &new_entry->duplicates); in new_steering_entry()
188 if (list_empty(&s_steer->promisc_qps[steer])) in new_steering_entry()
194 mailbox = mlx4_alloc_cmd_mailbox(dev); in new_steering_entry()
195 if (IS_ERR(mailbox)) { in new_steering_entry()
196 err = -ENOMEM; in new_steering_entry()
199 mgm = mailbox->buf; in new_steering_entry()
201 err = mlx4_READ_ENTRY(dev, index, mailbox); in new_steering_entry()
205 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; in new_steering_entry()
206 prot = be32_to_cpu(mgm->members_count) >> 30; in new_steering_entry()
207 list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) { in new_steering_entry()
209 if (pqp->qpn == qpn) in new_steering_entry()
211 if (members_count == dev->caps.num_qp_per_mgm) { in new_steering_entry()
213 err = -ENOMEM; in new_steering_entry()
218 mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK); in new_steering_entry()
221 mgm->members_count = cpu_to_be32(members_count | (prot << 30)); in new_steering_entry()
222 err = mlx4_WRITE_ENTRY(dev, index, mailbox); in new_steering_entry()
225 mlx4_free_cmd_mailbox(dev, mailbox); in new_steering_entry()
230 list_del(&dqp->list); in new_steering_entry()
233 list_del(&new_entry->list); in new_steering_entry()
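
new_steering_entry, like the rest of this file, treats mgm->members_count as a packed word: the low 24 bits carry the number of QPs in the entry and the top two bits carry the protocol, which is why the code keeps masking with 0xffffff and shifting by 30. A standalone sketch of that encoding follows; the helper names are illustrative, and the driver additionally byte-swaps the word with cpu_to_be32()/be32_to_cpu().

#include <stdint.h>
#include <stdio.h>

/* Host-order view of the MGM members_count word:
 * bits  0..23 - number of QPs currently in the entry
 * bits 30..31 - protocol identifier */
static uint32_t mgm_pack_members(uint32_t count, uint32_t prot)
{
        return (count & 0xffffff) | (prot << 30);
}

static void mgm_unpack_members(uint32_t word, uint32_t *count, uint32_t *prot)
{
        *count = word & 0xffffff;
        *prot = word >> 30;
}

int main(void)
{
        uint32_t word = mgm_pack_members(3, 2 /* hypothetical protocol value */);
        uint32_t count, prot;

        mgm_unpack_members(word, &count, &prot);
        printf("count=%u prot=%u\n", count, prot);      /* count=3 prot=2 */
        return 0;
}
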
248 if (port < 1 || port > dev->caps.num_ports) in existing_steering_entry()
249 return -EINVAL; in existing_steering_entry()
251 s_steer = &mlx4_priv(dev)->steer[port - 1]; in existing_steering_entry()
257 list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) { in existing_steering_entry()
258 if (tmp_entry->index == index) { in existing_steering_entry()
265 return -EINVAL; in existing_steering_entry()
271 list_for_each_entry(dqp, &entry->duplicates, list) { in existing_steering_entry()
272 if (qpn == dqp->qpn) in existing_steering_entry()
279 return -ENOMEM; in existing_steering_entry()
280 dqp->qpn = qpn; in existing_steering_entry()
281 list_add_tail(&dqp->list, &entry->duplicates); in existing_steering_entry()
296 if (port < 1 || port > dev->caps.num_ports) in check_duplicate_entry()
299 s_steer = &mlx4_priv(dev)->steer[port - 1]; in check_duplicate_entry()
307 list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) { in check_duplicate_entry()
308 if (tmp_entry->index == index) { in check_duplicate_entry()
317 list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) { in check_duplicate_entry()
318 if (dqp->qpn == qpn) { in check_duplicate_entry()
319 list_del(&dqp->list); in check_duplicate_entry()
334 struct mlx4_cmd_mailbox *mailbox; in promisc_steering_entry() local
340 if (port < 1 || port > dev->caps.num_ports) in promisc_steering_entry()
343 mailbox = mlx4_alloc_cmd_mailbox(dev); in promisc_steering_entry()
344 if (IS_ERR(mailbox)) in promisc_steering_entry()
346 mgm = mailbox->buf; in promisc_steering_entry()
348 if (mlx4_READ_ENTRY(dev, index, mailbox)) in promisc_steering_entry()
350 m_count = be32_to_cpu(mgm->members_count) & 0xffffff; in promisc_steering_entry()
355 u32 qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK; in promisc_steering_entry()
363 mlx4_free_cmd_mailbox(dev, mailbox); in promisc_steering_entry()
377 if (port < 1 || port > dev->caps.num_ports) in can_remove_steering_entry()
380 s_steer = &mlx4_priv(dev)->steer[port - 1]; in can_remove_steering_entry()
389 list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) { in can_remove_steering_entry()
390 if (entry->index == index) { in can_remove_steering_entry()
391 if (list_empty(&entry->duplicates) || in can_remove_steering_entry()
398 list_del(&entry->list); in can_remove_steering_entry()
400 &entry->duplicates, in can_remove_steering_entry()
402 list_del(&pqp->list); in can_remove_steering_entry()
422 struct mlx4_cmd_mailbox *mailbox; in add_promisc_qp() local
434 if (port < 1 || port > dev->caps.num_ports) in add_promisc_qp()
435 return -EINVAL; in add_promisc_qp()
437 s_steer = &mlx4_priv(dev)->steer[port - 1]; in add_promisc_qp()
439 mutex_lock(&priv->mcg_table.mutex); in add_promisc_qp()
448 err = -ENOMEM; in add_promisc_qp()
451 pqp->qpn = qpn; in add_promisc_qp()
453 mailbox = mlx4_alloc_cmd_mailbox(dev); in add_promisc_qp()
454 if (IS_ERR(mailbox)) { in add_promisc_qp()
455 err = -ENOMEM; in add_promisc_qp()
458 mgm = mailbox->buf; in add_promisc_qp()
466 &s_steer->steer_entries[steer], in add_promisc_qp()
468 err = mlx4_READ_ENTRY(dev, entry->index, mailbox); in add_promisc_qp()
472 members_count = be32_to_cpu(mgm->members_count) & in add_promisc_qp()
474 prot = be32_to_cpu(mgm->members_count) >> 30; in add_promisc_qp()
477 if ((be32_to_cpu(mgm->qp[i]) & in add_promisc_qp()
484 err = -ENOMEM; in add_promisc_qp()
487 dqp->qpn = qpn; in add_promisc_qp()
488 list_add_tail(&dqp->list, in add_promisc_qp()
489 &entry->duplicates); in add_promisc_qp()
496 dev->caps.num_qp_per_mgm) { in add_promisc_qp()
498 err = -ENOMEM; in add_promisc_qp()
501 mgm->qp[members_count++] = in add_promisc_qp()
503 mgm->members_count = in add_promisc_qp()
506 err = mlx4_WRITE_ENTRY(dev, entry->index, in add_promisc_qp()
507 mailbox); in add_promisc_qp()
515 list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]); in add_promisc_qp()
519 list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) { in add_promisc_qp()
520 if (members_count == dev->caps.num_qp_per_mgm) { in add_promisc_qp()
522 err = -ENOMEM; in add_promisc_qp()
525 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); in add_promisc_qp()
527 mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30); in add_promisc_qp()
529 err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox); in add_promisc_qp()
533 mlx4_free_cmd_mailbox(dev, mailbox); in add_promisc_qp()
534 mutex_unlock(&priv->mcg_table.mutex); in add_promisc_qp()
538 list_del(&pqp->list); in add_promisc_qp()
540 mlx4_free_cmd_mailbox(dev, mailbox); in add_promisc_qp()
544 mutex_unlock(&priv->mcg_table.mutex); in add_promisc_qp()
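
When a promiscuous QP is added, add_promisc_qp finishes by rebuilding the per-port default steering entry: it copies every QP on the promisc list into mgm->qp, stores the count with the Ethernet protocol in the top bits, and pushes the whole entry with mlx4_WRITE_PROMISC. A condensed sketch of that final step, assuming mcg.c context and the steering mutex held; the helper name is made up.

/* Sketch: serialize the promiscuous QP list for (port, steer) into the
 * mailbox and write it in "promisc" mode, as add_promisc_qp does. */
static int flush_promisc_list(struct mlx4_dev *dev, u8 port,
                              enum mlx4_steer_type steer,
                              struct mlx4_cmd_mailbox *mailbox)
{
        struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[port - 1];
        struct mlx4_mgm *mgm = mailbox->buf;
        struct mlx4_promisc_qp *dqp;
        u32 members_count = 0;

        list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
                mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
        mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);

        return mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
}
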
553 struct mlx4_cmd_mailbox *mailbox; in remove_promisc_qp() local
564 if (port < 1 || port > dev->caps.num_ports) in remove_promisc_qp()
565 return -EINVAL; in remove_promisc_qp()
567 s_steer = &mlx4_priv(dev)->steer[port - 1]; in remove_promisc_qp()
568 mutex_lock(&priv->mcg_table.mutex); in remove_promisc_qp()
579 list_del(&pqp->list); in remove_promisc_qp()
582 mailbox = mlx4_alloc_cmd_mailbox(dev); in remove_promisc_qp()
583 if (IS_ERR(mailbox)) { in remove_promisc_qp()
584 err = -ENOMEM; in remove_promisc_qp()
588 mgm = mailbox->buf; in remove_promisc_qp()
590 list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) in remove_promisc_qp()
591 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); in remove_promisc_qp()
592 mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30); in remove_promisc_qp()
594 err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox); in remove_promisc_qp()
601 &s_steer->steer_entries[steer], in remove_promisc_qp()
604 list_for_each_entry(dqp, &entry->duplicates, list) { in remove_promisc_qp()
605 if (dqp->qpn == qpn) { in remove_promisc_qp()
614 list_del(&dqp->list); in remove_promisc_qp()
617 int loc = -1; in remove_promisc_qp()
620 entry->index, in remove_promisc_qp()
621 mailbox); in remove_promisc_qp()
625 be32_to_cpu(mgm->members_count) & in remove_promisc_qp()
629 qpn, entry->index); in remove_promisc_qp()
630 list_del(&entry->list); in remove_promisc_qp()
636 if ((be32_to_cpu(mgm->qp[i]) & in remove_promisc_qp()
644 qpn, entry->index); in remove_promisc_qp()
645 err = -EINVAL; in remove_promisc_qp()
652 mgm->qp[loc] = mgm->qp[members_count - 1]; in remove_promisc_qp()
653 mgm->qp[members_count - 1] = 0; in remove_promisc_qp()
654 mgm->members_count = in remove_promisc_qp()
655 cpu_to_be32(--members_count | in remove_promisc_qp()
659 entry->index, in remove_promisc_qp()
660 mailbox); in remove_promisc_qp()
668 mlx4_free_cmd_mailbox(dev, mailbox); in remove_promisc_qp()
671 list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]); in remove_promisc_qp()
675 mutex_unlock(&priv->mcg_table.mutex); in remove_promisc_qp()
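
Both remove_promisc_qp and mlx4_qp_detach_common drop a QP from an MGM entry the same way: rather than shifting the array, they copy the last QP into the vacated slot, zero the old last slot, and decrement the packed count. A standalone sketch of that swap-with-last removal follows, using plain host-order values; the driver operates on __be32 words.

#include <stdint.h>
#include <stdio.h>

/* Remove qp[loc] by swapping in the last element, mirroring
 * "mgm->qp[loc] = mgm->qp[members_count - 1]" in the driver. */
static uint32_t remove_qp_at(uint32_t *qp, uint32_t members_count, uint32_t loc)
{
        qp[loc] = qp[members_count - 1];
        qp[members_count - 1] = 0;
        return members_count - 1;       /* new count */
}

int main(void)
{
        uint32_t qp[4] = { 0x40, 0x41, 0x42, 0x43 };
        uint32_t n = remove_qp_at(qp, 4, 1);    /* drop 0x41 */

        printf("n=%u qp={0x%x,0x%x,0x%x}\n", n, qp[0], qp[1], qp[2]);
        /* prints: n=3 qp={0x40,0x43,0x42} */
        return 0;
}
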
685 * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1
691 * If no AMGM exists for given gid, *index = -1, *prev = index of last
699 struct mlx4_cmd_mailbox *mailbox; in find_entry() local
700 struct mlx4_mgm *mgm = mgm_mailbox->buf; in find_entry()
705 !!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0; in find_entry()
707 mailbox = mlx4_alloc_cmd_mailbox(dev); in find_entry()
708 if (IS_ERR(mailbox)) in find_entry()
709 return -ENOMEM; in find_entry()
710 mgid = mailbox->buf; in find_entry()
714 err = mlx4_GID_HASH(dev, mailbox, &hash, op_mod); in find_entry()
715 mlx4_free_cmd_mailbox(dev, mailbox); in find_entry()
723 *prev = -1; in find_entry()
730 if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { in find_entry()
733 err = -EINVAL; in find_entry()
738 if (!memcmp(mgm->gid, gid, 16) && in find_entry()
739 be32_to_cpu(mgm->members_count) >> 30 == prot) in find_entry()
743 *index = be32_to_cpu(mgm->next_gid_index) >> 6; in find_entry()
746 *index = -1; in find_entry()
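
find_entry resolves a GID by first asking the firmware for its hash bucket (mlx4_GID_HASH) and then walking the AMGM chain; the link to the next entry lives in the upper bits of next_gid_index, hence the >> 6 when following it and the << 6 when linking entries in mlx4_qp_attach_common. Below is a simplified sketch of that walk, assuming mcg.c context and omitting the protocol check and empty-entry handling of the real function; the helper name is made up.

/* Sketch: follow the AMGM hash chain starting at "hash" until the wanted
 * GID is found or the chain ends. next_gid_index holds the next MGM index
 * in bits 6 and up, so 0 means "end of chain". */
static int walk_chain(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                      const u8 gid[16], u16 hash, int *index, int *prev)
{
        struct mlx4_mgm *mgm = mailbox->buf;
        int err;

        *index = hash;
        *prev = -1;

        do {
                err = mlx4_READ_ENTRY(dev, *index, mailbox);
                if (err)
                        return err;
                if (!memcmp(mgm->gid, gid, 16))
                        return 0;                       /* found */
                *prev = *index;
                *index = be32_to_cpu(mgm->next_gid_index) >> 6;
        } while (*index);

        *index = -1;                                    /* not found */
        return 0;
}
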
765 return -EINVAL; in mlx4_map_sw_to_hw_steering_mode()
776 flags = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0; in trans_rule_ctrl_to_hw()
777 flags |= ctrl->exclusive ? (1 << 2) : 0; in trans_rule_ctrl_to_hw()
778 flags |= ctrl->allow_loopback ? (1 << 3) : 0; in trans_rule_ctrl_to_hw()
780 hw->flags = flags; in trans_rule_ctrl_to_hw()
781 hw->type = __promisc_mode[ctrl->promisc_mode]; in trans_rule_ctrl_to_hw()
782 hw->prio = cpu_to_be16(ctrl->priority); in trans_rule_ctrl_to_hw()
783 hw->port = ctrl->port; in trans_rule_ctrl_to_hw()
784 hw->qpn = cpu_to_be32(ctrl->qpn); in trans_rule_ctrl_to_hw()
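
trans_rule_ctrl_to_hw packs the rule control flags bit by bit: bit 0 selects LIFO queueing, bit 2 marks the rule exclusive, and bit 3 allows loopback. A standalone illustration of that layout follows; the function name is local to the example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Flag layout used by trans_rule_ctrl_to_hw():
 *   bit 0 - queue mode is LIFO (instead of FIFO)
 *   bit 2 - exclusive rule
 *   bit 3 - allow loopback */
static uint8_t rule_ctrl_flags(bool lifo, bool exclusive, bool allow_loopback)
{
        uint8_t flags = lifo ? 1 : 0;

        flags |= exclusive ? (1 << 2) : 0;
        flags |= allow_loopback ? (1 << 3) : 0;
        return flags;
}

int main(void)
{
        printf("0x%x\n", rule_ctrl_flags(false, true, true));   /* prints 0xc */
        return 0;
}
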
802 return -EINVAL; in mlx4_map_sw_to_hw_steering_id()
829 return -EINVAL; in mlx4_hw_rule_sz()
839 if (mlx4_hw_rule_sz(dev, spec->id) < 0) in parse_trans_rule()
840 return -EINVAL; in parse_trans_rule()
841 memset(rule_hw, 0, mlx4_hw_rule_sz(dev, spec->id)); in parse_trans_rule()
842 rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]); in parse_trans_rule()
843 rule_hw->size = mlx4_hw_rule_sz(dev, spec->id) >> 2; in parse_trans_rule()
845 switch (spec->id) { in parse_trans_rule()
847 memcpy(rule_hw->eth.dst_mac, spec->eth.dst_mac, ETH_ALEN); in parse_trans_rule()
848 memcpy(rule_hw->eth.dst_mac_msk, spec->eth.dst_mac_msk, in parse_trans_rule()
850 memcpy(rule_hw->eth.src_mac, spec->eth.src_mac, ETH_ALEN); in parse_trans_rule()
851 memcpy(rule_hw->eth.src_mac_msk, spec->eth.src_mac_msk, in parse_trans_rule()
853 if (spec->eth.ether_type_enable) { in parse_trans_rule()
854 rule_hw->eth.ether_type_enable = 1; in parse_trans_rule()
855 rule_hw->eth.ether_type = spec->eth.ether_type; in parse_trans_rule()
857 rule_hw->eth.vlan_tag = spec->eth.vlan_id; in parse_trans_rule()
858 rule_hw->eth.vlan_tag_msk = spec->eth.vlan_id_msk; in parse_trans_rule()
862 rule_hw->ib.l3_qpn = spec->ib.l3_qpn; in parse_trans_rule()
863 rule_hw->ib.qpn_mask = spec->ib.qpn_msk; in parse_trans_rule()
864 memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16); in parse_trans_rule()
865 memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16); in parse_trans_rule()
869 return -EOPNOTSUPP; in parse_trans_rule()
872 rule_hw->ipv4.src_ip = spec->ipv4.src_ip; in parse_trans_rule()
873 rule_hw->ipv4.src_ip_msk = spec->ipv4.src_ip_msk; in parse_trans_rule()
874 rule_hw->ipv4.dst_ip = spec->ipv4.dst_ip; in parse_trans_rule()
875 rule_hw->ipv4.dst_ip_msk = spec->ipv4.dst_ip_msk; in parse_trans_rule()
880 rule_hw->tcp_udp.dst_port = spec->tcp_udp.dst_port; in parse_trans_rule()
881 rule_hw->tcp_udp.dst_port_msk = spec->tcp_udp.dst_port_msk; in parse_trans_rule()
882 rule_hw->tcp_udp.src_port = spec->tcp_udp.src_port; in parse_trans_rule()
883 rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk; in parse_trans_rule()
887 rule_hw->vxlan.vni = in parse_trans_rule()
888 cpu_to_be32(be32_to_cpu(spec->vxlan.vni) << 8); in parse_trans_rule()
889 rule_hw->vxlan.vni_mask = in parse_trans_rule()
890 cpu_to_be32(be32_to_cpu(spec->vxlan.vni_mask) << 8); in parse_trans_rule()
894 return -EINVAL; in parse_trans_rule()
897 return __rule_hw_sz[spec->id]; in parse_trans_rule()
909 len += scnprintf(buf + len, BUF_SIZE - len, in mlx4_err_rule()
911 rule->port, rule->priority, rule->qpn); in mlx4_err_rule()
913 list_for_each_entry(cur, &rule->list, list) { in mlx4_err_rule()
914 switch (cur->id) { in mlx4_err_rule()
916 len += scnprintf(buf + len, BUF_SIZE - len, in mlx4_err_rule()
917 "dmac = %pM ", &cur->eth.dst_mac); in mlx4_err_rule()
918 if (cur->eth.ether_type) in mlx4_err_rule()
919 len += scnprintf(buf + len, BUF_SIZE - len, in mlx4_err_rule()
921 be16_to_cpu(cur->eth.ether_type)); in mlx4_err_rule()
922 if (cur->eth.vlan_id) in mlx4_err_rule()
923 len += scnprintf(buf + len, BUF_SIZE - len, in mlx4_err_rule()
924 "vlan-id = %d ", in mlx4_err_rule()
925 be16_to_cpu(cur->eth.vlan_id)); in mlx4_err_rule()
929 if (cur->ipv4.src_ip) in mlx4_err_rule()
930 len += scnprintf(buf + len, BUF_SIZE - len, in mlx4_err_rule()
931 "src-ip = %pI4 ", in mlx4_err_rule()
932 &cur->ipv4.src_ip); in mlx4_err_rule()
933 if (cur->ipv4.dst_ip) in mlx4_err_rule()
934 len += scnprintf(buf + len, BUF_SIZE - len, in mlx4_err_rule()
935 "dst-ip = %pI4 ", in mlx4_err_rule()
936 &cur->ipv4.dst_ip); in mlx4_err_rule()
941 if (cur->tcp_udp.src_port) in mlx4_err_rule()
942 len += scnprintf(buf + len, BUF_SIZE - len, in mlx4_err_rule()
943 "src-port = %d ", in mlx4_err_rule()
944 be16_to_cpu(cur->tcp_udp.src_port)); in mlx4_err_rule()
945 if (cur->tcp_udp.dst_port) in mlx4_err_rule()
946 len += scnprintf(buf + len, BUF_SIZE - len, in mlx4_err_rule()
947 "dst-port = %d ", in mlx4_err_rule()
948 be16_to_cpu(cur->tcp_udp.dst_port)); in mlx4_err_rule()
952 len += scnprintf(buf + len, BUF_SIZE - len, in mlx4_err_rule()
953 "dst-gid = %pI6\n", cur->ib.dst_gid); in mlx4_err_rule()
954 len += scnprintf(buf + len, BUF_SIZE - len, in mlx4_err_rule()
955 "dst-gid-mask = %pI6\n", in mlx4_err_rule()
956 cur->ib.dst_gid_msk); in mlx4_err_rule()
960 len += scnprintf(buf + len, BUF_SIZE - len, in mlx4_err_rule()
961 "VNID = %d ", be32_to_cpu(cur->vxlan.vni)); in mlx4_err_rule()
970 len += scnprintf(buf + len, BUF_SIZE - len, "\n"); in mlx4_err_rule()
980 struct mlx4_cmd_mailbox *mailbox; in mlx4_flow_attach() local
985 mailbox = mlx4_alloc_cmd_mailbox(dev); in mlx4_flow_attach()
986 if (IS_ERR(mailbox)) in mlx4_flow_attach()
987 return PTR_ERR(mailbox); in mlx4_flow_attach()
989 if (!mlx4_qp_lookup(dev, rule->qpn)) { in mlx4_flow_attach()
991 ret = -EINVAL; in mlx4_flow_attach()
995 trans_rule_ctrl_to_hw(rule, mailbox->buf); in mlx4_flow_attach()
999 list_for_each_entry(cur, &rule->list, list) { in mlx4_flow_attach()
1000 ret = parse_trans_rule(dev, cur, mailbox->buf + size); in mlx4_flow_attach()
1007 ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id); in mlx4_flow_attach()
1008 if (ret == -ENOMEM) { in mlx4_flow_attach()
1013 if (ret == -ENXIO) { in mlx4_flow_attach()
1014 if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) in mlx4_flow_attach()
1031 mlx4_free_cmd_mailbox(dev, mailbox); in mlx4_flow_attach()
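
mlx4_flow_attach serializes the control block plus every spec on rule->list into a single mailbox and submits it with MLX4_QP_FLOW_STEERING_ATTACH, handing back a registration id for a later detach. Here is a hedged caller-side sketch of building and attaching a rule that steers one destination MAC to a QP; the struct and enum names follow the mlx4 flow-steering API in include/linux/mlx4/device.h as I understand it and may differ between kernel versions.

#include <linux/etherdevice.h>
#include <linux/mlx4/device.h>

/* Sketch only: attach a device-managed flow-steering rule that steers
 * traffic for one destination MAC on a port to the given QP. Field and
 * enum names are assumptions, not verified against every kernel version. */
static int attach_mac_rule(struct mlx4_dev *dev, u8 port, u32 qpn,
                           const u8 *mac, u64 *reg_id)
{
        struct mlx4_spec_list spec = {
                .id = MLX4_NET_TRANS_RULE_ID_ETH,
        };
        struct mlx4_net_trans_rule rule = {
                .queue_mode     = MLX4_NET_TRANS_Q_FIFO,
                .exclusive      = false,
                .allow_loopback = true,
                .promisc_mode   = MLX4_FS_REGULAR,
                .port           = port,
                .priority       = MLX4_DOMAIN_NIC,
                .qpn            = qpn,
        };

        INIT_LIST_HEAD(&rule.list);
        memcpy(spec.eth.dst_mac, mac, ETH_ALEN);
        memset(spec.eth.dst_mac_msk, 0xff, ETH_ALEN);   /* match all six bytes */
        list_add_tail(&spec.list, &rule.list);

        /* On success *reg_id identifies the rule; release it later with
         * mlx4_flow_detach(dev, *reg_id). */
        return mlx4_flow_attach(dev, &rule, reg_id);
}
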
1109 struct mlx4_cmd_mailbox *mailbox; in mlx4_qp_attach_common() local
1112 int index = -1, prev; in mlx4_qp_attach_common()
1119 mailbox = mlx4_alloc_cmd_mailbox(dev); in mlx4_qp_attach_common()
1120 if (IS_ERR(mailbox)) in mlx4_qp_attach_common()
1121 return PTR_ERR(mailbox); in mlx4_qp_attach_common()
1122 mgm = mailbox->buf; in mlx4_qp_attach_common()
1124 mutex_lock(&priv->mcg_table.mutex); in mlx4_qp_attach_common()
1126 mailbox, &prev, &index); in mlx4_qp_attach_common()
1130 if (index != -1) { in mlx4_qp_attach_common()
1131 if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { in mlx4_qp_attach_common()
1133 memcpy(mgm->gid, gid, 16); in mlx4_qp_attach_common()
1138 index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap); in mlx4_qp_attach_common()
1139 if (index == -1) { in mlx4_qp_attach_common()
1141 err = -ENOMEM; in mlx4_qp_attach_common()
1144 index += dev->caps.num_mgms; in mlx4_qp_attach_common()
1148 memcpy(mgm->gid, gid, 16); in mlx4_qp_attach_common()
1151 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; in mlx4_qp_attach_common()
1152 if (members_count == dev->caps.num_qp_per_mgm) { in mlx4_qp_attach_common()
1154 err = -ENOMEM; in mlx4_qp_attach_common()
1159 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) { in mlx4_qp_attach_common()
1160 mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn); in mlx4_qp_attach_common()
1166 mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) | in mlx4_qp_attach_common()
1169 mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK); in mlx4_qp_attach_common()
1171 mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30); in mlx4_qp_attach_common()
1173 err = mlx4_WRITE_ENTRY(dev, index, mailbox); in mlx4_qp_attach_common()
1180 err = mlx4_READ_ENTRY(dev, prev, mailbox); in mlx4_qp_attach_common()
1184 mgm->next_gid_index = cpu_to_be32(index << 6); in mlx4_qp_attach_common()
1186 err = mlx4_WRITE_ENTRY(dev, prev, mailbox); in mlx4_qp_attach_common()
1191 if (prot == MLX4_PROT_ETH && index != -1) { in mlx4_qp_attach_common()
1195 index, qp->qpn); in mlx4_qp_attach_common()
1198 index, qp->qpn); in mlx4_qp_attach_common()
1200 if (err && link && index != -1) { in mlx4_qp_attach_common()
1201 if (index < dev->caps.num_mgms) in mlx4_qp_attach_common()
1203 index, dev->caps.num_mgms); in mlx4_qp_attach_common()
1205 mlx4_bitmap_free(&priv->mcg_table.bitmap, in mlx4_qp_attach_common()
1206 index - dev->caps.num_mgms, MLX4_USE_RR); in mlx4_qp_attach_common()
1208 mutex_unlock(&priv->mcg_table.mutex); in mlx4_qp_attach_common()
1210 mlx4_free_cmd_mailbox(dev, mailbox); in mlx4_qp_attach_common()
1218 struct mlx4_cmd_mailbox *mailbox; in mlx4_qp_detach_common() local
1222 int i, loc = -1; in mlx4_qp_detach_common()
1227 mailbox = mlx4_alloc_cmd_mailbox(dev); in mlx4_qp_detach_common()
1228 if (IS_ERR(mailbox)) in mlx4_qp_detach_common()
1229 return PTR_ERR(mailbox); in mlx4_qp_detach_common()
1230 mgm = mailbox->buf; in mlx4_qp_detach_common()
1232 mutex_lock(&priv->mcg_table.mutex); in mlx4_qp_detach_common()
1235 mailbox, &prev, &index); in mlx4_qp_detach_common()
1239 if (index == -1) { in mlx4_qp_detach_common()
1241 err = -EINVAL; in mlx4_qp_detach_common()
1249 check_duplicate_entry(dev, port, steer, index, qp->qpn) && in mlx4_qp_detach_common()
1250 !promisc_steering_entry(dev, port, steer, index, qp->qpn, NULL)) in mlx4_qp_detach_common()
1253 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; in mlx4_qp_detach_common()
1255 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) { in mlx4_qp_detach_common()
1260 if (loc == -1) { in mlx4_qp_detach_common()
1261 mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn); in mlx4_qp_detach_common()
1262 err = -EINVAL; in mlx4_qp_detach_common()
1267 mgm->qp[loc] = mgm->qp[members_count - 1]; in mlx4_qp_detach_common()
1268 mgm->qp[members_count - 1] = 0; in mlx4_qp_detach_common()
1269 mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30); in mlx4_qp_detach_common()
1273 index, qp->qpn); in mlx4_qp_detach_common()
1275 err = mlx4_WRITE_ENTRY(dev, index, mailbox); in mlx4_qp_detach_common()
1280 mgm->members_count = cpu_to_be32((u32) prot << 30); in mlx4_qp_detach_common()
1282 if (prev == -1) { in mlx4_qp_detach_common()
1284 int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6; in mlx4_qp_detach_common()
1286 err = mlx4_READ_ENTRY(dev, amgm_index, mailbox); in mlx4_qp_detach_common()
1290 memset(mgm->gid, 0, 16); in mlx4_qp_detach_common()
1292 err = mlx4_WRITE_ENTRY(dev, index, mailbox); in mlx4_qp_detach_common()
1297 if (amgm_index < dev->caps.num_mgms) in mlx4_qp_detach_common()
1299 index, amgm_index, dev->caps.num_mgms); in mlx4_qp_detach_common()
1301 mlx4_bitmap_free(&priv->mcg_table.bitmap, in mlx4_qp_detach_common()
1302 amgm_index - dev->caps.num_mgms, MLX4_USE_RR); in mlx4_qp_detach_common()
1306 int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6; in mlx4_qp_detach_common()
1307 err = mlx4_READ_ENTRY(dev, prev, mailbox); in mlx4_qp_detach_common()
1311 mgm->next_gid_index = cpu_to_be32(cur_next_index << 6); in mlx4_qp_detach_common()
1313 err = mlx4_WRITE_ENTRY(dev, prev, mailbox); in mlx4_qp_detach_common()
1317 if (index < dev->caps.num_mgms) in mlx4_qp_detach_common()
1319 prev, index, dev->caps.num_mgms); in mlx4_qp_detach_common()
1321 mlx4_bitmap_free(&priv->mcg_table.bitmap, in mlx4_qp_detach_common()
1322 index - dev->caps.num_mgms, MLX4_USE_RR); in mlx4_qp_detach_common()
1326 mutex_unlock(&priv->mcg_table.mutex); in mlx4_qp_detach_common()
1328 mlx4_free_cmd_mailbox(dev, mailbox); in mlx4_qp_detach_common()
1329 if (err && dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) in mlx4_qp_detach_common()
1339 struct mlx4_cmd_mailbox *mailbox; in mlx4_QP_ATTACH() local
1344 return -EBADF; in mlx4_QP_ATTACH()
1346 mailbox = mlx4_alloc_cmd_mailbox(dev); in mlx4_QP_ATTACH()
1347 if (IS_ERR(mailbox)) in mlx4_QP_ATTACH()
1348 return PTR_ERR(mailbox); in mlx4_QP_ATTACH()
1350 memcpy(mailbox->buf, gid, 16); in mlx4_QP_ATTACH()
1351 qpn = qp->qpn; in mlx4_QP_ATTACH()
1356 err = mlx4_cmd(dev, mailbox->dma, qpn, attach, in mlx4_QP_ATTACH()
1360 mlx4_free_cmd_mailbox(dev, mailbox); in mlx4_QP_ATTACH()
1362 dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) in mlx4_QP_ATTACH()
1384 rule.qpn = qp->qpn; in mlx4_trans_to_dmfs_attach()
1400 return -EINVAL; in mlx4_trans_to_dmfs_attach()
1411 switch (dev->caps.steering_mode) { in mlx4_multicast_attach()
1433 return -EINVAL; in mlx4_multicast_attach()
1441 switch (dev->caps.steering_mode) { in mlx4_multicast_detach()
1461 return -EINVAL; in mlx4_multicast_detach()
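
mlx4_multicast_attach and mlx4_multicast_detach are the exported entry points; they dispatch on dev->caps.steering_mode, either translating the request into a DMFS rule (mlx4_trans_to_dmfs_attach) or falling back to the MGM attach/detach paths above. A hedged caller-side sketch follows; the exact exported signatures, including the block_mcast_loopback and reg_id parameters, are assumptions that may vary across kernel versions.

/* Sketch only: join and later leave a multicast group for an Ethernet QP.
 * Parameter order and the reg_id out-parameter are assumptions based on
 * how mlx4_en uses these exports; verify against your kernel's headers. */
static int join_and_leave(struct mlx4_dev *dev, struct mlx4_qp *qp,
                          u8 gid[16], u8 port)
{
        u64 reg_id = 0;
        int err;

        err = mlx4_multicast_attach(dev, qp, gid, port,
                                    0 /* don't block multicast loopback */,
                                    MLX4_PROT_ETH, &reg_id);
        if (err)
                return err;

        /* ... receive traffic ... */

        return mlx4_multicast_detach(dev, qp, gid, MLX4_PROT_ETH, reg_id);
}
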
1479 regid_p = &dev->regid_promisc_array[port]; in mlx4_flow_steer_promisc_add()
1482 regid_p = &dev->regid_allmulti_array[port]; in mlx4_flow_steer_promisc_add()
1485 return -1; in mlx4_flow_steer_promisc_add()
1489 return -1; in mlx4_flow_steer_promisc_add()
1509 regid_p = &dev->regid_promisc_array[port]; in mlx4_flow_steer_promisc_remove()
1512 regid_p = &dev->regid_allmulti_array[port]; in mlx4_flow_steer_promisc_remove()
1515 return -1; in mlx4_flow_steer_promisc_remove()
1519 return -1; in mlx4_flow_steer_promisc_remove()
1564 u32 qpn = (u32) vhcr->in_param & 0xffffffff; in mlx4_PROMISC_wrapper()
1565 int port = mlx4_slave_convert_port(dev, slave, vhcr->in_param >> 62); in mlx4_PROMISC_wrapper()
1566 enum mlx4_steer_type steer = vhcr->in_modifier; in mlx4_PROMISC_wrapper()
1569 return -EINVAL; in mlx4_PROMISC_wrapper()
1575 if (vhcr->op_modifier) in mlx4_PROMISC_wrapper()
1631 if (dev->caps.steering_mode == in mlx4_init_mcg_table()
1634 err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms, in mlx4_init_mcg_table()
1635 dev->caps.num_amgms - 1, 0, 0); in mlx4_init_mcg_table()
1639 mutex_init(&priv->mcg_table.mutex); in mlx4_init_mcg_table()
1646 if (dev->caps.steering_mode != in mlx4_cleanup_mcg_table()
1648 mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap); in mlx4_cleanup_mcg_table()
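
mlx4_init_mcg_table sizes the bitmap with dev->caps.num_amgms entries because the first num_mgms MGM entries form the hash table proper and everything above them is AMGM overflow space; the attach and detach paths therefore translate between bitmap offsets and absolute indexes by adding or subtracting dev->caps.num_mgms. A short sketch of that translation, assuming mcg.c context; the helper names are illustrative.

/* Sketch: AMGM index management in mcg.c terms. Bitmap offsets are
 * relative; MGM indexes handed to READ/WRITE_ENTRY are absolute. */
static int alloc_amgm_index(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap);

        if (index == -1)
                return -ENOMEM;
        return index + dev->caps.num_mgms;      /* absolute AMGM index */
}

static void free_amgm_index(struct mlx4_dev *dev, int index)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        if (index >= dev->caps.num_mgms)        /* only AMGM entries are bitmap-backed */
                mlx4_bitmap_free(&priv->mcg_table.bitmap,
                                 index - dev->caps.num_mgms, MLX4_USE_RR);
}
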