Lines Matching refs:dev

56 void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)  in mlx4_qp_event()  argument
58 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; in mlx4_qp_event()
63 qp = __mlx4_qp_lookup(dev, qpn); in mlx4_qp_event()
70 mlx4_dbg(dev, "Async event for none existent QP %08x\n", qpn); in mlx4_qp_event()
79 static int is_master_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp, int *real_qp0, int *proxy_qp0) in is_master_qp0() argument
83 u32 pf_proxy_offset = dev->phys_caps.base_proxy_sqpn + 8 * mlx4_master_func_num(dev); in is_master_qp0()
86 *real_qp0 = qp->qpn >= dev->phys_caps.base_sqpn && in is_master_qp0()
87 qp->qpn <= dev->phys_caps.base_sqpn + 1; in is_master_qp0()
92 static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, in __mlx4_qp_modify() argument
138 struct mlx4_priv *priv = mlx4_priv(dev); in __mlx4_qp_modify()
150 ret = mlx4_cmd(dev, 0, qp->qpn, 2, in __mlx4_qp_modify()
152 if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR && in __mlx4_qp_modify()
154 is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) { in __mlx4_qp_modify()
164 mailbox = mlx4_alloc_cmd_mailbox(dev); in __mlx4_qp_modify()
169 u64 mtt_addr = mlx4_mtt_addr(dev, mtt); in __mlx4_qp_modify()
177 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) in __mlx4_qp_modify()
179 cpu_to_be16(mlx4_qp_roce_entropy(dev, qp->qpn)); in __mlx4_qp_modify()
187 ret = mlx4_cmd(dev, mailbox->dma, in __mlx4_qp_modify()
192 if (mlx4_is_master(dev) && is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) { in __mlx4_qp_modify()
209 mlx4_free_cmd_mailbox(dev, mailbox); in __mlx4_qp_modify()
213 int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, in mlx4_qp_modify() argument
219 return __mlx4_qp_modify(dev, mtt, cur_state, new_state, context, in mlx4_qp_modify()
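
The exported mlx4_qp_modify() above drives a single firmware state transition through __mlx4_qp_modify(), which posts the command through a mailbox. A minimal caller sketch, assuming the prototype declared in include/linux/mlx4/qp.h; the helper name and the pre-built context are illustrative only:

#include <linux/mlx4/device.h>
#include <linux/mlx4/qp.h>

/* Hypothetical helper: move an already-initialized QP from INIT to RTR.
 * dev, mtt, qp and context are assumed to be set up by the caller. */
static int example_qp_init_to_rtr(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
				  struct mlx4_qp *qp,
				  struct mlx4_qp_context *context)
{
	/* optpar = 0: no optional parameters; sqd_event = 0: no SQ-drained event */
	return mlx4_qp_modify(dev, mtt, MLX4_QP_STATE_INIT, MLX4_QP_STATE_RTR,
			      context, 0, 0, qp);
}
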
224 int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, in __mlx4_qp_reserve_range() argument
230 struct mlx4_priv *priv = mlx4_priv(dev); in __mlx4_qp_reserve_range()
252 int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, in mlx4_qp_reserve_range() argument
261 flags &= dev->caps.alloc_res_qp_mask; in mlx4_qp_reserve_range()
263 if (mlx4_is_mfunc(dev)) { in mlx4_qp_reserve_range()
266 err = mlx4_cmd_imm(dev, in_param, &out_param, in mlx4_qp_reserve_range()
276 return __mlx4_qp_reserve_range(dev, cnt, align, base, flags); in mlx4_qp_reserve_range()
280 void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) in __mlx4_qp_release_range() argument
282 struct mlx4_priv *priv = mlx4_priv(dev); in __mlx4_qp_release_range()
285 if (mlx4_is_qp_reserved(dev, (u32) base_qpn)) in __mlx4_qp_release_range()
290 void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) in mlx4_qp_release_range() argument
298 if (mlx4_is_mfunc(dev)) { in mlx4_qp_release_range()
301 err = mlx4_cmd(dev, in_param, RES_QP, RES_OP_RESERVE, in mlx4_qp_release_range()
305 mlx4_warn(dev, "Failed to release qp range base:%d cnt:%d\n", in mlx4_qp_release_range()
309 __mlx4_qp_release_range(dev, base_qpn, cnt); in mlx4_qp_release_range()
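
mlx4_qp_reserve_range() and mlx4_qp_release_range() are the exported wrappers around the two __mlx4_* implementations above; on a multi-function device they forward the request to the master via mlx4_cmd_imm()/mlx4_cmd(). A minimal sketch, assuming the post-4.14 prototype in include/linux/mlx4/device.h (int cnt, int align, int *base, u8 flags, u8 usage); check the exact trailing arguments against the kernel at hand:

#include <linux/mlx4/device.h>

/* Hypothetical helper: reserve an aligned block of QP numbers and give it back.
 * A real consumer would hold the range and release it only on teardown. */
static int example_qp_range(struct mlx4_dev *dev)
{
	int base_qpn;
	int err;

	/* 8 consecutive QPNs, aligned to 8, no special allocation flags */
	err = mlx4_qp_reserve_range(dev, 8, 8, &base_qpn, 0,
				    MLX4_RES_USAGE_DRIVER);
	if (err)
		return err;

	/* ... use base_qpn .. base_qpn + 7 ... */

	mlx4_qp_release_range(dev, base_qpn, 8);
	return 0;
}
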
313 int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn) in __mlx4_qp_alloc_icm() argument
315 struct mlx4_priv *priv = mlx4_priv(dev); in __mlx4_qp_alloc_icm()
319 err = mlx4_table_get(dev, &qp_table->qp_table, qpn); in __mlx4_qp_alloc_icm()
323 err = mlx4_table_get(dev, &qp_table->auxc_table, qpn); in __mlx4_qp_alloc_icm()
327 err = mlx4_table_get(dev, &qp_table->altc_table, qpn); in __mlx4_qp_alloc_icm()
331 err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn); in __mlx4_qp_alloc_icm()
335 err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn); in __mlx4_qp_alloc_icm()
342 mlx4_table_put(dev, &qp_table->rdmarc_table, qpn); in __mlx4_qp_alloc_icm()
345 mlx4_table_put(dev, &qp_table->altc_table, qpn); in __mlx4_qp_alloc_icm()
348 mlx4_table_put(dev, &qp_table->auxc_table, qpn); in __mlx4_qp_alloc_icm()
351 mlx4_table_put(dev, &qp_table->qp_table, qpn); in __mlx4_qp_alloc_icm()
357 static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn) in mlx4_qp_alloc_icm() argument
361 if (mlx4_is_mfunc(dev)) { in mlx4_qp_alloc_icm()
363 return mlx4_cmd_imm(dev, param, &param, RES_QP, RES_OP_MAP_ICM, in mlx4_qp_alloc_icm()
367 return __mlx4_qp_alloc_icm(dev, qpn); in mlx4_qp_alloc_icm()
370 void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) in __mlx4_qp_free_icm() argument
372 struct mlx4_priv *priv = mlx4_priv(dev); in __mlx4_qp_free_icm()
375 mlx4_table_put(dev, &qp_table->cmpt_table, qpn); in __mlx4_qp_free_icm()
376 mlx4_table_put(dev, &qp_table->rdmarc_table, qpn); in __mlx4_qp_free_icm()
377 mlx4_table_put(dev, &qp_table->altc_table, qpn); in __mlx4_qp_free_icm()
378 mlx4_table_put(dev, &qp_table->auxc_table, qpn); in __mlx4_qp_free_icm()
379 mlx4_table_put(dev, &qp_table->qp_table, qpn); in __mlx4_qp_free_icm()
382 static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) in mlx4_qp_free_icm() argument
386 if (mlx4_is_mfunc(dev)) { in mlx4_qp_free_icm()
388 if (mlx4_cmd(dev, in_param, RES_QP, RES_OP_MAP_ICM, in mlx4_qp_free_icm()
391 mlx4_warn(dev, "Failed to free icm of qp:%d\n", qpn); in mlx4_qp_free_icm()
393 __mlx4_qp_free_icm(dev, qpn); in mlx4_qp_free_icm()
396 struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn) in mlx4_qp_lookup() argument
398 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; in mlx4_qp_lookup()
403 qp = __mlx4_qp_lookup(dev, qpn); in mlx4_qp_lookup()
409 int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp) in mlx4_qp_alloc() argument
411 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_qp_alloc()
420 err = mlx4_qp_alloc_icm(dev, qpn); in mlx4_qp_alloc()
425 err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & in mlx4_qp_alloc()
426 (dev->caps.num_qps - 1), qp); in mlx4_qp_alloc()
437 mlx4_qp_free_icm(dev, qpn); in mlx4_qp_alloc()
443 int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, in mlx4_update_qp() argument
456 mailbox = mlx4_alloc_cmd_mailbox(dev); in mlx4_update_qp()
468 if (!(dev->caps.flags2 in mlx4_update_qp()
470 mlx4_warn(dev, in mlx4_update_qp()
496 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP)) { in mlx4_update_qp()
497 mlx4_warn(dev, "Granular QoS per VF is not enabled\n"); in mlx4_update_qp()
509 err = mlx4_cmd(dev, mailbox->dma, qpn & 0xffffff, 0, in mlx4_update_qp()
513 mlx4_free_cmd_mailbox(dev, mailbox); in mlx4_update_qp()
518 void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp) in mlx4_qp_remove() argument
520 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; in mlx4_qp_remove()
524 radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1)); in mlx4_qp_remove()
529 void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp) in mlx4_qp_free() argument
534 mlx4_qp_free_icm(dev, qp->qpn); in mlx4_qp_free()
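
mlx4_qp_alloc() pins the ICM backing for the QPN and inserts the QP into the radix tree keyed by qpn & (num_qps - 1); mlx4_qp_remove() and mlx4_qp_free() undo those two steps. A minimal lifecycle sketch, assuming a QPN already reserved with mlx4_qp_reserve_range() and the struct mlx4_qp definition in include/linux/mlx4/qp.h; both helpers are hypothetical:

#include <linux/printk.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/qp.h>

/* Hypothetical async-event callback; mlx4_qp_event() invokes it via qp->event. */
static void example_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
{
	pr_info("QP 0x%x got event %d\n", qp->qpn, type);
}

/* Hypothetical helper: allocate, use and tear down one QP at qpn. */
static int example_qp_lifecycle(struct mlx4_dev *dev, int qpn,
				struct mlx4_qp *qp)
{
	int err;

	err = mlx4_qp_alloc(dev, qpn, qp);	/* ICM get + radix tree insert */
	if (err)
		return err;

	qp->event = example_qp_event;		/* called from mlx4_qp_event() */

	/* ... bring the QP up with mlx4_qp_modify()/mlx4_qp_to_ready() ... */

	mlx4_qp_remove(dev, qp);		/* drop it from the radix tree */
	mlx4_qp_free(dev, qp);			/* wait for references, put ICM */
	return 0;
}
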
538 static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn) in mlx4_CONF_SPECIAL_QP() argument
540 return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP, in mlx4_CONF_SPECIAL_QP()
548 static int mlx4_create_zones(struct mlx4_dev *dev, in mlx4_create_zones() argument
555 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; in mlx4_create_zones()
574 err = mlx4_bitmap_init(*bitmap + MLX4_QP_TABLE_ZONE_GENERAL, dev->caps.num_qps, in mlx4_create_zones()
594 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], in mlx4_create_zones()
611 last_offset = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; in mlx4_create_zones()
739 static void mlx4_cleanup_qp_zones(struct mlx4_dev *dev) in mlx4_cleanup_qp_zones() argument
741 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; in mlx4_cleanup_qp_zones()
766 int mlx4_init_qp_table(struct mlx4_dev *dev) in mlx4_init_qp_table() argument
768 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; in mlx4_init_qp_table()
775 u32 max_table_offset = dev->caps.dmfs_high_rate_qpn_base + in mlx4_init_qp_table()
776 dev->caps.dmfs_high_rate_qpn_range; in mlx4_init_qp_table()
779 INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC); in mlx4_init_qp_table()
780 if (mlx4_is_slave(dev)) in mlx4_init_qp_table()
791 fixed_reserved_from_bot_rv += dev->caps.reserved_qps_cnt[k]; in mlx4_init_qp_table()
799 dev->phys_caps.base_sqpn = ALIGN(bottom_reserved_for_rss_bitmap, 8); in mlx4_init_qp_table()
804 int last_base = dev->caps.num_qps; in mlx4_init_qp_table()
811 if (dev->caps.reserved_qps_cnt[sort[j]] > in mlx4_init_qp_table()
812 dev->caps.reserved_qps_cnt[sort[j - 1]]) in mlx4_init_qp_table()
818 last_base -= dev->caps.reserved_qps_cnt[sort[i]]; in mlx4_init_qp_table()
819 dev->caps.reserved_qps_base[sort[i]] = last_base; in mlx4_init_qp_table()
821 dev->caps.reserved_qps_cnt[sort[i]]; in mlx4_init_qp_table()
835 reserved_from_bot = mlx4_num_reserved_sqps(dev); in mlx4_init_qp_table()
836 if (reserved_from_bot + reserved_from_top > dev->caps.num_qps) { in mlx4_init_qp_table()
837 mlx4_err(dev, "Number of reserved QPs is higher than number of QPs\n"); in mlx4_init_qp_table()
841 err = mlx4_create_zones(dev, reserved_from_bot, reserved_from_bot, in mlx4_init_qp_table()
849 if (mlx4_is_mfunc(dev)) { in mlx4_init_qp_table()
851 dev->phys_caps.base_proxy_sqpn = dev->phys_caps.base_sqpn + 8; in mlx4_init_qp_table()
852 dev->phys_caps.base_tunnel_sqpn = dev->phys_caps.base_sqpn + 8 + 8 * MLX4_MFUNC_MAX; in mlx4_init_qp_table()
856 dev->caps.spec_qps = kcalloc(dev->caps.num_ports, in mlx4_init_qp_table()
857 sizeof(*dev->caps.spec_qps), in mlx4_init_qp_table()
859 if (!dev->caps.spec_qps) { in mlx4_init_qp_table()
864 for (k = 0; k < dev->caps.num_ports; k++) { in mlx4_init_qp_table()
865 dev->caps.spec_qps[k].qp0_proxy = dev->phys_caps.base_proxy_sqpn + in mlx4_init_qp_table()
866 8 * mlx4_master_func_num(dev) + k; in mlx4_init_qp_table()
867 dev->caps.spec_qps[k].qp0_tunnel = dev->caps.spec_qps[k].qp0_proxy + 8 * MLX4_MFUNC_MAX; in mlx4_init_qp_table()
868 dev->caps.spec_qps[k].qp1_proxy = dev->phys_caps.base_proxy_sqpn + in mlx4_init_qp_table()
869 8 * mlx4_master_func_num(dev) + MLX4_MAX_PORTS + k; in mlx4_init_qp_table()
870 dev->caps.spec_qps[k].qp1_tunnel = dev->caps.spec_qps[k].qp1_proxy + 8 * MLX4_MFUNC_MAX; in mlx4_init_qp_table()
875 err = mlx4_CONF_SPECIAL_QP(dev, dev->phys_caps.base_sqpn); in mlx4_init_qp_table()
882 kfree(dev->caps.spec_qps); in mlx4_init_qp_table()
883 dev->caps.spec_qps = NULL; in mlx4_init_qp_table()
884 mlx4_cleanup_qp_zones(dev); in mlx4_init_qp_table()
888 void mlx4_cleanup_qp_table(struct mlx4_dev *dev) in mlx4_cleanup_qp_table() argument
890 if (mlx4_is_slave(dev)) in mlx4_cleanup_qp_table()
893 mlx4_CONF_SPECIAL_QP(dev, 0); in mlx4_cleanup_qp_table()
895 mlx4_cleanup_qp_zones(dev); in mlx4_cleanup_qp_table()
898 int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp, in mlx4_qp_query() argument
904 mailbox = mlx4_alloc_cmd_mailbox(dev); in mlx4_qp_query()
908 err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0, in mlx4_qp_query()
914 mlx4_free_cmd_mailbox(dev, mailbox); in mlx4_qp_query()
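
mlx4_qp_query() reads the current QP context back from firmware through a command mailbox (the alloc/box/free sequence in the listing above). A minimal read-back sketch, assuming the prototype in include/linux/mlx4/qp.h:

#include <linux/mlx4/device.h>
#include <linux/mlx4/qp.h>

/* Hypothetical helper: fetch and inspect the firmware's view of a QP. */
static int example_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
	struct mlx4_qp_context context;
	int err;

	err = mlx4_qp_query(dev, qp, &context);
	if (err)
		return err;

	/* ... inspect fields of interest, e.g. context.remote_qpn ... */
	return 0;
}
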
919 int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt, in mlx4_qp_to_ready() argument
937 err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1], in mlx4_qp_to_ready()
940 mlx4_err(dev, "Failed to bring QP to state: %d with error: %d\n", in mlx4_qp_to_ready()
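
mlx4_qp_to_ready() chains mlx4_qp_modify() calls to walk a QP from reset to RTS, updating *qp_state after each successful hop and logging which transition failed. A minimal caller sketch, assuming the prototype in include/linux/mlx4/qp.h; the helper name is illustrative:

#include <linux/mlx4/device.h>
#include <linux/mlx4/qp.h>

/* Hypothetical helper: drive a freshly allocated QP all the way to RTS. */
static int example_qp_bring_up(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
			       struct mlx4_qp *qp,
			       struct mlx4_qp_context *context)
{
	enum mlx4_qp_state qp_state = MLX4_QP_STATE_RST;
	int err;

	err = mlx4_qp_to_ready(dev, mtt, context, qp, &qp_state);
	if (err)
		return err;	/* qp_state holds the last state actually reached */

	return 0;	/* qp_state == MLX4_QP_STATE_RTS on success */
}
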
952 u16 mlx4_qp_roce_entropy(struct mlx4_dev *dev, u32 qpn) in mlx4_qp_roce_entropy() argument
959 err = mlx4_qp_query(dev, &qp, &context); in mlx4_qp_roce_entropy()