/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/steering/ |
dr_domain.c
    24   dmn->ptrn_mgr = mlx5dr_ptrn_mgr_create(dmn);       in dr_domain_init_modify_header_resources()
    31   dmn->arg_mgr = mlx5dr_arg_mgr_create(dmn);          in dr_domain_init_modify_header_resources()
   173   ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn);     in dr_domain_init_resources()
   179   dmn->uar = mlx5_get_uars_page(dmn->mdev);           in dr_domain_init_resources()
   211   mlx5_put_uars_page(dmn->mdev, dmn->uar);            in dr_domain_init_resources()
   213   mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);          in dr_domain_init_resources()
   220   mlx5dr_send_ring_free(dmn, dmn->send_ring);         in dr_domain_uninit_resources()
   223   mlx5_put_uars_page(dmn->mdev, dmn->uar);            in dr_domain_uninit_resources()
   224   mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);          in dr_domain_uninit_resources()
   468   dmn = kzalloc(sizeof(*dmn), GFP_KERNEL);            in mlx5dr_domain_create()
   [all …]
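The dr_domain.c hits show the usual kernel resource-lifecycle shape: dr_domain_init_resources() acquires the PD, the UAR page and the send ring in order and unwinds on failure, while dr_domain_uninit_resources() releases them in reverse. Below is a minimal, self-contained sketch of that pattern only; the struct and function names are illustrative stand-ins, not the driver's actual code.

#include <stdlib.h>

/* Stand-in for struct mlx5dr_domain; only the fields relevant to the
 * pattern are sketched here, the real structure is far larger. */
struct domain {
	void *pd;        /* plays the role of dmn->pdn       */
	void *uar;       /* plays the role of dmn->uar       */
	void *send_ring; /* plays the role of dmn->send_ring */
};

static int domain_init_resources(struct domain *dmn)
{
	dmn->pd = malloc(16);            /* cf. mlx5_core_alloc_pd()     */
	if (!dmn->pd)
		return -1;

	dmn->uar = malloc(16);           /* cf. mlx5_get_uars_page()     */
	if (!dmn->uar)
		goto free_pd;

	dmn->send_ring = malloc(16);     /* cf. mlx5dr_send_ring_alloc() */
	if (!dmn->send_ring)
		goto put_uar;

	return 0;

put_uar:
	free(dmn->uar);                  /* cf. mlx5_put_uars_page()     */
free_pd:
	free(dmn->pd);                   /* cf. mlx5_core_dealloc_pd()   */
	return -1;
}

static void domain_uninit_resources(struct domain *dmn)
{
	/* release in the reverse order of acquisition */
	free(dmn->send_ring);            /* cf. mlx5dr_send_ring_free()  */
	free(dmn->uar);
	free(dmn->pd);
}

int main(void)
{
	struct domain dmn = { 0 };

	if (!domain_init_resources(&dmn))
		domain_uninit_resources(&dmn);
	return 0;
}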
|
dr_fw.c
    21   ft_attr.level = dmn->info.caps.max_ft_level - 1;    in mlx5dr_fw_create_recalc_cs_ft()
    24   ret = mlx5dr_cmd_create_flow_table(dmn->mdev,       in mlx5dr_fw_create_recalc_cs_ft()
    33   ret = mlx5dr_cmd_create_empty_flow_group(dmn->mdev, in mlx5dr_fw_create_recalc_cs_ft()
    54   ret = mlx5dr_cmd_set_fte_modify_and_vport(dmn->mdev, in mlx5dr_fw_create_recalc_cs_ft()
    73   mlx5dr_cmd_destroy_flow_group(dmn->mdev,             in mlx5dr_fw_create_recalc_cs_ft()
    86   mlx5dr_cmd_del_flow_table_entry(dmn->mdev,           in mlx5dr_fw_destroy_recalc_cs_ft()
    90   mlx5dr_cmd_destroy_flow_group(dmn->mdev,             in mlx5dr_fw_destroy_recalc_cs_ft()
    94   mlx5dr_cmd_destroy_flow_table(dmn->mdev,             in mlx5dr_fw_destroy_recalc_cs_ft()
   157   mlx5dr_cmd_destroy_flow_table(dmn->mdev, *tbl_id,    in mlx5dr_fw_create_md_tbl()
   166   mlx5dr_cmd_destroy_flow_group(dmn->mdev,             in mlx5dr_fw_destroy_md_tbl()
   [all …]
|
dr_table.c
    54   mlx5dr_domain_lock(tbl->dmn);       in mlx5dr_table_set_miss_action()
    83   mlx5dr_domain_unlock(tbl->dmn);     in mlx5dr_table_set_miss_action()
   100   mlx5dr_domain_lock(tbl->dmn);       in dr_table_uninit()
   102   switch (tbl->dmn->type) {           in dr_table_uninit()
   117   mlx5dr_domain_unlock(tbl->dmn);     in dr_table_uninit()
   184   mlx5dr_domain_lock(tbl->dmn);       in dr_table_init()
   186   switch (tbl->dmn->type) {           in dr_table_init()
   208   mlx5dr_domain_unlock(tbl->dmn);     in dr_table_init()
   256   refcount_inc(&dmn->refcount);       in mlx5dr_table_create()
   262   tbl->dmn = dmn;                     in mlx5dr_table_create()
   [all …]
|
dr_action.c
   676   struct mlx5dr_domain *dmn = matcher->tbl->dmn;   in dr_action_get_dest_fw_tbl_addr()  [local]
   708   if (dest_tbl->tbl->dmn != dmn) {                 in dr_action_get_dest_sw_tbl_addr()
  1021   action->dest_tbl->fw_tbl.dmn = dmn;              in mlx5dr_action_create_dest_table_num()
  1144   action->range->dmn = dmn;                        in mlx5dr_action_create_dest_match_range()
  1254   action->dest_tbl->fw_tbl.dmn = dmn;              in mlx5dr_action_create_mult_dest_tbl()
  1285   action->dest_tbl->fw_tbl.dmn = dmn;              in mlx5dr_action_create_dest_flow_fw_table()
  1335   action->sampler->dmn = dmn;                      in mlx5dr_action_create_flow_sampler()
  1449   action->rewrite->dmn = dmn;                      in dr_action_create_reformat_action()
  1545   action->reformat->dmn = dmn;                     in mlx5dr_action_create_packet_reformat()
  2056   action->rewrite->dmn = dmn;                      in mlx5dr_action_create_modify_header()
   [all …]
|
dr_matcher.c
   117   struct mlx5dr_domain *dmn)                       in dr_mask_is_tnl_vxlan_gpe()  [argument]
   175   struct mlx5dr_domain *dmn)                       in dr_mask_is_tnl_gtpu()  [argument]
   408   struct mlx5dr_domain *dmn = matcher->tbl->dmn;   in dr_matcher_set_ste_builders()  [local]
   802   struct mlx5dr_domain *dmn = matcher->tbl->dmn;   in dr_matcher_uninit()  [local]
   804   switch (dmn->type) {                             in dr_matcher_uninit()
   823   struct mlx5dr_domain *dmn = matcher->tbl->dmn;   in dr_matcher_set_all_ste_builders()  [local]
   841   struct mlx5dr_domain *dmn = matcher->tbl->dmn;   in dr_matcher_init_nic()  [local]
   900   struct mlx5dr_domain *dmn = matcher->tbl->dmn;   in dr_matcher_copy_param()  [local]
   929   mlx5dr_dbg(dmn,                                  in dr_matcher_copy_param()
   945   struct mlx5dr_domain *dmn = tbl->dmn;            in dr_matcher_init()  [local]
   [all …]
|
dr_send.c
  1232   dmn->send_ring->cq = dr_create_cq(dmn->mdev, dmn->uar, cq_size);   in mlx5dr_send_ring_alloc()
  1250   dmn->send_ring->qp = dr_create_rc_qp(dmn->mdev, &init_attr);       in mlx5dr_send_ring_alloc()
  1257   dmn->send_ring->cq->qp = dmn->send_ring->qp;                       in mlx5dr_send_ring_alloc()
  1263   dmn->send_ring->signal_th = dmn->info.max_send_wr /                in mlx5dr_send_ring_alloc()
  1285   dmn->send_ring->mr = dr_reg_mr(dmn->mdev,                          in mlx5dr_send_ring_alloc()
  1286   dmn->pdn, dmn->send_ring->buf, size);                              in mlx5dr_send_ring_alloc()
  1299   dmn->send_ring->sync_mr = dr_reg_mr(dmn->mdev,                     in mlx5dr_send_ring_alloc()
  1300   dmn->pdn, dmn->send_ring->sync_buff,                               in mlx5dr_send_ring_alloc()
  1312   dr_dereg_mr(dmn->mdev, dmn->send_ring->mr);                        in mlx5dr_send_ring_alloc()
  1316   dr_destroy_qp(dmn->mdev, dmn->send_ring->qp);                      in mlx5dr_send_ring_alloc()
   [all …]
|
dr_definer.c
    40   dr_definer_find_obj(struct mlx5dr_domain *dmn, u16 format_id,   in dr_definer_find_obj()  [argument]
    46   xa_for_each(&dmn->definers_xa, id, definer_obj) {                in dr_definer_find_obj()
    67   ret = mlx5dr_cmd_create_definer(dmn->mdev,                       in dr_definer_create_obj()
   100   mlx5dr_cmd_destroy_definer(dmn->mdev, definer_obj->id);          in dr_definer_create_obj()
   107   static void dr_definer_destroy_obj(struct mlx5dr_domain *dmn,    in dr_definer_destroy_obj()  [argument]
   110   mlx5dr_cmd_destroy_definer(dmn->mdev, definer_obj->id);          in dr_definer_destroy_obj()
   111   xa_erase(&dmn->definers_xa, definer_obj->id);                    in dr_definer_destroy_obj()
   125   definer_obj = dr_definer_create_obj(dmn, format_id,              in mlx5dr_definer_get()
   143   definer_obj = xa_load(&dmn->definers_xa, definer_id);            in mlx5dr_definer_put()
   145   mlx5dr_err(dmn, "Definer ID %d not found\n", definer_id);        in mlx5dr_definer_put()
   [all …]
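The dr_definer.c hits suggest a small cache of definer objects keyed in an xarray (dmn->definers_xa): look up an existing object by format_id, create one through firmware if none exists, and erase it from the xarray when the last reference is put. Below is a hedged kernel-style sketch of just the xarray/refcount bookkeeping; the struct layout, field names and refcounting are assumptions rather than the driver's actual definitions, and the firmware create/destroy commands are omitted.

#include <linux/types.h>
#include <linux/xarray.h>
#include <linux/refcount.h>
#include <linux/slab.h>

/* Hypothetical cached object; the real definer object layout is not
 * shown in the hits above. */
struct definer_obj {
	u32 id;
	u16 format_id;
	refcount_t refcount;
};

/* Find an already-created definer by format_id, as dr_definer_find_obj()
 * appears to do with xa_for_each() over dmn->definers_xa. */
static struct definer_obj *definer_find(struct xarray *definers_xa, u16 format_id)
{
	struct definer_obj *obj;
	unsigned long id;

	xa_for_each(definers_xa, id, obj) {
		if (obj->format_id == format_id) {
			refcount_inc(&obj->refcount);
			return obj;
		}
	}
	return NULL;
}

/* Drop a reference; remove from the xarray and free on the last put,
 * mirroring the xa_load()/xa_erase() calls seen in dr_definer.c. */
static void definer_put(struct xarray *definers_xa, u32 definer_id)
{
	struct definer_obj *obj;

	obj = xa_load(definers_xa, definer_id);
	if (!obj)
		return;

	if (refcount_dec_and_test(&obj->refcount)) {
		xa_erase(definers_xa, obj->id);
		kfree(obj);
	}
}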
|
dr_rule.c
    62   struct mlx5dr_domain *dmn = matcher->tbl->dmn;         in dr_rule_create_collision_htbl()  [local]
   198   struct mlx5dr_domain *dmn = matcher->tbl->dmn;         in dr_rule_rehash_handle_collision()  [local]
   255   struct mlx5dr_domain *dmn = matcher->tbl->dmn;         in dr_rule_rehash_copy_ste()  [local]
   403   struct mlx5dr_domain *dmn = matcher->tbl->dmn;         in dr_rule_rehash_htbl()  [local]
   521   struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;   in dr_rule_rehash()  [local]
   542   struct mlx5dr_domain *dmn = matcher->tbl->dmn;         in dr_rule_handle_collision()  [local]
   722   struct mlx5dr_domain *dmn = matcher->tbl->dmn;         in dr_rule_handle_action_stes()  [local]
   788   struct mlx5dr_domain *dmn = matcher->tbl->dmn;         in dr_rule_handle_empty_entry()  [local]
   841   struct mlx5dr_domain *dmn = matcher->tbl->dmn;         in dr_rule_handle_ste_branch()  [local]
  1122   struct mlx5dr_domain *dmn = matcher->tbl->dmn;         in dr_rule_create_rule_nic()  [local]
   [all …]
|
dr_arg.c
    21   struct mlx5dr_domain *dmn;                                  [member]
    27   struct mlx5dr_domain *dmn;                                  [member]
    60   ret = mlx5dr_cmd_create_modify_header_arg(pool->dmn->mdev,  in dr_arg_pool_alloc_objs()
    62   pool->dmn->pdn,                                             in dr_arg_pool_alloc_objs()
   139   pool->dmn = dmn;                                            in dr_arg_pool_create()
   208   ret = mlx5dr_send_postsend_args(mgr->dmn,                   in mlx5dr_arg_get_obj()
   212   mlx5dr_err(mgr->dmn, "Failed writing args object\n");       in mlx5dr_arg_get_obj()
   230   mlx5dr_arg_mgr_create(struct mlx5dr_domain *dmn)            in mlx5dr_arg_mgr_create()  [argument]
   235   if (!mlx5dr_domain_is_support_ptrn_arg(dmn))                in mlx5dr_arg_mgr_create()
   242   pool_mgr->dmn = dmn;                                        in mlx5dr_arg_mgr_create()
   [all …]
|
dr_dbg.c
    73   struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;   in mlx5dr_dbg_rule_add()  [local]
    82   struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;   in mlx5dr_dbg_rule_del()  [local]
   638   domain_id, dmn->type, dmn->info.caps.gvmi,             in dr_dump_domain()
   639   dmn->info.supp_sw_steering,                            in dr_dump_domain()
   643   pci_name(dmn->mdev->pdev),                             in dr_dump_domain()
   653   if (dmn->info.supp_sw_steering) {                      in dr_dump_domain()
   668   mlx5dr_domain_lock(dmn);                               in dr_dump_domain_all()
   670   ret = dr_dump_domain(file, dmn);                       in dr_dump_domain_all()
   681   mlx5dr_domain_unlock(dmn);                             in dr_dump_domain_all()
   705   dmn->dump_info.fdb_debugfs =                           in mlx5dr_dbg_init_dump()
   [all …]
|
dr_icm_pool.c
    20   struct mlx5dr_domain *dmn;                            [member]
    48   struct mlx5dr_domain *dmn;                            [member]
   113   struct mlx5_core_dev *mdev = pool->dmn->mdev;         in dr_icm_pool_mr_create()
   123   icm_mr->dmn = pool->dmn;                              in dr_icm_pool_mr_create()
   158   err = dr_icm_create_dm_mkey(mdev, pool->dmn->pdn,     in dr_icm_pool_mr_create()
   190   struct mlx5_core_dev *mdev = icm_mr->dmn->mdev;       in dr_icm_pool_mr_destroy()
   291   pool->dmn->num_buddies[pool->icm_type]++;             in dr_icm_buddy_create()
   315   buddy->pool->dmn->num_buddies[icm_type]--;            in dr_icm_buddy_destroy()
   405   mlx5dr_err(pool->dmn,                                 in dr_icm_handle_buddies_get_mem()
   415   mlx5dr_err(pool->dmn,                                 in dr_icm_handle_buddies_get_mem()
   [all …]
|
dr_types.h
    26   #define mlx5dr_err(dmn, arg...) mlx5_core_err((dmn)->mdev, ##arg)     [macro argument]
    27   #define mlx5dr_info(dmn, arg...) mlx5_core_info((dmn)->mdev, ##arg)   [macro argument]
    28   #define mlx5dr_dbg(dmn, arg...) mlx5_core_dbg((dmn)->mdev, ##arg)     [macro argument]
   218   struct mlx5dr_domain *dmn;                                            [member]
   971   struct mlx5dr_domain *dmn;                                            [member]
  1037   struct mlx5dr_domain *dmn;                                            [member]
  1051   struct mlx5dr_domain *dmn;                                            [member]
  1059   struct mlx5dr_domain *dmn;                                            [member]
  1083   struct mlx5dr_domain *dmn;                                            [member]
  1097   struct mlx5dr_domain *dmn;                                            [member]
   [all …]
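The three macros at lines 26-28 of dr_types.h simply forward to the corresponding mlx5_core_* logging helpers, passing the domain's mdev. A typical call site, matching the message string visible in the dr_ste_v0.c/dr_ste_v1.c hits below (the final argument name here is illustrative, not taken from the source):

	mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n", vport);
	/* expands to: mlx5_core_err((dmn)->mdev, "Vport 0x%x is disabled or invalid\n", vport); */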
|
mlx5dr.h
    50   void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
    86   mlx5dr_action_create_dest_table_num(struct mlx5dr_domain *dmn, u32 table_num);
   101   mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
   112   mlx5dr_action_create_flow_sampler(struct mlx5dr_domain *dmn, u32 sampler_id);
   118   mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn,
   137   mlx5dr_action_create_aso(struct mlx5dr_domain *dmn,
   145   mlx5dr_action_create_dest_match_range(struct mlx5dr_domain *dmn,
   156   int mlx5dr_definer_get(struct mlx5dr_domain *dmn, u16 format_id,
   159   void mlx5dr_definer_put(struct mlx5dr_domain *dmn, u32 definer_id);
|
dr_ptrn.c
    16   struct mlx5dr_domain *dmn;                                                       [member]
    94   mgr->dmn->info.caps.hdr_modify_pattern_icm_addr) /                               in dr_ptrn_alloc_pattern()
   165   if (mlx5dr_send_postsend_pattern(mgr->dmn, pattern->chunk,                       in mlx5dr_ptrn_cache_get_pattern()
   197   struct mlx5dr_ptrn_mgr *mlx5dr_ptrn_mgr_create(struct mlx5dr_domain *dmn)        in mlx5dr_ptrn_mgr_create()  [argument]
   201   if (!mlx5dr_domain_is_support_ptrn_arg(dmn))                                     in mlx5dr_ptrn_mgr_create()
   208   mgr->dmn = dmn;                                                                  in mlx5dr_ptrn_mgr_create()
   209   mgr->ptrn_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_MODIFY_HDR_PTRN);   in mlx5dr_ptrn_mgr_create()
   211   mlx5dr_err(dmn, "Couldn't get modify-header-pattern memory\n");                  in mlx5dr_ptrn_mgr_create()
|
dr_ste.c
   313   struct mlx5dr_domain *dmn = matcher->tbl->dmn;       in mlx5dr_ste_free()  [local]
   452   struct mlx5dr_domain *dmn = matcher->tbl->dmn;       in mlx5dr_ste_create_next_htbl()  [local]
   640   struct mlx5dr_domain *dmn = action->rewrite->dmn;    in dr_ste_alloc_modify_hdr_chunk()  [local]
   676   struct mlx5dr_domain *dmn = action->rewrite->dmn;    in mlx5dr_ste_alloc_modify_hdr()  [local]
   686   struct mlx5dr_domain *dmn = action->rewrite->dmn;    in mlx5dr_ste_free_modify_hdr()  [local]
   699   mlx5dr_err(dmn,                                      in dr_ste_build_pre_check_spec()
   705   mlx5dr_err(dmn,                                      in dr_ste_build_pre_check_spec()
   723   mlx5dr_err(dmn,                                      in mlx5dr_ste_build_pre_check()
   729   mlx5dr_err(dmn,                                      in mlx5dr_ste_build_pre_check()
   753   struct mlx5dr_domain *dmn = matcher->tbl->dmn;       in mlx5dr_ste_build_ste_arr()  [local]
   [all …]
|
dr_dbg.h
    10   void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn);
    11   void mlx5dr_dbg_uninit_dump(struct mlx5dr_domain *dmn);
|
dr_ste_v1.h
    20   void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, u8 *action_type_set,
    23   void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, u8 *action_type_set,
|
dr_ste_v1.c
   623   void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,       in dr_ste_v1_set_actions_tx()  [argument]
   779   void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,       in dr_ste_v1_set_actions_rx()  [argument]
  1984   struct mlx5dr_domain *dmn = sb->dmn;                           in dr_ste_v1_build_src_gvmi_qpn_tag()  [local]
  1992   peer = xa_load(&dmn->peer_dmn_xa, id);                         in dr_ste_v1_build_src_gvmi_qpn_tag()
  1994   if (id == dmn->info.caps.gvmi)                                 in dr_ste_v1_build_src_gvmi_qpn_tag()
  1995   vport_dmn = dmn;                                               in dr_ste_v1_build_src_gvmi_qpn_tag()
  2003   vport_dmn = dmn;                                               in dr_ste_v1_build_src_gvmi_qpn_tag()
  2011   mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n",         in dr_ste_v1_build_src_gvmi_qpn_tag()
  2237   ptrn_mgr = action->rewrite->dmn->ptrn_mgr;                     in dr_ste_v1_alloc_modify_hdr_ptrn_arg()
  2262   mlx5dr_arg_put_obj(action->rewrite->dmn->arg_mgr,              in dr_ste_v1_alloc_modify_hdr_ptrn_arg()
   [all …]
|
dr_ste.h
   163   void (*set_actions_rx)(struct mlx5dr_domain *dmn,
   169   void (*set_actions_tx)(struct mlx5dr_domain *dmn,
|
dr_ste_v0.c
   409   dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,         in dr_ste_v0_set_actions_tx()  [argument]
   467   if (MLX5_CAP_GEN(dmn->mdev, prio_tag_required))             in dr_ste_v0_set_actions_tx()
   479   dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,         in dr_ste_v0_set_actions_rx()  [argument]
  1652   struct mlx5dr_domain *dmn = sb->dmn;                        in dr_ste_v0_build_src_gvmi_qpn_tag()  [local]
  1661   peer = xa_load(&dmn->peer_dmn_xa, id);                      in dr_ste_v0_build_src_gvmi_qpn_tag()
  1663   if (id == dmn->info.caps.gvmi)                              in dr_ste_v0_build_src_gvmi_qpn_tag()
  1664   vport_dmn = dmn;                                            in dr_ste_v0_build_src_gvmi_qpn_tag()
  1672   vport_dmn = dmn;                                            in dr_ste_v0_build_src_gvmi_qpn_tag()
  1680   mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n",      in dr_ste_v0_build_src_gvmi_qpn_tag()
|
/openbmc/linux/drivers/powercap/ |
intel_rapl_common.c
  1292   int dmn, prim;                                      in rapl_update_domain_data()  [local]
  1295   for (dmn = 0; dmn < rp->nr_domains; dmn++) {        in rapl_update_domain_data()
  1297   rp->domains[dmn].name);                             in rapl_update_domain_data()
  1302   if (!rapl_read_data_raw(&rp->domains[dmn], prim,    in rapl_update_domain_data()
  1304   rp->domains[dmn].rdd.primitives[prim] = val;        in rapl_update_domain_data()
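The intel_rapl_common.c hits come from rapl_update_domain_data(), which walks every RAPL domain of a package and, for each primitive, reads the raw counter and caches it in rdd.primitives[]. Below is a self-contained sketch of that double loop with simplified stand-in structures; the real rapl_domain / rapl_domain_data layouts and rapl_read_data_raw() are richer than shown here.

#include <stdio.h>

#define NR_DOMAINS    2
#define NR_PRIMITIVES 3

/* Simplified stand-ins for the RAPL domain structures. */
struct domain_data {
	unsigned long long primitives[NR_PRIMITIVES];
};

struct domain {
	const char *name;
	struct domain_data rdd;
};

struct package {
	int nr_domains;
	struct domain domains[NR_DOMAINS];
};

/* Placeholder for rapl_read_data_raw(): returns 0 on success, fills *val. */
static int read_data_raw(struct domain *rd, int prim, unsigned long long *val)
{
	(void)rd;
	*val = 1000ULL * (prim + 1);   /* fake counter reading */
	return 0;
}

/* Mirrors the shape of rapl_update_domain_data(): for every domain,
 * read each primitive and cache the raw value in rdd.primitives[]. */
static void update_domain_data(struct package *rp)
{
	unsigned long long val;
	int dmn, prim;

	for (dmn = 0; dmn < rp->nr_domains; dmn++) {
		printf("update %s domain data\n", rp->domains[dmn].name);
		for (prim = 0; prim < NR_PRIMITIVES; prim++) {
			if (!read_data_raw(&rp->domains[dmn], prim, &val))
				rp->domains[dmn].rdd.primitives[prim] = val;
		}
	}
}

int main(void)
{
	struct package rp = {
		.nr_domains = NR_DOMAINS,
		.domains = { { .name = "package-0" }, { .name = "core" } },
	};

	update_domain_data(&rp);
	return 0;
}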
|