1 /* 2 * drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c 3 * Copyright (c) 2018 Mellanox Technologies. All rights reserved. 4 * Copyright (c) 2018 Jiri Pirko <jiri@mellanox.com> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. Neither the names of the copyright holders nor the names of its 15 * contributors may be used to endorse or promote products derived from 16 * this software without specific prior written permission. 17 * 18 * Alternatively, this software may be distributed under the terms of the 19 * GNU General Public License ("GPL") version 2 as published by the Free 20 * Software Foundation. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 23 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 * POSSIBILITY OF SUCH DAMAGE. 
 */

#include <linux/kernel.h>

#include "spectrum.h"
#include "spectrum_acl_tcam.h"
#include "core_acl_flex_actions.h"

/* Spectrum-2 TCAM private data: remembers the KVDL action-set range
 * allocated at init time so it can be freed at fini time.
 */
struct mlxsw_sp2_acl_tcam {
	u32 kvdl_index;		/* first index of the allocated KVDL range */
	unsigned int kvdl_count;	/* number of entries; set to max_regions */
};

/* Per-region private data: wraps the common circuit-TCAM region state. */
struct mlxsw_sp2_acl_tcam_region {
	struct mlxsw_sp_acl_ctcam_region cregion;
};

/* Per-chunk private data: wraps the common circuit-TCAM chunk state. */
struct mlxsw_sp2_acl_tcam_chunk {
	struct mlxsw_sp_acl_ctcam_chunk cchunk;
};

/* Per-entry private data: keeps the rule's flex action block around so
 * activity can later be queried from it (see entry_activity_get).
 */
struct mlxsw_sp2_acl_tcam_entry {
	struct mlxsw_sp_acl_ctcam_entry centry;
	struct mlxsw_afa_block *act_block;
};

/* Allocate one KVDL action set per possible region and program each of
 * them, via the PEFA register, with a dummy "continue" action set; then
 * report the base index to the device via PGCR.  Returns 0 on success
 * or a negative errno, with everything unwound on failure.
 */
static int mlxsw_sp2_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv,
				   struct mlxsw_sp_acl_tcam *_tcam)
{
	struct mlxsw_sp2_acl_tcam *tcam = priv;
	struct mlxsw_afa_block *afa_block;
	char pefa_pl[MLXSW_REG_PEFA_LEN];
	char pgcr_pl[MLXSW_REG_PGCR_LEN];
	char *enc_actions;
	int i;
	int err;

	/* One KVDL action-set entry is reserved for each possible region. */
	tcam->kvdl_count = _tcam->max_regions;
	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
				  tcam->kvdl_count, &tcam->kvdl_index);
	if (err)
		return err;

	/* Create flex action block, set default action (continue)
	 * but don't commit. We need just the current set encoding
	 * to be written using PEFA register to all indexes for all regions.
	 */
	afa_block = mlxsw_afa_block_create(mlxsw_sp->afa);
	if (!afa_block) {
		err = -ENOMEM;
		goto err_afa_block;
	}
	err = mlxsw_afa_block_continue(afa_block);
	if (WARN_ON(err))
		goto err_afa_block_continue;
	enc_actions = mlxsw_afa_block_cur_set(afa_block);

	/* Write the same encoded "continue" set into every reserved index. */
	for (i = 0; i < tcam->kvdl_count; i++) {
		mlxsw_reg_pefa_pack(pefa_pl, tcam->kvdl_index + i,
				    true, enc_actions);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
		if (err)
			goto err_pefa_write;
	}
	/* Publish the base index of the range to the device. */
	mlxsw_reg_pgcr_pack(pgcr_pl, tcam->kvdl_index);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pgcr), pgcr_pl);
	if (err)
		goto err_pgcr_write;

	/* The block was only needed for its encoding; it is not committed. */
	mlxsw_afa_block_destroy(afa_block);
	return 0;

err_pgcr_write:
err_pefa_write:
err_afa_block_continue:
	mlxsw_afa_block_destroy(afa_block);
err_afa_block:
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
			   tcam->kvdl_count, tcam->kvdl_index);
	return err;
}

/* Release the KVDL action-set range reserved by mlxsw_sp2_acl_tcam_init(). */
static void mlxsw_sp2_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
{
	struct mlxsw_sp2_acl_tcam *tcam = priv;

	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
			   tcam->kvdl_count, tcam->kvdl_index);
}

/* Initialize a region: first the algorithmic-TCAM part, then the
 * circuit-TCAM part layered on top of it.
 * NOTE(review): if ctcam_region_init() fails, nothing undoes
 * atcam_region_init() — presumably it needs no teardown here; confirm
 * against the atcam implementation.
 */
static int
mlxsw_sp2_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv,
			       struct mlxsw_sp_acl_tcam_region *_region)
{
	struct mlxsw_sp2_acl_tcam_region *region = region_priv;
	int err;

	err = mlxsw_sp_acl_atcam_region_init(mlxsw_sp, _region);
	if (err)
		return err;
	return mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, &region->cregion,
					      _region);
}

/* Tear down the circuit-TCAM part of the region. */
static void
mlxsw_sp2_acl_tcam_region_fini(struct mlxsw_sp *mlxsw_sp, void *region_priv)
{
	struct mlxsw_sp2_acl_tcam_region *region = region_priv;

	mlxsw_sp_acl_ctcam_region_fini(&region->cregion);
}

/* Associate the region with the algorithmic TCAM by its region id. */
static int
mlxsw_sp2_acl_tcam_region_associate(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_tcam_region *region)
{
	return mlxsw_sp_acl_atcam_region_associate(mlxsw_sp, region->id);
}

/* Delegate chunk setup to the common circuit-TCAM code. */
static void mlxsw_sp2_acl_tcam_chunk_init(void *region_priv, void *chunk_priv,
					  unsigned int priority)
{
	struct mlxsw_sp2_acl_tcam_region *region = region_priv;
	struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;

	mlxsw_sp_acl_ctcam_chunk_init(&region->cregion, &chunk->cchunk,
				      priority);
}

/* Delegate chunk teardown to the common circuit-TCAM code. */
static void mlxsw_sp2_acl_tcam_chunk_fini(void *chunk_priv)
{
	struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;

	mlxsw_sp_acl_ctcam_chunk_fini(&chunk->cchunk);
}

/* Insert a rule entry into the circuit TCAM.  The rule's action block is
 * stashed in the entry so its activity can be queried later.
 */
static int mlxsw_sp2_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
					void *region_priv, void *chunk_priv,
					void *entry_priv,
					struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp2_acl_tcam_region *region = region_priv;
	struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
	struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv;

	entry->act_block = rulei->act_block;
	return mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, &region->cregion,
					    &chunk->cchunk, &entry->centry,
					    rulei, true);
}

/* Remove a rule entry from the circuit TCAM. */
static void mlxsw_sp2_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
					 void *region_priv, void *chunk_priv,
					 void *entry_priv)
{
	struct mlxsw_sp2_acl_tcam_region *region = region_priv;
	struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
	struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv;

	mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, &region->cregion,
				     &chunk->cchunk, &entry->centry);
}

/* Report whether the entry's action block saw activity; unlike Spectrum-1,
 * activity here is read from the flex action block, not the TCAM region.
 */
static int
mlxsw_sp2_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
				      void *region_priv, void *entry_priv,
				      bool *activity)
{
	struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv;

	return mlxsw_afa_block_activity_get(entry->act_block, activity);
}

/* Spectrum-2 implementation of the generic ACL TCAM operations. */
const struct mlxsw_sp_acl_tcam_ops mlxsw_sp2_acl_tcam_ops = {
	.key_type		= MLXSW_REG_PTAR_KEY_TYPE_FLEX2,
	.priv_size		= sizeof(struct mlxsw_sp2_acl_tcam),
	.init			= mlxsw_sp2_acl_tcam_init,
	.fini			= mlxsw_sp2_acl_tcam_fini,
	.region_priv_size	= sizeof(struct mlxsw_sp2_acl_tcam_region),
	.region_init		= mlxsw_sp2_acl_tcam_region_init,
	.region_fini		= mlxsw_sp2_acl_tcam_region_fini,
	.region_associate	= mlxsw_sp2_acl_tcam_region_associate,
	.chunk_priv_size	= sizeof(struct mlxsw_sp2_acl_tcam_chunk),
	.chunk_init		= mlxsw_sp2_acl_tcam_chunk_init,
	.chunk_fini		= mlxsw_sp2_acl_tcam_chunk_fini,
	.entry_priv_size	= sizeof(struct mlxsw_sp2_acl_tcam_entry),
	.entry_add		= mlxsw_sp2_acl_tcam_entry_add,
	.entry_del		= mlxsw_sp2_acl_tcam_entry_del,
	.entry_activity_get	= mlxsw_sp2_acl_tcam_entry_activity_get,
};