// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/hash.h>
#include <linux/hashtable.h>
#include <linux/jhash.h>
#include <linux/math64.h>
#include <linux/vmalloc.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_app.h"

struct nfp_mask_id_table {
	struct hlist_node link;
	u32 hash_key;
	u32 ref_cnt;
	u8 mask_id;
};

struct nfp_fl_flow_table_cmp_arg {
	struct net_device *netdev;
	unsigned long cookie;
};

static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
{
	struct nfp_flower_priv *priv = app->priv;
	struct circ_buf *ring;

	ring = &priv->stats_ids.free_list;
	/* Check if buffer is full. */
	if (!CIRC_SPACE(ring->head, ring->tail,
			priv->stats_ring_size * NFP_FL_STATS_ELEM_RS -
			NFP_FL_STATS_ELEM_RS + 1))
		return -ENOBUFS;

	memcpy(&ring->buf[ring->head], &stats_context_id, NFP_FL_STATS_ELEM_RS);
	ring->head = (ring->head + NFP_FL_STATS_ELEM_RS) %
		     (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);

	return 0;
}

static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
{
	struct nfp_flower_priv *priv = app->priv;
	u32 freed_stats_id, temp_stats_id;
	struct circ_buf *ring;

	ring = &priv->stats_ids.free_list;
	freed_stats_id = priv->stats_ring_size;
	/* Check for unallocated entries first. */
	if (priv->stats_ids.init_unalloc > 0) {
		*stats_context_id =
			FIELD_PREP(NFP_FL_STAT_ID_STAT,
				   priv->stats_ids.init_unalloc - 1) |
			FIELD_PREP(NFP_FL_STAT_ID_MU_NUM,
				   priv->active_mem_unit);

		/* Only step down to the next stat index once an ID has
		 * been handed out for every memory unit at this index.
		 */
		if (++priv->active_mem_unit == priv->total_mem_units) {
			priv->stats_ids.init_unalloc--;
			priv->active_mem_unit = 0;
		}

		return 0;
	}

	/* Check if buffer is empty. */
	if (ring->head == ring->tail) {
		*stats_context_id = freed_stats_id;
		return -ENOENT;
	}

	memcpy(&temp_stats_id, &ring->buf[ring->tail], NFP_FL_STATS_ELEM_RS);
	*stats_context_id = temp_stats_id;
	memcpy(&ring->buf[ring->tail], &freed_stats_id, NFP_FL_STATS_ELEM_RS);
	ring->tail = (ring->tail + NFP_FL_STATS_ELEM_RS) %
		     (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);

	return 0;
}
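/* Note: flow entries are keyed on the TC flower cookie combined with
 * the ingress netdev, so the same cookie may exist once per port
 * without clashing; nfp_fl_obj_cmpfn() implements this comparison.
 */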
/* Must be called with either RTNL or rcu_read_lock */
struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
			   struct net_device *netdev)
{
	struct nfp_fl_flow_table_cmp_arg flower_cmp_arg;
	struct nfp_flower_priv *priv = app->priv;

	flower_cmp_arg.netdev = netdev;
	flower_cmp_arg.cookie = tc_flower_cookie;

	return rhashtable_lookup_fast(&priv->flow_table, &flower_cmp_arg,
				      nfp_flower_table_params);
}

void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb)
{
	unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_stats_frame *stats;
	unsigned char *msg;
	u32 ctx_id;
	int i;

	msg = nfp_flower_cmsg_get_data(skb);

	spin_lock(&priv->stats_lock);
	for (i = 0; i < msg_len / sizeof(*stats); i++) {
		stats = (struct nfp_fl_stats_frame *)msg + i;
		ctx_id = be32_to_cpu(stats->stats_con_id);
		priv->stats[ctx_id].pkts += be32_to_cpu(stats->pkt_count);
		priv->stats[ctx_id].bytes += be64_to_cpu(stats->byte_count);
		priv->stats[ctx_id].used = jiffies;
	}
	spin_unlock(&priv->stats_lock);
}

static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
{
	struct nfp_flower_priv *priv = app->priv;
	struct circ_buf *ring;

	ring = &priv->mask_ids.mask_id_free_list;
	/* Check if buffer is full. */
	if (CIRC_SPACE(ring->head, ring->tail, NFP_FLOWER_MASK_ENTRY_RS) == 0)
		return -ENOBUFS;

	memcpy(&ring->buf[ring->head], &mask_id, NFP_FLOWER_MASK_ELEMENT_RS);
	ring->head = (ring->head + NFP_FLOWER_MASK_ELEMENT_RS) %
		     (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);

	priv->mask_ids.last_used[mask_id] = ktime_get();

	return 0;
}

static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id)
{
	struct nfp_flower_priv *priv = app->priv;
	ktime_t reuse_timeout;
	struct circ_buf *ring;
	u8 temp_id, freed_id;

	ring = &priv->mask_ids.mask_id_free_list;
	freed_id = NFP_FLOWER_MASK_ENTRY_RS - 1;
	/* Check for unallocated entries first. */
	if (priv->mask_ids.init_unallocated > 0) {
		*mask_id = priv->mask_ids.init_unallocated;
		priv->mask_ids.init_unallocated--;
		return 0;
	}

	/* Check if buffer is empty. */
	if (ring->head == ring->tail)
		goto err_not_found;

	memcpy(&temp_id, &ring->buf[ring->tail], NFP_FLOWER_MASK_ELEMENT_RS);
	*mask_id = temp_id;

	/* Hold off reusing an ID until the reuse timeout has passed
	 * since it was last released.
	 */
	reuse_timeout = ktime_add_ns(priv->mask_ids.last_used[*mask_id],
				     NFP_FL_MASK_REUSE_TIME_NS);

	if (ktime_before(ktime_get(), reuse_timeout))
		goto err_not_found;

	memcpy(&ring->buf[ring->tail], &freed_id, NFP_FLOWER_MASK_ELEMENT_RS);
	ring->tail = (ring->tail + NFP_FLOWER_MASK_ELEMENT_RS) %
		     (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);

	return 0;

err_not_found:
	*mask_id = freed_id;
	return -ENOENT;
}
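/* Mask IDs are reference counted. nfp_add_mask_table() installs a new
 * entry with a ref_cnt of one, nfp_find_in_mask_table() takes an extra
 * reference on a match, and nfp_check_mask_remove() drops one,
 * returning the ID to the free-list ring via nfp_release_mask_id()
 * once the count reaches zero.
 */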
static int
nfp_add_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_mask_id_table *mask_entry;
	unsigned long hash_key;
	u8 mask_id;

	if (nfp_mask_alloc(app, &mask_id))
		return -ENOENT;

	mask_entry = kmalloc(sizeof(*mask_entry), GFP_KERNEL);
	if (!mask_entry) {
		nfp_release_mask_id(app, mask_id);
		return -ENOMEM;
	}

	INIT_HLIST_NODE(&mask_entry->link);
	mask_entry->mask_id = mask_id;
	hash_key = jhash(mask_data, mask_len, priv->mask_id_seed);
	mask_entry->hash_key = hash_key;
	mask_entry->ref_cnt = 1;
	hash_add(priv->mask_table, &mask_entry->link, hash_key);

	return mask_id;
}

static struct nfp_mask_id_table *
nfp_search_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_mask_id_table *mask_entry;
	unsigned long hash_key;

	hash_key = jhash(mask_data, mask_len, priv->mask_id_seed);

	hash_for_each_possible(priv->mask_table, mask_entry, link, hash_key)
		if (mask_entry->hash_key == hash_key)
			return mask_entry;

	return NULL;
}

static int
nfp_find_in_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
	struct nfp_mask_id_table *mask_entry;

	mask_entry = nfp_search_mask_table(app, mask_data, mask_len);
	if (!mask_entry)
		return -ENOENT;

	mask_entry->ref_cnt++;

	/* The u8 mask_id is returned as an int so callers can tell a
	 * valid ID apart from a negative errno.
	 */
	return mask_entry->mask_id;
}
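/* NFP_FL_META_FLAG_MANAGE_MASK is set when a flow being added is the
 * first user of its mask, and when a flow being removed is the last
 * user; it stays clear while other flows still reference the same
 * mask, so only the first add and the last delete manage the mask
 * entry itself.
 */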
static bool
nfp_check_mask_add(struct nfp_app *app, char *mask_data, u32 mask_len,
		   u8 *meta_flags, u8 *mask_id)
{
	int id;

	id = nfp_find_in_mask_table(app, mask_data, mask_len);
	if (id < 0) {
		id = nfp_add_mask_table(app, mask_data, mask_len);
		if (id < 0)
			return false;
		*meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
	}
	*mask_id = id;

	return true;
}

static bool
nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
		      u8 *meta_flags, u8 *mask_id)
{
	struct nfp_mask_id_table *mask_entry;

	mask_entry = nfp_search_mask_table(app, mask_data, mask_len);
	if (!mask_entry)
		return false;

	if (meta_flags)
		*meta_flags &= ~NFP_FL_META_FLAG_MANAGE_MASK;

	*mask_id = mask_entry->mask_id;
	mask_entry->ref_cnt--;
	if (!mask_entry->ref_cnt) {
		hash_del(&mask_entry->link);
		nfp_release_mask_id(app, *mask_id);
		kfree(mask_entry);
		if (meta_flags)
			*meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
	}

	return true;
}

int nfp_compile_flow_metadata(struct nfp_app *app,
			      struct tc_cls_flower_offload *flow,
			      struct nfp_fl_payload *nfp_flow,
			      struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *check_entry;
	u8 new_mask_id;
	u32 stats_cxt;

	if (nfp_get_stats_entry(app, &stats_cxt))
		return -ENOENT;

	nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
	nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie);
	nfp_flow->ingress_dev = netdev;

	new_mask_id = 0;
	if (!nfp_check_mask_add(app, nfp_flow->mask_data,
				nfp_flow->meta.mask_len,
				&nfp_flow->meta.flags, &new_mask_id)) {
		if (nfp_release_stats_entry(app, stats_cxt))
			return -EINVAL;
		return -ENOENT;
	}

	nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
	priv->flower_version++;

	/* Update flow payload with mask ids. */
	nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
	priv->stats[stats_cxt].pkts = 0;
	priv->stats[stats_cxt].bytes = 0;
	priv->stats[stats_cxt].used = jiffies;

	/* A duplicate flow means the metadata compiled above must be
	 * unwound: release the stats context and drop the mask
	 * reference before reporting the clash.
	 */
	check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (check_entry) {
		if (nfp_release_stats_entry(app, stats_cxt))
			return -EINVAL;

		if (!nfp_check_mask_remove(app, nfp_flow->mask_data,
					   nfp_flow->meta.mask_len,
					   NULL, &new_mask_id))
			return -EINVAL;

		return -EEXIST;
	}

	return 0;
}

int nfp_modify_flow_metadata(struct nfp_app *app,
			     struct nfp_fl_payload *nfp_flow)
{
	struct nfp_flower_priv *priv = app->priv;
	u8 new_mask_id = 0;
	u32 temp_ctx_id;

	nfp_check_mask_remove(app, nfp_flow->mask_data,
			      nfp_flow->meta.mask_len, &nfp_flow->meta.flags,
			      &new_mask_id);

	nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
	priv->flower_version++;

	/* Update flow payload with mask ids. */
	nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;

	/* Release the stats ctx id. */
	temp_ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

	return nfp_release_stats_entry(app, temp_ctx_id);
}
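/* The flow table hashes stored nfp_fl_payload objects and lookup keys
 * with separate functions: both hash only the flower cookie, so a
 * lookup lands in the same bucket as the stored object, while the
 * ingress netdev takes part in the compare only.
 */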
static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg,
			    const void *obj)
{
	const struct nfp_fl_flow_table_cmp_arg *cmp_arg = arg->key;
	const struct nfp_fl_payload *flow_entry = obj;

	if (flow_entry->ingress_dev == cmp_arg->netdev)
		return flow_entry->tc_flower_cookie != cmp_arg->cookie;

	return 1;
}

static u32 nfp_fl_obj_hashfn(const void *data, u32 len, u32 seed)
{
	const struct nfp_fl_payload *flower_entry = data;

	return jhash2((u32 *)&flower_entry->tc_flower_cookie,
		      sizeof(flower_entry->tc_flower_cookie) / sizeof(u32),
		      seed);
}

static u32 nfp_fl_key_hashfn(const void *data, u32 len, u32 seed)
{
	const struct nfp_fl_flow_table_cmp_arg *cmp_arg = data;

	return jhash2((u32 *)&cmp_arg->cookie,
		      sizeof(cmp_arg->cookie) / sizeof(u32), seed);
}

const struct rhashtable_params nfp_flower_table_params = {
	.head_offset		= offsetof(struct nfp_fl_payload, fl_node),
	.hashfn			= nfp_fl_key_hashfn,
	.obj_cmpfn		= nfp_fl_obj_cmpfn,
	.obj_hashfn		= nfp_fl_obj_hashfn,
	.automatic_shrinking	= true,
};
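/* host_ctx_count is the total number of host stats contexts, spread
 * evenly across host_num_mems memory units, hence the preallocated
 * pool holds host_ctx_count / host_num_mems IDs per unit. The caller
 * is expected to have set priv->stats_ring_size beforehand; it sizes
 * the free-list ring below and doubles as the out-of-range marker in
 * nfp_get_stats_entry().
 */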
int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
			     unsigned int host_num_mems)
{
	struct nfp_flower_priv *priv = app->priv;
	int err, stats_size;

	hash_init(priv->mask_table);

	err = rhashtable_init(&priv->flow_table, &nfp_flower_table_params);
	if (err)
		return err;

	get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));

	/* Init ring buffer and unallocated mask_ids. */
	priv->mask_ids.mask_id_free_list.buf =
		kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
			      NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
	if (!priv->mask_ids.mask_id_free_list.buf)
		goto err_free_flow_table;

	priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;

	/* Init timestamps for mask ids. */
	priv->mask_ids.last_used =
		kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
			      sizeof(*priv->mask_ids.last_used), GFP_KERNEL);
	if (!priv->mask_ids.last_used)
		goto err_free_mask_id;

	/* Init ring buffer and unallocated stats_ids. */
	priv->stats_ids.free_list.buf =
		vmalloc(array_size(NFP_FL_STATS_ELEM_RS,
				   priv->stats_ring_size));
	if (!priv->stats_ids.free_list.buf)
		goto err_free_last_used;

	priv->stats_ids.init_unalloc = div_u64(host_ctx_count, host_num_mems);

	stats_size = FIELD_PREP(NFP_FL_STAT_ID_STAT, host_ctx_count) |
		     FIELD_PREP(NFP_FL_STAT_ID_MU_NUM, host_num_mems - 1);
	priv->stats = kvmalloc_array(stats_size, sizeof(struct nfp_fl_stats),
				     GFP_KERNEL);
	if (!priv->stats)
		goto err_free_ring_buf;

	spin_lock_init(&priv->stats_lock);

	return 0;

err_free_ring_buf:
	vfree(priv->stats_ids.free_list.buf);
err_free_last_used:
	kfree(priv->mask_ids.last_used);
err_free_mask_id:
	kfree(priv->mask_ids.mask_id_free_list.buf);
err_free_flow_table:
	rhashtable_destroy(&priv->flow_table);
	return -ENOMEM;
}

void nfp_flower_metadata_cleanup(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!priv)
		return;

	rhashtable_free_and_destroy(&priv->flow_table,
				    nfp_check_rhashtable_empty, NULL);
	kvfree(priv->stats);
	kfree(priv->mask_ids.mask_id_free_list.buf);
	kfree(priv->mask_ids.last_used);
	vfree(priv->stats_ids.free_list.buf);
}