// SPDX-License-Identifier: GPL-2.0-or-later
/* ar-skbuff.c: socket buffer destruction handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

#define select_skb_count(op) (op >= rxrpc_skb_tx_cleaned ? &rxrpc_n_tx_skbs : &rxrpc_n_rx_skbs)

/*
 * Note the allocation or reception of a socket buffer.
 */
void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(select_skb_count(op));
	trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
}

/*
 * Note the re-emergence of a socket buffer from a queue or buffer.
 */
void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
	const void *here = __builtin_return_address(0);
	if (skb) {
		int n = atomic_read(select_skb_count(op));
		trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
	}
}

/*
 * Note the addition of a ref on a socket buffer.
 */
void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(select_skb_count(op));
	trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
	skb_get(skb);
}

/*
 * Note the destruction of a socket buffer.
 */
void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
	const void *here = __builtin_return_address(0);
	if (skb) {
		int n;
		CHECK_SLAB_OKAY(&skb->users);
		n = atomic_dec_return(select_skb_count(op));
		trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
		kfree_skb(skb);
	}
}

/*
 * Clear a queue of socket buffers.
 */
void rxrpc_purge_queue(struct sk_buff_head *list)
{
	const void *here = __builtin_return_address(0);
	struct sk_buff *skb;
	while ((skb = skb_dequeue((list))) != NULL) {
		int n = atomic_dec_return(select_skb_count(rxrpc_skb_rx_purged));
		trace_rxrpc_skb(skb, rxrpc_skb_rx_purged,
				refcount_read(&skb->users), n, here);
		kfree_skb(skb);
	}
}