/* ar-skbuff.c: socket buffer destruction handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * set up for the ACK at the end of the receive phase when we discard the final
 * receive phase data packet
 * - called with softirqs disabled
 */
static void rxrpc_request_final_ACK(struct rxrpc_call *call)
{
        /* the call may be aborted before we have a chance to ACK it */
        write_lock(&call->state_lock);

        switch (call->state) {
        case RXRPC_CALL_CLIENT_RECV_REPLY:
                call->state = RXRPC_CALL_CLIENT_FINAL_ACK;
                _debug("request final ACK");

                /* get an extra ref on the call for the final-ACK generator to
                 * release */
                rxrpc_get_call(call);
                set_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events);
                if (try_to_del_timer_sync(&call->ack_timer) >= 0)
                        rxrpc_queue_call(call);
                break;

        case RXRPC_CALL_SERVER_RECV_REQUEST:
                call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
                /* fall through */
        default:
                break;
        }

        write_unlock(&call->state_lock);
}

/*
 * drop the bottom ACK off the call ACK window and advance the window
 */
static void rxrpc_hard_ACK_data(struct rxrpc_call *call,
                                struct rxrpc_skb_priv *sp)
{
        int loop;
        u32 seq;

        spin_lock_bh(&call->lock);

        _debug("hard ACK #%u", sp->hdr.seq);

        /* Shift the whole soft-ACK window bitmap down by one bit, pulling the
         * bottom bit of each higher word into the top bit of the word below
         * it (see the illustration following this function).
         */
        for (loop = 0; loop < RXRPC_ACKR_WINDOW_ASZ; loop++) {
                call->ackr_window[loop] >>= 1;
                call->ackr_window[loop] |=
                        call->ackr_window[loop + 1] << (BITS_PER_LONG - 1);
        }

        seq = sp->hdr.seq;
        ASSERTCMP(seq, ==, call->rx_data_eaten + 1);
        call->rx_data_eaten = seq;

        if (call->ackr_win_top < UINT_MAX)
                call->ackr_win_top++;

        ASSERTIFCMP(call->state <= RXRPC_CALL_COMPLETE,
                    call->rx_data_post, >=, call->rx_data_recv);
        ASSERTIFCMP(call->state <= RXRPC_CALL_COMPLETE,
                    call->rx_data_recv, >=, call->rx_data_eaten);

        if (sp->hdr.flags & RXRPC_LAST_PACKET) {
                rxrpc_request_final_ACK(call);
        } else if (atomic_dec_and_test(&call->ackr_not_idle) &&
                   test_and_clear_bit(RXRPC_CALL_TX_SOFT_ACK, &call->flags)) {
                /* We previously soft-ACK'd some received packets that have now
                 * been consumed, so send a hard-ACK if no more packets are
                 * immediately forthcoming to allow the transmitter to free up
                 * its Tx bufferage.
                 */
                _debug("send Rx idle ACK");
                __rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, sp->hdr.serial,
                                    false);
        }

        spin_unlock_bh(&call->lock);
}

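/*
 * Illustration (comment only, not built): the loop in rxrpc_hard_ACK_data()
 * shifts the whole multi-word soft-ACK bitmap down by one bit.  Using
 * hypothetical 8-bit words and a two-word window backed by a zeroed guard
 * word, one pass does this:
 *
 *      before: window[0] = 0b10110010, window[1] = 0b00000001, window[2] = 0
 *
 *      window[0] >>= 1;                // 0b01011001
 *      window[0] |= window[1] << 7;    // 0b11011001
 *      window[1] >>= 1;                // 0b00000000
 *      window[1] |= window[2] << 7;    // 0b00000000
 *
 *      after:  window[0] = 0b11011001, window[1] = 0b00000000
 *
 * Bit 0 (the packet just hard-ACK'd) is discarded and every other bit moves
 * down one place.  The real code uses BITS_PER_LONG-bit words, and the array
 * is assumed to carry one extra word so that ackr_window[loop + 1] stays in
 * bounds on the final iteration.
 */
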
/**
 * rxrpc_kernel_data_consumed - Record consumption of data message
 * @call: The call to which the message pertains.
 * @skb: Message holding data
 *
 * Record the consumption of a data message and generate an ACK if appropriate.
 * The call state is shifted if this was the final packet.  The caller must be
 * in process context with no spinlocks held.
 *
 * TODO: Actually generate the ACK here rather than punting this to the
 * workqueue.
 */
void rxrpc_kernel_data_consumed(struct rxrpc_call *call, struct sk_buff *skb)
{
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

        _enter("%d,%p{%u}", call->debug_id, skb, sp->hdr.seq);

        ASSERTCMP(sp->call, ==, call);
        ASSERTCMP(sp->hdr.type, ==, RXRPC_PACKET_TYPE_DATA);

        /* TODO: Fix the sequence number tracking */
        ASSERTCMP(sp->hdr.seq, >=, call->rx_data_recv);
        ASSERTCMP(sp->hdr.seq, <=, call->rx_data_recv + 1);
        ASSERTCMP(sp->hdr.seq, >, call->rx_data_eaten);

        call->rx_data_recv = sp->hdr.seq;
        rxrpc_hard_ACK_data(call, sp);
}
EXPORT_SYMBOL(rxrpc_kernel_data_consumed);

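/*
 * Example usage (a sketch only, not taken from a real caller): an in-kernel
 * user of the rxrpc socket API that has been handed a DATA packet is expected
 * to note its consumption once the payload has been copied out, then release
 * the skb.  The names my_deliver() and my_copy_payload() are hypothetical.
 *
 *      static void my_deliver(struct rxrpc_call *rxcall, struct sk_buff *skb)
 *      {
 *              my_copy_payload(skb);                    // extract the data
 *              rxrpc_kernel_data_consumed(rxcall, skb); // may propose an ACK
 *              rxrpc_kernel_free_skb(skb);              // drop the buffer
 *      }
 *
 * rxrpc_kernel_free_skb() is defined later in this file; it keeps the module's
 * skb accounting straight rather than having the caller use kfree_skb().
 */
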
/*
 * Destroy a packet that has an RxRPC control buffer
 */
void rxrpc_packet_destructor(struct sk_buff *skb)
{
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        struct rxrpc_call *call = sp->call;

        _enter("%p{%p}", skb, call);

        if (call) {
                if (atomic_dec_return(&call->skb_count) < 0)
                        BUG();
                rxrpc_put_call(call);
                sp->call = NULL;
        }

        if (skb->sk)
                sock_rfree(skb);
        _leave("");
}

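/*
 * For orientation (a sketch of what the destructor above implies, not code
 * from this file): before an skb is attached to a call, the input path is
 * assumed to have done roughly the following so that the tear-down here
 * balances out.
 *
 *      sp->call = call;                        // cleared again above
 *      rxrpc_get_call(call);                   // put again above
 *      atomic_inc(&call->skb_count);           // decremented again above
 *      skb->destructor = rxrpc_packet_destructor;
 *
 * If the skb was also charged to a socket's receive buffer (skb->sk set, for
 * instance via skb_set_owner_r()), sock_rfree() above releases that charge.
 */
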
/**
 * rxrpc_kernel_free_skb - Free an RxRPC socket buffer
 * @skb: The socket buffer to be freed
 *
 * Let RxRPC free its own socket buffer, permitting it to maintain debug
 * accounting.
 */
void rxrpc_kernel_free_skb(struct sk_buff *skb)
{
        rxrpc_free_skb(skb);
}
EXPORT_SYMBOL(rxrpc_kernel_free_skb);

/*
 * Note the existence of a new-to-us socket buffer (allocated or dequeued).
 */
void rxrpc_new_skb(struct sk_buff *skb)
{
        const void *here = __builtin_return_address(0);
        int n = atomic_inc_return(&rxrpc_n_skbs);

        trace_rxrpc_skb(skb, 0, atomic_read(&skb->users), n, here);
}

/*
 * Note the re-emergence of a socket buffer from a queue or buffer.
 */
void rxrpc_see_skb(struct sk_buff *skb)
{
        const void *here = __builtin_return_address(0);

        if (skb) {
                int n = atomic_read(&rxrpc_n_skbs);

                trace_rxrpc_skb(skb, 1, atomic_read(&skb->users), n, here);
        }
}

/*
 * Note the addition of a ref on a socket buffer.
 */
void rxrpc_get_skb(struct sk_buff *skb)
{
        const void *here = __builtin_return_address(0);
        int n = atomic_inc_return(&rxrpc_n_skbs);

        trace_rxrpc_skb(skb, 2, atomic_read(&skb->users), n, here);
        skb_get(skb);
}

/*
 * Note the destruction of a socket buffer.
 */
void rxrpc_free_skb(struct sk_buff *skb)
{
        const void *here = __builtin_return_address(0);

        if (skb) {
                int n;

                CHECK_SLAB_OKAY(&skb->users);
                n = atomic_dec_return(&rxrpc_n_skbs);
                trace_rxrpc_skb(skb, 3, atomic_read(&skb->users), n, here);
                kfree_skb(skb);
        }
}

/*
 * Clear a queue of socket buffers.
 */
void rxrpc_purge_queue(struct sk_buff_head *list)
{
        const void *here = __builtin_return_address(0);
        struct sk_buff *skb;

        while ((skb = skb_dequeue(list)) != NULL) {
                int n = atomic_dec_return(&rxrpc_n_skbs);

                trace_rxrpc_skb(skb, 4, atomic_read(&skb->users), n, here);
                kfree_skb(skb);
        }
}

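/*
 * Example pairing (a sketch only; the real call sites live in the input and
 * recvmsg paths, and the queue shown here is hypothetical): the helpers above
 * are intended to bracket an skb's life inside rxrpc so that rxrpc_n_skbs and
 * the rxrpc_skb tracepoint stay coherent.
 *
 *      skb = skb_recv_datagram(sk, 0, 1, &ret);
 *      rxrpc_new_skb(skb);                     // first seen by rxrpc
 *      skb_queue_tail(&rx_queue, skb);
 *      ...
 *      skb = skb_dequeue(&rx_queue);
 *      rxrpc_see_skb(skb);                     // re-emerged from the queue
 *      rxrpc_get_skb(skb);                     // extra ref for a second user
 *      ...
 *      rxrpc_free_skb(skb);                    // one ref dropped
 *      rxrpc_free_skb(skb);                    // last ref dropped
 *      rxrpc_purge_queue(&rx_queue);           // discard anything left over
 */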