tcp.c: 717cb906bd43a9ac00631d600adda5c6546843a6 -> b03efcfb2180289718991bb984044ce6c5b7d1b0
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Implementation of the Transmission Control Protocol(TCP).
7 *
8 * Version: $Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $

--- 1091 unchanged lines hidden ---

1100 tcp_send_ack(sk);
1101 }
1102
1103 static void tcp_prequeue_process(struct sock *sk)
1104 {
1105 struct sk_buff *skb;
1106 struct tcp_sock *tp = tcp_sk(sk);
1107
1108 NET_ADD_STATS_USER(LINUX_MIB_TCPPREQUEUED, skb_queue_len(&tp->ucopy.prequeue));
1108 NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);
1109
1110 /* RX process wants to run with disabled BHs, though it is not
1111 * necessary */
1112 local_bh_disable();
1113 while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
1114 sk->sk_backlog_rcv(sk, skb);
1115 local_bh_enable();
1116

--- 247 unchanged lines hidden ---

1364 * packets arrived _after_ prequeued ones.
1365 *
1366 * Shortly, algorithm is clear --- to process all
1367 * the queues in order. We could make it more directly,
1368 * requeueing packets from backlog to prequeue, if
1369 * is not empty. It is more elegant, but eats cycles,
1370 * unfortunately.
1371 */
1372 if (skb_queue_len(&tp->ucopy.prequeue))
1372 if (!skb_queue_empty(&tp->ucopy.prequeue))
1373 goto do_prequeue;
1374
1375 /* __ Set realtime policy in scheduler __ */
1376 }
1377
1378 if (copied >= target) {
1379 /* Do not sleep, just process backlog. */
1380 release_sock(sk);

--- 8 unchanged lines hidden ---

1389
1390 if ((chunk = len - tp->ucopy.len) != 0) {
1391 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
1392 len -= chunk;
1393 copied += chunk;
1394 }
1395
1396 if (tp->rcv_nxt == tp->copied_seq &&
1397 skb_queue_len(&tp->ucopy.prequeue)) {
1397 !skb_queue_empty(&tp->ucopy.prequeue)) {
1398 do_prequeue:
1399 tcp_prequeue_process(sk);
1400
1401 if ((chunk = len - tp->ucopy.len) != 0) {
1402 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1403 len -= chunk;
1404 copied += chunk;
1405 }

--- 65 unchanged lines hidden ---

1471 /* Process the FIN. */
1472 ++*seq;
1473 if (!(flags & MSG_PEEK))
1474 sk_eat_skb(sk, skb);
1475 break;
1476 } while (len > 0);
1477
1478 if (user_recv) {
1479 if (skb_queue_len(&tp->ucopy.prequeue)) {
1479 if (!skb_queue_empty(&tp->ucopy.prequeue)) {
1480 int chunk;
1481
1482 tp->ucopy.len = copied > 0 ? len : 0;
1483
1484 tcp_prequeue_process(sk);
1485
1486 if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
1487 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);

--- 900 unchanged lines hidden ---
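
Taken together, the hunks above apply one mechanical cleanup: every place that called skb_queue_len(&tp->ucopy.prequeue) only to ask whether the prequeue is non-empty now uses !skb_queue_empty() instead, and the length-weighted NET_ADD_STATS_USER(LINUX_MIB_TCPPREQUEUED, ...) becomes a plain NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED). Below is a minimal, self-contained sketch of the boolean-test half of that pattern; the toy queue type and helpers (pkt_queue, pkt_queue_len, pkt_queue_empty) are hypothetical stand-ins for illustration, not kernel APIs.

/*
 * Toy illustration (not kernel code): a queue head with a cached length,
 * loosely in the spirit of struct sk_buff_head.
 */
#include <stdio.h>

struct pkt_queue {
	unsigned int qlen;		/* number of queued packets */
};

/* Analogue of skb_queue_len(): returns the cached length counter. */
static unsigned int pkt_queue_len(const struct pkt_queue *q)
{
	return q->qlen;
}

/* Analogue of skb_queue_empty(): answers only the yes/no question. */
static int pkt_queue_empty(const struct pkt_queue *q)
{
	return q->qlen == 0;
}

int main(void)
{
	struct pkt_queue prequeue = { .qlen = 3 };

	/* Old style: a length used purely as a boolean. */
	if (pkt_queue_len(&prequeue))
		printf("old-style check: queue non-empty (len=%u)\n",
		       pkt_queue_len(&prequeue));

	/* New style, as in the diff: state the intent directly. */
	if (!pkt_queue_empty(&prequeue))
		printf("new-style check: queue non-empty\n");

	return 0;
}

The emptiness test reads as a clear boolean and does not suggest that the exact count matters, which is the same motivation visible at lines 1372, 1397, and 1479 of the diff.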