--- socket.c (commit 1f901d59a5489e4dc7fdd339808d89b89f35483e)
+++ socket.c (commit 37922ea4a3105176357c8d565a9d982c4a08714a)
 /*
  * net/tipc/socket.c: TIPC socket API
  *
  * Copyright (c) 2001-2007, 2012-2017, Ericsson AB
  * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without

--- 275 unchanged lines hidden ---

 /* tsk_peer_msg - verify if message was sent by connected port's peer
  *
  * Handles cases where the node's network address has changed from
  * the default of <0.0.0> to its configured setting.
  */
 static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
 {
 	struct sock *sk = &tsk->sk;
-	struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
+	u32 self = tipc_own_addr(sock_net(sk));
 	u32 peer_port = tsk_peer_port(tsk);
-	u32 orig_node;
-	u32 peer_node;
+	u32 orig_node, peer_node;
 
 	if (unlikely(!tipc_sk_connected(sk)))
 		return false;
 
 	if (unlikely(msg_origport(msg) != peer_port))
 		return false;
 
 	orig_node = msg_orignode(msg);
 	peer_node = tsk_peer_node(tsk);
 
 	if (likely(orig_node == peer_node))
 		return true;
 
-	if (!orig_node && (peer_node == tn->own_addr))
+	if (!orig_node && peer_node == self)
 		return true;
 
-	if (!peer_node && (orig_node == tn->own_addr))
+	if (!peer_node && orig_node == self)
 		return true;
 
 	return false;
 }
 
 /* tipc_set_sk_state - set the sk_state of the socket
  * @sk: socket
  *

--- 135 unchanged lines hidden ---
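Note: a recurring change in this diff is that direct reads of tn->own_addr
(obtained via net_generic(net, tipc_net_id)) are replaced by the
tipc_own_addr() accessor. A minimal sketch of such an accessor, assuming it
simply wraps the same per-net lookup the old code did inline (the
authoritative definition lives in the TIPC headers):

	/* Sketch; illustrative only, the exact definition may differ. */
	static inline u32 tipc_own_addr(struct net *net)
	{
		struct tipc_net *tn = net_generic(net, tipc_net_id);

		return tn->own_addr;
	}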

 	if (tipc_sk_insert(tsk)) {
 		pr_warn("Socket create failed; port number exhausted\n");
 		return -EINVAL;
 	}
 
 	/* Ensure tsk is visible before we read own_addr. */
 	smp_mb();
 
-	tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
-		      NAMED_H_SIZE, 0);
+	tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE,
+		      TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
 
 	msg_set_origport(msg, tsk->portid);
 	timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
 	sk->sk_shutdown = 0;
 	sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
 	sk->sk_rcvbuf = sysctl_tipc_rmem[1];
 	sk->sk_data_ready = tipc_data_ready;
 	sk->sk_write_space = tipc_write_space;

--- 165 unchanged lines hidden ---

 
 	if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
 	    (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
 	    (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
 		res = -EACCES;
 		goto exit;
 	}
 
-	res = (addr->scope > 0) ?
+	res = (addr->scope >= 0) ?
 	      tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
 	      tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
 exit:
 	release_sock(sk);
 	return res;
 }
 
 /**

--- 5 unchanged lines hidden ---
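Note: with "scope >= 0", a scope of 0 now means publish rather than
withdraw; only a negative scope still withdraws. Together with the
normalization added to tipc_sk_publish()/tipc_sk_withdraw() further down
(anything other than TIPC_NODE_SCOPE is coerced to TIPC_CLUSTER_SCOPE), a
caller no longer has to pick an explicit scope. A minimal user-space
sketch, assuming the standard linux/tipc.h definitions (the service type
and instance values are made up for illustration):

	#include <string.h>
	#include <sys/socket.h>
	#include <linux/tipc.h>

	int bind_service(int sd)
	{
		struct sockaddr_tipc a;

		memset(&a, 0, sizeof(a));
		a.family = AF_TIPC;
		a.addrtype = TIPC_ADDR_NAMESEQ;
		a.scope = 0;			/* now treated as a publication */
		a.addr.nameseq.type = 18888;	/* hypothetical service type */
		a.addr.nameseq.lower = 17;
		a.addr.nameseq.upper = 17;
		return bind(sd, (struct sockaddr *)&a, sizeof(a));
	}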

  *
  * Returns 0 on success, errno otherwise
  *
  * NOTE: This routine doesn't need to take the socket lock since it only
  * accesses socket information that is unchanging (or which changes in
  * a completely predictable manner).
  */
 static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
-			int *uaddr_len, int peer)
+			int peer)
 {
 	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
 	struct sock *sk = sock->sk;
 	struct tipc_sock *tsk = tipc_sk(sk);
-	struct tipc_net *tn = net_generic(sock_net(sock->sk), tipc_net_id);
 
 	memset(addr, 0, sizeof(*addr));
 	if (peer) {
 		if ((!tipc_sk_connected(sk)) &&
 		    ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
 			return -ENOTCONN;
 		addr->addr.id.ref = tsk_peer_port(tsk);
 		addr->addr.id.node = tsk_peer_node(tsk);
 	} else {
 		addr->addr.id.ref = tsk->portid;
-		addr->addr.id.node = tn->own_addr;
+		addr->addr.id.node = tipc_own_addr(sock_net(sk));
 	}
 
-	*uaddr_len = sizeof(*addr);
 	addr->addrtype = TIPC_ADDR_ID;
 	addr->family = AF_TIPC;
 	addr->scope = 0;
 	addr->addr.name.domain = 0;
 
-	return 0;
+	return sizeof(*addr);
 }
 
 /**
  * tipc_poll - read and possibly block on pollmask
  * @file: file structure associated with the socket
  * @sock: socket for which to calculate the poll bits
  * @wait: ???
  *

--- 573 unchanged lines hidden ---
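Note: dropping the int *uaddr_len parameter and returning sizeof(*addr)
follows the tree-wide conversion that made getname-style handlers return
the address length (or a negative errno) instead of writing it through a
pointer; the "Returns 0 on success" line in the comment above is stale
after this change. A sketch of what a caller looks like under the new
convention (illustrative, not the actual net/socket.c code):

	int query_local_addr(struct socket *sock, struct sockaddr_storage *ss)
	{
		int len = sock->ops->getname(sock, (struct sockaddr *)ss, 0);

		return len < 0 ? len : 0;	/* positive value = address length */
	}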

 	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
 	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
 	struct list_head *clinks = &tsk->cong_links;
 	bool syn = !tipc_sk_type_connectionless(sk);
 	struct tipc_group *grp = tsk->group;
 	struct tipc_msg *hdr = &tsk->phdr;
 	struct tipc_name_seq *seq;
 	struct sk_buff_head pkts;
-	u32 type, inst, domain;
-	u32 dnode, dport;
+	u32 dnode, dport;
+	u32 type, inst;
 	int mtu, rc;
 
 	if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
 		return -EMSGSIZE;
 
 	if (likely(dest)) {
 		if (unlikely(m->msg_namelen < sizeof(*dest)))
 			return -EINVAL;

--- 34 unchanged lines hidden ---

 
 	seq = &dest->addr.nameseq;
 	if (dest->addrtype == TIPC_ADDR_MCAST)
 		return tipc_sendmcast(sock, seq, m, dlen, timeout);
 
 	if (dest->addrtype == TIPC_ADDR_NAME) {
 		type = dest->addr.name.name.type;
 		inst = dest->addr.name.name.instance;
-		domain = dest->addr.name.domain;
-		dnode = domain;
+		dnode = dest->addr.name.domain;
 		msg_set_type(hdr, TIPC_NAMED_MSG);
 		msg_set_hdr_sz(hdr, NAMED_H_SIZE);
 		msg_set_nametype(hdr, type);
 		msg_set_nameinst(hdr, inst);
-		msg_set_lookup_scope(hdr, tipc_addr_scope(domain));
+		msg_set_lookup_scope(hdr, tipc_node2scope(dnode));
 		dport = tipc_nametbl_translate(net, type, inst, &dnode);
 		msg_set_destnode(hdr, dnode);
 		msg_set_destport(hdr, dport);
 		if (unlikely(!dport && !dnode))
 			return -EHOSTUNREACH;
 	} else if (dest->addrtype == TIPC_ADDR_ID) {
 		dnode = dest->addr.id.node;
 		msg_set_type(hdr, TIPC_DIRECT_MSG);

--- 768 unchanged lines hidden ---
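Note: the old tipc_addr_scope() mapped a full <zone.cluster.node> lookup
domain onto zone/cluster/node scope. With zone scope being obsoleted, the
new tipc_node2scope() only needs to distinguish "a specific node" from
"anywhere in the cluster". A sketch of the assumed helper:

	/* Sketch: a non-zero domain names a specific node; 0 means a
	 * cluster-wide lookup (zone scope is obsolete).
	 */
	static inline int tipc_node2scope(u32 node)
	{
		return node ? TIPC_NODE_SCOPE : TIPC_CLUSTER_SCOPE;
	}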

 	/* Validate and add to receive buffer if there is space */
 	while ((skb = __skb_dequeue(&inputq))) {
 		hdr = buf_msg(skb);
 		limit = rcvbuf_limit(sk, skb);
 		if ((sk_conn && !tipc_sk_filter_connect(tsk, skb)) ||
 		    (!sk_conn && msg_connected(hdr)) ||
 		    (!grp && msg_in_group(hdr)))
 			err = TIPC_ERR_NO_PORT;
-		else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit)
-			err = TIPC_ERR_OVERLOAD;
+		else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
+			atomic_inc(&sk->sk_drops);
+			err = TIPC_ERR_OVERLOAD;
+		}
 
 		if (unlikely(err)) {
 			tipc_skb_reject(net, err, skb, xmitq);
 			err = TIPC_OK;
 			continue;
 		}
 		__skb_queue_tail(&sk->sk_receive_queue, skb);
 		skb_set_owner_r(skb, sk);

--- 62 unchanged lines hidden ---

 		if (!sk->sk_backlog.len)
 			atomic_set(dcnt, 0);
 		lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
 		if (likely(!sk_add_backlog(sk, skb, lim)))
 			continue;
 
 		/* Overload => reject message back to sender */
 		onode = tipc_own_addr(sock_net(sk));
+		atomic_inc(&sk->sk_drops);
 		if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
 			__skb_queue_tail(xmitq, skb);
 		break;
 	}
 }
 
 /**
  * tipc_sk_rcv - handle a chain of incoming buffers

--- 373 unchanged lines hidden ---
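Note: both rejection paths above (receive-queue overload and backlog
overload) now increment sk->sk_drops; the counter surfaces as
TIPC_NLA_SOCK_STAT_DROP in the tipc_sk_fill_sock_diag() function added
later in this diff.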

 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
 			   struct tipc_name_seq const *seq)
 {
 	struct sock *sk = &tsk->sk;
 	struct net *net = sock_net(sk);
 	struct publication *publ;
 	u32 key;
 
+	if (scope != TIPC_NODE_SCOPE)
+		scope = TIPC_CLUSTER_SCOPE;
+
 	if (tipc_sk_connected(sk))
 		return -EINVAL;
 	key = tsk->portid + tsk->pub_count + 1;
 	if (key == tsk->portid)
 		return -EADDRINUSE;
 
 	publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
 				    scope, tsk->portid, key);
 	if (unlikely(!publ))
 		return -EINVAL;
 
-	list_add(&publ->pport_list, &tsk->publications);
+	list_add(&publ->binding_sock, &tsk->publications);
 	tsk->pub_count++;
 	tsk->published = 1;
 	return 0;
 }
 
 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
 			    struct tipc_name_seq const *seq)
 {
 	struct net *net = sock_net(&tsk->sk);
 	struct publication *publ;
 	struct publication *safe;
 	int rc = -EINVAL;
 
-	list_for_each_entry_safe(publ, safe, &tsk->publications, pport_list) {
+	if (scope != TIPC_NODE_SCOPE)
+		scope = TIPC_CLUSTER_SCOPE;
+
+	list_for_each_entry_safe(publ, safe, &tsk->publications, binding_sock) {
 		if (seq) {
 			if (publ->scope != scope)
 				continue;
 			if (publ->type != seq->type)
 				continue;
 			if (publ->lower != seq->lower)
 				continue;
 			if (publ->upper != seq->upper)
 				break;
 			tipc_nametbl_withdraw(net, publ->type, publ->lower,
-					      publ->ref, publ->key);
+					      publ->upper, publ->key);
 			rc = 0;
 			break;
 		}
 		tipc_nametbl_withdraw(net, publ->type, publ->lower,
-				      publ->ref, publ->key);
+				      publ->upper, publ->key);
 		rc = 0;
 	}
 	if (list_empty(&tsk->publications))
 		tsk->published = 0;
 	return rc;
 }
 
 /* tipc_sk_reinit: set non-zero address in all existing sockets

--- 9 unchanged lines hidden ---
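Note: besides renaming the publication list linkage from pport_list to
binding_sock, this hunk changes the fourth argument of
tipc_nametbl_withdraw() from the port reference (publ->ref) to the range
upper bound (publ->upper), in line with the overlapping-service-range
rework in the name table. The prototype implied by the new call sites
(sketch; see net/tipc/name_table.h for the authoritative declaration):

	int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower,
				  u32 upper, u32 key);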

 	rhashtable_walk_enter(&tn->sk_rht, &iter);
 
 	do {
 		rhashtable_walk_start(&iter);
 
 		while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
 			spin_lock_bh(&tsk->sk.sk_lock.slock);
 			msg = &tsk->phdr;
-			msg_set_prevnode(msg, tn->own_addr);
-			msg_set_orignode(msg, tn->own_addr);
+			msg_set_prevnode(msg, tipc_own_addr(net));
+			msg_set_orignode(msg, tipc_own_addr(net));
 			spin_unlock_bh(&tsk->sk.sk_lock.slock);
 		}
 
 		rhashtable_walk_stop(&iter);
 	} while (tsk == ERR_PTR(-EAGAIN));
 }
 
 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)

--- 478 unchanged lines hidden ---

 	return 0;
 
 msg_full:
 	nla_nest_cancel(skb, nest);
 
 	return -EMSGSIZE;
 }
 
+static int __tipc_nl_add_sk_info(struct sk_buff *skb, struct tipc_sock
+				 *tsk)
+{
+	struct net *net = sock_net(skb->sk);
+	struct sock *sk = &tsk->sk;
+
+	if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) ||
+	    nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr(net)))
+		return -EMSGSIZE;
+
+	if (tipc_sk_connected(sk)) {
+		if (__tipc_nl_add_sk_con(skb, tsk))
+			return -EMSGSIZE;
+	} else if (!list_empty(&tsk->publications)) {
+		if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
+			return -EMSGSIZE;
+	}
+	return 0;
+}
+
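Note: __tipc_nl_add_sk_info() factors the per-socket attributes out of
__tipc_nl_add_sk() below, so that the new sock_diag path
(tipc_sk_fill_sock_diag) can emit the same TIPC_NLA_SOCK_REF/ADDR,
connection and publication attributes without duplicating the code.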
 /* Caller should hold socket lock for the passed tipc socket. */
 static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
 			    struct tipc_sock *tsk)
 {
-	int err;
-	void *hdr;
 	struct nlattr *attrs;
-	struct net *net = sock_net(skb->sk);
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
-	struct sock *sk = &tsk->sk;
+	void *hdr;
 
 	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
 			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
 	if (!hdr)
 		goto msg_cancel;
 
 	attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
 	if (!attrs)
 		goto genlmsg_cancel;
-	if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid))
+
+	if (__tipc_nl_add_sk_info(skb, tsk))
 		goto attr_msg_cancel;
-	if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tn->own_addr))
-		goto attr_msg_cancel;
 
-	if (tipc_sk_connected(sk)) {
-		err = __tipc_nl_add_sk_con(skb, tsk);
-		if (err)
-			goto attr_msg_cancel;
-	} else if (!list_empty(&tsk->publications)) {
-		if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
-			goto attr_msg_cancel;
-	}
 	nla_nest_end(skb, attrs);
 	genlmsg_end(skb, hdr);
 
 	return 0;
 
 attr_msg_cancel:
 	nla_nest_cancel(skb, attrs);
 genlmsg_cancel:
 	genlmsg_cancel(skb, hdr);
 msg_cancel:
 	return -EMSGSIZE;
 }
 
-int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
+int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
+		    int (*skb_handler)(struct sk_buff *skb,
+				       struct netlink_callback *cb,
+				       struct tipc_sock *tsk))
 {
-	int err;
-	struct tipc_sock *tsk;
-	const struct bucket_table *tbl;
-	struct rhash_head *pos;
 	struct net *net = sock_net(skb->sk);
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
-	u32 tbl_id = cb->args[0];
+	struct tipc_net *tn = tipc_net(net);
+	const struct bucket_table *tbl;
 	u32 prev_portid = cb->args[1];
+	u32 tbl_id = cb->args[0];
+	struct rhash_head *pos;
+	struct tipc_sock *tsk;
+	int err;
 
 	rcu_read_lock();
 	tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
 	for (; tbl_id < tbl->size; tbl_id++) {
 		rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) {
 			spin_lock_bh(&tsk->sk.sk_lock.slock);
 			if (prev_portid && prev_portid != tsk->portid) {
 				spin_unlock_bh(&tsk->sk.sk_lock.slock);
 				continue;
 			}
 
-			err = __tipc_nl_add_sk(skb, cb, tsk);
+			err = skb_handler(skb, cb, tsk);
 			if (err) {
 				prev_portid = tsk->portid;
 				spin_unlock_bh(&tsk->sk.sk_lock.slock);
 				goto out;
 			}
+
 			prev_portid = 0;
 			spin_unlock_bh(&tsk->sk.sk_lock.slock);
 		}
 	}
 out:
 	rcu_read_unlock();
 	cb->args[0] = tbl_id;
 	cb->args[1] = prev_portid;
 
 	return skb->len;
 }
+EXPORT_SYMBOL(tipc_nl_sk_walk);
 
+int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct tipc_sock *tsk,
+			   u32 sk_filter_state,
+			   u64 (*tipc_diag_gen_cookie)(struct sock *sk))
+{
+	struct sock *sk = &tsk->sk;
+	struct nlattr *attrs;
+	struct nlattr *stat;
+
+	/*filter response w.r.t sk_state*/
+	if (!(sk_filter_state & (1 << sk->sk_state)))
+		return 0;
+
+	attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
+	if (!attrs)
+		goto msg_cancel;
+
+	if (__tipc_nl_add_sk_info(skb, tsk))
+		goto attr_msg_cancel;
+
+	if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) ||
+	    nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) ||
+	    nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) ||
+	    nla_put_u32(skb, TIPC_NLA_SOCK_UID,
+			from_kuid_munged(sk_user_ns(sk), sock_i_uid(sk))) ||
+	    nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE,
+			      tipc_diag_gen_cookie(sk),
+			      TIPC_NLA_SOCK_PAD))
+		goto attr_msg_cancel;
+
+	stat = nla_nest_start(skb, TIPC_NLA_SOCK_STAT);
+	if (!stat)
+		goto attr_msg_cancel;
+
+	if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ,
+			skb_queue_len(&sk->sk_receive_queue)) ||
+	    nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
+			skb_queue_len(&sk->sk_write_queue)) ||
+	    nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
+			atomic_read(&sk->sk_drops)))
+		goto stat_msg_cancel;
+
+	if (tsk->cong_link_cnt &&
+	    nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG))
+		goto stat_msg_cancel;
+
+	if (tsk_conn_cong(tsk) &&
+	    nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG))
+		goto stat_msg_cancel;
+
+	nla_nest_end(skb, stat);
+	nla_nest_end(skb, attrs);
+
+	return 0;
+
+stat_msg_cancel:
+	nla_nest_cancel(skb, stat);
+attr_msg_cancel:
+	nla_nest_cancel(skb, attrs);
+msg_cancel:
+	return -EMSGSIZE;
+}
+EXPORT_SYMBOL(tipc_sk_fill_sock_diag);
+
+int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk);
+}
+
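Note: tipc_nl_sk_walk() plus the exported tipc_sk_fill_sock_diag() are the
hooks for a separate diag module; tipc_nl_sk_dump() above shows the
in-file use. A hedged sketch of how an external dumper could plug in,
modeled on what net/tipc/diag.c does (the names below are illustrative):

	static u64 __diag_gen_cookie(struct sock *sk)
	{
		u32 res[2];

		sock_diag_save_cookie(sk, res);
		return (u64)res[1] << 32 | res[0];
	}

	static int __diag_dump_one(struct sk_buff *skb,
				   struct netlink_callback *cb,
				   struct tipc_sock *tsk)
	{
		/* dump sockets in every sk_state */
		return tipc_sk_fill_sock_diag(skb, tsk, 0xffff,
					      __diag_gen_cookie);
	}

	static int diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
	{
		return tipc_nl_sk_walk(skb, cb, __diag_dump_one);
	}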
 /* Caller should hold socket lock for the passed tipc socket. */
 static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
 				 struct netlink_callback *cb,
 				 struct publication *publ)
 {
 	void *hdr;
 	struct nlattr *attrs;
 

--- 32 unchanged lines hidden ---

 static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
 				  struct netlink_callback *cb,
 				  struct tipc_sock *tsk, u32 *last_publ)
 {
 	int err;
 	struct publication *p;
 
 	if (*last_publ) {
-		list_for_each_entry(p, &tsk->publications, pport_list) {
+		list_for_each_entry(p, &tsk->publications, binding_sock) {
 			if (p->key == *last_publ)
 				break;
 		}
 		if (p->key != *last_publ) {
 			/* We never set seq or call nl_dump_check_consistent()
 			 * this means that setting prev_seq here will cause the
 			 * consistence check to fail in the netlink callback
 			 * handler. Resulting in the last NLMSG_DONE message
 			 * having the NLM_F_DUMP_INTR flag set.
 			 */
 			cb->prev_seq = 1;
 			*last_publ = 0;
 			return -EPIPE;
 		}
 	} else {
 		p = list_first_entry(&tsk->publications, struct publication,
-				     pport_list);
+				     binding_sock);
 	}
 
-	list_for_each_entry_from(p, &tsk->publications, pport_list) {
+	list_for_each_entry_from(p, &tsk->publications, binding_sock) {
 		err = __tipc_nl_add_sk_publ(skb, cb, p);
 		if (err) {
 			*last_publ = p->key;
 			return err;
 		}
 	}
 	*last_publ = 0;
 

--- 55 unchanged lines hidden ---