/*
 * net/tipc/net.c: TIPC network routing code
 *
 * Copyright (c) 1995-2006, 2014, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "net.h"
#include "name_distr.h"
#include "subscr.h"
#include "socket.h"
#include "node.h"
#include "bcast.h"
#include "netlink.h"
#include "monitor.h"

/*
 * The TIPC locking policy is designed to ensure a very fine locking
 * granularity, permitting complete parallel access to individual
 * port and node/link instances. The code consists of four major
 * locking domains, each protected by its own disjoint set of locks.
 *
 * 1: The bearer level.
 *    The RTNL lock is used to serialize bearer configuration on the
 *    update side, while the RCU lock is taken on the read side to keep
 *    bearer instances valid on both the message transmission and
 *    reception paths.
 *
 * 2: The node and link level.
 *    All node instances are kept in two lists, tipc_node_list and
 *    node_htable. Both lists are protected by node_list_lock on the
 *    write side and guarded by the RCU lock on the read side. Note that
 *    a node instance is destroyed only when the TIPC module is removed,
 *    at which point no user can still be accessing it. Therefore, apart
 *    from iterating the two lists under RCU protection, there is no
 *    need to hold the RCU lock when accessing a node instance elsewhere.
 *
 *    In addition, all members of a node structure, including its link
 *    instances, are protected by the node spin lock.
 *
 * 3: The transport level of the protocol.
 *    This consists of the structures port (and its user level
 *    representations, such as user_port and tipc_sock), reference and
 *    tipc_user (port.c, reg.c, socket.c).
 *
 *    This layer has four different locks:
 *     - The tipc_port spin_lock. This protects each port instance
 *       against parallel data access and removal. Since we cannot place
 *       this lock in the port itself, it has been placed in the
 *       corresponding reference table entry, which has the same life
 *       cycle as the module. This entry is difficult to access from
 *       outside the TIPC core, however, so a pointer to the lock has
 *       been added in the port instance, to be used for unlocking
 *       only.
 *     - A read/write lock to protect the reference table itself (reg.c).
 *       (Nobody is using read-only access to this, so it might just as
 *       well be changed to a spin_lock.)
 *     - A spin lock to protect the registry of kernel/driver users (reg.c).
 *     - A global spin_lock (tipc_port_lock), whose only task is to ensure
 *       consistency where more than one port is involved in an operation,
 *       i.e., when a port is part of a linked list of ports.
 *       There are two such lists: 'port_list', which is used for management,
 *       and 'wait_list', which is used to queue ports during congestion.
 *
 * 4: The name table (name_table.c, name_distr.c, subscription.c)
 *     - There is one big read/write lock (tipc_nametbl_lock) protecting the
 *       overall name table structure. Nothing may be added to or removed
 *       from this structure without holding write access to it.
 *     - There is one local spin_lock per sub_sequence, which can be seen
 *       as a sub-domain of the tipc_nametbl_lock domain. It is used only
 *       for translation operations, and is needed because a translation
 *       steps the root of the 'publication' linked list between lookups.
 *       It is always used within the scope of a tipc_nametbl_lock(read).
 *     - A local spin_lock protecting the queue of subscriber events.
 */

struct tipc_net_work {
	struct work_struct work;
	struct net *net;
	u32 addr;
};

static void tipc_net_finalize(struct net *net, u32 addr);

/* Set the node identity and, if given, the 32-bit node address.
 * Fails if a node identity has already been configured.
 */
int tipc_net_init(struct net *net, u8 *node_id, u32 addr)
{
	if (tipc_own_id(net)) {
		pr_info("Cannot configure node identity twice\n");
		return -1;
	}
	pr_info("Started in network mode\n");

	if (node_id)
		tipc_set_node_id(net, node_id);
	if (addr)
		tipc_net_finalize(net, addr);
	return 0;
}

/* Assign the node address exactly once (cmpxchg() guards against races),
 * then rebind named publications, sockets and the link monitor to the
 * new address and publish the configuration service.
 */
static void tipc_net_finalize(struct net *net, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);

	if (cmpxchg(&tn->node_addr, 0, addr))
		return;
	tipc_set_node_addr(net, addr);
	tipc_named_reinit(net);
	tipc_sk_reinit(net);
	tipc_mon_reinit_self(net);
	tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
			     TIPC_CLUSTER_SCOPE, 0, addr);
}

static void tipc_net_finalize_work(struct work_struct *work)
{
	struct tipc_net_work *fwork;

	fwork = container_of(work, struct tipc_net_work, work);
	tipc_net_finalize(fwork->net, fwork->addr);
	kfree(fwork);
}

/* Defer address finalization to a work item, so that it can be
 * requested from contexts where sleeping is not allowed.
 */
void tipc_sched_net_finalize(struct net *net, u32 addr)
{
	struct tipc_net_work *fwork = kzalloc(sizeof(*fwork), GFP_ATOMIC);

	if (!fwork)
		return;
	INIT_WORK(&fwork->work, tipc_net_finalize_work);
	fwork->net = net;
	fwork->addr = addr;
	schedule_work(&fwork->work);
}

/* Leave network mode: stop all bearers and node instances under RTNL. */
void tipc_net_stop(struct net *net)
{
	if (!tipc_own_id(net))
		return;

	rtnl_lock();
	tipc_bearer_stop(net);
	tipc_node_stop(net);
	rtnl_unlock();

	pr_info("Left network mode\n");
}

/* Fill a netlink message with the network id and 128-bit node identity. */
static int __tipc_nl_add_net(struct net *net, struct tipc_nl_msg *msg)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	u64 *w0 = (u64 *)&tn->node_id[0];
	u64 *w1 = (u64 *)&tn->node_id[8];
	struct nlattr *attrs;
	void *hdr;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_NET_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NET);
	if (!attrs)
		goto msg_full;

	if (nla_put_u32(msg->skb, TIPC_NLA_NET_ID, tn->net_id))
		goto attr_msg_full;
	if (nla_put_u64_64bit(msg->skb, TIPC_NLA_NET_NODEID, *w0, 0))
		goto attr_msg_full;
	if (nla_put_u64_64bit(msg->skb, TIPC_NLA_NET_NODEID_W1, *w1, 0))
		goto attr_msg_full;
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

/* Dump the network attributes in response to a TIPC_NL_NET_GET request. */
int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int err;
	int done = cb->args[0];
	struct tipc_nl_msg msg;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	err = __tipc_nl_add_net(net, &msg);
	if (err)
		goto out;

	done = 1;
out:
	cb->args[0] = done;

	return skb->len;
}

/* Apply a TIPC_NL_NET_SET request: update the network id, the legacy
 * 32-bit address or the 128-bit node identity.
 */
int __tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = tipc_net(net);
	int err;

	if (!info->attrs[TIPC_NLA_NET])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_NET_MAX,
					  info->attrs[TIPC_NLA_NET],
					  tipc_nl_net_policy, info->extack);

	if (err)
		return err;

	/* Can't change net id once TIPC has joined a network */
	if (tipc_own_addr(net))
		return -EPERM;

	if (attrs[TIPC_NLA_NET_ID]) {
		u32 val;

		val = nla_get_u32(attrs[TIPC_NLA_NET_ID]);
		if (val < 1 || val > 9999)
			return -EINVAL;

		tn->net_id = val;
	}

	if (attrs[TIPC_NLA_NET_ADDR]) {
		u32 addr;

		addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
		if (!addr)
			return -EINVAL;
		tn->legacy_addr_format = true;
		tipc_net_init(net, NULL, addr);
	}

	if (attrs[TIPC_NLA_NET_NODEID]) {
		u8 node_id[NODE_ID_LEN];
		u64 *w0 = (u64 *)&node_id[0];
		u64 *w1 = (u64 *)&node_id[8];

		if (!attrs[TIPC_NLA_NET_NODEID_W1])
			return -EINVAL;
		*w0 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID]);
		*w1 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID_W1]);
		tipc_net_init(net, node_id, 0);
	}
	return 0;
}

int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
{
	int err;

	rtnl_lock();
	err = __tipc_nl_net_set(skb, info);
	rtnl_unlock();

	return err;
}