/*
 * Copyright (c) 2007 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/types.h>
#include <linux/rbtree.h>

#include <asm-generic/bitops/le.h>

#include "rds.h"

/*
 * This file implements the receive side of the unconventional congestion
 * management in RDS.
 *
 * Messages waiting in the receive queue on the receiving socket are accounted
 * against the socket's SO_RCVBUF option value.  Only the payload bytes in the
 * message are accounted for.  If the number of bytes queued equals or exceeds
 * rcvbuf then the socket is congested.  All sends attempted to this socket's
 * address should block or return -EWOULDBLOCK.
 *
 * Applications are expected to be reasonably tuned such that this situation
 * very rarely occurs.  Encountering this "back-pressure" is considered an
 * application bug.
 *
 * This is implemented by having each node maintain bitmaps which indicate
 * which ports on bound addresses are congested.  As the bitmap changes it is
 * sent through all the connections that terminate in the local address of
 * the bitmap that changed.
 *
 * The bitmaps are allocated as connections are brought up.  This avoids
 * allocation in the interrupt handling path which queues messages on sockets.
 * The dense bitmaps let transports send the entire bitmap on any bitmap
 * change reasonably efficiently.  This is much easier to implement than some
 * finer-grained communication of per-port congestion.  The sender does a very
 * inexpensive bit test to see whether the port it's about to send to is
 * congested or not.
 */
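
/*
 * For illustration: the map is a dense bitmap with one bit per 16-bit port,
 * so RDS_CONG_MAP_BYTES covers all 65536 ports.  A port is located in the
 * map exactly as rds_cong_set_bit() below does it (sketch only):
 *
 *      i   = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;   (page index)
 *      off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;   (bit within page)
 *      generic___set_le_bit(off, (void *)map->m_page_addrs[i]);
 */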

/*
 * Interaction with poll is a tad tricky.  We want all processes stuck in
 * poll to wake up and check whether a congested destination became
 * uncongested.  The really sad thing is that we have no idea which
 * destinations the application wants to send to - we don't even know which
 * rds_connections are involved.  So until we implement a more flexible rds
 * poll interface, we have to make do with this:
 * We maintain a global counter that is incremented each time a congestion
 * map update is received.  Each rds socket tracks this value, and if
 * rds_poll finds that the saved generation number is smaller than the
 * global generation number, it wakes up the process.
 */
static atomic_t rds_cong_generation = ATOMIC_INIT(0);

/*
 * Congestion monitoring
 */
static LIST_HEAD(rds_cong_monitor);
static DEFINE_RWLOCK(rds_cong_monitor_lock);

/*
 * Yes, a global lock.  It's used so infrequently that it's worth keeping it
 * global to simplify the locking.  It's only used in the following
 * circumstances:
 *
 *  - on connection buildup to associate a conn with its maps
 *  - on map changes to inform conns of a new map to send
 *
 * It's sadly ordered under the socket callback lock and the connection lock.
 * Receive paths can mark ports congested from interrupt context so the
 * lock masks interrupts.
 */
static DEFINE_SPINLOCK(rds_cong_lock);
static struct rb_root rds_cong_tree = RB_ROOT;

static struct rds_cong_map *rds_cong_tree_walk(__be32 addr,
                                               struct rds_cong_map *insert)
{
        struct rb_node **p = &rds_cong_tree.rb_node;
        struct rb_node *parent = NULL;
        struct rds_cong_map *map;

        while (*p) {
                parent = *p;
                map = rb_entry(parent, struct rds_cong_map, m_rb_node);

                if (addr < map->m_addr)
                        p = &(*p)->rb_left;
                else if (addr > map->m_addr)
                        p = &(*p)->rb_right;
                else
                        return map;
        }

        if (insert) {
                rb_link_node(&insert->m_rb_node, parent, p);
                rb_insert_color(&insert->m_rb_node, &rds_cong_tree);
        }
        return NULL;
}

/*
 * There is only ever one bitmap for any address.  Connections try to
 * allocate these bitmaps, getting pointers to them in the process.  The
 * bitmaps are only ever freed when the module is removed, after all
 * connections have been freed.
 */
static struct rds_cong_map *rds_cong_from_addr(__be32 addr)
{
        struct rds_cong_map *map;
        struct rds_cong_map *ret = NULL;
        unsigned long zp;
        unsigned long i;
        unsigned long flags;

        map = kzalloc(sizeof(struct rds_cong_map), GFP_KERNEL);
        if (map == NULL)
                return NULL;

        map->m_addr = addr;
        init_waitqueue_head(&map->m_waitq);
        INIT_LIST_HEAD(&map->m_conn_list);

        for (i = 0; i < RDS_CONG_MAP_PAGES; i++) {
                zp = get_zeroed_page(GFP_KERNEL);
                if (zp == 0)
                        goto out;
                map->m_page_addrs[i] = zp;
        }

        spin_lock_irqsave(&rds_cong_lock, flags);
        ret = rds_cong_tree_walk(addr, map);
        spin_unlock_irqrestore(&rds_cong_lock, flags);

        if (ret == NULL) {
                ret = map;
                map = NULL;
        }

out:
        if (map) {
                for (i = 0; i < RDS_CONG_MAP_PAGES && map->m_page_addrs[i]; i++)
                        free_page(map->m_page_addrs[i]);
                kfree(map);
        }

        rdsdebug("map %p for addr %x\n", ret, be32_to_cpu(addr));

        return ret;
}
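
/*
 * Sketch of the expected setup ordering, roughly as rds_conn_create() in
 * conn.c pairs the calls below (error handling elided, not compiled here):
 *
 *      ret = rds_cong_get_maps(conn);      find or allocate both bitmaps
 *      if (ret)
 *              goto out;
 *      ...
 *      rds_cong_add_conn(conn);            once the conn is in the hash
 */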

/*
 * Put the conn on its local map's list.  This is called when the conn is
 * really added to the hash.  It's nested under the rds_conn_lock, sadly.
 */
void rds_cong_add_conn(struct rds_connection *conn)
{
        unsigned long flags;

        rdsdebug("conn %p now on map %p\n", conn, conn->c_lcong);
        spin_lock_irqsave(&rds_cong_lock, flags);
        list_add_tail(&conn->c_map_item, &conn->c_lcong->m_conn_list);
        spin_unlock_irqrestore(&rds_cong_lock, flags);
}

void rds_cong_remove_conn(struct rds_connection *conn)
{
        unsigned long flags;

        rdsdebug("removing conn %p from map %p\n", conn, conn->c_lcong);
        spin_lock_irqsave(&rds_cong_lock, flags);
        list_del_init(&conn->c_map_item);
        spin_unlock_irqrestore(&rds_cong_lock, flags);
}

int rds_cong_get_maps(struct rds_connection *conn)
{
        conn->c_lcong = rds_cong_from_addr(conn->c_laddr);
        conn->c_fcong = rds_cong_from_addr(conn->c_faddr);

        if (conn->c_lcong == NULL || conn->c_fcong == NULL)
                return -ENOMEM;

        return 0;
}

void rds_cong_queue_updates(struct rds_cong_map *map)
{
        struct rds_connection *conn;
        unsigned long flags;

        spin_lock_irqsave(&rds_cong_lock, flags);

        list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
                if (conn->c_loopback)
                        continue;
                if (!test_and_set_bit(0, &conn->c_map_queued)) {
                        rds_stats_inc(s_cong_update_queued);
                        queue_delayed_work(rds_wq, &conn->c_send_w, 0);
                }
        }

        spin_unlock_irqrestore(&rds_cong_lock, flags);
}

void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
{
        rdsdebug("waking map %p for %pI4\n",
                 map, &map->m_addr);
        rds_stats_inc(s_cong_update_received);
        atomic_inc(&rds_cong_generation);
        if (waitqueue_active(&map->m_waitq))
                wake_up(&map->m_waitq);
        if (waitqueue_active(&rds_poll_waitq))
                wake_up_all(&rds_poll_waitq);

        if (portmask && !list_empty(&rds_cong_monitor)) {
                unsigned long flags;
                struct rds_sock *rs;

                read_lock_irqsave(&rds_cong_monitor_lock, flags);
                list_for_each_entry(rs, &rds_cong_monitor, rs_cong_list) {
                        spin_lock(&rs->rs_lock);
                        rs->rs_cong_notify |= (rs->rs_cong_mask & portmask);
                        rs->rs_cong_mask &= ~portmask;
                        spin_unlock(&rs->rs_lock);
                        if (rs->rs_cong_notify)
                                rds_wake_sk_sleep(rs);
                }
                read_unlock_irqrestore(&rds_cong_monitor_lock, flags);
        }
}
EXPORT_SYMBOL_GPL(rds_cong_map_updated);

int rds_cong_updated_since(unsigned long *recent)
{
        unsigned long gen = atomic_read(&rds_cong_generation);

        if (likely(*recent == gen))
                return 0;
        *recent = gen;
        return 1;
}
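
/*
 * Sketch of the intended caller: rds_poll() in af_rds.c keeps the last-seen
 * generation in the socket (rs->rs_cong_track) and does roughly
 *
 *      poll_wait(file, &rds_poll_waitq, wait);
 *      if (rds_cong_updated_since(&rs->rs_cong_track))
 *              mask |= (POLLIN | POLLRDNORM);
 *
 * so each polling socket notices a map update at most once per generation.
 */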

/*
 * We're called under the locking that protects the socket's receive buffer
 * consumption.  This makes it a lot easier for the caller to only call us
 * when it knows that an existing set bit needs to be cleared, and vice versa.
 * We can't block and we need to deal with concurrent sockets working against
 * the same per-address map.
 */
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port)
{
        unsigned long i;
        unsigned long off;

        rdsdebug("setting congestion for %pI4:%u in map %p\n",
                 &map->m_addr, ntohs(port), map);

        i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
        off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;

        generic___set_le_bit(off, (void *)map->m_page_addrs[i]);
}

void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
{
        unsigned long i;
        unsigned long off;

        rdsdebug("clearing congestion for %pI4:%u in map %p\n",
                 &map->m_addr, ntohs(port), map);

        i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
        off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;

        generic___clear_le_bit(off, (void *)map->m_page_addrs[i]);
}

static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port)
{
        unsigned long i;
        unsigned long off;

        i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
        off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;

        return generic_test_le_bit(off, (void *)map->m_page_addrs[i]);
}

void rds_cong_add_socket(struct rds_sock *rs)
{
        unsigned long flags;

        write_lock_irqsave(&rds_cong_monitor_lock, flags);
        if (list_empty(&rs->rs_cong_list))
                list_add(&rs->rs_cong_list, &rds_cong_monitor);
        write_unlock_irqrestore(&rds_cong_monitor_lock, flags);
}

void rds_cong_remove_socket(struct rds_sock *rs)
{
        unsigned long flags;
        struct rds_cong_map *map;

        write_lock_irqsave(&rds_cong_monitor_lock, flags);
        list_del_init(&rs->rs_cong_list);
        write_unlock_irqrestore(&rds_cong_monitor_lock, flags);

        /* update congestion map for now-closed port */
        spin_lock_irqsave(&rds_cong_lock, flags);
        map = rds_cong_tree_walk(rs->rs_bound_addr, NULL);
        spin_unlock_irqrestore(&rds_cong_lock, flags);

        if (map && rds_cong_test_bit(map, rs->rs_bound_port)) {
                rds_cong_clear_bit(map, rs->rs_bound_port);
                rds_cong_queue_updates(map);
        }
}

int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock,
                  struct rds_sock *rs)
{
        if (!rds_cong_test_bit(map, port))
                return 0;
        if (nonblock) {
                if (rs && rs->rs_cong_monitor) {
                        unsigned long flags;

                        /* It would have been nice to have an atomic set_bit on
                         * a uint64_t. */
                        spin_lock_irqsave(&rs->rs_lock, flags);
                        rs->rs_cong_mask |= RDS_CONG_MONITOR_MASK(ntohs(port));
                        spin_unlock_irqrestore(&rs->rs_lock, flags);

                        /* Test again - a congestion update may have arrived in
                         * the meantime. */
                        if (!rds_cong_test_bit(map, port))
                                return 0;
                }
                rds_stats_inc(s_cong_send_error);
                return -ENOBUFS;
        }

        rds_stats_inc(s_cong_send_blocked);
        rdsdebug("waiting on map %p for port %u\n", map, be16_to_cpu(port));

        return wait_event_interruptible(map->m_waitq,
                                        !rds_cong_test_bit(map, port));
}

void rds_cong_exit(void)
{
        struct rb_node *node;
        struct rds_cong_map *map;
        unsigned long i;

        while ((node = rb_first(&rds_cong_tree))) {
                map = rb_entry(node, struct rds_cong_map, m_rb_node);
                rdsdebug("freeing map %p\n", map);
                rb_erase(&map->m_rb_node, &rds_cong_tree);
                for (i = 0; i < RDS_CONG_MAP_PAGES && map->m_page_addrs[i]; i++)
                        free_page(map->m_page_addrs[i]);
                kfree(map);
        }
}
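
/*
 * Sketch of the send-path use of rds_cong_wait() above, roughly as
 * rds_sendmsg() in send.c calls it (error path abbreviated):
 *
 *      ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
 *      if (ret)
 *              goto out;       -ENOBUFS when nonblocking, -ERESTARTSYS
 *                              when the blocking wait is interrupted
 */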

/*
 * Allocate an RDS message containing a congestion update.
 */
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn)
{
        struct rds_cong_map *map = conn->c_lcong;
        struct rds_message *rm;

        rm = rds_message_map_pages(map->m_page_addrs, RDS_CONG_MAP_BYTES);
        if (!IS_ERR(rm))
                rm->m_inc.i_hdr.h_flags = RDS_FLAG_CONG_BITMAP;

        return rm;
}
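
/*
 * Sketch of the transmit-side consumer, roughly what rds_send_xmit() in
 * send.c does with the c_map_queued flag set by rds_cong_queue_updates():
 *
 *      if (test_and_clear_bit(0, &conn->c_map_queued)) {
 *              rm = rds_cong_update_alloc(conn);
 *              if (IS_ERR(rm))
 *                      break;
 *              ... transmit rm ahead of queued data messages ...
 *      }
 */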