/*
 * xt_hashlimit - Netfilter module to limit the number of packets per time
 * separately for each hashbucket (sourceip/sourceport/dstip/dstport)
 *
 * (C) 2003-2004 by Harald Welte <laforge@netfilter.org>
 * (C) 2006-2012 Patrick McHardy <kaber@trash.net>
 * Copyright © CC Computer Consultants GmbH, 2007 - 2008
 *
 * Development of this code was funded by Astaro AG, http://www.astaro.com/
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/in.h>
#include <linux/ip.h>
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
#include <linux/ipv6.h>
#include <net/ipv6.h>
#endif

#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter/xt_hashlimit.h>
#include <linux/mutex.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_DESCRIPTION("Xtables: per hash-bucket rate-limit match");
MODULE_ALIAS("ipt_hashlimit");
MODULE_ALIAS("ip6t_hashlimit");

struct hashlimit_net {
	struct hlist_head	htables;
	struct proc_dir_entry	*ipt_hashlimit;
	struct proc_dir_entry	*ip6t_hashlimit;
};

static int hashlimit_net_id;
static inline struct hashlimit_net *hashlimit_pernet(struct net *net)
{
	return net_generic(net, hashlimit_net_id);
}

/* need to declare this at the top */
static const struct file_operations dl_file_ops;

/* hash table crap */
struct dsthash_dst {
	union {
		struct {
			__be32 src;
			__be32 dst;
		} ip;
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
		struct {
			__be32 src[4];
			__be32 dst[4];
		} ip6;
#endif
	};
	__be16 src_port;
	__be16 dst_port;
};

struct dsthash_ent {
	/* static / read-only parts in the beginning */
	struct hlist_node node;
	struct dsthash_dst dst;

	/* modified structure members in the end */
	spinlock_t lock;
	unsigned long expires;		/* precalculated expiry time */
	struct {
		unsigned long prev;	/* last modification */
		u_int32_t credit;
		u_int32_t credit_cap, cost;
	} rateinfo;
	struct rcu_head rcu;
};

struct xt_hashlimit_htable {
	struct hlist_node node;		/* global list of all htables */
	int use;
	u_int8_t family;
	bool rnd_initialized;

	struct hashlimit_cfg1 cfg;	/* config */

	/* used internally */
	spinlock_t lock;		/* lock for list_head */
	u_int32_t rnd;			/* random seed for hash */
	unsigned int count;		/* number entries in table */
	struct delayed_work gc_work;

	/* seq_file stuff */
	struct proc_dir_entry *pde;
	const char *name;
	struct net *net;

	struct hlist_head hash[0];	/* hashtable itself */
};

static DEFINE_MUTEX(hashlimit_mutex);	/* protects htables list */
static struct kmem_cache *hashlimit_cachep __read_mostly;

static inline bool dst_cmp(const struct dsthash_ent *ent,
			   const struct dsthash_dst *b)
{
	return !memcmp(&ent->dst, b, sizeof(ent->dst));
}

static u_int32_t
hash_dst(const struct xt_hashlimit_htable *ht, const struct dsthash_dst *dst)
{
	u_int32_t hash = jhash2((const u32 *)dst,
				sizeof(*dst)/sizeof(u32),
				ht->rnd);
	/*
	 * Instead of returning hash % ht->cfg.size (implying a divide)
	 * we return the high 32 bits of the (hash * ht->cfg.size) that will
	 * give results between [0 and cfg.size-1] and same hash distribution,
	 * but using a multiply, less expensive than a divide
	 */
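	/*
	 * For instance, with cfg.size == 64 a hash value of 0x40000000
	 * lands in bucket ((u64)0x40000000 * 64) >> 32 == 16, which is
	 * just hash / 2^32 scaled into [0, 63].
	 */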
	return reciprocal_scale(hash, ht->cfg.size);
}

static struct dsthash_ent *
dsthash_find(const struct xt_hashlimit_htable *ht,
	     const struct dsthash_dst *dst)
{
	struct dsthash_ent *ent;
	u_int32_t hash = hash_dst(ht, dst);

	if (!hlist_empty(&ht->hash[hash])) {
		hlist_for_each_entry_rcu(ent, &ht->hash[hash], node)
			if (dst_cmp(ent, dst)) {
				spin_lock(&ent->lock);
				return ent;
			}
	}
	return NULL;
}

/* allocate dsthash_ent, initialize dst, put in htable and lock it */
static struct dsthash_ent *
dsthash_alloc_init(struct xt_hashlimit_htable *ht,
		   const struct dsthash_dst *dst, bool *race)
{
	struct dsthash_ent *ent;

	spin_lock(&ht->lock);

	/* Two or more packets may race to create the same entry in the
	 * hashtable, double check if this packet lost the race.
	 */
	ent = dsthash_find(ht, dst);
	if (ent != NULL) {
		spin_unlock(&ht->lock);
		*race = true;
		return ent;
	}

	/* initialize hash with random val at the time we allocate
	 * the first hashtable entry */
	if (unlikely(!ht->rnd_initialized)) {
		get_random_bytes(&ht->rnd, sizeof(ht->rnd));
		ht->rnd_initialized = true;
	}

	if (ht->cfg.max && ht->count >= ht->cfg.max) {
		/* FIXME: do something. question is what.. */
		net_err_ratelimited("max count of %u reached\n", ht->cfg.max);
		ent = NULL;
	} else
		ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC);
	if (ent) {
		memcpy(&ent->dst, dst, sizeof(ent->dst));
		spin_lock_init(&ent->lock);

		spin_lock(&ent->lock);
		hlist_add_head_rcu(&ent->node, &ht->hash[hash_dst(ht, dst)]);
		ht->count++;
	}
	spin_unlock(&ht->lock);
	return ent;
}

static void dsthash_free_rcu(struct rcu_head *head)
{
	struct dsthash_ent *ent = container_of(head, struct dsthash_ent, rcu);

	kmem_cache_free(hashlimit_cachep, ent);
}

static inline void
dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent)
{
	hlist_del_rcu(&ent->node);
	call_rcu_bh(&ent->rcu, dsthash_free_rcu);
	ht->count--;
}
static void htable_gc(struct work_struct *work);

static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo,
			 u_int8_t family)
{
	struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
	struct xt_hashlimit_htable *hinfo;
	unsigned int size;
	unsigned int i;

	if (minfo->cfg.size) {
		size = minfo->cfg.size;
	} else {
		size = (totalram_pages << PAGE_SHIFT) / 16384 /
		       sizeof(struct list_head);
		if (totalram_pages > 1024 * 1024 * 1024 / PAGE_SIZE)
			size = 8192;
		if (size < 16)
			size = 16;
	}
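	/*
	 * As a rough example: with 1 GiB of RAM, 4 KiB pages and 16-byte
	 * list heads (a typical 64-bit build) this default works out to
	 * 2^30 / 16384 / 16 == 4096 buckets; machines with more than 1 GiB
	 * are clamped to 8192 buckets.
	 */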
	/* FIXME: don't use vmalloc() here or anywhere else -HW */
	hinfo = vmalloc(sizeof(struct xt_hashlimit_htable) +
			sizeof(struct list_head) * size);
	if (hinfo == NULL)
		return -ENOMEM;
	minfo->hinfo = hinfo;

	/* copy match config into hashtable config */
	memcpy(&hinfo->cfg, &minfo->cfg, sizeof(hinfo->cfg));
	hinfo->cfg.size = size;
	if (hinfo->cfg.max == 0)
		hinfo->cfg.max = 8 * hinfo->cfg.size;
	else if (hinfo->cfg.max < hinfo->cfg.size)
		hinfo->cfg.max = hinfo->cfg.size;

	for (i = 0; i < hinfo->cfg.size; i++)
		INIT_HLIST_HEAD(&hinfo->hash[i]);

	hinfo->use = 1;
	hinfo->count = 0;
	hinfo->family = family;
	hinfo->rnd_initialized = false;
	hinfo->name = kstrdup(minfo->name, GFP_KERNEL);
	if (!hinfo->name) {
		vfree(hinfo);
		return -ENOMEM;
	}
	spin_lock_init(&hinfo->lock);

	hinfo->pde = proc_create_data(minfo->name, 0,
		(family == NFPROTO_IPV4) ?
		hashlimit_net->ipt_hashlimit : hashlimit_net->ip6t_hashlimit,
		&dl_file_ops, hinfo);
	if (hinfo->pde == NULL) {
		kfree(hinfo->name);
		vfree(hinfo);
		return -ENOMEM;
	}
	hinfo->net = net;

	INIT_DEFERRABLE_WORK(&hinfo->gc_work, htable_gc);
	queue_delayed_work(system_power_efficient_wq, &hinfo->gc_work,
			   msecs_to_jiffies(hinfo->cfg.gc_interval));

	hlist_add_head(&hinfo->node, &hashlimit_net->htables);

	return 0;
}

static bool select_all(const struct xt_hashlimit_htable *ht,
		       const struct dsthash_ent *he)
{
	return 1;
}

static bool select_gc(const struct xt_hashlimit_htable *ht,
		      const struct dsthash_ent *he)
{
	return time_after_eq(jiffies, he->expires);
}

static void htable_selective_cleanup(struct xt_hashlimit_htable *ht,
			bool (*select)(const struct xt_hashlimit_htable *ht,
				       const struct dsthash_ent *he))
{
	unsigned int i;

	for (i = 0; i < ht->cfg.size; i++) {
		struct dsthash_ent *dh;
		struct hlist_node *n;

		spin_lock_bh(&ht->lock);
		hlist_for_each_entry_safe(dh, n, &ht->hash[i], node) {
			if ((*select)(ht, dh))
				dsthash_free(ht, dh);
		}
		spin_unlock_bh(&ht->lock);
		cond_resched();
	}
}

static void htable_gc(struct work_struct *work)
{
	struct xt_hashlimit_htable *ht;

	ht = container_of(work, struct xt_hashlimit_htable, gc_work.work);

	htable_selective_cleanup(ht, select_gc);

	queue_delayed_work(system_power_efficient_wq,
			   &ht->gc_work, msecs_to_jiffies(ht->cfg.gc_interval));
}

static void htable_remove_proc_entry(struct xt_hashlimit_htable *hinfo)
{
	struct hashlimit_net *hashlimit_net = hashlimit_pernet(hinfo->net);
	struct proc_dir_entry *parent;

	if (hinfo->family == NFPROTO_IPV4)
		parent = hashlimit_net->ipt_hashlimit;
	else
		parent = hashlimit_net->ip6t_hashlimit;

	if (parent != NULL)
		remove_proc_entry(hinfo->name, parent);
}

static void htable_destroy(struct xt_hashlimit_htable *hinfo)
{
	cancel_delayed_work_sync(&hinfo->gc_work);
	htable_remove_proc_entry(hinfo);
	htable_selective_cleanup(hinfo, select_all);
	kfree(hinfo->name);
	vfree(hinfo);
}

static struct xt_hashlimit_htable *htable_find_get(struct net *net,
						   const char *name,
						   u_int8_t family)
{
	struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
	struct xt_hashlimit_htable *hinfo;

	hlist_for_each_entry(hinfo, &hashlimit_net->htables, node) {
		if (!strcmp(name, hinfo->name) &&
		    hinfo->family == family) {
			hinfo->use++;
			return hinfo;
		}
	}
	return NULL;
}

static void htable_put(struct xt_hashlimit_htable *hinfo)
{
	mutex_lock(&hashlimit_mutex);
	if (--hinfo->use == 0) {
		hlist_del(&hinfo->node);
		htable_destroy(hinfo);
	}
	mutex_unlock(&hashlimit_mutex);
}
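
/* Table lifetime in short: hashlimit_mt_check() either takes a reference
 * via htable_find_get() or creates a fresh table with use == 1;
 * hashlimit_mt_destroy() drops the reference through htable_put(), and
 * the final put unlinks the table and tears it down via htable_destroy().
 */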

/* The algorithm used is the Simple Token Bucket Filter (TBF)
 * see net/sched/sch_tbf.c in the linux source tree
 */

/* Rusty: This is my (non-mathematically-inclined) understanding of
   this algorithm.  The `average rate' in jiffies becomes your initial
   amount of credit `credit' and the most credit you can ever have
   `credit_cap'.  The `peak rate' becomes the cost of passing the
   test, `cost'.

   `prev' tracks the last packet hit: you gain one credit per jiffy.
   If you get credit balance more than this, the extra credit is
   discarded.  Every time the match passes, you lose `cost' credits;
   if you don't have that many, the test fails.

   See Alexey's formal explanation in net/sched/sch_tbf.c.

   To get the maximum range, we multiply by this factor (ie. you get N
   credits per jiffy).  We want to allow a rate as low as 1 per day
   (slowest userspace tool allows), which means
   CREDITS_PER_JIFFY*HZ*60*60*24 < 2^32 ie.
*/
#define MAX_CPJ (0xFFFFFFFF / (HZ*60*60*24))

/* Repeated shift and or gives us all 1s, final shift and add 1 gives
 * us the power of 2 below the theoretical max, so GCC simply does a
 * shift. */
#define _POW2_BELOW2(x) ((x)|((x)>>1))
#define _POW2_BELOW4(x) (_POW2_BELOW2(x)|_POW2_BELOW2((x)>>2))
#define _POW2_BELOW8(x) (_POW2_BELOW4(x)|_POW2_BELOW4((x)>>4))
#define _POW2_BELOW16(x) (_POW2_BELOW8(x)|_POW2_BELOW8((x)>>8))
#define _POW2_BELOW32(x) (_POW2_BELOW16(x)|_POW2_BELOW16((x)>>16))
#define POW2_BELOW32(x) ((_POW2_BELOW32(x)>>1) + 1)

#define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ)

/* in byte mode, the lowest possible rate is one packet/second.
 * credit_cap is used as a counter that tells us how many times we can
 * refill the "credits available" counter when it becomes empty.
 */
#define MAX_CPJ_BYTES (0xFFFFFFFF / HZ)
#define CREDITS_PER_JIFFY_BYTES POW2_BELOW32(MAX_CPJ_BYTES)

static u32 xt_hashlimit_len_to_chunks(u32 len)
{
	return (len >> XT_HASHLIMIT_BYTE_SHIFT) + 1;
}

/* Precision saver. */
static u32 user2credits(u32 user)
{
	/* If multiplying would overflow... */
	if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
		/* Divide first. */
		return (user / XT_HASHLIMIT_SCALE) * HZ * CREDITS_PER_JIFFY;

	return (user * HZ * CREDITS_PER_JIFFY) / XT_HASHLIMIT_SCALE;
}
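
/* Worked example, assuming HZ == 1000: MAX_CPJ is 0xFFFFFFFF / 86400000,
 * roughly 49, so CREDITS_PER_JIFFY becomes 32 (the power of two below it)
 * and one second of idle time earns HZ * 32 == 32000 credits.  A limit of
 * one packet per second is typically passed by userspace as
 * avg == XT_HASHLIMIT_SCALE (10000), and user2credits(10000) ==
 * 10000 * 1000 * 32 / 10000 == 32000, i.e. each matching packet then costs
 * exactly one second's worth of credit.
 */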

static u32 user2credits_byte(u32 user)
{
	u64 us = user;
	us *= HZ * CREDITS_PER_JIFFY_BYTES;
	return (u32) (us >> 32);
}

static void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now, u32 mode)
{
	unsigned long delta = now - dh->rateinfo.prev;
	u32 cap;

	if (delta == 0)
		return;

	dh->rateinfo.prev = now;

	if (mode & XT_HASHLIMIT_BYTES) {
		u32 tmp = dh->rateinfo.credit;
		dh->rateinfo.credit += CREDITS_PER_JIFFY_BYTES * delta;
		cap = CREDITS_PER_JIFFY_BYTES * HZ;
		if (tmp >= dh->rateinfo.credit) {/* overflow */
			dh->rateinfo.credit = cap;
			return;
		}
	} else {
		dh->rateinfo.credit += delta * CREDITS_PER_JIFFY;
		cap = dh->rateinfo.credit_cap;
	}
	if (dh->rateinfo.credit > cap)
		dh->rateinfo.credit = cap;
}

static void rateinfo_init(struct dsthash_ent *dh,
			  struct xt_hashlimit_htable *hinfo)
{
	dh->rateinfo.prev = jiffies;
	if (hinfo->cfg.mode & XT_HASHLIMIT_BYTES) {
		dh->rateinfo.credit = CREDITS_PER_JIFFY_BYTES * HZ;
		dh->rateinfo.cost = user2credits_byte(hinfo->cfg.avg);
		dh->rateinfo.credit_cap = hinfo->cfg.burst;
	} else {
		dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
						   hinfo->cfg.burst);
		dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
		dh->rateinfo.credit_cap = dh->rateinfo.credit;
	}
}

static inline __be32 maskl(__be32 a, unsigned int l)
{
	return l ? htonl(ntohl(a) & ~0 << (32 - l)) : 0;
}
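
/* For example, maskl(addr, 24) computes htonl(ntohl(addr) & 0xffffff00),
 * so 192.168.1.77 collapses to 192.168.1.0 and all hosts in that /24 share
 * one bucket; a prefix length of 0 maps every address to the same bucket.
 */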

#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
static void hashlimit_ipv6_mask(__be32 *i, unsigned int p)
{
	switch (p) {
	case 0 ... 31:
		i[0] = maskl(i[0], p);
		i[1] = i[2] = i[3] = 0;
		break;
	case 32 ... 63:
		i[1] = maskl(i[1], p - 32);
		i[2] = i[3] = 0;
		break;
	case 64 ... 95:
		i[2] = maskl(i[2], p - 64);
		i[3] = 0;
		break;
	case 96 ... 127:
		i[3] = maskl(i[3], p - 96);
		break;
	case 128:
		break;
	}
}
#endif

static int
hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
		   struct dsthash_dst *dst,
		   const struct sk_buff *skb, unsigned int protoff)
{
	__be16 _ports[2], *ports;
	u8 nexthdr;
	int poff;

	memset(dst, 0, sizeof(*dst));

	switch (hinfo->family) {
	case NFPROTO_IPV4:
		if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP)
			dst->ip.dst = maskl(ip_hdr(skb)->daddr,
					    hinfo->cfg.dstmask);
		if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP)
			dst->ip.src = maskl(ip_hdr(skb)->saddr,
					    hinfo->cfg.srcmask);

		if (!(hinfo->cfg.mode &
		      (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT)))
			return 0;
		nexthdr = ip_hdr(skb)->protocol;
		break;
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
	case NFPROTO_IPV6:
	{
		__be16 frag_off;

		if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP) {
			memcpy(&dst->ip6.dst, &ipv6_hdr(skb)->daddr,
			       sizeof(dst->ip6.dst));
			hashlimit_ipv6_mask(dst->ip6.dst, hinfo->cfg.dstmask);
		}
		if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP) {
			memcpy(&dst->ip6.src, &ipv6_hdr(skb)->saddr,
			       sizeof(dst->ip6.src));
			hashlimit_ipv6_mask(dst->ip6.src, hinfo->cfg.srcmask);
		}

		if (!(hinfo->cfg.mode &
		      (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT)))
			return 0;
		nexthdr = ipv6_hdr(skb)->nexthdr;
		protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, &frag_off);
		if ((int)protoff < 0)
			return -1;
		break;
	}
#endif
	default:
		BUG();
		return 0;
	}

	poff = proto_ports_offset(nexthdr);
	if (poff >= 0) {
		ports = skb_header_pointer(skb, protoff + poff, sizeof(_ports),
					   &_ports);
	} else {
		_ports[0] = _ports[1] = 0;
		ports = _ports;
	}
	if (!ports)
		return -1;
	if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SPT)
		dst->src_port = ports[0];
	if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DPT)
		dst->dst_port = ports[1];
	return 0;
}

static u32 hashlimit_byte_cost(unsigned int len, struct dsthash_ent *dh)
{
	u64 tmp = xt_hashlimit_len_to_chunks(len);
	tmp = tmp * dh->rateinfo.cost;

	if (unlikely(tmp > CREDITS_PER_JIFFY_BYTES * HZ))
		tmp = CREDITS_PER_JIFFY_BYTES * HZ;

	if (dh->rateinfo.credit < tmp && dh->rateinfo.credit_cap) {
		dh->rateinfo.credit_cap--;
		dh->rateinfo.credit = CREDITS_PER_JIFFY_BYTES * HZ;
	}
	return (u32) tmp;
}
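
/* In byte mode a packet is accounted in chunks: with the usual 16-byte
 * chunk size (XT_HASHLIMIT_BYTE_SHIFT == 4) a 1500-byte packet counts as
 * (1500 >> 4) + 1 == 94 chunks and therefore costs 94 * rateinfo.cost
 * credits, capped at one second's worth of byte credits.
 */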

static bool
hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_hashlimit_mtinfo1 *info = par->matchinfo;
	struct xt_hashlimit_htable *hinfo = info->hinfo;
	unsigned long now = jiffies;
	struct dsthash_ent *dh;
	struct dsthash_dst dst;
	bool race = false;
	u32 cost;

	if (hashlimit_init_dst(hinfo, &dst, skb, par->thoff) < 0)
		goto hotdrop;

	rcu_read_lock_bh();
	dh = dsthash_find(hinfo, &dst);
	if (dh == NULL) {
		dh = dsthash_alloc_init(hinfo, &dst, &race);
		if (dh == NULL) {
			rcu_read_unlock_bh();
			goto hotdrop;
		} else if (race) {
			/* Already got an entry, update expiration timeout */
			dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
			rateinfo_recalc(dh, now, hinfo->cfg.mode);
		} else {
			dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire);
			rateinfo_init(dh, hinfo);
		}
	} else {
		/* update expiration timeout */
		dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
		rateinfo_recalc(dh, now, hinfo->cfg.mode);
	}

	if (info->cfg.mode & XT_HASHLIMIT_BYTES)
		cost = hashlimit_byte_cost(skb->len, dh);
	else
		cost = dh->rateinfo.cost;

	if (dh->rateinfo.credit >= cost) {
		/* below the limit */
		dh->rateinfo.credit -= cost;
		spin_unlock(&dh->lock);
		rcu_read_unlock_bh();
		return !(info->cfg.mode & XT_HASHLIMIT_INVERT);
	}

	spin_unlock(&dh->lock);
	rcu_read_unlock_bh();
	/* default match is underlimit - so over the limit, we need to invert */
	return info->cfg.mode & XT_HASHLIMIT_INVERT;

 hotdrop:
	par->hotdrop = true;
	return false;
}
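
/* As a usage sketch (userspace side, not part of this module), a per-source
 * limit on port 80 might be configured as:
 *
 *   iptables -A INPUT -p tcp --dport 80 -m hashlimit \
 *            --hashlimit-name http --hashlimit-mode srcip \
 *            --hashlimit-upto 50/sec --hashlimit-burst 20 -j ACCEPT
 *
 * Each source address then gets its own token bucket in the "http" table,
 * visible under /proc/net/ipt_hashlimit/http.
 */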

static int hashlimit_mt_check(const struct xt_mtchk_param *par)
{
	struct net *net = par->net;
	struct xt_hashlimit_mtinfo1 *info = par->matchinfo;
	int ret;

	if (info->cfg.gc_interval == 0 || info->cfg.expire == 0)
		return -EINVAL;
	if (info->name[sizeof(info->name)-1] != '\0')
		return -EINVAL;
	if (par->family == NFPROTO_IPV4) {
		if (info->cfg.srcmask > 32 || info->cfg.dstmask > 32)
			return -EINVAL;
	} else {
		if (info->cfg.srcmask > 128 || info->cfg.dstmask > 128)
			return -EINVAL;
	}

	if (info->cfg.mode & ~XT_HASHLIMIT_ALL) {
		pr_info("Unknown mode mask %X, kernel too old?\n",
			info->cfg.mode);
		return -EINVAL;
	}

	/* Check for overflow. */
	if (info->cfg.mode & XT_HASHLIMIT_BYTES) {
		if (user2credits_byte(info->cfg.avg) == 0) {
			pr_info("overflow, rate too high: %u\n", info->cfg.avg);
			return -EINVAL;
		}
	} else if (info->cfg.burst == 0 ||
		   user2credits(info->cfg.avg * info->cfg.burst) <
		   user2credits(info->cfg.avg)) {
			pr_info("overflow, try lower: %u/%u\n",
				info->cfg.avg, info->cfg.burst);
			return -ERANGE;
	}

	mutex_lock(&hashlimit_mutex);
	info->hinfo = htable_find_get(net, info->name, par->family);
	if (info->hinfo == NULL) {
		ret = htable_create(net, info, par->family);
		if (ret < 0) {
			mutex_unlock(&hashlimit_mutex);
			return ret;
		}
	}
	mutex_unlock(&hashlimit_mutex);
	return 0;
}

static void hashlimit_mt_destroy(const struct xt_mtdtor_param *par)
{
	const struct xt_hashlimit_mtinfo1 *info = par->matchinfo;

	htable_put(info->hinfo);
}

static struct xt_match hashlimit_mt_reg[] __read_mostly = {
	{
		.name       = "hashlimit",
		.revision   = 1,
		.family     = NFPROTO_IPV4,
		.match      = hashlimit_mt,
		.matchsize  = sizeof(struct xt_hashlimit_mtinfo1),
		.checkentry = hashlimit_mt_check,
		.destroy    = hashlimit_mt_destroy,
		.me         = THIS_MODULE,
	},
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
	{
		.name       = "hashlimit",
		.revision   = 1,
		.family     = NFPROTO_IPV6,
		.match      = hashlimit_mt,
		.matchsize  = sizeof(struct xt_hashlimit_mtinfo1),
		.checkentry = hashlimit_mt_check,
		.destroy    = hashlimit_mt_destroy,
		.me         = THIS_MODULE,
	},
#endif
};

/* PROC stuff */
static void *dl_seq_start(struct seq_file *s, loff_t *pos)
	__acquires(htable->lock)
{
	struct xt_hashlimit_htable *htable = s->private;
	unsigned int *bucket;

	spin_lock_bh(&htable->lock);
	if (*pos >= htable->cfg.size)
		return NULL;

	bucket = kmalloc(sizeof(unsigned int), GFP_ATOMIC);
	if (!bucket)
		return ERR_PTR(-ENOMEM);

	*bucket = *pos;
	return bucket;
}

static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct xt_hashlimit_htable *htable = s->private;
	unsigned int *bucket = (unsigned int *)v;

	*pos = ++(*bucket);
	if (*pos >= htable->cfg.size) {
		kfree(v);
		return NULL;
	}
	return bucket;
}

static void dl_seq_stop(struct seq_file *s, void *v)
	__releases(htable->lock)
{
	struct xt_hashlimit_htable *htable = s->private;
	unsigned int *bucket = (unsigned int *)v;

	if (!IS_ERR(bucket))
		kfree(bucket);
	spin_unlock_bh(&htable->lock);
}

static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
			    struct seq_file *s)
{
	const struct xt_hashlimit_htable *ht = s->private;

	spin_lock(&ent->lock);
	/* recalculate to show accurate numbers */
	rateinfo_recalc(ent, jiffies, ht->cfg.mode);

	switch (family) {
	case NFPROTO_IPV4:
		seq_printf(s, "%ld %pI4:%u->%pI4:%u %u %u %u\n",
			   (long)(ent->expires - jiffies)/HZ,
			   &ent->dst.ip.src,
			   ntohs(ent->dst.src_port),
			   &ent->dst.ip.dst,
			   ntohs(ent->dst.dst_port),
			   ent->rateinfo.credit, ent->rateinfo.credit_cap,
			   ent->rateinfo.cost);
		break;
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
	case NFPROTO_IPV6:
		seq_printf(s, "%ld %pI6:%u->%pI6:%u %u %u %u\n",
			   (long)(ent->expires - jiffies)/HZ,
			   &ent->dst.ip6.src,
			   ntohs(ent->dst.src_port),
			   &ent->dst.ip6.dst,
			   ntohs(ent->dst.dst_port),
			   ent->rateinfo.credit, ent->rateinfo.credit_cap,
			   ent->rateinfo.cost);
		break;
#endif
	default:
		BUG();
	}
	spin_unlock(&ent->lock);
	return seq_has_overflowed(s);
}
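
/* Each line of /proc/net/ipt_hashlimit/<name> (ip6t_hashlimit for IPv6)
 * therefore reads "<expires-in-s> <src>:<sport>-><dst>:<dport> <credit>
 * <credit_cap> <cost>"; a fresh srcip-only entry for a 1 pkt/s limit with
 * HZ == 1000 might look like:
 *
 *   9 192.0.2.1:0->0.0.0.0:0 32000 32000 32000
 */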

static int dl_seq_show(struct seq_file *s, void *v)
{
	struct xt_hashlimit_htable *htable = s->private;
	unsigned int *bucket = (unsigned int *)v;
	struct dsthash_ent *ent;

	if (!hlist_empty(&htable->hash[*bucket])) {
		hlist_for_each_entry(ent, &htable->hash[*bucket], node)
			if (dl_seq_real_show(ent, htable->family, s))
				return -1;
	}
	return 0;
}

static const struct seq_operations dl_seq_ops = {
	.start = dl_seq_start,
	.next  = dl_seq_next,
	.stop  = dl_seq_stop,
	.show  = dl_seq_show
};

static int dl_proc_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &dl_seq_ops);

	if (!ret) {
		struct seq_file *sf = file->private_data;
		sf->private = PDE_DATA(inode);
	}
	return ret;
}

static const struct file_operations dl_file_ops = {
	.owner   = THIS_MODULE,
	.open    = dl_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release
};

static int __net_init hashlimit_proc_net_init(struct net *net)
{
	struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);

	hashlimit_net->ipt_hashlimit = proc_mkdir("ipt_hashlimit", net->proc_net);
	if (!hashlimit_net->ipt_hashlimit)
		return -ENOMEM;
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
	hashlimit_net->ip6t_hashlimit = proc_mkdir("ip6t_hashlimit", net->proc_net);
	if (!hashlimit_net->ip6t_hashlimit) {
		remove_proc_entry("ipt_hashlimit", net->proc_net);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void __net_exit hashlimit_proc_net_exit(struct net *net)
{
	struct xt_hashlimit_htable *hinfo;
	struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);

	/* hashlimit_net_exit() is called before hashlimit_mt_destroy().
	 * Make sure that the parent ipt_hashlimit and ip6t_hashlimit proc
	 * entries are empty before trying to remove them.
	 */
	mutex_lock(&hashlimit_mutex);
	hlist_for_each_entry(hinfo, &hashlimit_net->htables, node)
		htable_remove_proc_entry(hinfo);
	hashlimit_net->ipt_hashlimit = NULL;
	hashlimit_net->ip6t_hashlimit = NULL;
	mutex_unlock(&hashlimit_mutex);

	remove_proc_entry("ipt_hashlimit", net->proc_net);
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
	remove_proc_entry("ip6t_hashlimit", net->proc_net);
#endif
}

static int __net_init hashlimit_net_init(struct net *net)
{
	struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);

	INIT_HLIST_HEAD(&hashlimit_net->htables);
	return hashlimit_proc_net_init(net);
}

static void __net_exit hashlimit_net_exit(struct net *net)
{
	hashlimit_proc_net_exit(net);
}

static struct pernet_operations hashlimit_net_ops = {
	.init = hashlimit_net_init,
	.exit = hashlimit_net_exit,
	.id   = &hashlimit_net_id,
	.size = sizeof(struct hashlimit_net),
};

static int __init hashlimit_mt_init(void)
{
	int err;

	err = register_pernet_subsys(&hashlimit_net_ops);
	if (err < 0)
		return err;
	err = xt_register_matches(hashlimit_mt_reg,
				  ARRAY_SIZE(hashlimit_mt_reg));
	if (err < 0)
		goto err1;

	err = -ENOMEM;
	hashlimit_cachep = kmem_cache_create("xt_hashlimit",
					     sizeof(struct dsthash_ent), 0, 0,
					     NULL);
	if (!hashlimit_cachep) {
		pr_warn("unable to create slab cache\n");
		goto err2;
	}
	return 0;

err2:
	xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg));
err1:
	unregister_pernet_subsys(&hashlimit_net_ops);
	return err;

}

static void __exit hashlimit_mt_exit(void)
{
	xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg));
	unregister_pernet_subsys(&hashlimit_net_ops);

	rcu_barrier_bh();
	kmem_cache_destroy(hashlimit_cachep);
}

module_init(hashlimit_mt_init);
module_exit(hashlimit_mt_exit);