/*
 * Monitoring code for network dropped packet alerts
 *
 * Copyright (C) 2009 Neil Horman <nhorman@tuxdriver.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/netlink.h>
#include <linux/net_dropmon.h>
#include <linux/percpu.h>
#include <linux/timer.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/genetlink.h>
#include <net/netevent.h>

#include <trace/events/skb.h>
#include <trace/events/napi.h>

#include <asm/unaligned.h>

#define TRACE_ON 1
#define TRACE_OFF 0

/*
 * Globals: the current trace state and the per-CPU data used to
 * batch drop reports and send up netlink alerts
 */
static int trace_state = TRACE_OFF;
static DEFINE_MUTEX(trace_state_mutex);

struct per_cpu_dm_data {
	spinlock_t lock;
	struct sk_buff *skb;
	struct work_struct dm_alert_work;
	struct timer_list send_timer;
};

struct dm_hw_stat_delta {
	struct net_device *dev;
	unsigned long last_rx;
	struct list_head list;
	struct rcu_head rcu;
	unsigned long last_drop_val;
};

static struct genl_family net_drop_monitor_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = 0,
	.name = "NET_DM",
	.version = 2,
	.maxattr = NET_DM_CMD_MAX,
};

static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_cpu_data);

static int dm_hit_limit = 64;
static int dm_delay = 1;
static unsigned long dm_hw_check_delta = 2*HZ;
static LIST_HEAD(hw_stats_list);

static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
{
	size_t al;
	struct net_dm_alert_msg *msg;
	struct nlattr *nla;
	struct sk_buff *skb;
	unsigned long flags;

	al = sizeof(struct net_dm_alert_msg);
	al += dm_hit_limit * sizeof(struct net_dm_drop_point);
	al += sizeof(struct nlattr);

	skb = genlmsg_new(al, GFP_KERNEL);

	if (skb) {
		genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
				0, NET_DM_CMD_ALERT);
		nla = nla_reserve(skb, NLA_UNSPEC,
				  sizeof(struct net_dm_alert_msg));
		msg = nla_data(nla);
		memset(msg, 0, al);
	} else {
		/* Allocation failed; retry shortly so alerts aren't lost */
		mod_timer(&data->send_timer, jiffies + HZ / 10);
	}

	/* Publish the fresh skb and hand back the old one (may be NULL) */
	spin_lock_irqsave(&data->lock, flags);
	swap(data->skb, skb);
	spin_unlock_irqrestore(&data->lock, flags);

	return skb;
}
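/*
 * For reference, the pre-allocated alert skb built above has this
 * layout (field names per include/linux/net_dropmon.h):
 *
 *	struct nlmsghdr
 *	struct genlmsghdr		(cmd == NET_DM_CMD_ALERT)
 *	struct nlattr			(type == NLA_UNSPEC)
 *	struct net_dm_alert_msg {
 *		__u32 entries;
 *		struct net_dm_drop_point points[0];	(up to dm_hit_limit)
 *	}
 *
 * trace_drop_common() below appends points[] entries in place and
 * grows nla_len accordingly, so the drop path never allocates.
 */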
static void send_dm_alert(struct work_struct *work)
{
	struct sk_buff *skb;
	struct per_cpu_dm_data *data;

	data = container_of(work, struct per_cpu_dm_data, dm_alert_work);

	skb = reset_per_cpu_data(data);

	if (skb)
		genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
}

/*
 * This is the timer function to delay the sending of an alert
 * in the event that more drops will arrive during the
 * hysteresis period.
 */
static void sched_send_work(unsigned long _data)
{
	struct per_cpu_dm_data *data = (struct per_cpu_dm_data *)_data;

	schedule_work(&data->dm_alert_work);
}

static void trace_drop_common(struct sk_buff *skb, void *location)
{
	struct net_dm_alert_msg *msg;
	struct nlmsghdr *nlh;
	struct nlattr *nla;
	int i;
	struct sk_buff *dskb;
	struct per_cpu_dm_data *data;
	unsigned long flags;

	local_irq_save(flags);
	data = &__get_cpu_var(dm_cpu_data);
	spin_lock(&data->lock);
	dskb = data->skb;

	if (!dskb)
		goto out;

	nlh = (struct nlmsghdr *)dskb->data;
	nla = genlmsg_data(nlmsg_data(nlh));
	msg = nla_data(nla);
	/* If this location is already recorded, just bump its count */
	for (i = 0; i < msg->entries; i++) {
		if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) {
			msg->points[i].count++;
			goto out;
		}
	}
	if (msg->entries == dm_hit_limit)
		goto out;
	/*
	 * We need to create a new entry
	 */
	__nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point));
	nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
	memcpy(msg->points[msg->entries].pc, &location, sizeof(void *));
	msg->points[msg->entries].count = 1;
	msg->entries++;

	if (!timer_pending(&data->send_timer)) {
		data->send_timer.expires = jiffies + dm_delay * HZ;
		add_timer(&data->send_timer);
	}

out:
	spin_unlock_irqrestore(&data->lock, flags);
}
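/*
 * Worked example of the batching above: if the same call site frees
 * 1000 skbs within one dm_delay (1 second) window, the first hit
 * allocates a drop point and arms the timer; the remaining 999 only
 * increment points[i].count.  When the timer fires, sched_send_work()
 * schedules send_dm_alert(), and userspace receives a single
 * NET_DM_CMD_ALERT message with entries == 1 and count == 1000 rather
 * than 1000 separate events.
 */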
static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location)
{
	trace_drop_common(skb, location);
}

static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi)
{
	struct dm_hw_stat_delta *new_stat;

	/*
	 * Don't check napi structures with no associated device
	 */
	if (!napi->dev)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(new_stat, &hw_stats_list, list) {
		/*
		 * only add a note to our monitor buffer if:
		 * 1) this is the dev we received on
		 * 2) it's after the last_rx delta
		 * 3) our rx_dropped count has gone up
		 */
		if ((new_stat->dev == napi->dev) &&
		    (time_after(jiffies, new_stat->last_rx + dm_hw_check_delta)) &&
		    (napi->dev->stats.rx_dropped != new_stat->last_drop_val)) {
			trace_drop_common(NULL, NULL);
			new_stat->last_drop_val = napi->dev->stats.rx_dropped;
			new_stat->last_rx = jiffies;
			break;
		}
	}
	rcu_read_unlock();
}

static int set_all_monitor_traces(int state)
{
	int rc = 0;
	struct dm_hw_stat_delta *new_stat = NULL;
	struct dm_hw_stat_delta *temp;

	mutex_lock(&trace_state_mutex);

	if (state == trace_state) {
		rc = -EAGAIN;
		goto out_unlock;
	}

	switch (state) {
	case TRACE_ON:
		if (!try_module_get(THIS_MODULE)) {
			rc = -ENODEV;
			break;
		}

		rc |= register_trace_kfree_skb(trace_kfree_skb_hit, NULL);
		rc |= register_trace_napi_poll(trace_napi_poll_hit, NULL);
		break;

	case TRACE_OFF:
		rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit, NULL);
		rc |= unregister_trace_napi_poll(trace_napi_poll_hit, NULL);

		tracepoint_synchronize_unregister();

		/*
		 * Clean the device list
		 */
		list_for_each_entry_safe(new_stat, temp, &hw_stats_list, list) {
			if (new_stat->dev == NULL) {
				list_del_rcu(&new_stat->list);
				kfree_rcu(new_stat, rcu);
			}
		}

		module_put(THIS_MODULE);

		break;
	default:
		rc = 1;
		break;
	}

	if (!rc)
		trace_state = state;
	else
		rc = -EINPROGRESS;

out_unlock:
	mutex_unlock(&trace_state_mutex);

	return rc;
}

static int net_dm_cmd_config(struct sk_buff *skb,
			struct genl_info *info)
{
	return -ENOTSUPP;
}

static int net_dm_cmd_trace(struct sk_buff *skb,
			struct genl_info *info)
{
	switch (info->genlhdr->cmd) {
	case NET_DM_CMD_START:
		return set_all_monitor_traces(TRACE_ON);
	case NET_DM_CMD_STOP:
		return set_all_monitor_traces(TRACE_OFF);
	}

	return -ENOTSUPP;
}

static int dropmon_net_event(struct notifier_block *ev_block,
			unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct dm_hw_stat_delta *new_stat = NULL;
	struct dm_hw_stat_delta *tmp;

	switch (event) {
	case NETDEV_REGISTER:
		new_stat = kzalloc(sizeof(struct dm_hw_stat_delta), GFP_KERNEL);

		if (!new_stat)
			goto out;

		new_stat->dev = dev;
		new_stat->last_rx = jiffies;
		mutex_lock(&trace_state_mutex);
		list_add_rcu(&new_stat->list, &hw_stats_list);
		mutex_unlock(&trace_state_mutex);
		break;
	case NETDEV_UNREGISTER:
		mutex_lock(&trace_state_mutex);
		list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) {
			if (new_stat->dev == dev) {
				new_stat->dev = NULL;
				if (trace_state == TRACE_OFF) {
					list_del_rcu(&new_stat->list);
					kfree_rcu(new_stat, rcu);
					break;
				}
			}
		}
		mutex_unlock(&trace_state_mutex);
		break;
	}
out:
	return NOTIFY_DONE;
}

static struct genl_ops dropmon_ops[] = {
	{
		.cmd = NET_DM_CMD_CONFIG,
		.doit = net_dm_cmd_config,
	},
	{
		.cmd = NET_DM_CMD_START,
		.doit = net_dm_cmd_trace,
	},
	{
		.cmd = NET_DM_CMD_STOP,
		.doit = net_dm_cmd_trace,
	},
};

static struct notifier_block dropmon_net_notifier = {
	.notifier_call = dropmon_net_event
};
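/*
 * Userspace drives the ops table above over generic netlink; dropwatch
 * is the usual consumer.  A minimal control sketch, assuming libnl-3
 * (not part of this module):
 *
 *	struct nl_sock *sk = nl_socket_alloc();
 *	genl_connect(sk);
 *	int fam = genl_ctrl_resolve(sk, "NET_DM");
 *	struct nl_msg *msg = nlmsg_alloc();
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, fam, 0, 0,
 *		    NET_DM_CMD_START, 2);
 *	nl_send_auto(sk, msg);
 *
 * NET_DM_CMD_STOP works the same way; NET_DM_CMD_CONFIG currently
 * returns -ENOTSUPP.
 */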
static int __init init_net_drop_monitor(void)
{
	struct per_cpu_dm_data *data;
	int cpu, rc;

	pr_info("Initializing network drop monitor service\n");

	if (sizeof(void *) > 8) {
		pr_err("Unable to store program counters on this arch, Drop monitor failed\n");
		return -ENOSPC;
	}

	rc = genl_register_family_with_ops(&net_drop_monitor_family,
					   dropmon_ops,
					   ARRAY_SIZE(dropmon_ops));
	if (rc) {
		pr_err("Could not create drop monitor netlink family\n");
		return rc;
	}

	rc = register_netdevice_notifier(&dropmon_net_notifier);
	if (rc < 0) {
		pr_crit("Failed to register netdevice notifier\n");
		goto out_unreg;
	}

	rc = 0;

	for_each_possible_cpu(cpu) {
		data = &per_cpu(dm_cpu_data, cpu);
		INIT_WORK(&data->dm_alert_work, send_dm_alert);
		init_timer(&data->send_timer);
		data->send_timer.data = (unsigned long)data;
		data->send_timer.function = sched_send_work;
		spin_lock_init(&data->lock);
		reset_per_cpu_data(data);
	}

	goto out;

out_unreg:
	genl_unregister_family(&net_drop_monitor_family);
out:
	return rc;
}

static void exit_net_drop_monitor(void)
{
	struct per_cpu_dm_data *data;
	int cpu;

	BUG_ON(unregister_netdevice_notifier(&dropmon_net_notifier));

	/*
	 * Because of the module_get/put we do in the trace state change path
	 * we are guaranteed not to have any current users when we get here;
	 * all we need to do is make sure that we don't have any running timers
	 * or pending schedule calls
	 */

	for_each_possible_cpu(cpu) {
		data = &per_cpu(dm_cpu_data, cpu);
		del_timer_sync(&data->send_timer);
		cancel_work_sync(&data->dm_alert_work);
		/*
		 * At this point, we should have exclusive access
		 * to this struct and can free the skb inside it
		 */
		kfree_skb(data->skb);
	}

	BUG_ON(genl_unregister_family(&net_drop_monitor_family));
}

module_init(init_net_drop_monitor);
module_exit(exit_net_drop_monitor);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
MODULE_ALIAS_GENL_FAMILY("NET_DM");
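/*
 * On the receive side, a consumer joins the NET_DM_GRP_ALERT multicast
 * group and parses each alert payload.  A minimal sketch of the parse
 * step, assuming the message has already been read into nlh (field
 * names per include/linux/net_dropmon.h; mirrors trace_drop_common()):
 *
 *	struct nlattr *nla = genlmsg_data(nlmsg_data(nlh));
 *	struct net_dm_alert_msg *msg = nla_data(nla);
 *	int i;
 *
 *	for (i = 0; i < msg->entries; i++) {
 *		void *pc;
 *		memcpy(&pc, msg->points[i].pc, sizeof(pc));
 *		printf("%u drops at %p\n", msg->points[i].count, pc);
 *	}
 *
 * A NULL pc marks a hardware drop reported via trace_napi_poll_hit()
 * rather than a kfree_skb() call site.
 */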