Loading...
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Monitoring code for network dropped packet alerts
4 *
5 * Copyright (C) 2009 Neil Horman <nhorman@tuxdriver.com>
6 */
7
8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
10#include <linux/netdevice.h>
11#include <linux/etherdevice.h>
12#include <linux/string.h>
13#include <linux/if_arp.h>
14#include <linux/inetdevice.h>
15#include <linux/inet.h>
16#include <linux/interrupt.h>
17#include <linux/netpoll.h>
18#include <linux/sched.h>
19#include <linux/delay.h>
20#include <linux/types.h>
21#include <linux/workqueue.h>
22#include <linux/netlink.h>
23#include <linux/net_dropmon.h>
24#include <linux/bitfield.h>
25#include <linux/percpu.h>
26#include <linux/timer.h>
27#include <linux/bitops.h>
28#include <linux/slab.h>
29#include <linux/module.h>
30#include <net/genetlink.h>
31#include <net/netevent.h>
32#include <net/flow_offload.h>
33#include <net/dropreason.h>
34#include <net/devlink.h>
35
36#include <trace/events/skb.h>
37#include <trace/events/napi.h>
38#include <trace/events/devlink.h>
39
40#include <asm/unaligned.h>
41
42#define TRACE_ON 1
43#define TRACE_OFF 0
44
45/*
46 * Globals, our netlink socket pointer
47 * and the work handle that will send up
48 * netlink alerts
49 */
50static int trace_state = TRACE_OFF;
51static bool monitor_hw;
52
53/* net_dm_mutex
54 *
55 * An overall lock guarding every operation coming from userspace.
56 */
57static DEFINE_MUTEX(net_dm_mutex);
58
59struct net_dm_stats {
60 u64_stats_t dropped;
61 struct u64_stats_sync syncp;
62};
63
64#define NET_DM_MAX_HW_TRAP_NAME_LEN 40
65
66struct net_dm_hw_entry {
67 char trap_name[NET_DM_MAX_HW_TRAP_NAME_LEN];
68 u32 count;
69};
70
71struct net_dm_hw_entries {
72 u32 num_entries;
73 struct net_dm_hw_entry entries[];
74};
75
76struct per_cpu_dm_data {
77 spinlock_t lock; /* Protects 'skb', 'hw_entries' and
78 * 'send_timer'
79 */
80 union {
81 struct sk_buff *skb;
82 struct net_dm_hw_entries *hw_entries;
83 };
84 struct sk_buff_head drop_queue;
85 struct work_struct dm_alert_work;
86 struct timer_list send_timer;
87 struct net_dm_stats stats;
88};
89
90struct dm_hw_stat_delta {
91 unsigned long last_rx;
92 unsigned long last_drop_val;
93 struct rcu_head rcu;
94};
95
96static struct genl_family net_drop_monitor_family;
97
98static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_cpu_data);
99static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_hw_cpu_data);
100
101static int dm_hit_limit = 64;
102static int dm_delay = 1;
103static unsigned long dm_hw_check_delta = 2*HZ;
104
105static enum net_dm_alert_mode net_dm_alert_mode = NET_DM_ALERT_MODE_SUMMARY;
106static u32 net_dm_trunc_len;
107static u32 net_dm_queue_len = 1000;
108
109struct net_dm_alert_ops {
110 void (*kfree_skb_probe)(void *ignore, struct sk_buff *skb,
111 void *location,
112 enum skb_drop_reason reason);
113 void (*napi_poll_probe)(void *ignore, struct napi_struct *napi,
114 int work, int budget);
115 void (*work_item_func)(struct work_struct *work);
116 void (*hw_work_item_func)(struct work_struct *work);
117 void (*hw_trap_probe)(void *ignore, const struct devlink *devlink,
118 struct sk_buff *skb,
119 const struct devlink_trap_metadata *metadata);
120};
121
122struct net_dm_skb_cb {
123 union {
124 struct devlink_trap_metadata *hw_metadata;
125 void *pc;
126 };
127 enum skb_drop_reason reason;
128};
129
130#define NET_DM_SKB_CB(__skb) ((struct net_dm_skb_cb *)&((__skb)->cb[0]))
131
132static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
133{
134 size_t al;
135 struct net_dm_alert_msg *msg;
136 struct nlattr *nla;
137 struct sk_buff *skb;
138 unsigned long flags;
139 void *msg_header;
140
141 al = sizeof(struct net_dm_alert_msg);
142 al += dm_hit_limit * sizeof(struct net_dm_drop_point);
143 al += sizeof(struct nlattr);
144
145 skb = genlmsg_new(al, GFP_KERNEL);
146
147 if (!skb)
148 goto err;
149
150 msg_header = genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
151 0, NET_DM_CMD_ALERT);
152 if (!msg_header) {
153 nlmsg_free(skb);
154 skb = NULL;
155 goto err;
156 }
157 nla = nla_reserve(skb, NLA_UNSPEC,
158 sizeof(struct net_dm_alert_msg));
159 if (!nla) {
160 nlmsg_free(skb);
161 skb = NULL;
162 goto err;
163 }
164 msg = nla_data(nla);
165 memset(msg, 0, al);
166 goto out;
167
168err:
169 mod_timer(&data->send_timer, jiffies + HZ / 10);
170out:
171 spin_lock_irqsave(&data->lock, flags);
172 swap(data->skb, skb);
173 spin_unlock_irqrestore(&data->lock, flags);
174
175 if (skb) {
176 struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
177 struct genlmsghdr *gnlh = (struct genlmsghdr *)nlmsg_data(nlh);
178
179 genlmsg_end(skb, genlmsg_data(gnlh));
180 }
181
182 return skb;
183}
184
185static const struct genl_multicast_group dropmon_mcgrps[] = {
186 { .name = "events", .flags = GENL_MCAST_CAP_SYS_ADMIN, },
187};
188
189static void send_dm_alert(struct work_struct *work)
190{
191 struct sk_buff *skb;
192 struct per_cpu_dm_data *data;
193
194 data = container_of(work, struct per_cpu_dm_data, dm_alert_work);
195
196 skb = reset_per_cpu_data(data);
197
198 if (skb)
199 genlmsg_multicast(&net_drop_monitor_family, skb, 0,
200 0, GFP_KERNEL);
201}
202
203/*
204 * This is the timer function to delay the sending of an alert
205 * in the event that more drops will arrive during the
206 * hysteresis period.
207 */
208static void sched_send_work(struct timer_list *t)
209{
210 struct per_cpu_dm_data *data = from_timer(data, t, send_timer);
211
212 schedule_work(&data->dm_alert_work);
213}
214
215static void trace_drop_common(struct sk_buff *skb, void *location)
216{
217 struct net_dm_alert_msg *msg;
218 struct net_dm_drop_point *point;
219 struct nlmsghdr *nlh;
220 struct nlattr *nla;
221 int i;
222 struct sk_buff *dskb;
223 struct per_cpu_dm_data *data;
224 unsigned long flags;
225
226 local_irq_save(flags);
227 data = this_cpu_ptr(&dm_cpu_data);
228 spin_lock(&data->lock);
229 dskb = data->skb;
230
231 if (!dskb)
232 goto out;
233
234 nlh = (struct nlmsghdr *)dskb->data;
235 nla = genlmsg_data(nlmsg_data(nlh));
236 msg = nla_data(nla);
237 point = msg->points;
238 for (i = 0; i < msg->entries; i++) {
239 if (!memcmp(&location, &point->pc, sizeof(void *))) {
240 point->count++;
241 goto out;
242 }
243 point++;
244 }
245 if (msg->entries == dm_hit_limit)
246 goto out;
247 /*
248 * We need to create a new entry
249 */
250 __nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point));
251 nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
252 memcpy(point->pc, &location, sizeof(void *));
253 point->count = 1;
254 msg->entries++;
255
256 if (!timer_pending(&data->send_timer)) {
257 data->send_timer.expires = jiffies + dm_delay * HZ;
258 add_timer(&data->send_timer);
259 }
260
261out:
262 spin_unlock_irqrestore(&data->lock, flags);
263}
264
265static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb,
266 void *location,
267 enum skb_drop_reason reason)
268{
269 trace_drop_common(skb, location);
270}
271
272static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi,
273 int work, int budget)
274{
275 struct net_device *dev = napi->dev;
276 struct dm_hw_stat_delta *stat;
277 /*
278 * Don't check napi structures with no associated device
279 */
280 if (!dev)
281 return;
282
283 rcu_read_lock();
284 stat = rcu_dereference(dev->dm_private);
285 if (stat) {
286 /*
287 * only add a note to our monitor buffer if:
288 * 1) its after the last_rx delta
289 * 2) our rx_dropped count has gone up
290 */
291 if (time_after(jiffies, stat->last_rx + dm_hw_check_delta) &&
292 (dev->stats.rx_dropped != stat->last_drop_val)) {
293 trace_drop_common(NULL, NULL);
294 stat->last_drop_val = dev->stats.rx_dropped;
295 stat->last_rx = jiffies;
296 }
297 }
298 rcu_read_unlock();
299}
300
301static struct net_dm_hw_entries *
302net_dm_hw_reset_per_cpu_data(struct per_cpu_dm_data *hw_data)
303{
304 struct net_dm_hw_entries *hw_entries;
305 unsigned long flags;
306
307 hw_entries = kzalloc(struct_size(hw_entries, entries, dm_hit_limit),
308 GFP_KERNEL);
309 if (!hw_entries) {
310 /* If the memory allocation failed, we try to perform another
311 * allocation in 1/10 second. Otherwise, the probe function
312 * will constantly bail out.
313 */
314 mod_timer(&hw_data->send_timer, jiffies + HZ / 10);
315 }
316
317 spin_lock_irqsave(&hw_data->lock, flags);
318 swap(hw_data->hw_entries, hw_entries);
319 spin_unlock_irqrestore(&hw_data->lock, flags);
320
321 return hw_entries;
322}
323
324static int net_dm_hw_entry_put(struct sk_buff *msg,
325 const struct net_dm_hw_entry *hw_entry)
326{
327 struct nlattr *attr;
328
329 attr = nla_nest_start(msg, NET_DM_ATTR_HW_ENTRY);
330 if (!attr)
331 return -EMSGSIZE;
332
333 if (nla_put_string(msg, NET_DM_ATTR_HW_TRAP_NAME, hw_entry->trap_name))
334 goto nla_put_failure;
335
336 if (nla_put_u32(msg, NET_DM_ATTR_HW_TRAP_COUNT, hw_entry->count))
337 goto nla_put_failure;
338
339 nla_nest_end(msg, attr);
340
341 return 0;
342
343nla_put_failure:
344 nla_nest_cancel(msg, attr);
345 return -EMSGSIZE;
346}
347
348static int net_dm_hw_entries_put(struct sk_buff *msg,
349 const struct net_dm_hw_entries *hw_entries)
350{
351 struct nlattr *attr;
352 int i;
353
354 attr = nla_nest_start(msg, NET_DM_ATTR_HW_ENTRIES);
355 if (!attr)
356 return -EMSGSIZE;
357
358 for (i = 0; i < hw_entries->num_entries; i++) {
359 int rc;
360
361 rc = net_dm_hw_entry_put(msg, &hw_entries->entries[i]);
362 if (rc)
363 goto nla_put_failure;
364 }
365
366 nla_nest_end(msg, attr);
367
368 return 0;
369
370nla_put_failure:
371 nla_nest_cancel(msg, attr);
372 return -EMSGSIZE;
373}
374
375static int
376net_dm_hw_summary_report_fill(struct sk_buff *msg,
377 const struct net_dm_hw_entries *hw_entries)
378{
379 struct net_dm_alert_msg anc_hdr = { 0 };
380 void *hdr;
381 int rc;
382
383 hdr = genlmsg_put(msg, 0, 0, &net_drop_monitor_family, 0,
384 NET_DM_CMD_ALERT);
385 if (!hdr)
386 return -EMSGSIZE;
387
388 /* We need to put the ancillary header in order not to break user
389 * space.
390 */
391 if (nla_put(msg, NLA_UNSPEC, sizeof(anc_hdr), &anc_hdr))
392 goto nla_put_failure;
393
394 rc = net_dm_hw_entries_put(msg, hw_entries);
395 if (rc)
396 goto nla_put_failure;
397
398 genlmsg_end(msg, hdr);
399
400 return 0;
401
402nla_put_failure:
403 genlmsg_cancel(msg, hdr);
404 return -EMSGSIZE;
405}
406
407static void net_dm_hw_summary_work(struct work_struct *work)
408{
409 struct net_dm_hw_entries *hw_entries;
410 struct per_cpu_dm_data *hw_data;
411 struct sk_buff *msg;
412 int rc;
413
414 hw_data = container_of(work, struct per_cpu_dm_data, dm_alert_work);
415
416 hw_entries = net_dm_hw_reset_per_cpu_data(hw_data);
417 if (!hw_entries)
418 return;
419
420 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
421 if (!msg)
422 goto out;
423
424 rc = net_dm_hw_summary_report_fill(msg, hw_entries);
425 if (rc) {
426 nlmsg_free(msg);
427 goto out;
428 }
429
430 genlmsg_multicast(&net_drop_monitor_family, msg, 0, 0, GFP_KERNEL);
431
432out:
433 kfree(hw_entries);
434}
435
436static void
437net_dm_hw_trap_summary_probe(void *ignore, const struct devlink *devlink,
438 struct sk_buff *skb,
439 const struct devlink_trap_metadata *metadata)
440{
441 struct net_dm_hw_entries *hw_entries;
442 struct net_dm_hw_entry *hw_entry;
443 struct per_cpu_dm_data *hw_data;
444 unsigned long flags;
445 int i;
446
447 if (metadata->trap_type == DEVLINK_TRAP_TYPE_CONTROL)
448 return;
449
450 hw_data = this_cpu_ptr(&dm_hw_cpu_data);
451 spin_lock_irqsave(&hw_data->lock, flags);
452 hw_entries = hw_data->hw_entries;
453
454 if (!hw_entries)
455 goto out;
456
457 for (i = 0; i < hw_entries->num_entries; i++) {
458 hw_entry = &hw_entries->entries[i];
459 if (!strncmp(hw_entry->trap_name, metadata->trap_name,
460 NET_DM_MAX_HW_TRAP_NAME_LEN - 1)) {
461 hw_entry->count++;
462 goto out;
463 }
464 }
465 if (WARN_ON_ONCE(hw_entries->num_entries == dm_hit_limit))
466 goto out;
467
468 hw_entry = &hw_entries->entries[hw_entries->num_entries];
469 strscpy(hw_entry->trap_name, metadata->trap_name,
470 NET_DM_MAX_HW_TRAP_NAME_LEN - 1);
471 hw_entry->count = 1;
472 hw_entries->num_entries++;
473
474 if (!timer_pending(&hw_data->send_timer)) {
475 hw_data->send_timer.expires = jiffies + dm_delay * HZ;
476 add_timer(&hw_data->send_timer);
477 }
478
479out:
480 spin_unlock_irqrestore(&hw_data->lock, flags);
481}
482
483static const struct net_dm_alert_ops net_dm_alert_summary_ops = {
484 .kfree_skb_probe = trace_kfree_skb_hit,
485 .napi_poll_probe = trace_napi_poll_hit,
486 .work_item_func = send_dm_alert,
487 .hw_work_item_func = net_dm_hw_summary_work,
488 .hw_trap_probe = net_dm_hw_trap_summary_probe,
489};
490
491static void net_dm_packet_trace_kfree_skb_hit(void *ignore,
492 struct sk_buff *skb,
493 void *location,
494 enum skb_drop_reason reason)
495{
496 ktime_t tstamp = ktime_get_real();
497 struct per_cpu_dm_data *data;
498 struct net_dm_skb_cb *cb;
499 struct sk_buff *nskb;
500 unsigned long flags;
501
502 if (!skb_mac_header_was_set(skb))
503 return;
504
505 nskb = skb_clone(skb, GFP_ATOMIC);
506 if (!nskb)
507 return;
508
509 cb = NET_DM_SKB_CB(nskb);
510 cb->reason = reason;
511 cb->pc = location;
512 /* Override the timestamp because we care about the time when the
513 * packet was dropped.
514 */
515 nskb->tstamp = tstamp;
516
517 data = this_cpu_ptr(&dm_cpu_data);
518
519 spin_lock_irqsave(&data->drop_queue.lock, flags);
520 if (skb_queue_len(&data->drop_queue) < net_dm_queue_len)
521 __skb_queue_tail(&data->drop_queue, nskb);
522 else
523 goto unlock_free;
524 spin_unlock_irqrestore(&data->drop_queue.lock, flags);
525
526 schedule_work(&data->dm_alert_work);
527
528 return;
529
530unlock_free:
531 spin_unlock_irqrestore(&data->drop_queue.lock, flags);
532 u64_stats_update_begin(&data->stats.syncp);
533 u64_stats_inc(&data->stats.dropped);
534 u64_stats_update_end(&data->stats.syncp);
535 consume_skb(nskb);
536}
537
538static void net_dm_packet_trace_napi_poll_hit(void *ignore,
539 struct napi_struct *napi,
540 int work, int budget)
541{
542}
543
544static size_t net_dm_in_port_size(void)
545{
546 /* NET_DM_ATTR_IN_PORT nest */
547 return nla_total_size(0) +
548 /* NET_DM_ATTR_PORT_NETDEV_IFINDEX */
549 nla_total_size(sizeof(u32)) +
550 /* NET_DM_ATTR_PORT_NETDEV_NAME */
551 nla_total_size(IFNAMSIZ + 1);
552}
553
554#define NET_DM_MAX_SYMBOL_LEN 40
555#define NET_DM_MAX_REASON_LEN 50
556
557static size_t net_dm_packet_report_size(size_t payload_len)
558{
559 size_t size;
560
561 size = nlmsg_msg_size(GENL_HDRLEN + net_drop_monitor_family.hdrsize);
562
563 return NLMSG_ALIGN(size) +
564 /* NET_DM_ATTR_ORIGIN */
565 nla_total_size(sizeof(u16)) +
566 /* NET_DM_ATTR_PC */
567 nla_total_size(sizeof(u64)) +
568 /* NET_DM_ATTR_SYMBOL */
569 nla_total_size(NET_DM_MAX_SYMBOL_LEN + 1) +
570 /* NET_DM_ATTR_IN_PORT */
571 net_dm_in_port_size() +
572 /* NET_DM_ATTR_TIMESTAMP */
573 nla_total_size(sizeof(u64)) +
574 /* NET_DM_ATTR_ORIG_LEN */
575 nla_total_size(sizeof(u32)) +
576 /* NET_DM_ATTR_PROTO */
577 nla_total_size(sizeof(u16)) +
578 /* NET_DM_ATTR_REASON */
579 nla_total_size(NET_DM_MAX_REASON_LEN + 1) +
580 /* NET_DM_ATTR_PAYLOAD */
581 nla_total_size(payload_len);
582}
583
584static int net_dm_packet_report_in_port_put(struct sk_buff *msg, int ifindex,
585 const char *name)
586{
587 struct nlattr *attr;
588
589 attr = nla_nest_start(msg, NET_DM_ATTR_IN_PORT);
590 if (!attr)
591 return -EMSGSIZE;
592
593 if (ifindex &&
594 nla_put_u32(msg, NET_DM_ATTR_PORT_NETDEV_IFINDEX, ifindex))
595 goto nla_put_failure;
596
597 if (name && nla_put_string(msg, NET_DM_ATTR_PORT_NETDEV_NAME, name))
598 goto nla_put_failure;
599
600 nla_nest_end(msg, attr);
601
602 return 0;
603
604nla_put_failure:
605 nla_nest_cancel(msg, attr);
606 return -EMSGSIZE;
607}
608
609static int net_dm_packet_report_fill(struct sk_buff *msg, struct sk_buff *skb,
610 size_t payload_len)
611{
612 struct net_dm_skb_cb *cb = NET_DM_SKB_CB(skb);
613 const struct drop_reason_list *list = NULL;
614 unsigned int subsys, subsys_reason;
615 char buf[NET_DM_MAX_SYMBOL_LEN];
616 struct nlattr *attr;
617 void *hdr;
618 int rc;
619
620 hdr = genlmsg_put(msg, 0, 0, &net_drop_monitor_family, 0,
621 NET_DM_CMD_PACKET_ALERT);
622 if (!hdr)
623 return -EMSGSIZE;
624
625 if (nla_put_u16(msg, NET_DM_ATTR_ORIGIN, NET_DM_ORIGIN_SW))
626 goto nla_put_failure;
627
628 if (nla_put_u64_64bit(msg, NET_DM_ATTR_PC, (u64)(uintptr_t)cb->pc,
629 NET_DM_ATTR_PAD))
630 goto nla_put_failure;
631
632 rcu_read_lock();
633 subsys = u32_get_bits(cb->reason, SKB_DROP_REASON_SUBSYS_MASK);
634 if (subsys < SKB_DROP_REASON_SUBSYS_NUM)
635 list = rcu_dereference(drop_reasons_by_subsys[subsys]);
636 subsys_reason = cb->reason & ~SKB_DROP_REASON_SUBSYS_MASK;
637 if (!list ||
638 subsys_reason >= list->n_reasons ||
639 !list->reasons[subsys_reason] ||
640 strlen(list->reasons[subsys_reason]) > NET_DM_MAX_REASON_LEN) {
641 list = rcu_dereference(drop_reasons_by_subsys[SKB_DROP_REASON_SUBSYS_CORE]);
642 subsys_reason = SKB_DROP_REASON_NOT_SPECIFIED;
643 }
644 if (nla_put_string(msg, NET_DM_ATTR_REASON,
645 list->reasons[subsys_reason])) {
646 rcu_read_unlock();
647 goto nla_put_failure;
648 }
649 rcu_read_unlock();
650
651 snprintf(buf, sizeof(buf), "%pS", cb->pc);
652 if (nla_put_string(msg, NET_DM_ATTR_SYMBOL, buf))
653 goto nla_put_failure;
654
655 rc = net_dm_packet_report_in_port_put(msg, skb->skb_iif, NULL);
656 if (rc)
657 goto nla_put_failure;
658
659 if (nla_put_u64_64bit(msg, NET_DM_ATTR_TIMESTAMP,
660 ktime_to_ns(skb->tstamp), NET_DM_ATTR_PAD))
661 goto nla_put_failure;
662
663 if (nla_put_u32(msg, NET_DM_ATTR_ORIG_LEN, skb->len))
664 goto nla_put_failure;
665
666 if (!payload_len)
667 goto out;
668
669 if (nla_put_u16(msg, NET_DM_ATTR_PROTO, be16_to_cpu(skb->protocol)))
670 goto nla_put_failure;
671
672 attr = skb_put(msg, nla_total_size(payload_len));
673 attr->nla_type = NET_DM_ATTR_PAYLOAD;
674 attr->nla_len = nla_attr_size(payload_len);
675 if (skb_copy_bits(skb, 0, nla_data(attr), payload_len))
676 goto nla_put_failure;
677
678out:
679 genlmsg_end(msg, hdr);
680
681 return 0;
682
683nla_put_failure:
684 genlmsg_cancel(msg, hdr);
685 return -EMSGSIZE;
686}
687
688#define NET_DM_MAX_PACKET_SIZE (0xffff - NLA_HDRLEN - NLA_ALIGNTO)
689
690static void net_dm_packet_report(struct sk_buff *skb)
691{
692 struct sk_buff *msg;
693 size_t payload_len;
694 int rc;
695
696 /* Make sure we start copying the packet from the MAC header */
697 if (skb->data > skb_mac_header(skb))
698 skb_push(skb, skb->data - skb_mac_header(skb));
699 else
700 skb_pull(skb, skb_mac_header(skb) - skb->data);
701
702 /* Ensure packet fits inside a single netlink attribute */
703 payload_len = min_t(size_t, skb->len, NET_DM_MAX_PACKET_SIZE);
704 if (net_dm_trunc_len)
705 payload_len = min_t(size_t, net_dm_trunc_len, payload_len);
706
707 msg = nlmsg_new(net_dm_packet_report_size(payload_len), GFP_KERNEL);
708 if (!msg)
709 goto out;
710
711 rc = net_dm_packet_report_fill(msg, skb, payload_len);
712 if (rc) {
713 nlmsg_free(msg);
714 goto out;
715 }
716
717 genlmsg_multicast(&net_drop_monitor_family, msg, 0, 0, GFP_KERNEL);
718
719out:
720 consume_skb(skb);
721}
722
723static void net_dm_packet_work(struct work_struct *work)
724{
725 struct per_cpu_dm_data *data;
726 struct sk_buff_head list;
727 struct sk_buff *skb;
728 unsigned long flags;
729
730 data = container_of(work, struct per_cpu_dm_data, dm_alert_work);
731
732 __skb_queue_head_init(&list);
733
734 spin_lock_irqsave(&data->drop_queue.lock, flags);
735 skb_queue_splice_tail_init(&data->drop_queue, &list);
736 spin_unlock_irqrestore(&data->drop_queue.lock, flags);
737
738 while ((skb = __skb_dequeue(&list)))
739 net_dm_packet_report(skb);
740}
741
742static size_t
743net_dm_flow_action_cookie_size(const struct devlink_trap_metadata *hw_metadata)
744{
745 return hw_metadata->fa_cookie ?
746 nla_total_size(hw_metadata->fa_cookie->cookie_len) : 0;
747}
748
749static size_t
750net_dm_hw_packet_report_size(size_t payload_len,
751 const struct devlink_trap_metadata *hw_metadata)
752{
753 size_t size;
754
755 size = nlmsg_msg_size(GENL_HDRLEN + net_drop_monitor_family.hdrsize);
756
757 return NLMSG_ALIGN(size) +
758 /* NET_DM_ATTR_ORIGIN */
759 nla_total_size(sizeof(u16)) +
760 /* NET_DM_ATTR_HW_TRAP_GROUP_NAME */
761 nla_total_size(strlen(hw_metadata->trap_group_name) + 1) +
762 /* NET_DM_ATTR_HW_TRAP_NAME */
763 nla_total_size(strlen(hw_metadata->trap_name) + 1) +
764 /* NET_DM_ATTR_IN_PORT */
765 net_dm_in_port_size() +
766 /* NET_DM_ATTR_FLOW_ACTION_COOKIE */
767 net_dm_flow_action_cookie_size(hw_metadata) +
768 /* NET_DM_ATTR_TIMESTAMP */
769 nla_total_size(sizeof(u64)) +
770 /* NET_DM_ATTR_ORIG_LEN */
771 nla_total_size(sizeof(u32)) +
772 /* NET_DM_ATTR_PROTO */
773 nla_total_size(sizeof(u16)) +
774 /* NET_DM_ATTR_PAYLOAD */
775 nla_total_size(payload_len);
776}
777
778static int net_dm_hw_packet_report_fill(struct sk_buff *msg,
779 struct sk_buff *skb, size_t payload_len)
780{
781 struct devlink_trap_metadata *hw_metadata;
782 struct nlattr *attr;
783 void *hdr;
784
785 hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata;
786
787 hdr = genlmsg_put(msg, 0, 0, &net_drop_monitor_family, 0,
788 NET_DM_CMD_PACKET_ALERT);
789 if (!hdr)
790 return -EMSGSIZE;
791
792 if (nla_put_u16(msg, NET_DM_ATTR_ORIGIN, NET_DM_ORIGIN_HW))
793 goto nla_put_failure;
794
795 if (nla_put_string(msg, NET_DM_ATTR_HW_TRAP_GROUP_NAME,
796 hw_metadata->trap_group_name))
797 goto nla_put_failure;
798
799 if (nla_put_string(msg, NET_DM_ATTR_HW_TRAP_NAME,
800 hw_metadata->trap_name))
801 goto nla_put_failure;
802
803 if (hw_metadata->input_dev) {
804 struct net_device *dev = hw_metadata->input_dev;
805 int rc;
806
807 rc = net_dm_packet_report_in_port_put(msg, dev->ifindex,
808 dev->name);
809 if (rc)
810 goto nla_put_failure;
811 }
812
813 if (hw_metadata->fa_cookie &&
814 nla_put(msg, NET_DM_ATTR_FLOW_ACTION_COOKIE,
815 hw_metadata->fa_cookie->cookie_len,
816 hw_metadata->fa_cookie->cookie))
817 goto nla_put_failure;
818
819 if (nla_put_u64_64bit(msg, NET_DM_ATTR_TIMESTAMP,
820 ktime_to_ns(skb->tstamp), NET_DM_ATTR_PAD))
821 goto nla_put_failure;
822
823 if (nla_put_u32(msg, NET_DM_ATTR_ORIG_LEN, skb->len))
824 goto nla_put_failure;
825
826 if (!payload_len)
827 goto out;
828
829 if (nla_put_u16(msg, NET_DM_ATTR_PROTO, be16_to_cpu(skb->protocol)))
830 goto nla_put_failure;
831
832 attr = skb_put(msg, nla_total_size(payload_len));
833 attr->nla_type = NET_DM_ATTR_PAYLOAD;
834 attr->nla_len = nla_attr_size(payload_len);
835 if (skb_copy_bits(skb, 0, nla_data(attr), payload_len))
836 goto nla_put_failure;
837
838out:
839 genlmsg_end(msg, hdr);
840
841 return 0;
842
843nla_put_failure:
844 genlmsg_cancel(msg, hdr);
845 return -EMSGSIZE;
846}
847
848static struct devlink_trap_metadata *
849net_dm_hw_metadata_copy(const struct devlink_trap_metadata *metadata)
850{
851 const struct flow_action_cookie *fa_cookie;
852 struct devlink_trap_metadata *hw_metadata;
853 const char *trap_group_name;
854 const char *trap_name;
855
856 hw_metadata = kzalloc(sizeof(*hw_metadata), GFP_ATOMIC);
857 if (!hw_metadata)
858 return NULL;
859
860 trap_group_name = kstrdup(metadata->trap_group_name, GFP_ATOMIC);
861 if (!trap_group_name)
862 goto free_hw_metadata;
863 hw_metadata->trap_group_name = trap_group_name;
864
865 trap_name = kstrdup(metadata->trap_name, GFP_ATOMIC);
866 if (!trap_name)
867 goto free_trap_group;
868 hw_metadata->trap_name = trap_name;
869
870 if (metadata->fa_cookie) {
871 size_t cookie_size = sizeof(*fa_cookie) +
872 metadata->fa_cookie->cookie_len;
873
874 fa_cookie = kmemdup(metadata->fa_cookie, cookie_size,
875 GFP_ATOMIC);
876 if (!fa_cookie)
877 goto free_trap_name;
878 hw_metadata->fa_cookie = fa_cookie;
879 }
880
881 hw_metadata->input_dev = metadata->input_dev;
882 netdev_hold(hw_metadata->input_dev, &hw_metadata->dev_tracker,
883 GFP_ATOMIC);
884
885 return hw_metadata;
886
887free_trap_name:
888 kfree(trap_name);
889free_trap_group:
890 kfree(trap_group_name);
891free_hw_metadata:
892 kfree(hw_metadata);
893 return NULL;
894}
895
896static void
897net_dm_hw_metadata_free(struct devlink_trap_metadata *hw_metadata)
898{
899 netdev_put(hw_metadata->input_dev, &hw_metadata->dev_tracker);
900 kfree(hw_metadata->fa_cookie);
901 kfree(hw_metadata->trap_name);
902 kfree(hw_metadata->trap_group_name);
903 kfree(hw_metadata);
904}
905
906static void net_dm_hw_packet_report(struct sk_buff *skb)
907{
908 struct devlink_trap_metadata *hw_metadata;
909 struct sk_buff *msg;
910 size_t payload_len;
911 int rc;
912
913 if (skb->data > skb_mac_header(skb))
914 skb_push(skb, skb->data - skb_mac_header(skb));
915 else
916 skb_pull(skb, skb_mac_header(skb) - skb->data);
917
918 payload_len = min_t(size_t, skb->len, NET_DM_MAX_PACKET_SIZE);
919 if (net_dm_trunc_len)
920 payload_len = min_t(size_t, net_dm_trunc_len, payload_len);
921
922 hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata;
923 msg = nlmsg_new(net_dm_hw_packet_report_size(payload_len, hw_metadata),
924 GFP_KERNEL);
925 if (!msg)
926 goto out;
927
928 rc = net_dm_hw_packet_report_fill(msg, skb, payload_len);
929 if (rc) {
930 nlmsg_free(msg);
931 goto out;
932 }
933
934 genlmsg_multicast(&net_drop_monitor_family, msg, 0, 0, GFP_KERNEL);
935
936out:
937 net_dm_hw_metadata_free(NET_DM_SKB_CB(skb)->hw_metadata);
938 consume_skb(skb);
939}
940
941static void net_dm_hw_packet_work(struct work_struct *work)
942{
943 struct per_cpu_dm_data *hw_data;
944 struct sk_buff_head list;
945 struct sk_buff *skb;
946 unsigned long flags;
947
948 hw_data = container_of(work, struct per_cpu_dm_data, dm_alert_work);
949
950 __skb_queue_head_init(&list);
951
952 spin_lock_irqsave(&hw_data->drop_queue.lock, flags);
953 skb_queue_splice_tail_init(&hw_data->drop_queue, &list);
954 spin_unlock_irqrestore(&hw_data->drop_queue.lock, flags);
955
956 while ((skb = __skb_dequeue(&list)))
957 net_dm_hw_packet_report(skb);
958}
959
960static void
961net_dm_hw_trap_packet_probe(void *ignore, const struct devlink *devlink,
962 struct sk_buff *skb,
963 const struct devlink_trap_metadata *metadata)
964{
965 struct devlink_trap_metadata *n_hw_metadata;
966 ktime_t tstamp = ktime_get_real();
967 struct per_cpu_dm_data *hw_data;
968 struct sk_buff *nskb;
969 unsigned long flags;
970
971 if (metadata->trap_type == DEVLINK_TRAP_TYPE_CONTROL)
972 return;
973
974 if (!skb_mac_header_was_set(skb))
975 return;
976
977 nskb = skb_clone(skb, GFP_ATOMIC);
978 if (!nskb)
979 return;
980
981 n_hw_metadata = net_dm_hw_metadata_copy(metadata);
982 if (!n_hw_metadata)
983 goto free;
984
985 NET_DM_SKB_CB(nskb)->hw_metadata = n_hw_metadata;
986 nskb->tstamp = tstamp;
987
988 hw_data = this_cpu_ptr(&dm_hw_cpu_data);
989
990 spin_lock_irqsave(&hw_data->drop_queue.lock, flags);
991 if (skb_queue_len(&hw_data->drop_queue) < net_dm_queue_len)
992 __skb_queue_tail(&hw_data->drop_queue, nskb);
993 else
994 goto unlock_free;
995 spin_unlock_irqrestore(&hw_data->drop_queue.lock, flags);
996
997 schedule_work(&hw_data->dm_alert_work);
998
999 return;
1000
1001unlock_free:
1002 spin_unlock_irqrestore(&hw_data->drop_queue.lock, flags);
1003 u64_stats_update_begin(&hw_data->stats.syncp);
1004 u64_stats_inc(&hw_data->stats.dropped);
1005 u64_stats_update_end(&hw_data->stats.syncp);
1006 net_dm_hw_metadata_free(n_hw_metadata);
1007free:
1008 consume_skb(nskb);
1009}
1010
1011static const struct net_dm_alert_ops net_dm_alert_packet_ops = {
1012 .kfree_skb_probe = net_dm_packet_trace_kfree_skb_hit,
1013 .napi_poll_probe = net_dm_packet_trace_napi_poll_hit,
1014 .work_item_func = net_dm_packet_work,
1015 .hw_work_item_func = net_dm_hw_packet_work,
1016 .hw_trap_probe = net_dm_hw_trap_packet_probe,
1017};
1018
1019static const struct net_dm_alert_ops *net_dm_alert_ops_arr[] = {
1020 [NET_DM_ALERT_MODE_SUMMARY] = &net_dm_alert_summary_ops,
1021 [NET_DM_ALERT_MODE_PACKET] = &net_dm_alert_packet_ops,
1022};
1023
1024#if IS_ENABLED(CONFIG_NET_DEVLINK)
1025static int net_dm_hw_probe_register(const struct net_dm_alert_ops *ops)
1026{
1027 return register_trace_devlink_trap_report(ops->hw_trap_probe, NULL);
1028}
1029
1030static void net_dm_hw_probe_unregister(const struct net_dm_alert_ops *ops)
1031{
1032 unregister_trace_devlink_trap_report(ops->hw_trap_probe, NULL);
1033 tracepoint_synchronize_unregister();
1034}
1035#else
1036static int net_dm_hw_probe_register(const struct net_dm_alert_ops *ops)
1037{
1038 return -EOPNOTSUPP;
1039}
1040
1041static void net_dm_hw_probe_unregister(const struct net_dm_alert_ops *ops)
1042{
1043}
1044#endif
1045
1046static int net_dm_hw_monitor_start(struct netlink_ext_ack *extack)
1047{
1048 const struct net_dm_alert_ops *ops;
1049 int cpu, rc;
1050
1051 if (monitor_hw) {
1052 NL_SET_ERR_MSG_MOD(extack, "Hardware monitoring already enabled");
1053 return -EAGAIN;
1054 }
1055
1056 ops = net_dm_alert_ops_arr[net_dm_alert_mode];
1057
1058 if (!try_module_get(THIS_MODULE)) {
1059 NL_SET_ERR_MSG_MOD(extack, "Failed to take reference on module");
1060 return -ENODEV;
1061 }
1062
1063 for_each_possible_cpu(cpu) {
1064 struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
1065 struct net_dm_hw_entries *hw_entries;
1066
1067 INIT_WORK(&hw_data->dm_alert_work, ops->hw_work_item_func);
1068 timer_setup(&hw_data->send_timer, sched_send_work, 0);
1069 hw_entries = net_dm_hw_reset_per_cpu_data(hw_data);
1070 kfree(hw_entries);
1071 }
1072
1073 rc = net_dm_hw_probe_register(ops);
1074 if (rc) {
1075 NL_SET_ERR_MSG_MOD(extack, "Failed to connect probe to devlink_trap_probe() tracepoint");
1076 goto err_module_put;
1077 }
1078
1079 monitor_hw = true;
1080
1081 return 0;
1082
1083err_module_put:
1084 for_each_possible_cpu(cpu) {
1085 struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
1086 struct sk_buff *skb;
1087
1088 del_timer_sync(&hw_data->send_timer);
1089 cancel_work_sync(&hw_data->dm_alert_work);
1090 while ((skb = __skb_dequeue(&hw_data->drop_queue))) {
1091 struct devlink_trap_metadata *hw_metadata;
1092
1093 hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata;
1094 net_dm_hw_metadata_free(hw_metadata);
1095 consume_skb(skb);
1096 }
1097 }
1098 module_put(THIS_MODULE);
1099 return rc;
1100}
1101
1102static void net_dm_hw_monitor_stop(struct netlink_ext_ack *extack)
1103{
1104 const struct net_dm_alert_ops *ops;
1105 int cpu;
1106
1107 if (!monitor_hw) {
1108 NL_SET_ERR_MSG_MOD(extack, "Hardware monitoring already disabled");
1109 return;
1110 }
1111
1112 ops = net_dm_alert_ops_arr[net_dm_alert_mode];
1113
1114 monitor_hw = false;
1115
1116 net_dm_hw_probe_unregister(ops);
1117
1118 for_each_possible_cpu(cpu) {
1119 struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
1120 struct sk_buff *skb;
1121
1122 del_timer_sync(&hw_data->send_timer);
1123 cancel_work_sync(&hw_data->dm_alert_work);
1124 while ((skb = __skb_dequeue(&hw_data->drop_queue))) {
1125 struct devlink_trap_metadata *hw_metadata;
1126
1127 hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata;
1128 net_dm_hw_metadata_free(hw_metadata);
1129 consume_skb(skb);
1130 }
1131 }
1132
1133 module_put(THIS_MODULE);
1134}
1135
1136static int net_dm_trace_on_set(struct netlink_ext_ack *extack)
1137{
1138 const struct net_dm_alert_ops *ops;
1139 int cpu, rc;
1140
1141 ops = net_dm_alert_ops_arr[net_dm_alert_mode];
1142
1143 if (!try_module_get(THIS_MODULE)) {
1144 NL_SET_ERR_MSG_MOD(extack, "Failed to take reference on module");
1145 return -ENODEV;
1146 }
1147
1148 for_each_possible_cpu(cpu) {
1149 struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
1150 struct sk_buff *skb;
1151
1152 INIT_WORK(&data->dm_alert_work, ops->work_item_func);
1153 timer_setup(&data->send_timer, sched_send_work, 0);
1154 /* Allocate a new per-CPU skb for the summary alert message and
1155 * free the old one which might contain stale data from
1156 * previous tracing.
1157 */
1158 skb = reset_per_cpu_data(data);
1159 consume_skb(skb);
1160 }
1161
1162 rc = register_trace_kfree_skb(ops->kfree_skb_probe, NULL);
1163 if (rc) {
1164 NL_SET_ERR_MSG_MOD(extack, "Failed to connect probe to kfree_skb() tracepoint");
1165 goto err_module_put;
1166 }
1167
1168 rc = register_trace_napi_poll(ops->napi_poll_probe, NULL);
1169 if (rc) {
1170 NL_SET_ERR_MSG_MOD(extack, "Failed to connect probe to napi_poll() tracepoint");
1171 goto err_unregister_trace;
1172 }
1173
1174 return 0;
1175
1176err_unregister_trace:
1177 unregister_trace_kfree_skb(ops->kfree_skb_probe, NULL);
1178err_module_put:
1179 for_each_possible_cpu(cpu) {
1180 struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
1181 struct sk_buff *skb;
1182
1183 del_timer_sync(&data->send_timer);
1184 cancel_work_sync(&data->dm_alert_work);
1185 while ((skb = __skb_dequeue(&data->drop_queue)))
1186 consume_skb(skb);
1187 }
1188 module_put(THIS_MODULE);
1189 return rc;
1190}
1191
1192static void net_dm_trace_off_set(void)
1193{
1194 const struct net_dm_alert_ops *ops;
1195 int cpu;
1196
1197 ops = net_dm_alert_ops_arr[net_dm_alert_mode];
1198
1199 unregister_trace_napi_poll(ops->napi_poll_probe, NULL);
1200 unregister_trace_kfree_skb(ops->kfree_skb_probe, NULL);
1201
1202 tracepoint_synchronize_unregister();
1203
1204 /* Make sure we do not send notifications to user space after request
1205 * to stop tracing returns.
1206 */
1207 for_each_possible_cpu(cpu) {
1208 struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
1209 struct sk_buff *skb;
1210
1211 del_timer_sync(&data->send_timer);
1212 cancel_work_sync(&data->dm_alert_work);
1213 while ((skb = __skb_dequeue(&data->drop_queue)))
1214 consume_skb(skb);
1215 }
1216
1217 module_put(THIS_MODULE);
1218}
1219
1220static int set_all_monitor_traces(int state, struct netlink_ext_ack *extack)
1221{
1222 int rc = 0;
1223
1224 if (state == trace_state) {
1225 NL_SET_ERR_MSG_MOD(extack, "Trace state already set to requested state");
1226 return -EAGAIN;
1227 }
1228
1229 switch (state) {
1230 case TRACE_ON:
1231 rc = net_dm_trace_on_set(extack);
1232 break;
1233 case TRACE_OFF:
1234 net_dm_trace_off_set();
1235 break;
1236 default:
1237 rc = 1;
1238 break;
1239 }
1240
1241 if (!rc)
1242 trace_state = state;
1243 else
1244 rc = -EINPROGRESS;
1245
1246 return rc;
1247}
1248
1249static bool net_dm_is_monitoring(void)
1250{
1251 return trace_state == TRACE_ON || monitor_hw;
1252}
1253
1254static int net_dm_alert_mode_get_from_info(struct genl_info *info,
1255 enum net_dm_alert_mode *p_alert_mode)
1256{
1257 u8 val;
1258
1259 val = nla_get_u8(info->attrs[NET_DM_ATTR_ALERT_MODE]);
1260
1261 switch (val) {
1262 case NET_DM_ALERT_MODE_SUMMARY:
1263 case NET_DM_ALERT_MODE_PACKET:
1264 *p_alert_mode = val;
1265 break;
1266 default:
1267 return -EINVAL;
1268 }
1269
1270 return 0;
1271}
1272
1273static int net_dm_alert_mode_set(struct genl_info *info)
1274{
1275 struct netlink_ext_ack *extack = info->extack;
1276 enum net_dm_alert_mode alert_mode;
1277 int rc;
1278
1279 if (!info->attrs[NET_DM_ATTR_ALERT_MODE])
1280 return 0;
1281
1282 rc = net_dm_alert_mode_get_from_info(info, &alert_mode);
1283 if (rc) {
1284 NL_SET_ERR_MSG_MOD(extack, "Invalid alert mode");
1285 return -EINVAL;
1286 }
1287
1288 net_dm_alert_mode = alert_mode;
1289
1290 return 0;
1291}
1292
1293static void net_dm_trunc_len_set(struct genl_info *info)
1294{
1295 if (!info->attrs[NET_DM_ATTR_TRUNC_LEN])
1296 return;
1297
1298 net_dm_trunc_len = nla_get_u32(info->attrs[NET_DM_ATTR_TRUNC_LEN]);
1299}
1300
1301static void net_dm_queue_len_set(struct genl_info *info)
1302{
1303 if (!info->attrs[NET_DM_ATTR_QUEUE_LEN])
1304 return;
1305
1306 net_dm_queue_len = nla_get_u32(info->attrs[NET_DM_ATTR_QUEUE_LEN]);
1307}
1308
1309static int net_dm_cmd_config(struct sk_buff *skb,
1310 struct genl_info *info)
1311{
1312 struct netlink_ext_ack *extack = info->extack;
1313 int rc;
1314
1315 if (net_dm_is_monitoring()) {
1316 NL_SET_ERR_MSG_MOD(extack, "Cannot configure drop monitor during monitoring");
1317 return -EBUSY;
1318 }
1319
1320 rc = net_dm_alert_mode_set(info);
1321 if (rc)
1322 return rc;
1323
1324 net_dm_trunc_len_set(info);
1325
1326 net_dm_queue_len_set(info);
1327
1328 return 0;
1329}
1330
1331static int net_dm_monitor_start(bool set_sw, bool set_hw,
1332 struct netlink_ext_ack *extack)
1333{
1334 bool sw_set = false;
1335 int rc;
1336
1337 if (set_sw) {
1338 rc = set_all_monitor_traces(TRACE_ON, extack);
1339 if (rc)
1340 return rc;
1341 sw_set = true;
1342 }
1343
1344 if (set_hw) {
1345 rc = net_dm_hw_monitor_start(extack);
1346 if (rc)
1347 goto err_monitor_hw;
1348 }
1349
1350 return 0;
1351
1352err_monitor_hw:
1353 if (sw_set)
1354 set_all_monitor_traces(TRACE_OFF, extack);
1355 return rc;
1356}
1357
1358static void net_dm_monitor_stop(bool set_sw, bool set_hw,
1359 struct netlink_ext_ack *extack)
1360{
1361 if (set_hw)
1362 net_dm_hw_monitor_stop(extack);
1363 if (set_sw)
1364 set_all_monitor_traces(TRACE_OFF, extack);
1365}
1366
1367static int net_dm_cmd_trace(struct sk_buff *skb,
1368 struct genl_info *info)
1369{
1370 bool set_sw = !!info->attrs[NET_DM_ATTR_SW_DROPS];
1371 bool set_hw = !!info->attrs[NET_DM_ATTR_HW_DROPS];
1372 struct netlink_ext_ack *extack = info->extack;
1373
1374 /* To maintain backward compatibility, we start / stop monitoring of
1375 * software drops if no flag is specified.
1376 */
1377 if (!set_sw && !set_hw)
1378 set_sw = true;
1379
1380 switch (info->genlhdr->cmd) {
1381 case NET_DM_CMD_START:
1382 return net_dm_monitor_start(set_sw, set_hw, extack);
1383 case NET_DM_CMD_STOP:
1384 net_dm_monitor_stop(set_sw, set_hw, extack);
1385 return 0;
1386 }
1387
1388 return -EOPNOTSUPP;
1389}
1390
1391static int net_dm_config_fill(struct sk_buff *msg, struct genl_info *info)
1392{
1393 void *hdr;
1394
1395 hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
1396 &net_drop_monitor_family, 0, NET_DM_CMD_CONFIG_NEW);
1397 if (!hdr)
1398 return -EMSGSIZE;
1399
1400 if (nla_put_u8(msg, NET_DM_ATTR_ALERT_MODE, net_dm_alert_mode))
1401 goto nla_put_failure;
1402
1403 if (nla_put_u32(msg, NET_DM_ATTR_TRUNC_LEN, net_dm_trunc_len))
1404 goto nla_put_failure;
1405
1406 if (nla_put_u32(msg, NET_DM_ATTR_QUEUE_LEN, net_dm_queue_len))
1407 goto nla_put_failure;
1408
1409 genlmsg_end(msg, hdr);
1410
1411 return 0;
1412
1413nla_put_failure:
1414 genlmsg_cancel(msg, hdr);
1415 return -EMSGSIZE;
1416}
1417
1418static int net_dm_cmd_config_get(struct sk_buff *skb, struct genl_info *info)
1419{
1420 struct sk_buff *msg;
1421 int rc;
1422
1423 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1424 if (!msg)
1425 return -ENOMEM;
1426
1427 rc = net_dm_config_fill(msg, info);
1428 if (rc)
1429 goto free_msg;
1430
1431 return genlmsg_reply(msg, info);
1432
1433free_msg:
1434 nlmsg_free(msg);
1435 return rc;
1436}
1437
1438static void net_dm_stats_read(struct net_dm_stats *stats)
1439{
1440 int cpu;
1441
1442 memset(stats, 0, sizeof(*stats));
1443 for_each_possible_cpu(cpu) {
1444 struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
1445 struct net_dm_stats *cpu_stats = &data->stats;
1446 unsigned int start;
1447 u64 dropped;
1448
1449 do {
1450 start = u64_stats_fetch_begin(&cpu_stats->syncp);
1451 dropped = u64_stats_read(&cpu_stats->dropped);
1452 } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
1453
1454 u64_stats_add(&stats->dropped, dropped);
1455 }
1456}
1457
1458static int net_dm_stats_put(struct sk_buff *msg)
1459{
1460 struct net_dm_stats stats;
1461 struct nlattr *attr;
1462
1463 net_dm_stats_read(&stats);
1464
1465 attr = nla_nest_start(msg, NET_DM_ATTR_STATS);
1466 if (!attr)
1467 return -EMSGSIZE;
1468
1469 if (nla_put_u64_64bit(msg, NET_DM_ATTR_STATS_DROPPED,
1470 u64_stats_read(&stats.dropped), NET_DM_ATTR_PAD))
1471 goto nla_put_failure;
1472
1473 nla_nest_end(msg, attr);
1474
1475 return 0;
1476
1477nla_put_failure:
1478 nla_nest_cancel(msg, attr);
1479 return -EMSGSIZE;
1480}
1481
1482static void net_dm_hw_stats_read(struct net_dm_stats *stats)
1483{
1484 int cpu;
1485
1486 memset(stats, 0, sizeof(*stats));
1487 for_each_possible_cpu(cpu) {
1488 struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
1489 struct net_dm_stats *cpu_stats = &hw_data->stats;
1490 unsigned int start;
1491 u64 dropped;
1492
1493 do {
1494 start = u64_stats_fetch_begin(&cpu_stats->syncp);
1495 dropped = u64_stats_read(&cpu_stats->dropped);
1496 } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
1497
1498 u64_stats_add(&stats->dropped, dropped);
1499 }
1500}
1501
1502static int net_dm_hw_stats_put(struct sk_buff *msg)
1503{
1504 struct net_dm_stats stats;
1505 struct nlattr *attr;
1506
1507 net_dm_hw_stats_read(&stats);
1508
1509 attr = nla_nest_start(msg, NET_DM_ATTR_HW_STATS);
1510 if (!attr)
1511 return -EMSGSIZE;
1512
1513 if (nla_put_u64_64bit(msg, NET_DM_ATTR_STATS_DROPPED,
1514 u64_stats_read(&stats.dropped), NET_DM_ATTR_PAD))
1515 goto nla_put_failure;
1516
1517 nla_nest_end(msg, attr);
1518
1519 return 0;
1520
1521nla_put_failure:
1522 nla_nest_cancel(msg, attr);
1523 return -EMSGSIZE;
1524}
1525
1526static int net_dm_stats_fill(struct sk_buff *msg, struct genl_info *info)
1527{
1528 void *hdr;
1529 int rc;
1530
1531 hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
1532 &net_drop_monitor_family, 0, NET_DM_CMD_STATS_NEW);
1533 if (!hdr)
1534 return -EMSGSIZE;
1535
1536 rc = net_dm_stats_put(msg);
1537 if (rc)
1538 goto nla_put_failure;
1539
1540 rc = net_dm_hw_stats_put(msg);
1541 if (rc)
1542 goto nla_put_failure;
1543
1544 genlmsg_end(msg, hdr);
1545
1546 return 0;
1547
1548nla_put_failure:
1549 genlmsg_cancel(msg, hdr);
1550 return -EMSGSIZE;
1551}
1552
1553static int net_dm_cmd_stats_get(struct sk_buff *skb, struct genl_info *info)
1554{
1555 struct sk_buff *msg;
1556 int rc;
1557
1558 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1559 if (!msg)
1560 return -ENOMEM;
1561
1562 rc = net_dm_stats_fill(msg, info);
1563 if (rc)
1564 goto free_msg;
1565
1566 return genlmsg_reply(msg, info);
1567
1568free_msg:
1569 nlmsg_free(msg);
1570 return rc;
1571}
1572
1573static int dropmon_net_event(struct notifier_block *ev_block,
1574 unsigned long event, void *ptr)
1575{
1576 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1577 struct dm_hw_stat_delta *stat;
1578
1579 switch (event) {
1580 case NETDEV_REGISTER:
1581 if (WARN_ON_ONCE(rtnl_dereference(dev->dm_private)))
1582 break;
1583 stat = kzalloc(sizeof(*stat), GFP_KERNEL);
1584 if (!stat)
1585 break;
1586
1587 stat->last_rx = jiffies;
1588 rcu_assign_pointer(dev->dm_private, stat);
1589
1590 break;
1591 case NETDEV_UNREGISTER:
1592 stat = rtnl_dereference(dev->dm_private);
1593 if (stat) {
1594 rcu_assign_pointer(dev->dm_private, NULL);
1595 kfree_rcu(stat, rcu);
1596 }
1597 break;
1598 }
1599 return NOTIFY_DONE;
1600}
1601
1602static const struct nla_policy net_dm_nl_policy[NET_DM_ATTR_MAX + 1] = {
1603 [NET_DM_ATTR_UNSPEC] = { .strict_start_type = NET_DM_ATTR_UNSPEC + 1 },
1604 [NET_DM_ATTR_ALERT_MODE] = { .type = NLA_U8 },
1605 [NET_DM_ATTR_TRUNC_LEN] = { .type = NLA_U32 },
1606 [NET_DM_ATTR_QUEUE_LEN] = { .type = NLA_U32 },
1607 [NET_DM_ATTR_SW_DROPS] = {. type = NLA_FLAG },
1608 [NET_DM_ATTR_HW_DROPS] = {. type = NLA_FLAG },
1609};
1610
1611static const struct genl_small_ops dropmon_ops[] = {
1612 {
1613 .cmd = NET_DM_CMD_CONFIG,
1614 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1615 .doit = net_dm_cmd_config,
1616 .flags = GENL_ADMIN_PERM,
1617 },
1618 {
1619 .cmd = NET_DM_CMD_START,
1620 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1621 .doit = net_dm_cmd_trace,
1622 .flags = GENL_ADMIN_PERM,
1623 },
1624 {
1625 .cmd = NET_DM_CMD_STOP,
1626 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1627 .doit = net_dm_cmd_trace,
1628 .flags = GENL_ADMIN_PERM,
1629 },
1630 {
1631 .cmd = NET_DM_CMD_CONFIG_GET,
1632 .doit = net_dm_cmd_config_get,
1633 },
1634 {
1635 .cmd = NET_DM_CMD_STATS_GET,
1636 .doit = net_dm_cmd_stats_get,
1637 },
1638};
1639
1640static int net_dm_nl_pre_doit(const struct genl_split_ops *ops,
1641 struct sk_buff *skb, struct genl_info *info)
1642{
1643 mutex_lock(&net_dm_mutex);
1644
1645 return 0;
1646}
1647
1648static void net_dm_nl_post_doit(const struct genl_split_ops *ops,
1649 struct sk_buff *skb, struct genl_info *info)
1650{
1651 mutex_unlock(&net_dm_mutex);
1652}
1653
1654static struct genl_family net_drop_monitor_family __ro_after_init = {
1655 .hdrsize = 0,
1656 .name = "NET_DM",
1657 .version = 2,
1658 .maxattr = NET_DM_ATTR_MAX,
1659 .policy = net_dm_nl_policy,
1660 .pre_doit = net_dm_nl_pre_doit,
1661 .post_doit = net_dm_nl_post_doit,
1662 .module = THIS_MODULE,
1663 .small_ops = dropmon_ops,
1664 .n_small_ops = ARRAY_SIZE(dropmon_ops),
1665 .resv_start_op = NET_DM_CMD_STATS_GET + 1,
1666 .mcgrps = dropmon_mcgrps,
1667 .n_mcgrps = ARRAY_SIZE(dropmon_mcgrps),
1668};
1669
1670static struct notifier_block dropmon_net_notifier = {
1671 .notifier_call = dropmon_net_event
1672};
1673
1674static void __net_dm_cpu_data_init(struct per_cpu_dm_data *data)
1675{
1676 spin_lock_init(&data->lock);
1677 skb_queue_head_init(&data->drop_queue);
1678 u64_stats_init(&data->stats.syncp);
1679}
1680
1681static void __net_dm_cpu_data_fini(struct per_cpu_dm_data *data)
1682{
1683 WARN_ON(!skb_queue_empty(&data->drop_queue));
1684}
1685
1686static void net_dm_cpu_data_init(int cpu)
1687{
1688 struct per_cpu_dm_data *data;
1689
1690 data = &per_cpu(dm_cpu_data, cpu);
1691 __net_dm_cpu_data_init(data);
1692}
1693
1694static void net_dm_cpu_data_fini(int cpu)
1695{
1696 struct per_cpu_dm_data *data;
1697
1698 data = &per_cpu(dm_cpu_data, cpu);
1699 /* At this point, we should have exclusive access
1700 * to this struct and can free the skb inside it.
1701 */
1702 consume_skb(data->skb);
1703 __net_dm_cpu_data_fini(data);
1704}
1705
1706static void net_dm_hw_cpu_data_init(int cpu)
1707{
1708 struct per_cpu_dm_data *hw_data;
1709
1710 hw_data = &per_cpu(dm_hw_cpu_data, cpu);
1711 __net_dm_cpu_data_init(hw_data);
1712}
1713
1714static void net_dm_hw_cpu_data_fini(int cpu)
1715{
1716 struct per_cpu_dm_data *hw_data;
1717
1718 hw_data = &per_cpu(dm_hw_cpu_data, cpu);
1719 kfree(hw_data->hw_entries);
1720 __net_dm_cpu_data_fini(hw_data);
1721}
1722
1723static int __init init_net_drop_monitor(void)
1724{
1725 int cpu, rc;
1726
1727 pr_info("Initializing network drop monitor service\n");
1728
1729 if (sizeof(void *) > 8) {
1730 pr_err("Unable to store program counters on this arch, Drop monitor failed\n");
1731 return -ENOSPC;
1732 }
1733
1734 rc = genl_register_family(&net_drop_monitor_family);
1735 if (rc) {
1736 pr_err("Could not create drop monitor netlink family\n");
1737 return rc;
1738 }
1739 WARN_ON(net_drop_monitor_family.mcgrp_offset != NET_DM_GRP_ALERT);
1740
1741 rc = register_netdevice_notifier(&dropmon_net_notifier);
1742 if (rc < 0) {
1743 pr_crit("Failed to register netdevice notifier\n");
1744 goto out_unreg;
1745 }
1746
1747 rc = 0;
1748
1749 for_each_possible_cpu(cpu) {
1750 net_dm_cpu_data_init(cpu);
1751 net_dm_hw_cpu_data_init(cpu);
1752 }
1753
1754 goto out;
1755
1756out_unreg:
1757 genl_unregister_family(&net_drop_monitor_family);
1758out:
1759 return rc;
1760}
1761
1762static void exit_net_drop_monitor(void)
1763{
1764 int cpu;
1765
1766 BUG_ON(unregister_netdevice_notifier(&dropmon_net_notifier));
1767
1768 /*
1769 * Because of the module_get/put we do in the trace state change path
1770 * we are guaranteed not to have any current users when we get here
1771 */
1772
1773 for_each_possible_cpu(cpu) {
1774 net_dm_hw_cpu_data_fini(cpu);
1775 net_dm_cpu_data_fini(cpu);
1776 }
1777
1778 BUG_ON(genl_unregister_family(&net_drop_monitor_family));
1779}
1780
1781module_init(init_net_drop_monitor);
1782module_exit(exit_net_drop_monitor);
1783
1784MODULE_LICENSE("GPL v2");
1785MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
1786MODULE_ALIAS_GENL_FAMILY("NET_DM");
1787MODULE_DESCRIPTION("Monitoring code for network dropped packet alerts");
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Monitoring code for network dropped packet alerts
4 *
5 * Copyright (C) 2009 Neil Horman <nhorman@tuxdriver.com>
6 */
7
8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
10#include <linux/netdevice.h>
11#include <linux/etherdevice.h>
12#include <linux/string.h>
13#include <linux/if_arp.h>
14#include <linux/inetdevice.h>
15#include <linux/inet.h>
16#include <linux/interrupt.h>
17#include <linux/netpoll.h>
18#include <linux/sched.h>
19#include <linux/delay.h>
20#include <linux/types.h>
21#include <linux/workqueue.h>
22#include <linux/netlink.h>
23#include <linux/net_dropmon.h>
24#include <linux/percpu.h>
25#include <linux/timer.h>
26#include <linux/bitops.h>
27#include <linux/slab.h>
28#include <linux/module.h>
29#include <net/drop_monitor.h>
30#include <net/genetlink.h>
31#include <net/netevent.h>
32
33#include <trace/events/skb.h>
34#include <trace/events/napi.h>
35
36#include <asm/unaligned.h>
37
38#define TRACE_ON 1
39#define TRACE_OFF 0
40
41/*
42 * Globals, our netlink socket pointer
43 * and the work handle that will send up
44 * netlink alerts
45 */
46static int trace_state = TRACE_OFF;
47static bool monitor_hw;
48
49/* net_dm_mutex
50 *
51 * An overall lock guarding every operation coming from userspace.
52 * It also guards the global 'hw_stats_list' list.
53 */
54static DEFINE_MUTEX(net_dm_mutex);
55
56struct net_dm_stats {
57 u64 dropped;
58 struct u64_stats_sync syncp;
59};
60
61#define NET_DM_MAX_HW_TRAP_NAME_LEN 40
62
63struct net_dm_hw_entry {
64 char trap_name[NET_DM_MAX_HW_TRAP_NAME_LEN];
65 u32 count;
66};
67
68struct net_dm_hw_entries {
69 u32 num_entries;
70 struct net_dm_hw_entry entries[0];
71};
72
73struct per_cpu_dm_data {
74 spinlock_t lock; /* Protects 'skb', 'hw_entries' and
75 * 'send_timer'
76 */
77 union {
78 struct sk_buff *skb;
79 struct net_dm_hw_entries *hw_entries;
80 };
81 struct sk_buff_head drop_queue;
82 struct work_struct dm_alert_work;
83 struct timer_list send_timer;
84 struct net_dm_stats stats;
85};
86
87struct dm_hw_stat_delta {
88 struct net_device *dev;
89 unsigned long last_rx;
90 struct list_head list;
91 struct rcu_head rcu;
92 unsigned long last_drop_val;
93};
94
95static struct genl_family net_drop_monitor_family;
96
97static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_cpu_data);
98static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_hw_cpu_data);
99
100static int dm_hit_limit = 64;
101static int dm_delay = 1;
102static unsigned long dm_hw_check_delta = 2*HZ;
103static LIST_HEAD(hw_stats_list);
104
105static enum net_dm_alert_mode net_dm_alert_mode = NET_DM_ALERT_MODE_SUMMARY;
106static u32 net_dm_trunc_len;
107static u32 net_dm_queue_len = 1000;
108
109struct net_dm_alert_ops {
110 void (*kfree_skb_probe)(void *ignore, struct sk_buff *skb,
111 void *location);
112 void (*napi_poll_probe)(void *ignore, struct napi_struct *napi,
113 int work, int budget);
114 void (*work_item_func)(struct work_struct *work);
115 void (*hw_work_item_func)(struct work_struct *work);
116 void (*hw_probe)(struct sk_buff *skb,
117 const struct net_dm_hw_metadata *hw_metadata);
118};
119
120struct net_dm_skb_cb {
121 union {
122 struct net_dm_hw_metadata *hw_metadata;
123 void *pc;
124 };
125};
126
127#define NET_DM_SKB_CB(__skb) ((struct net_dm_skb_cb *)&((__skb)->cb[0]))
128
129static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
130{
131 size_t al;
132 struct net_dm_alert_msg *msg;
133 struct nlattr *nla;
134 struct sk_buff *skb;
135 unsigned long flags;
136 void *msg_header;
137
138 al = sizeof(struct net_dm_alert_msg);
139 al += dm_hit_limit * sizeof(struct net_dm_drop_point);
140 al += sizeof(struct nlattr);
141
142 skb = genlmsg_new(al, GFP_KERNEL);
143
144 if (!skb)
145 goto err;
146
147 msg_header = genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
148 0, NET_DM_CMD_ALERT);
149 if (!msg_header) {
150 nlmsg_free(skb);
151 skb = NULL;
152 goto err;
153 }
154 nla = nla_reserve(skb, NLA_UNSPEC,
155 sizeof(struct net_dm_alert_msg));
156 if (!nla) {
157 nlmsg_free(skb);
158 skb = NULL;
159 goto err;
160 }
161 msg = nla_data(nla);
162 memset(msg, 0, al);
163 goto out;
164
165err:
166 mod_timer(&data->send_timer, jiffies + HZ / 10);
167out:
168 spin_lock_irqsave(&data->lock, flags);
169 swap(data->skb, skb);
170 spin_unlock_irqrestore(&data->lock, flags);
171
172 if (skb) {
173 struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
174 struct genlmsghdr *gnlh = (struct genlmsghdr *)nlmsg_data(nlh);
175
176 genlmsg_end(skb, genlmsg_data(gnlh));
177 }
178
179 return skb;
180}
181
182static const struct genl_multicast_group dropmon_mcgrps[] = {
183 { .name = "events", },
184};
185
186static void send_dm_alert(struct work_struct *work)
187{
188 struct sk_buff *skb;
189 struct per_cpu_dm_data *data;
190
191 data = container_of(work, struct per_cpu_dm_data, dm_alert_work);
192
193 skb = reset_per_cpu_data(data);
194
195 if (skb)
196 genlmsg_multicast(&net_drop_monitor_family, skb, 0,
197 0, GFP_KERNEL);
198}
199
200/*
201 * This is the timer function to delay the sending of an alert
202 * in the event that more drops will arrive during the
203 * hysteresis period.
204 */
205static void sched_send_work(struct timer_list *t)
206{
207 struct per_cpu_dm_data *data = from_timer(data, t, send_timer);
208
209 schedule_work(&data->dm_alert_work);
210}
211
212static void trace_drop_common(struct sk_buff *skb, void *location)
213{
214 struct net_dm_alert_msg *msg;
215 struct nlmsghdr *nlh;
216 struct nlattr *nla;
217 int i;
218 struct sk_buff *dskb;
219 struct per_cpu_dm_data *data;
220 unsigned long flags;
221
222 local_irq_save(flags);
223 data = this_cpu_ptr(&dm_cpu_data);
224 spin_lock(&data->lock);
225 dskb = data->skb;
226
227 if (!dskb)
228 goto out;
229
230 nlh = (struct nlmsghdr *)dskb->data;
231 nla = genlmsg_data(nlmsg_data(nlh));
232 msg = nla_data(nla);
233 for (i = 0; i < msg->entries; i++) {
234 if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) {
235 msg->points[i].count++;
236 goto out;
237 }
238 }
239 if (msg->entries == dm_hit_limit)
240 goto out;
241 /*
242 * We need to create a new entry
243 */
244 __nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point));
245 nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
246 memcpy(msg->points[msg->entries].pc, &location, sizeof(void *));
247 msg->points[msg->entries].count = 1;
248 msg->entries++;
249
250 if (!timer_pending(&data->send_timer)) {
251 data->send_timer.expires = jiffies + dm_delay * HZ;
252 add_timer(&data->send_timer);
253 }
254
255out:
256 spin_unlock_irqrestore(&data->lock, flags);
257}
258
259static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location)
260{
261 trace_drop_common(skb, location);
262}
263
264static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi,
265 int work, int budget)
266{
267 struct dm_hw_stat_delta *new_stat;
268
269 /*
270 * Don't check napi structures with no associated device
271 */
272 if (!napi->dev)
273 return;
274
275 rcu_read_lock();
276 list_for_each_entry_rcu(new_stat, &hw_stats_list, list) {
277 /*
278 * only add a note to our monitor buffer if:
279 * 1) this is the dev we received on
280 * 2) its after the last_rx delta
281 * 3) our rx_dropped count has gone up
282 */
283 if ((new_stat->dev == napi->dev) &&
284 (time_after(jiffies, new_stat->last_rx + dm_hw_check_delta)) &&
285 (napi->dev->stats.rx_dropped != new_stat->last_drop_val)) {
286 trace_drop_common(NULL, NULL);
287 new_stat->last_drop_val = napi->dev->stats.rx_dropped;
288 new_stat->last_rx = jiffies;
289 break;
290 }
291 }
292 rcu_read_unlock();
293}
294
295static struct net_dm_hw_entries *
296net_dm_hw_reset_per_cpu_data(struct per_cpu_dm_data *hw_data)
297{
298 struct net_dm_hw_entries *hw_entries;
299 unsigned long flags;
300
301 hw_entries = kzalloc(struct_size(hw_entries, entries, dm_hit_limit),
302 GFP_KERNEL);
303 if (!hw_entries) {
304 /* If the memory allocation failed, we try to perform another
305 * allocation in 1/10 second. Otherwise, the probe function
306 * will constantly bail out.
307 */
308 mod_timer(&hw_data->send_timer, jiffies + HZ / 10);
309 }
310
311 spin_lock_irqsave(&hw_data->lock, flags);
312 swap(hw_data->hw_entries, hw_entries);
313 spin_unlock_irqrestore(&hw_data->lock, flags);
314
315 return hw_entries;
316}
317
318static int net_dm_hw_entry_put(struct sk_buff *msg,
319 const struct net_dm_hw_entry *hw_entry)
320{
321 struct nlattr *attr;
322
323 attr = nla_nest_start(msg, NET_DM_ATTR_HW_ENTRY);
324 if (!attr)
325 return -EMSGSIZE;
326
327 if (nla_put_string(msg, NET_DM_ATTR_HW_TRAP_NAME, hw_entry->trap_name))
328 goto nla_put_failure;
329
330 if (nla_put_u32(msg, NET_DM_ATTR_HW_TRAP_COUNT, hw_entry->count))
331 goto nla_put_failure;
332
333 nla_nest_end(msg, attr);
334
335 return 0;
336
337nla_put_failure:
338 nla_nest_cancel(msg, attr);
339 return -EMSGSIZE;
340}
341
342static int net_dm_hw_entries_put(struct sk_buff *msg,
343 const struct net_dm_hw_entries *hw_entries)
344{
345 struct nlattr *attr;
346 int i;
347
348 attr = nla_nest_start(msg, NET_DM_ATTR_HW_ENTRIES);
349 if (!attr)
350 return -EMSGSIZE;
351
352 for (i = 0; i < hw_entries->num_entries; i++) {
353 int rc;
354
355 rc = net_dm_hw_entry_put(msg, &hw_entries->entries[i]);
356 if (rc)
357 goto nla_put_failure;
358 }
359
360 nla_nest_end(msg, attr);
361
362 return 0;
363
364nla_put_failure:
365 nla_nest_cancel(msg, attr);
366 return -EMSGSIZE;
367}
368
369static int
370net_dm_hw_summary_report_fill(struct sk_buff *msg,
371 const struct net_dm_hw_entries *hw_entries)
372{
373 struct net_dm_alert_msg anc_hdr = { 0 };
374 void *hdr;
375 int rc;
376
377 hdr = genlmsg_put(msg, 0, 0, &net_drop_monitor_family, 0,
378 NET_DM_CMD_ALERT);
379 if (!hdr)
380 return -EMSGSIZE;
381
382 /* We need to put the ancillary header in order not to break user
383 * space.
384 */
385 if (nla_put(msg, NLA_UNSPEC, sizeof(anc_hdr), &anc_hdr))
386 goto nla_put_failure;
387
388 rc = net_dm_hw_entries_put(msg, hw_entries);
389 if (rc)
390 goto nla_put_failure;
391
392 genlmsg_end(msg, hdr);
393
394 return 0;
395
396nla_put_failure:
397 genlmsg_cancel(msg, hdr);
398 return -EMSGSIZE;
399}
400
401static void net_dm_hw_summary_work(struct work_struct *work)
402{
403 struct net_dm_hw_entries *hw_entries;
404 struct per_cpu_dm_data *hw_data;
405 struct sk_buff *msg;
406 int rc;
407
408 hw_data = container_of(work, struct per_cpu_dm_data, dm_alert_work);
409
410 hw_entries = net_dm_hw_reset_per_cpu_data(hw_data);
411 if (!hw_entries)
412 return;
413
414 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
415 if (!msg)
416 goto out;
417
418 rc = net_dm_hw_summary_report_fill(msg, hw_entries);
419 if (rc) {
420 nlmsg_free(msg);
421 goto out;
422 }
423
424 genlmsg_multicast(&net_drop_monitor_family, msg, 0, 0, GFP_KERNEL);
425
426out:
427 kfree(hw_entries);
428}
429
430static void
431net_dm_hw_summary_probe(struct sk_buff *skb,
432 const struct net_dm_hw_metadata *hw_metadata)
433{
434 struct net_dm_hw_entries *hw_entries;
435 struct net_dm_hw_entry *hw_entry;
436 struct per_cpu_dm_data *hw_data;
437 unsigned long flags;
438 int i;
439
440 hw_data = this_cpu_ptr(&dm_hw_cpu_data);
441 spin_lock_irqsave(&hw_data->lock, flags);
442 hw_entries = hw_data->hw_entries;
443
444 if (!hw_entries)
445 goto out;
446
447 for (i = 0; i < hw_entries->num_entries; i++) {
448 hw_entry = &hw_entries->entries[i];
449 if (!strncmp(hw_entry->trap_name, hw_metadata->trap_name,
450 NET_DM_MAX_HW_TRAP_NAME_LEN - 1)) {
451 hw_entry->count++;
452 goto out;
453 }
454 }
455 if (WARN_ON_ONCE(hw_entries->num_entries == dm_hit_limit))
456 goto out;
457
458 hw_entry = &hw_entries->entries[hw_entries->num_entries];
459 strlcpy(hw_entry->trap_name, hw_metadata->trap_name,
460 NET_DM_MAX_HW_TRAP_NAME_LEN - 1);
461 hw_entry->count = 1;
462 hw_entries->num_entries++;
463
464 if (!timer_pending(&hw_data->send_timer)) {
465 hw_data->send_timer.expires = jiffies + dm_delay * HZ;
466 add_timer(&hw_data->send_timer);
467 }
468
469out:
470 spin_unlock_irqrestore(&hw_data->lock, flags);
471}
472
473static const struct net_dm_alert_ops net_dm_alert_summary_ops = {
474 .kfree_skb_probe = trace_kfree_skb_hit,
475 .napi_poll_probe = trace_napi_poll_hit,
476 .work_item_func = send_dm_alert,
477 .hw_work_item_func = net_dm_hw_summary_work,
478 .hw_probe = net_dm_hw_summary_probe,
479};
480
481static void net_dm_packet_trace_kfree_skb_hit(void *ignore,
482 struct sk_buff *skb,
483 void *location)
484{
485 ktime_t tstamp = ktime_get_real();
486 struct per_cpu_dm_data *data;
487 struct sk_buff *nskb;
488 unsigned long flags;
489
490 if (!skb_mac_header_was_set(skb))
491 return;
492
493 nskb = skb_clone(skb, GFP_ATOMIC);
494 if (!nskb)
495 return;
496
497 NET_DM_SKB_CB(nskb)->pc = location;
498 /* Override the timestamp because we care about the time when the
499 * packet was dropped.
500 */
501 nskb->tstamp = tstamp;
502
503 data = this_cpu_ptr(&dm_cpu_data);
504
505 spin_lock_irqsave(&data->drop_queue.lock, flags);
506 if (skb_queue_len(&data->drop_queue) < net_dm_queue_len)
507 __skb_queue_tail(&data->drop_queue, nskb);
508 else
509 goto unlock_free;
510 spin_unlock_irqrestore(&data->drop_queue.lock, flags);
511
512 schedule_work(&data->dm_alert_work);
513
514 return;
515
516unlock_free:
517 spin_unlock_irqrestore(&data->drop_queue.lock, flags);
518 u64_stats_update_begin(&data->stats.syncp);
519 data->stats.dropped++;
520 u64_stats_update_end(&data->stats.syncp);
521 consume_skb(nskb);
522}
523
524static void net_dm_packet_trace_napi_poll_hit(void *ignore,
525 struct napi_struct *napi,
526 int work, int budget)
527{
528}
529
530static size_t net_dm_in_port_size(void)
531{
532 /* NET_DM_ATTR_IN_PORT nest */
533 return nla_total_size(0) +
534 /* NET_DM_ATTR_PORT_NETDEV_IFINDEX */
535 nla_total_size(sizeof(u32)) +
536 /* NET_DM_ATTR_PORT_NETDEV_NAME */
537 nla_total_size(IFNAMSIZ + 1);
538}
539
540#define NET_DM_MAX_SYMBOL_LEN 40
541
542static size_t net_dm_packet_report_size(size_t payload_len)
543{
544 size_t size;
545
546 size = nlmsg_msg_size(GENL_HDRLEN + net_drop_monitor_family.hdrsize);
547
548 return NLMSG_ALIGN(size) +
549 /* NET_DM_ATTR_ORIGIN */
550 nla_total_size(sizeof(u16)) +
551 /* NET_DM_ATTR_PC */
552 nla_total_size(sizeof(u64)) +
553 /* NET_DM_ATTR_SYMBOL */
554 nla_total_size(NET_DM_MAX_SYMBOL_LEN + 1) +
555 /* NET_DM_ATTR_IN_PORT */
556 net_dm_in_port_size() +
557 /* NET_DM_ATTR_TIMESTAMP */
558 nla_total_size(sizeof(u64)) +
559 /* NET_DM_ATTR_ORIG_LEN */
560 nla_total_size(sizeof(u32)) +
561 /* NET_DM_ATTR_PROTO */
562 nla_total_size(sizeof(u16)) +
563 /* NET_DM_ATTR_PAYLOAD */
564 nla_total_size(payload_len);
565}
566
567static int net_dm_packet_report_in_port_put(struct sk_buff *msg, int ifindex,
568 const char *name)
569{
570 struct nlattr *attr;
571
572 attr = nla_nest_start(msg, NET_DM_ATTR_IN_PORT);
573 if (!attr)
574 return -EMSGSIZE;
575
576 if (ifindex &&
577 nla_put_u32(msg, NET_DM_ATTR_PORT_NETDEV_IFINDEX, ifindex))
578 goto nla_put_failure;
579
580 if (name && nla_put_string(msg, NET_DM_ATTR_PORT_NETDEV_NAME, name))
581 goto nla_put_failure;
582
583 nla_nest_end(msg, attr);
584
585 return 0;
586
587nla_put_failure:
588 nla_nest_cancel(msg, attr);
589 return -EMSGSIZE;
590}
591
592static int net_dm_packet_report_fill(struct sk_buff *msg, struct sk_buff *skb,
593 size_t payload_len)
594{
595 u64 pc = (u64)(uintptr_t) NET_DM_SKB_CB(skb)->pc;
596 char buf[NET_DM_MAX_SYMBOL_LEN];
597 struct nlattr *attr;
598 void *hdr;
599 int rc;
600
601 hdr = genlmsg_put(msg, 0, 0, &net_drop_monitor_family, 0,
602 NET_DM_CMD_PACKET_ALERT);
603 if (!hdr)
604 return -EMSGSIZE;
605
606 if (nla_put_u16(msg, NET_DM_ATTR_ORIGIN, NET_DM_ORIGIN_SW))
607 goto nla_put_failure;
608
609 if (nla_put_u64_64bit(msg, NET_DM_ATTR_PC, pc, NET_DM_ATTR_PAD))
610 goto nla_put_failure;
611
612 snprintf(buf, sizeof(buf), "%pS", NET_DM_SKB_CB(skb)->pc);
613 if (nla_put_string(msg, NET_DM_ATTR_SYMBOL, buf))
614 goto nla_put_failure;
615
616 rc = net_dm_packet_report_in_port_put(msg, skb->skb_iif, NULL);
617 if (rc)
618 goto nla_put_failure;
619
620 if (nla_put_u64_64bit(msg, NET_DM_ATTR_TIMESTAMP,
621 ktime_to_ns(skb->tstamp), NET_DM_ATTR_PAD))
622 goto nla_put_failure;
623
624 if (nla_put_u32(msg, NET_DM_ATTR_ORIG_LEN, skb->len))
625 goto nla_put_failure;
626
627 if (!payload_len)
628 goto out;
629
630 if (nla_put_u16(msg, NET_DM_ATTR_PROTO, be16_to_cpu(skb->protocol)))
631 goto nla_put_failure;
632
633 attr = skb_put(msg, nla_total_size(payload_len));
634 attr->nla_type = NET_DM_ATTR_PAYLOAD;
635 attr->nla_len = nla_attr_size(payload_len);
636 if (skb_copy_bits(skb, 0, nla_data(attr), payload_len))
637 goto nla_put_failure;
638
639out:
640 genlmsg_end(msg, hdr);
641
642 return 0;
643
644nla_put_failure:
645 genlmsg_cancel(msg, hdr);
646 return -EMSGSIZE;
647}
648
649#define NET_DM_MAX_PACKET_SIZE (0xffff - NLA_HDRLEN - NLA_ALIGNTO)
650
651static void net_dm_packet_report(struct sk_buff *skb)
652{
653 struct sk_buff *msg;
654 size_t payload_len;
655 int rc;
656
657 /* Make sure we start copying the packet from the MAC header */
658 if (skb->data > skb_mac_header(skb))
659 skb_push(skb, skb->data - skb_mac_header(skb));
660 else
661 skb_pull(skb, skb_mac_header(skb) - skb->data);
662
663 /* Ensure packet fits inside a single netlink attribute */
664 payload_len = min_t(size_t, skb->len, NET_DM_MAX_PACKET_SIZE);
665 if (net_dm_trunc_len)
666 payload_len = min_t(size_t, net_dm_trunc_len, payload_len);
667
668 msg = nlmsg_new(net_dm_packet_report_size(payload_len), GFP_KERNEL);
669 if (!msg)
670 goto out;
671
672 rc = net_dm_packet_report_fill(msg, skb, payload_len);
673 if (rc) {
674 nlmsg_free(msg);
675 goto out;
676 }
677
678 genlmsg_multicast(&net_drop_monitor_family, msg, 0, 0, GFP_KERNEL);
679
680out:
681 consume_skb(skb);
682}
683
684static void net_dm_packet_work(struct work_struct *work)
685{
686 struct per_cpu_dm_data *data;
687 struct sk_buff_head list;
688 struct sk_buff *skb;
689 unsigned long flags;
690
691 data = container_of(work, struct per_cpu_dm_data, dm_alert_work);
692
693 __skb_queue_head_init(&list);
694
695 spin_lock_irqsave(&data->drop_queue.lock, flags);
696 skb_queue_splice_tail_init(&data->drop_queue, &list);
697 spin_unlock_irqrestore(&data->drop_queue.lock, flags);
698
699 while ((skb = __skb_dequeue(&list)))
700 net_dm_packet_report(skb);
701}
702
703static size_t
704net_dm_hw_packet_report_size(size_t payload_len,
705 const struct net_dm_hw_metadata *hw_metadata)
706{
707 size_t size;
708
709 size = nlmsg_msg_size(GENL_HDRLEN + net_drop_monitor_family.hdrsize);
710
711 return NLMSG_ALIGN(size) +
712 /* NET_DM_ATTR_ORIGIN */
713 nla_total_size(sizeof(u16)) +
714 /* NET_DM_ATTR_HW_TRAP_GROUP_NAME */
715 nla_total_size(strlen(hw_metadata->trap_group_name) + 1) +
716 /* NET_DM_ATTR_HW_TRAP_NAME */
717 nla_total_size(strlen(hw_metadata->trap_name) + 1) +
718 /* NET_DM_ATTR_IN_PORT */
719 net_dm_in_port_size() +
720 /* NET_DM_ATTR_TIMESTAMP */
721 nla_total_size(sizeof(u64)) +
722 /* NET_DM_ATTR_ORIG_LEN */
723 nla_total_size(sizeof(u32)) +
724 /* NET_DM_ATTR_PROTO */
725 nla_total_size(sizeof(u16)) +
726 /* NET_DM_ATTR_PAYLOAD */
727 nla_total_size(payload_len);
728}
729
730static int net_dm_hw_packet_report_fill(struct sk_buff *msg,
731 struct sk_buff *skb, size_t payload_len)
732{
733 struct net_dm_hw_metadata *hw_metadata;
734 struct nlattr *attr;
735 void *hdr;
736
737 hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata;
738
739 hdr = genlmsg_put(msg, 0, 0, &net_drop_monitor_family, 0,
740 NET_DM_CMD_PACKET_ALERT);
741 if (!hdr)
742 return -EMSGSIZE;
743
744 if (nla_put_u16(msg, NET_DM_ATTR_ORIGIN, NET_DM_ORIGIN_HW))
745 goto nla_put_failure;
746
747 if (nla_put_string(msg, NET_DM_ATTR_HW_TRAP_GROUP_NAME,
748 hw_metadata->trap_group_name))
749 goto nla_put_failure;
750
751 if (nla_put_string(msg, NET_DM_ATTR_HW_TRAP_NAME,
752 hw_metadata->trap_name))
753 goto nla_put_failure;
754
755 if (hw_metadata->input_dev) {
756 struct net_device *dev = hw_metadata->input_dev;
757 int rc;
758
759 rc = net_dm_packet_report_in_port_put(msg, dev->ifindex,
760 dev->name);
761 if (rc)
762 goto nla_put_failure;
763 }
764
765 if (nla_put_u64_64bit(msg, NET_DM_ATTR_TIMESTAMP,
766 ktime_to_ns(skb->tstamp), NET_DM_ATTR_PAD))
767 goto nla_put_failure;
768
769 if (nla_put_u32(msg, NET_DM_ATTR_ORIG_LEN, skb->len))
770 goto nla_put_failure;
771
772 if (!payload_len)
773 goto out;
774
775 if (nla_put_u16(msg, NET_DM_ATTR_PROTO, be16_to_cpu(skb->protocol)))
776 goto nla_put_failure;
777
778 attr = skb_put(msg, nla_total_size(payload_len));
779 attr->nla_type = NET_DM_ATTR_PAYLOAD;
780 attr->nla_len = nla_attr_size(payload_len);
781 if (skb_copy_bits(skb, 0, nla_data(attr), payload_len))
782 goto nla_put_failure;
783
784out:
785 genlmsg_end(msg, hdr);
786
787 return 0;
788
789nla_put_failure:
790 genlmsg_cancel(msg, hdr);
791 return -EMSGSIZE;
792}
793
794static struct net_dm_hw_metadata *
795net_dm_hw_metadata_clone(const struct net_dm_hw_metadata *hw_metadata)
796{
797 struct net_dm_hw_metadata *n_hw_metadata;
798 const char *trap_group_name;
799 const char *trap_name;
800
801 n_hw_metadata = kmalloc(sizeof(*hw_metadata), GFP_ATOMIC);
802 if (!n_hw_metadata)
803 return NULL;
804
805 trap_group_name = kmemdup(hw_metadata->trap_group_name,
806 strlen(hw_metadata->trap_group_name) + 1,
807 GFP_ATOMIC | __GFP_ZERO);
808 if (!trap_group_name)
809 goto free_hw_metadata;
810 n_hw_metadata->trap_group_name = trap_group_name;
811
812 trap_name = kmemdup(hw_metadata->trap_name,
813 strlen(hw_metadata->trap_name) + 1,
814 GFP_ATOMIC | __GFP_ZERO);
815 if (!trap_name)
816 goto free_trap_group;
817 n_hw_metadata->trap_name = trap_name;
818
819 n_hw_metadata->input_dev = hw_metadata->input_dev;
820 if (n_hw_metadata->input_dev)
821 dev_hold(n_hw_metadata->input_dev);
822
823 return n_hw_metadata;
824
825free_trap_group:
826 kfree(trap_group_name);
827free_hw_metadata:
828 kfree(n_hw_metadata);
829 return NULL;
830}
831
832static void
833net_dm_hw_metadata_free(const struct net_dm_hw_metadata *hw_metadata)
834{
835 if (hw_metadata->input_dev)
836 dev_put(hw_metadata->input_dev);
837 kfree(hw_metadata->trap_name);
838 kfree(hw_metadata->trap_group_name);
839 kfree(hw_metadata);
840}
841
842static void net_dm_hw_packet_report(struct sk_buff *skb)
843{
844 struct net_dm_hw_metadata *hw_metadata;
845 struct sk_buff *msg;
846 size_t payload_len;
847 int rc;
848
849 if (skb->data > skb_mac_header(skb))
850 skb_push(skb, skb->data - skb_mac_header(skb));
851 else
852 skb_pull(skb, skb_mac_header(skb) - skb->data);
853
854 payload_len = min_t(size_t, skb->len, NET_DM_MAX_PACKET_SIZE);
855 if (net_dm_trunc_len)
856 payload_len = min_t(size_t, net_dm_trunc_len, payload_len);
857
858 hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata;
859 msg = nlmsg_new(net_dm_hw_packet_report_size(payload_len, hw_metadata),
860 GFP_KERNEL);
861 if (!msg)
862 goto out;
863
864 rc = net_dm_hw_packet_report_fill(msg, skb, payload_len);
865 if (rc) {
866 nlmsg_free(msg);
867 goto out;
868 }
869
870 genlmsg_multicast(&net_drop_monitor_family, msg, 0, 0, GFP_KERNEL);
871
872out:
873 net_dm_hw_metadata_free(NET_DM_SKB_CB(skb)->hw_metadata);
874 consume_skb(skb);
875}
876
877static void net_dm_hw_packet_work(struct work_struct *work)
878{
879 struct per_cpu_dm_data *hw_data;
880 struct sk_buff_head list;
881 struct sk_buff *skb;
882 unsigned long flags;
883
884 hw_data = container_of(work, struct per_cpu_dm_data, dm_alert_work);
885
886 __skb_queue_head_init(&list);
887
888 spin_lock_irqsave(&hw_data->drop_queue.lock, flags);
889 skb_queue_splice_tail_init(&hw_data->drop_queue, &list);
890 spin_unlock_irqrestore(&hw_data->drop_queue.lock, flags);
891
892 while ((skb = __skb_dequeue(&list)))
893 net_dm_hw_packet_report(skb);
894}
895
896static void
897net_dm_hw_packet_probe(struct sk_buff *skb,
898 const struct net_dm_hw_metadata *hw_metadata)
899{
900 struct net_dm_hw_metadata *n_hw_metadata;
901 ktime_t tstamp = ktime_get_real();
902 struct per_cpu_dm_data *hw_data;
903 struct sk_buff *nskb;
904 unsigned long flags;
905
906 if (!skb_mac_header_was_set(skb))
907 return;
908
909 nskb = skb_clone(skb, GFP_ATOMIC);
910 if (!nskb)
911 return;
912
913 n_hw_metadata = net_dm_hw_metadata_clone(hw_metadata);
914 if (!n_hw_metadata)
915 goto free;
916
917 NET_DM_SKB_CB(nskb)->hw_metadata = n_hw_metadata;
918 nskb->tstamp = tstamp;
919
920 hw_data = this_cpu_ptr(&dm_hw_cpu_data);
921
922 spin_lock_irqsave(&hw_data->drop_queue.lock, flags);
923 if (skb_queue_len(&hw_data->drop_queue) < net_dm_queue_len)
924 __skb_queue_tail(&hw_data->drop_queue, nskb);
925 else
926 goto unlock_free;
927 spin_unlock_irqrestore(&hw_data->drop_queue.lock, flags);
928
929 schedule_work(&hw_data->dm_alert_work);
930
931 return;
932
933unlock_free:
934 spin_unlock_irqrestore(&hw_data->drop_queue.lock, flags);
935 u64_stats_update_begin(&hw_data->stats.syncp);
936 hw_data->stats.dropped++;
937 u64_stats_update_end(&hw_data->stats.syncp);
938 net_dm_hw_metadata_free(n_hw_metadata);
939free:
940 consume_skb(nskb);
941}
942
943static const struct net_dm_alert_ops net_dm_alert_packet_ops = {
944 .kfree_skb_probe = net_dm_packet_trace_kfree_skb_hit,
945 .napi_poll_probe = net_dm_packet_trace_napi_poll_hit,
946 .work_item_func = net_dm_packet_work,
947 .hw_work_item_func = net_dm_hw_packet_work,
948 .hw_probe = net_dm_hw_packet_probe,
949};
950
951static const struct net_dm_alert_ops *net_dm_alert_ops_arr[] = {
952 [NET_DM_ALERT_MODE_SUMMARY] = &net_dm_alert_summary_ops,
953 [NET_DM_ALERT_MODE_PACKET] = &net_dm_alert_packet_ops,
954};
955
956void net_dm_hw_report(struct sk_buff *skb,
957 const struct net_dm_hw_metadata *hw_metadata)
958{
959 rcu_read_lock();
960
961 if (!monitor_hw)
962 goto out;
963
964 net_dm_alert_ops_arr[net_dm_alert_mode]->hw_probe(skb, hw_metadata);
965
966out:
967 rcu_read_unlock();
968}
969EXPORT_SYMBOL_GPL(net_dm_hw_report);
970
971static int net_dm_hw_monitor_start(struct netlink_ext_ack *extack)
972{
973 const struct net_dm_alert_ops *ops;
974 int cpu;
975
976 if (monitor_hw) {
977 NL_SET_ERR_MSG_MOD(extack, "Hardware monitoring already enabled");
978 return -EAGAIN;
979 }
980
981 ops = net_dm_alert_ops_arr[net_dm_alert_mode];
982
983 if (!try_module_get(THIS_MODULE)) {
984 NL_SET_ERR_MSG_MOD(extack, "Failed to take reference on module");
985 return -ENODEV;
986 }
987
988 for_each_possible_cpu(cpu) {
989 struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
990 struct net_dm_hw_entries *hw_entries;
991
992 INIT_WORK(&hw_data->dm_alert_work, ops->hw_work_item_func);
993 timer_setup(&hw_data->send_timer, sched_send_work, 0);
994 hw_entries = net_dm_hw_reset_per_cpu_data(hw_data);
995 kfree(hw_entries);
996 }
997
998 monitor_hw = true;
999
1000 return 0;
1001}
1002
1003static void net_dm_hw_monitor_stop(struct netlink_ext_ack *extack)
1004{
1005 int cpu;
1006
1007 if (!monitor_hw)
1008 NL_SET_ERR_MSG_MOD(extack, "Hardware monitoring already disabled");
1009
1010 monitor_hw = false;
1011
1012 /* After this call returns we are guaranteed that no CPU is processing
1013 * any hardware drops.
1014 */
1015 synchronize_rcu();
1016
1017 for_each_possible_cpu(cpu) {
1018 struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
1019 struct sk_buff *skb;
1020
1021 del_timer_sync(&hw_data->send_timer);
1022 cancel_work_sync(&hw_data->dm_alert_work);
1023 while ((skb = __skb_dequeue(&hw_data->drop_queue))) {
1024 struct net_dm_hw_metadata *hw_metadata;
1025
1026 hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata;
1027 net_dm_hw_metadata_free(hw_metadata);
1028 consume_skb(skb);
1029 }
1030 }
1031
1032 module_put(THIS_MODULE);
1033}
1034
1035static int net_dm_trace_on_set(struct netlink_ext_ack *extack)
1036{
1037 const struct net_dm_alert_ops *ops;
1038 int cpu, rc;
1039
1040 ops = net_dm_alert_ops_arr[net_dm_alert_mode];
1041
1042 if (!try_module_get(THIS_MODULE)) {
1043 NL_SET_ERR_MSG_MOD(extack, "Failed to take reference on module");
1044 return -ENODEV;
1045 }
1046
1047 for_each_possible_cpu(cpu) {
1048 struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
1049 struct sk_buff *skb;
1050
1051 INIT_WORK(&data->dm_alert_work, ops->work_item_func);
1052 timer_setup(&data->send_timer, sched_send_work, 0);
1053 /* Allocate a new per-CPU skb for the summary alert message and
1054 * free the old one which might contain stale data from
1055 * previous tracing.
1056 */
1057 skb = reset_per_cpu_data(data);
1058 consume_skb(skb);
1059 }
1060
1061 rc = register_trace_kfree_skb(ops->kfree_skb_probe, NULL);
1062 if (rc) {
1063 NL_SET_ERR_MSG_MOD(extack, "Failed to connect probe to kfree_skb() tracepoint");
1064 goto err_module_put;
1065 }
1066
1067 rc = register_trace_napi_poll(ops->napi_poll_probe, NULL);
1068 if (rc) {
1069 NL_SET_ERR_MSG_MOD(extack, "Failed to connect probe to napi_poll() tracepoint");
1070 goto err_unregister_trace;
1071 }
1072
1073 return 0;
1074
1075err_unregister_trace:
1076 unregister_trace_kfree_skb(ops->kfree_skb_probe, NULL);
1077err_module_put:
1078 module_put(THIS_MODULE);
1079 return rc;
1080}
1081
1082static void net_dm_trace_off_set(void)
1083{
1084 struct dm_hw_stat_delta *new_stat, *temp;
1085 const struct net_dm_alert_ops *ops;
1086 int cpu;
1087
1088 ops = net_dm_alert_ops_arr[net_dm_alert_mode];
1089
1090 unregister_trace_napi_poll(ops->napi_poll_probe, NULL);
1091 unregister_trace_kfree_skb(ops->kfree_skb_probe, NULL);
1092
1093 tracepoint_synchronize_unregister();
1094
1095 /* Make sure we do not send notifications to user space after request
1096 * to stop tracing returns.
1097 */
1098 for_each_possible_cpu(cpu) {
1099 struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
1100 struct sk_buff *skb;
1101
1102 del_timer_sync(&data->send_timer);
1103 cancel_work_sync(&data->dm_alert_work);
1104 while ((skb = __skb_dequeue(&data->drop_queue)))
1105 consume_skb(skb);
1106 }
1107
1108 list_for_each_entry_safe(new_stat, temp, &hw_stats_list, list) {
1109 if (new_stat->dev == NULL) {
1110 list_del_rcu(&new_stat->list);
1111 kfree_rcu(new_stat, rcu);
1112 }
1113 }
1114
1115 module_put(THIS_MODULE);
1116}
1117
1118static int set_all_monitor_traces(int state, struct netlink_ext_ack *extack)
1119{
1120 int rc = 0;
1121
1122 if (state == trace_state) {
1123 NL_SET_ERR_MSG_MOD(extack, "Trace state already set to requested state");
1124 return -EAGAIN;
1125 }
1126
1127 switch (state) {
1128 case TRACE_ON:
1129 rc = net_dm_trace_on_set(extack);
1130 break;
1131 case TRACE_OFF:
1132 net_dm_trace_off_set();
1133 break;
1134 default:
1135 rc = 1;
1136 break;
1137 }
1138
1139 if (!rc)
1140 trace_state = state;
1141 else
1142 rc = -EINPROGRESS;
1143
1144 return rc;
1145}
1146
1147static bool net_dm_is_monitoring(void)
1148{
1149 return trace_state == TRACE_ON || monitor_hw;
1150}
1151
1152static int net_dm_alert_mode_get_from_info(struct genl_info *info,
1153 enum net_dm_alert_mode *p_alert_mode)
1154{
1155 u8 val;
1156
1157 val = nla_get_u8(info->attrs[NET_DM_ATTR_ALERT_MODE]);
1158
1159 switch (val) {
1160 case NET_DM_ALERT_MODE_SUMMARY: /* fall-through */
1161 case NET_DM_ALERT_MODE_PACKET:
1162 *p_alert_mode = val;
1163 break;
1164 default:
1165 return -EINVAL;
1166 }
1167
1168 return 0;
1169}
1170
1171static int net_dm_alert_mode_set(struct genl_info *info)
1172{
1173 struct netlink_ext_ack *extack = info->extack;
1174 enum net_dm_alert_mode alert_mode;
1175 int rc;
1176
1177 if (!info->attrs[NET_DM_ATTR_ALERT_MODE])
1178 return 0;
1179
1180 rc = net_dm_alert_mode_get_from_info(info, &alert_mode);
1181 if (rc) {
1182 NL_SET_ERR_MSG_MOD(extack, "Invalid alert mode");
1183 return -EINVAL;
1184 }
1185
1186 net_dm_alert_mode = alert_mode;
1187
1188 return 0;
1189}
1190
1191static void net_dm_trunc_len_set(struct genl_info *info)
1192{
1193 if (!info->attrs[NET_DM_ATTR_TRUNC_LEN])
1194 return;
1195
1196 net_dm_trunc_len = nla_get_u32(info->attrs[NET_DM_ATTR_TRUNC_LEN]);
1197}
1198
1199static void net_dm_queue_len_set(struct genl_info *info)
1200{
1201 if (!info->attrs[NET_DM_ATTR_QUEUE_LEN])
1202 return;
1203
1204 net_dm_queue_len = nla_get_u32(info->attrs[NET_DM_ATTR_QUEUE_LEN]);
1205}
1206
1207static int net_dm_cmd_config(struct sk_buff *skb,
1208 struct genl_info *info)
1209{
1210 struct netlink_ext_ack *extack = info->extack;
1211 int rc;
1212
1213 if (net_dm_is_monitoring()) {
1214 NL_SET_ERR_MSG_MOD(extack, "Cannot configure drop monitor during monitoring");
1215 return -EBUSY;
1216 }
1217
1218 rc = net_dm_alert_mode_set(info);
1219 if (rc)
1220 return rc;
1221
1222 net_dm_trunc_len_set(info);
1223
1224 net_dm_queue_len_set(info);
1225
1226 return 0;
1227}
1228
1229static int net_dm_monitor_start(bool set_sw, bool set_hw,
1230 struct netlink_ext_ack *extack)
1231{
1232 bool sw_set = false;
1233 int rc;
1234
1235 if (set_sw) {
1236 rc = set_all_monitor_traces(TRACE_ON, extack);
1237 if (rc)
1238 return rc;
1239 sw_set = true;
1240 }
1241
1242 if (set_hw) {
1243 rc = net_dm_hw_monitor_start(extack);
1244 if (rc)
1245 goto err_monitor_hw;
1246 }
1247
1248 return 0;
1249
1250err_monitor_hw:
1251 if (sw_set)
1252 set_all_monitor_traces(TRACE_OFF, extack);
1253 return rc;
1254}
1255
1256static void net_dm_monitor_stop(bool set_sw, bool set_hw,
1257 struct netlink_ext_ack *extack)
1258{
1259 if (set_hw)
1260 net_dm_hw_monitor_stop(extack);
1261 if (set_sw)
1262 set_all_monitor_traces(TRACE_OFF, extack);
1263}
1264
1265static int net_dm_cmd_trace(struct sk_buff *skb,
1266 struct genl_info *info)
1267{
1268 bool set_sw = !!info->attrs[NET_DM_ATTR_SW_DROPS];
1269 bool set_hw = !!info->attrs[NET_DM_ATTR_HW_DROPS];
1270 struct netlink_ext_ack *extack = info->extack;
1271
1272 /* To maintain backward compatibility, we start / stop monitoring of
1273 * software drops if no flag is specified.
1274 */
1275 if (!set_sw && !set_hw)
1276 set_sw = true;
1277
1278 switch (info->genlhdr->cmd) {
1279 case NET_DM_CMD_START:
1280 return net_dm_monitor_start(set_sw, set_hw, extack);
1281 case NET_DM_CMD_STOP:
1282 net_dm_monitor_stop(set_sw, set_hw, extack);
1283 return 0;
1284 }
1285
1286 return -EOPNOTSUPP;
1287}
1288
1289static int net_dm_config_fill(struct sk_buff *msg, struct genl_info *info)
1290{
1291 void *hdr;
1292
1293 hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
1294 &net_drop_monitor_family, 0, NET_DM_CMD_CONFIG_NEW);
1295 if (!hdr)
1296 return -EMSGSIZE;
1297
1298 if (nla_put_u8(msg, NET_DM_ATTR_ALERT_MODE, net_dm_alert_mode))
1299 goto nla_put_failure;
1300
1301 if (nla_put_u32(msg, NET_DM_ATTR_TRUNC_LEN, net_dm_trunc_len))
1302 goto nla_put_failure;
1303
1304 if (nla_put_u32(msg, NET_DM_ATTR_QUEUE_LEN, net_dm_queue_len))
1305 goto nla_put_failure;
1306
1307 genlmsg_end(msg, hdr);
1308
1309 return 0;
1310
1311nla_put_failure:
1312 genlmsg_cancel(msg, hdr);
1313 return -EMSGSIZE;
1314}
1315
1316static int net_dm_cmd_config_get(struct sk_buff *skb, struct genl_info *info)
1317{
1318 struct sk_buff *msg;
1319 int rc;
1320
1321 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1322 if (!msg)
1323 return -ENOMEM;
1324
1325 rc = net_dm_config_fill(msg, info);
1326 if (rc)
1327 goto free_msg;
1328
1329 return genlmsg_reply(msg, info);
1330
1331free_msg:
1332 nlmsg_free(msg);
1333 return rc;
1334}
1335
1336static void net_dm_stats_read(struct net_dm_stats *stats)
1337{
1338 int cpu;
1339
1340 memset(stats, 0, sizeof(*stats));
1341 for_each_possible_cpu(cpu) {
1342 struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
1343 struct net_dm_stats *cpu_stats = &data->stats;
1344 unsigned int start;
1345 u64 dropped;
1346
1347 do {
1348 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
1349 dropped = cpu_stats->dropped;
1350 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
1351
1352 stats->dropped += dropped;
1353 }
1354}
1355
1356static int net_dm_stats_put(struct sk_buff *msg)
1357{
1358 struct net_dm_stats stats;
1359 struct nlattr *attr;
1360
1361 net_dm_stats_read(&stats);
1362
1363 attr = nla_nest_start(msg, NET_DM_ATTR_STATS);
1364 if (!attr)
1365 return -EMSGSIZE;
1366
1367 if (nla_put_u64_64bit(msg, NET_DM_ATTR_STATS_DROPPED,
1368 stats.dropped, NET_DM_ATTR_PAD))
1369 goto nla_put_failure;
1370
1371 nla_nest_end(msg, attr);
1372
1373 return 0;
1374
1375nla_put_failure:
1376 nla_nest_cancel(msg, attr);
1377 return -EMSGSIZE;
1378}
1379
1380static void net_dm_hw_stats_read(struct net_dm_stats *stats)
1381{
1382 int cpu;
1383
1384 memset(stats, 0, sizeof(*stats));
1385 for_each_possible_cpu(cpu) {
1386 struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
1387 struct net_dm_stats *cpu_stats = &hw_data->stats;
1388 unsigned int start;
1389 u64 dropped;
1390
1391 do {
1392 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
1393 dropped = cpu_stats->dropped;
1394 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
1395
1396 stats->dropped += dropped;
1397 }
1398}
1399
1400static int net_dm_hw_stats_put(struct sk_buff *msg)
1401{
1402 struct net_dm_stats stats;
1403 struct nlattr *attr;
1404
1405 net_dm_hw_stats_read(&stats);
1406
1407 attr = nla_nest_start(msg, NET_DM_ATTR_HW_STATS);
1408 if (!attr)
1409 return -EMSGSIZE;
1410
1411 if (nla_put_u64_64bit(msg, NET_DM_ATTR_STATS_DROPPED,
1412 stats.dropped, NET_DM_ATTR_PAD))
1413 goto nla_put_failure;
1414
1415 nla_nest_end(msg, attr);
1416
1417 return 0;
1418
1419nla_put_failure:
1420 nla_nest_cancel(msg, attr);
1421 return -EMSGSIZE;
1422}
1423
1424static int net_dm_stats_fill(struct sk_buff *msg, struct genl_info *info)
1425{
1426 void *hdr;
1427 int rc;
1428
1429 hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
1430 &net_drop_monitor_family, 0, NET_DM_CMD_STATS_NEW);
1431 if (!hdr)
1432 return -EMSGSIZE;
1433
1434 rc = net_dm_stats_put(msg);
1435 if (rc)
1436 goto nla_put_failure;
1437
1438 rc = net_dm_hw_stats_put(msg);
1439 if (rc)
1440 goto nla_put_failure;
1441
1442 genlmsg_end(msg, hdr);
1443
1444 return 0;
1445
1446nla_put_failure:
1447 genlmsg_cancel(msg, hdr);
1448 return -EMSGSIZE;
1449}
1450
1451static int net_dm_cmd_stats_get(struct sk_buff *skb, struct genl_info *info)
1452{
1453 struct sk_buff *msg;
1454 int rc;
1455
1456 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1457 if (!msg)
1458 return -ENOMEM;
1459
1460 rc = net_dm_stats_fill(msg, info);
1461 if (rc)
1462 goto free_msg;
1463
1464 return genlmsg_reply(msg, info);
1465
1466free_msg:
1467 nlmsg_free(msg);
1468 return rc;
1469}
1470
1471static int dropmon_net_event(struct notifier_block *ev_block,
1472 unsigned long event, void *ptr)
1473{
1474 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1475 struct dm_hw_stat_delta *new_stat = NULL;
1476 struct dm_hw_stat_delta *tmp;
1477
1478 switch (event) {
1479 case NETDEV_REGISTER:
1480 new_stat = kzalloc(sizeof(struct dm_hw_stat_delta), GFP_KERNEL);
1481
1482 if (!new_stat)
1483 goto out;
1484
1485 new_stat->dev = dev;
1486 new_stat->last_rx = jiffies;
1487 mutex_lock(&net_dm_mutex);
1488 list_add_rcu(&new_stat->list, &hw_stats_list);
1489 mutex_unlock(&net_dm_mutex);
1490 break;
1491 case NETDEV_UNREGISTER:
1492 mutex_lock(&net_dm_mutex);
1493 list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) {
1494 if (new_stat->dev == dev) {
1495 new_stat->dev = NULL;
1496 if (trace_state == TRACE_OFF) {
1497 list_del_rcu(&new_stat->list);
1498 kfree_rcu(new_stat, rcu);
1499 break;
1500 }
1501 }
1502 }
1503 mutex_unlock(&net_dm_mutex);
1504 break;
1505 }
1506out:
1507 return NOTIFY_DONE;
1508}
1509
1510static const struct nla_policy net_dm_nl_policy[NET_DM_ATTR_MAX + 1] = {
1511 [NET_DM_ATTR_UNSPEC] = { .strict_start_type = NET_DM_ATTR_UNSPEC + 1 },
1512 [NET_DM_ATTR_ALERT_MODE] = { .type = NLA_U8 },
1513 [NET_DM_ATTR_TRUNC_LEN] = { .type = NLA_U32 },
1514 [NET_DM_ATTR_QUEUE_LEN] = { .type = NLA_U32 },
1515 [NET_DM_ATTR_SW_DROPS] = {. type = NLA_FLAG },
1516 [NET_DM_ATTR_HW_DROPS] = {. type = NLA_FLAG },
1517};
1518
1519static const struct genl_ops dropmon_ops[] = {
1520 {
1521 .cmd = NET_DM_CMD_CONFIG,
1522 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1523 .doit = net_dm_cmd_config,
1524 .flags = GENL_ADMIN_PERM,
1525 },
1526 {
1527 .cmd = NET_DM_CMD_START,
1528 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1529 .doit = net_dm_cmd_trace,
1530 },
1531 {
1532 .cmd = NET_DM_CMD_STOP,
1533 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1534 .doit = net_dm_cmd_trace,
1535 },
1536 {
1537 .cmd = NET_DM_CMD_CONFIG_GET,
1538 .doit = net_dm_cmd_config_get,
1539 },
1540 {
1541 .cmd = NET_DM_CMD_STATS_GET,
1542 .doit = net_dm_cmd_stats_get,
1543 },
1544};
1545
1546static int net_dm_nl_pre_doit(const struct genl_ops *ops,
1547 struct sk_buff *skb, struct genl_info *info)
1548{
1549 mutex_lock(&net_dm_mutex);
1550
1551 return 0;
1552}
1553
1554static void net_dm_nl_post_doit(const struct genl_ops *ops,
1555 struct sk_buff *skb, struct genl_info *info)
1556{
1557 mutex_unlock(&net_dm_mutex);
1558}
1559
1560static struct genl_family net_drop_monitor_family __ro_after_init = {
1561 .hdrsize = 0,
1562 .name = "NET_DM",
1563 .version = 2,
1564 .maxattr = NET_DM_ATTR_MAX,
1565 .policy = net_dm_nl_policy,
1566 .pre_doit = net_dm_nl_pre_doit,
1567 .post_doit = net_dm_nl_post_doit,
1568 .module = THIS_MODULE,
1569 .ops = dropmon_ops,
1570 .n_ops = ARRAY_SIZE(dropmon_ops),
1571 .mcgrps = dropmon_mcgrps,
1572 .n_mcgrps = ARRAY_SIZE(dropmon_mcgrps),
1573};
1574
1575static struct notifier_block dropmon_net_notifier = {
1576 .notifier_call = dropmon_net_event
1577};
1578
1579static void __net_dm_cpu_data_init(struct per_cpu_dm_data *data)
1580{
1581 spin_lock_init(&data->lock);
1582 skb_queue_head_init(&data->drop_queue);
1583 u64_stats_init(&data->stats.syncp);
1584}
1585
1586static void __net_dm_cpu_data_fini(struct per_cpu_dm_data *data)
1587{
1588 WARN_ON(!skb_queue_empty(&data->drop_queue));
1589}
1590
1591static void net_dm_cpu_data_init(int cpu)
1592{
1593 struct per_cpu_dm_data *data;
1594
1595 data = &per_cpu(dm_cpu_data, cpu);
1596 __net_dm_cpu_data_init(data);
1597}
1598
1599static void net_dm_cpu_data_fini(int cpu)
1600{
1601 struct per_cpu_dm_data *data;
1602
1603 data = &per_cpu(dm_cpu_data, cpu);
1604 /* At this point, we should have exclusive access
1605 * to this struct and can free the skb inside it.
1606 */
1607 consume_skb(data->skb);
1608 __net_dm_cpu_data_fini(data);
1609}
1610
1611static void net_dm_hw_cpu_data_init(int cpu)
1612{
1613 struct per_cpu_dm_data *hw_data;
1614
1615 hw_data = &per_cpu(dm_hw_cpu_data, cpu);
1616 __net_dm_cpu_data_init(hw_data);
1617}
1618
1619static void net_dm_hw_cpu_data_fini(int cpu)
1620{
1621 struct per_cpu_dm_data *hw_data;
1622
1623 hw_data = &per_cpu(dm_hw_cpu_data, cpu);
1624 kfree(hw_data->hw_entries);
1625 __net_dm_cpu_data_fini(hw_data);
1626}
1627
1628static int __init init_net_drop_monitor(void)
1629{
1630 int cpu, rc;
1631
1632 pr_info("Initializing network drop monitor service\n");
1633
1634 if (sizeof(void *) > 8) {
1635 pr_err("Unable to store program counters on this arch, Drop monitor failed\n");
1636 return -ENOSPC;
1637 }
1638
1639 rc = genl_register_family(&net_drop_monitor_family);
1640 if (rc) {
1641 pr_err("Could not create drop monitor netlink family\n");
1642 return rc;
1643 }
1644 WARN_ON(net_drop_monitor_family.mcgrp_offset != NET_DM_GRP_ALERT);
1645
1646 rc = register_netdevice_notifier(&dropmon_net_notifier);
1647 if (rc < 0) {
1648 pr_crit("Failed to register netdevice notifier\n");
1649 goto out_unreg;
1650 }
1651
1652 rc = 0;
1653
1654 for_each_possible_cpu(cpu) {
1655 net_dm_cpu_data_init(cpu);
1656 net_dm_hw_cpu_data_init(cpu);
1657 }
1658
1659 goto out;
1660
1661out_unreg:
1662 genl_unregister_family(&net_drop_monitor_family);
1663out:
1664 return rc;
1665}
1666
1667static void exit_net_drop_monitor(void)
1668{
1669 int cpu;
1670
1671 BUG_ON(unregister_netdevice_notifier(&dropmon_net_notifier));
1672
1673 /*
1674 * Because of the module_get/put we do in the trace state change path
1675 * we are guarnateed not to have any current users when we get here
1676 */
1677
1678 for_each_possible_cpu(cpu) {
1679 net_dm_hw_cpu_data_fini(cpu);
1680 net_dm_cpu_data_fini(cpu);
1681 }
1682
1683 BUG_ON(genl_unregister_family(&net_drop_monitor_family));
1684}
1685
1686module_init(init_net_drop_monitor);
1687module_exit(exit_net_drop_monitor);
1688
1689MODULE_LICENSE("GPL v2");
1690MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
1691MODULE_ALIAS_GENL_FAMILY("NET_DM");