v4.17
/* Linux multicast routing support
 * Common logic shared by IPv4 [ipmr] and IPv6 [ip6mr] implementation
 */

#include <linux/mroute_base.h>

/* Sets everything common except 'dev', since that is done under locking */
void vif_device_init(struct vif_device *v,
		     struct net_device *dev,
		     unsigned long rate_limit,
		     unsigned char threshold,
		     unsigned short flags,
		     unsigned short get_iflink_mask)
{
	v->dev = NULL;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->rate_limit = rate_limit;
	v->flags = flags;
	v->threshold = threshold;
	if (v->flags & get_iflink_mask)
		v->link = dev_get_iflink(dev);
	else
		v->link = dev->ifindex;
}
EXPORT_SYMBOL(vif_device_init);
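
/* Usage sketch (hypothetical, not from this file): a protocol's vif-add
 * path would fill in the common fields with vif_device_init() and then
 * publish 'dev' itself under its own lock, as the comment above requires.
 * my_setup_vif and my_mrt_lock are placeholder names.
 */
static DEFINE_RWLOCK(my_mrt_lock);	/* stand-in for the protocol's lock */

static void my_setup_vif(struct mr_table *mrt, int vifi,
			 struct net_device *dev,
			 unsigned long rate_limit, unsigned char threshold,
			 unsigned short flags)
{
	struct vif_device *v = &mrt->vif_table[vifi];

	/* mask 0: always take dev->ifindex rather than dev_get_iflink() */
	vif_device_init(v, dev, rate_limit, threshold, flags, 0);

	/* the helper deliberately leaves v->dev NULL; publish it last */
	write_lock_bh(&my_mrt_lock);
	v->dev = dev;
	write_unlock_bh(&my_mrt_lock);
}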

struct mr_table *
mr_table_alloc(struct net *net, u32 id,
	       struct mr_table_ops *ops,
	       void (*expire_func)(struct timer_list *t),
	       void (*table_set)(struct mr_table *mrt,
				 struct net *net))
{
	struct mr_table *mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (!mrt)
		return NULL;
	mrt->id = id;
	write_pnet(&mrt->net, net);

	mrt->ops = *ops;
	if (rhltable_init(&mrt->mfc_hash, mrt->ops.rht_params)) {
		kfree(mrt);
		return NULL;
	}
	INIT_LIST_HEAD(&mrt->mfc_cache_list);
	INIT_LIST_HEAD(&mrt->mfc_unres_queue);

	timer_setup(&mrt->ipmr_expire_timer, expire_func, 0);

	mrt->mroute_reg_vif_num = -1;
	table_set(mrt, net);
	return mrt;
}
EXPORT_SYMBOL(mr_table_alloc);
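
/* Setup sketch (hypothetical, not from this file): the ops structure
 * supplies the rhashtable parameters used for mfc_hash and a key that
 * matches (*,*) entries for the *_any lookups below.  All my_* names and
 * the key layout are placeholders; the real key is protocol-specific.
 */
static const struct rhashtable_params my_rht_params = {
	.head_offset	= offsetof(struct mr_mfc, mnode),
	.key_offset	= 0,		/* placeholder key location */
	.key_len	= sizeof(u32),	/* placeholder key size */
	.automatic_shrinking = true,
};

static u32 my_cmparg_any;		/* placeholder (*,*) wildcard key */

static struct mr_table_ops my_mr_ops = {
	.rht_params	= &my_rht_params,
	.cmparg_any	= &my_cmparg_any,
};

/* mrt = mr_table_alloc(net, RT_TABLE_DEFAULT, &my_mr_ops,
 *			my_expire_func, my_table_set);
 */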

void *mr_mfc_find_parent(struct mr_table *mrt, void *hasharg, int parent)
{
	struct rhlist_head *tmp, *list;
	struct mr_mfc *c;

	list = rhltable_lookup(&mrt->mfc_hash, hasharg, *mrt->ops.rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode)
		if (parent == -1 || parent == c->mfc_parent)
			return c;

	return NULL;
}
EXPORT_SYMBOL(mr_mfc_find_parent);

void *mr_mfc_find_any_parent(struct mr_table *mrt, int vifi)
{
	struct rhlist_head *tmp, *list;
	struct mr_mfc *c;

	list = rhltable_lookup(&mrt->mfc_hash, mrt->ops.cmparg_any,
			       *mrt->ops.rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode)
		if (c->mfc_un.res.ttls[vifi] < 255)
			return c;

	return NULL;
}
EXPORT_SYMBOL(mr_mfc_find_any_parent);

void *mr_mfc_find_any(struct mr_table *mrt, int vifi, void *hasharg)
{
	struct rhlist_head *tmp, *list;
	struct mr_mfc *c, *proxy;

	list = rhltable_lookup(&mrt->mfc_hash, hasharg, *mrt->ops.rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode) {
		if (c->mfc_un.res.ttls[vifi] < 255)
			return c;

		/* It's ok if the vifi is part of the static tree */
		proxy = mr_mfc_find_any_parent(mrt, c->mfc_parent);
		if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
			return c;
	}

	return mr_mfc_find_any_parent(mrt, vifi);
}
EXPORT_SYMBOL(mr_mfc_find_any);
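
/* Lookup sketch (hypothetical caller): the three finders above walk an
 * RCU-protected rhltable, so callers hold rcu_read_lock() across the
 * lookup and any use of the returned entry.  my_route_is_resolved is a
 * placeholder consumer.
 */
static bool my_route_is_resolved(struct mr_table *mrt, void *hasharg)
{
	struct mr_mfc *c;
	bool resolved;

	rcu_read_lock();
	c = mr_mfc_find_parent(mrt, hasharg, -1);	/* -1: any parent vif */
	resolved = c && c->mfc_parent < MAXVIFS;
	rcu_read_unlock();

	return resolved;
}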

#ifdef CONFIG_PROC_FS
void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter, loff_t pos)
{
	struct mr_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif_table[iter->ct];
	}
	return NULL;
}
EXPORT_SYMBOL(mr_vif_seq_idx);

void *mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return mr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif_table[iter->ct];
	}
	return NULL;
}
EXPORT_SYMBOL(mr_vif_seq_next);

void *mr_mfc_seq_idx(struct net *net,
		     struct mr_mfc_iter *it, loff_t pos)
{
	struct mr_table *mrt = it->mrt;
	struct mr_mfc *mfc;

	rcu_read_lock();
	it->cache = &mrt->mfc_cache_list;
	list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list)
		if (pos-- == 0)
			return mfc;
	rcu_read_unlock();

	spin_lock_bh(it->lock);
	it->cache = &mrt->mfc_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(it->lock);

	it->cache = NULL;
	return NULL;
}
EXPORT_SYMBOL(mr_mfc_seq_idx);

void *mr_mfc_seq_next(struct seq_file *seq, void *v,
		      loff_t *pos)
{
	struct mr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = it->mrt;
	struct mr_mfc *c = v;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return mr_mfc_seq_idx(net, seq->private, 0);

	if (c->list.next != it->cache)
		return list_entry(c->list.next, struct mr_mfc, list);

	if (it->cache == &mrt->mfc_unres_queue)
		goto end_of_list;

	/* exhausted cache_array, show unresolved */
	rcu_read_unlock();
	it->cache = &mrt->mfc_unres_queue;

	spin_lock_bh(it->lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mr_mfc, list);

end_of_list:
	spin_unlock_bh(it->lock);
	it->cache = NULL;

	return NULL;
}
EXPORT_SYMBOL(mr_mfc_seq_next);
#endif
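
/* Wiring sketch (assumption, modeled on how ipmr/ip6mr consume these
 * helpers): the protocol supplies start/stop/show and reuses
 * mr_vif_seq_idx()/mr_vif_seq_next() for iteration.  my_* names are
 * placeholders; start/stop must take and release whatever lock protects
 * the vif table.
 */
static void *my_vif_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct mr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);

	iter->mrt = my_get_table(net);	/* hypothetical table lookup */
	return *pos ? mr_vif_seq_idx(net, iter, *pos - 1)
		    : SEQ_START_TOKEN;
}

static const struct seq_operations my_vif_seq_ops = {
	.start = my_vif_seq_start,
	.next  = mr_vif_seq_next,
	.stop  = my_vif_seq_stop,	/* hypothetical: drops the lock */
	.show  = my_vif_seq_show,	/* hypothetical: formats one vif */
};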

int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
		   struct mr_mfc *c, struct rtmsg *rtm)
{
	struct rta_mfc_stats mfcs;
	struct nlattr *mp_attr;
	struct rtnexthop *nhp;
	unsigned long lastuse;
	int ct;

	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mfc_parent >= MAXVIFS) {
		rtm->rtm_flags |= RTNH_F_UNRESOLVED;
		return -ENOENT;
	}

	if (VIF_EXISTS(mrt, c->mfc_parent) &&
	    nla_put_u32(skb, RTA_IIF,
			mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
		return -EMSGSIZE;

	if (c->mfc_flags & MFC_OFFLOAD)
		rtm->rtm_flags |= RTNH_F_OFFLOAD;

	mp_attr = nla_nest_start(skb, RTA_MULTIPATH);
	if (!mp_attr)
		return -EMSGSIZE;

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			struct vif_device *vif;

			nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
			if (!nhp) {
				nla_nest_cancel(skb, mp_attr);
				return -EMSGSIZE;
			}

			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			vif = &mrt->vif_table[ct];
			nhp->rtnh_ifindex = vif->dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}

	nla_nest_end(skb, mp_attr);

	lastuse = READ_ONCE(c->mfc_un.res.lastuse);
	lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;

	mfcs.mfcs_packets = c->mfc_un.res.pkt;
	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
	if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
	    nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
			      RTA_PAD))
		return -EMSGSIZE;

	rtm->rtm_type = RTN_MULTICAST;
	return 1;
}
EXPORT_SYMBOL(mr_fill_mroute);
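
/* Message layout produced above (sketch of the nesting, not literal wire
 * bytes): the rtmsg header is followed by
 *
 *	RTA_IIF		u32 ifindex of the input vif
 *	RTA_MULTIPATH	array of struct rtnexthop, one per output vif,
 *			with rtnh_hops carrying the vif's TTL threshold
 *	RTA_MFC_STATS	struct rta_mfc_stats (packets, bytes, wrong_if)
 *	RTA_EXPIRES	u64, clock_t ticks since the route was last used
 */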

int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
		     struct mr_table *(*iter)(struct net *net,
					      struct mr_table *mrt),
		     int (*fill)(struct mr_table *mrt,
				 struct sk_buff *skb,
				 u32 portid, u32 seq, struct mr_mfc *c,
				 int cmd, int flags),
		     spinlock_t *lock)
{
	unsigned int t = 0, e = 0, s_t = cb->args[0], s_e = cb->args[1];
	struct net *net = sock_net(skb->sk);
	struct mr_table *mrt;
	struct mr_mfc *mfc;

	rcu_read_lock();
	for (mrt = iter(net, NULL); mrt; mrt = iter(net, mrt)) {
		if (t < s_t)
			goto next_table;
		list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list) {
			if (e < s_e)
				goto next_entry;
			if (fill(mrt, skb, NETLINK_CB(cb->skb).portid,
				 cb->nlh->nlmsg_seq, mfc,
				 RTM_NEWROUTE, NLM_F_MULTI) < 0)
				goto done;
next_entry:
			e++;
		}
		e = 0;
		s_e = 0;

		spin_lock_bh(lock);
		list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
			if (e < s_e)
				goto next_entry2;
			if (fill(mrt, skb, NETLINK_CB(cb->skb).portid,
				 cb->nlh->nlmsg_seq, mfc,
				 RTM_NEWROUTE, NLM_F_MULTI) < 0) {
				spin_unlock_bh(lock);
				goto done;
			}
next_entry2:
			e++;
		}
		spin_unlock_bh(lock);
		e = 0;
		s_e = 0;
next_table:
		t++;
	}
done:
	rcu_read_unlock();

	cb->args[1] = e;
	cb->args[0] = t;

	return skb->len;
}
EXPORT_SYMBOL(mr_rtm_dumproute);
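
/* Resume protocol (sketch): netlink invokes the dump callback repeatedly
 * until it adds no more data, so cb->args[0]/args[1] persist the
 * (table index, entry index) position between invocations, and the
 * s_t/s_e comparisons above skip everything already emitted into a
 * previous skb.
 */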

int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family,
	    int (*rules_dump)(struct net *net,
			      struct notifier_block *nb),
	    struct mr_table *(*mr_iter)(struct net *net,
					struct mr_table *mrt),
	    rwlock_t *mrt_lock)
{
	struct mr_table *mrt;
	int err;

	err = rules_dump(net, nb);
	if (err)
		return err;

	for (mrt = mr_iter(net, NULL); mrt; mrt = mr_iter(net, mrt)) {
		struct vif_device *v = &mrt->vif_table[0];
		struct mr_mfc *mfc;
		int vifi;

		/* Notify on table VIF entries */
		read_lock(mrt_lock);
		for (vifi = 0; vifi < mrt->maxvif; vifi++, v++) {
			if (!v->dev)
				continue;

			mr_call_vif_notifier(nb, net, family,
					     FIB_EVENT_VIF_ADD,
					     v, vifi, mrt->id);
		}
		read_unlock(mrt_lock);

		/* Notify on table MFC entries */
		list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list)
			mr_call_mfc_notifier(nb, net, family,
					     FIB_EVENT_ENTRY_ADD,
					     mfc, mrt->id);
	}

	return 0;
}
EXPORT_SYMBOL(mr_dump);

v6.13.7
/* Linux multicast routing support
 * Common logic shared by IPv4 [ipmr] and IPv6 [ip6mr] implementation
 */

#include <linux/rhashtable.h>
#include <linux/mroute_base.h>

/* Sets everything common except 'dev', since that is done under locking */
void vif_device_init(struct vif_device *v,
		     struct net_device *dev,
		     unsigned long rate_limit,
		     unsigned char threshold,
		     unsigned short flags,
		     unsigned short get_iflink_mask)
{
	RCU_INIT_POINTER(v->dev, NULL);
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->rate_limit = rate_limit;
	v->flags = flags;
	v->threshold = threshold;
	if (v->flags & get_iflink_mask)
		v->link = dev_get_iflink(dev);
	else
		v->link = dev->ifindex;
}
EXPORT_SYMBOL(vif_device_init);
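
/* Reader sketch (hypothetical helper, not in this file): vif->dev is now
 * an RCU-managed pointer, initialized with RCU_INIT_POINTER() above, so
 * readers pair rcu_dereference() with an RCU read-side section instead of
 * relying on a table lock.
 */
static int my_vif_ifindex(struct vif_device *v)
{
	struct net_device *dev;
	int ifindex = 0;

	rcu_read_lock();
	dev = rcu_dereference(v->dev);
	if (dev)
		ifindex = dev->ifindex;
	rcu_read_unlock();

	return ifindex;
}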

struct mr_table *
mr_table_alloc(struct net *net, u32 id,
	       struct mr_table_ops *ops,
	       void (*expire_func)(struct timer_list *t),
	       void (*table_set)(struct mr_table *mrt,
				 struct net *net))
{
	struct mr_table *mrt;
	int err;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (!mrt)
		return ERR_PTR(-ENOMEM);
	mrt->id = id;
	write_pnet(&mrt->net, net);

	mrt->ops = *ops;
	err = rhltable_init(&mrt->mfc_hash, mrt->ops.rht_params);
	if (err) {
		kfree(mrt);
		return ERR_PTR(err);
	}
	INIT_LIST_HEAD(&mrt->mfc_cache_list);
	INIT_LIST_HEAD(&mrt->mfc_unres_queue);

	timer_setup(&mrt->ipmr_expire_timer, expire_func, 0);

	mrt->mroute_reg_vif_num = -1;
	table_set(mrt, net);
	return mrt;
}
EXPORT_SYMBOL(mr_table_alloc);
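
/* Unlike the v4.17 variant above, which returned NULL on any failure,
 * this version reports the reason via ERR_PTR(); a caller sketch (my_*
 * names are placeholders):
 *
 *	mrt = mr_table_alloc(net, id, &my_mr_ops,
 *			     my_expire_func, my_table_set);
 *	if (IS_ERR(mrt))
 *		return PTR_ERR(mrt);
 */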

void *mr_mfc_find_parent(struct mr_table *mrt, void *hasharg, int parent)
{
	struct rhlist_head *tmp, *list;
	struct mr_mfc *c;

	list = rhltable_lookup(&mrt->mfc_hash, hasharg, *mrt->ops.rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode)
		if (parent == -1 || parent == c->mfc_parent)
			return c;

	return NULL;
}
EXPORT_SYMBOL(mr_mfc_find_parent);

void *mr_mfc_find_any_parent(struct mr_table *mrt, int vifi)
{
	struct rhlist_head *tmp, *list;
	struct mr_mfc *c;

	list = rhltable_lookup(&mrt->mfc_hash, mrt->ops.cmparg_any,
			       *mrt->ops.rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode)
		if (c->mfc_un.res.ttls[vifi] < 255)
			return c;

	return NULL;
}
EXPORT_SYMBOL(mr_mfc_find_any_parent);

void *mr_mfc_find_any(struct mr_table *mrt, int vifi, void *hasharg)
{
	struct rhlist_head *tmp, *list;
	struct mr_mfc *c, *proxy;

	list = rhltable_lookup(&mrt->mfc_hash, hasharg, *mrt->ops.rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode) {
		if (c->mfc_un.res.ttls[vifi] < 255)
			return c;

		/* It's ok if the vifi is part of the static tree */
		proxy = mr_mfc_find_any_parent(mrt, c->mfc_parent);
		if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
			return c;
	}

	return mr_mfc_find_any_parent(mrt, vifi);
}
EXPORT_SYMBOL(mr_mfc_find_any);

#ifdef CONFIG_PROC_FS
void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter, loff_t pos)
{
	struct mr_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif_table[iter->ct];
	}
	return NULL;
}
EXPORT_SYMBOL(mr_vif_seq_idx);

void *mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return mr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif_table[iter->ct];
	}
	return NULL;
}
EXPORT_SYMBOL(mr_vif_seq_next);

void *mr_mfc_seq_idx(struct net *net,
		     struct mr_mfc_iter *it, loff_t pos)
{
	struct mr_table *mrt = it->mrt;
	struct mr_mfc *mfc;

	rcu_read_lock();
	it->cache = &mrt->mfc_cache_list;
	list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list)
		if (pos-- == 0)
			return mfc;
	rcu_read_unlock();

	spin_lock_bh(it->lock);
	it->cache = &mrt->mfc_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(it->lock);

	it->cache = NULL;
	return NULL;
}
EXPORT_SYMBOL(mr_mfc_seq_idx);

void *mr_mfc_seq_next(struct seq_file *seq, void *v,
		      loff_t *pos)
{
	struct mr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = it->mrt;
	struct mr_mfc *c = v;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return mr_mfc_seq_idx(net, seq->private, 0);

	if (c->list.next != it->cache)
		return list_entry(c->list.next, struct mr_mfc, list);

	if (it->cache == &mrt->mfc_unres_queue)
		goto end_of_list;

	/* exhausted cache_array, show unresolved */
	rcu_read_unlock();
	it->cache = &mrt->mfc_unres_queue;

	spin_lock_bh(it->lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mr_mfc, list);

end_of_list:
	spin_unlock_bh(it->lock);
	it->cache = NULL;

	return NULL;
}
EXPORT_SYMBOL(mr_mfc_seq_next);
#endif

int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
		   struct mr_mfc *c, struct rtmsg *rtm)
{
	struct net_device *vif_dev;
	struct rta_mfc_stats mfcs;
	struct nlattr *mp_attr;
	struct rtnexthop *nhp;
	unsigned long lastuse;
	int ct;

	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mfc_parent >= MAXVIFS) {
		rtm->rtm_flags |= RTNH_F_UNRESOLVED;
		return -ENOENT;
	}

	rcu_read_lock();
	vif_dev = rcu_dereference(mrt->vif_table[c->mfc_parent].dev);
	if (vif_dev && nla_put_u32(skb, RTA_IIF, vif_dev->ifindex) < 0) {
		rcu_read_unlock();
		return -EMSGSIZE;
	}
	rcu_read_unlock();

	if (c->mfc_flags & MFC_OFFLOAD)
		rtm->rtm_flags |= RTNH_F_OFFLOAD;

	mp_attr = nla_nest_start_noflag(skb, RTA_MULTIPATH);
	if (!mp_attr)
		return -EMSGSIZE;

	rcu_read_lock();
	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		struct vif_device *vif = &mrt->vif_table[ct];

		vif_dev = rcu_dereference(vif->dev);
		if (vif_dev && c->mfc_un.res.ttls[ct] < 255) {
			nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
			if (!nhp) {
				rcu_read_unlock();
				nla_nest_cancel(skb, mp_attr);
				return -EMSGSIZE;
			}

			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = vif_dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}
	rcu_read_unlock();

	nla_nest_end(skb, mp_attr);

	lastuse = READ_ONCE(c->mfc_un.res.lastuse);
	lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;

	mfcs.mfcs_packets = atomic_long_read(&c->mfc_un.res.pkt);
	mfcs.mfcs_bytes = atomic_long_read(&c->mfc_un.res.bytes);
	mfcs.mfcs_wrong_if = atomic_long_read(&c->mfc_un.res.wrong_if);
	if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
	    nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
			      RTA_PAD))
		return -EMSGSIZE;

	rtm->rtm_type = RTN_MULTICAST;
	return 1;
}
EXPORT_SYMBOL(mr_fill_mroute);
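
/* Differences from the v4.17 version above, as visible in this file: vif
 * device pointers are read via rcu_dereference() under rcu_read_lock()
 * rather than assumed stable, the RTA_MULTIPATH nest is opened with
 * nla_nest_start_noflag(), and the pkt/bytes/wrong_if counters are now
 * atomic_long_t, read here with atomic_long_read().
 */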

static bool mr_mfc_uses_dev(const struct mr_table *mrt,
			    const struct mr_mfc *c,
			    const struct net_device *dev)
{
	int ct;

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		const struct net_device *vif_dev;
		const struct vif_device *vif;

		vif = &mrt->vif_table[ct];
		vif_dev = rcu_access_pointer(vif->dev);
		if (vif_dev && c->mfc_un.res.ttls[ct] < 255 &&
		    vif_dev == dev)
			return true;
	}
	return false;
}

int mr_table_dump(struct mr_table *mrt, struct sk_buff *skb,
		  struct netlink_callback *cb,
		  int (*fill)(struct mr_table *mrt, struct sk_buff *skb,
			      u32 portid, u32 seq, struct mr_mfc *c,
			      int cmd, int flags),
		  spinlock_t *lock, struct fib_dump_filter *filter)
{
	unsigned int e = 0, s_e = cb->args[1];
	unsigned int flags = NLM_F_MULTI;
	struct mr_mfc *mfc;
	int err;

	if (filter->filter_set)
		flags |= NLM_F_DUMP_FILTERED;

	list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list,
				lockdep_rtnl_is_held()) {
		if (e < s_e)
			goto next_entry;
		if (filter->dev &&
		    !mr_mfc_uses_dev(mrt, mfc, filter->dev))
			goto next_entry;

		err = fill(mrt, skb, NETLINK_CB(cb->skb).portid,
			   cb->nlh->nlmsg_seq, mfc, RTM_NEWROUTE, flags);
		if (err < 0)
			goto out;
next_entry:
		e++;
	}

	spin_lock_bh(lock);
	list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
		if (e < s_e)
			goto next_entry2;

		err = fill(mrt, skb, NETLINK_CB(cb->skb).portid,
			   cb->nlh->nlmsg_seq, mfc, RTM_NEWROUTE, flags);
		if (err < 0) {
			spin_unlock_bh(lock);
			goto out;
		}
next_entry2:
		e++;
	}
	spin_unlock_bh(lock);
	err = 0;
out:
	cb->args[1] = e;
	return err;
}
EXPORT_SYMBOL(mr_table_dump);
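
/* Filter sketch (hypothetical caller): fib_dump_filter lets a dump be
 * restricted to routes that forward through one device.  Only the fields
 * consulted above are set; my_dump_routes_for_dev and my_fill_route are
 * placeholder names.
 */
static int my_dump_routes_for_dev(struct mr_table *mrt, struct sk_buff *skb,
				  struct netlink_callback *cb,
				  struct net_device *dev, spinlock_t *lock)
{
	struct fib_dump_filter filter = {
		.dev		= dev,
		.filter_set	= true,
	};

	return mr_table_dump(mrt, skb, cb, my_fill_route, lock, &filter);
}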

int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
		     struct mr_table *(*iter)(struct net *net,
					      struct mr_table *mrt),
		     int (*fill)(struct mr_table *mrt,
				 struct sk_buff *skb,
				 u32 portid, u32 seq, struct mr_mfc *c,
				 int cmd, int flags),
		     spinlock_t *lock, struct fib_dump_filter *filter)
{
	unsigned int t = 0, s_t = cb->args[0];
	struct net *net = sock_net(skb->sk);
	struct mr_table *mrt;
	int err;

	/* multicast does not track protocol or have route type other
	 * than RTN_MULTICAST
	 */
	if (filter->filter_set) {
		if (filter->protocol || filter->flags ||
		    (filter->rt_type && filter->rt_type != RTN_MULTICAST))
			return skb->len;
	}

	rcu_read_lock();
	for (mrt = iter(net, NULL); mrt; mrt = iter(net, mrt)) {
		if (t < s_t)
			goto next_table;

		err = mr_table_dump(mrt, skb, cb, fill, lock, filter);
		if (err < 0)
			break;
		cb->args[1] = 0;
next_table:
		t++;
	}
	rcu_read_unlock();

	cb->args[0] = t;

	return skb->len;
}
EXPORT_SYMBOL(mr_rtm_dumproute);

int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family,
	    int (*rules_dump)(struct net *net,
			      struct notifier_block *nb,
			      struct netlink_ext_ack *extack),
	    struct mr_table *(*mr_iter)(struct net *net,
					struct mr_table *mrt),
	    struct netlink_ext_ack *extack)
{
	struct mr_table *mrt;
	int err;

	err = rules_dump(net, nb, extack);
	if (err)
		return err;

	for (mrt = mr_iter(net, NULL); mrt; mrt = mr_iter(net, mrt)) {
		struct vif_device *v = &mrt->vif_table[0];
		struct net_device *vif_dev;
		struct mr_mfc *mfc;
		int vifi;

		/* Notify on table VIF entries */
		rcu_read_lock();
		for (vifi = 0; vifi < mrt->maxvif; vifi++, v++) {
			vif_dev = rcu_dereference(v->dev);
			if (!vif_dev)
				continue;

			err = mr_call_vif_notifier(nb, family,
						   FIB_EVENT_VIF_ADD, v,
						   vif_dev, vifi,
						   mrt->id, extack);
			if (err)
				break;
		}
		rcu_read_unlock();

		if (err)
			return err;

		/* Notify on table MFC entries */
		list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list) {
			err = mr_call_mfc_notifier(nb, family,
						   FIB_EVENT_ENTRY_ADD,
						   mfc, mrt->id, extack);
			if (err)
				return err;
		}
	}

	return 0;
}
EXPORT_SYMBOL(mr_dump);
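
/* Registration sketch (hypothetical): a FIB notifier consumer has the
 * existing VIF and MFC state replayed through mr_dump().  my_mr_replay,
 * my_rules_dump and my_mr_iter are placeholders for the protocol's
 * callbacks; RTNL_FAMILY_IPMR is the family value ipmr itself uses.
 */
static int my_mr_replay(struct net *net, struct notifier_block *nb,
			struct netlink_ext_ack *extack)
{
	return mr_dump(net, nb, RTNL_FAMILY_IPMR, my_rules_dump, my_mr_iter,
		       extack);
}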