/* Linux multicast routing support
 * Common logic shared by IPv4 [ipmr] and IPv6 [ip6mr] implementation
 */

#include <linux/mroute_base.h>

/* Sets everything common except 'dev', since that is done under locking */
void vif_device_init(struct vif_device *v,
		     struct net_device *dev,
		     unsigned long rate_limit,
		     unsigned char threshold,
		     unsigned short flags,
		     unsigned short get_iflink_mask)
{
	v->dev = NULL;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->rate_limit = rate_limit;
	v->flags = flags;
	v->threshold = threshold;
	if (v->flags & get_iflink_mask)
		v->link = dev_get_iflink(dev);
	else
		v->link = dev->ifindex;
}
EXPORT_SYMBOL(vif_device_init);

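/* Allocate and initialize a multicast routing table with the given @id.
 * The caller supplies the per-family @ops, the @expire_func handler for the
 * unresolved-entry timer and a @table_set callback that links the new table
 * into per-netns state. Returns NULL on allocation or hash-table setup
 * failure.
 */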
struct mr_table *
mr_table_alloc(struct net *net, u32 id,
	       struct mr_table_ops *ops,
	       void (*expire_func)(struct timer_list *t),
	       void (*table_set)(struct mr_table *mrt,
				 struct net *net))
{
	struct mr_table *mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (!mrt)
		return NULL;
	mrt->id = id;
	write_pnet(&mrt->net, net);

	mrt->ops = *ops;
	if (rhltable_init(&mrt->mfc_hash, mrt->ops.rht_params)) {
		kfree(mrt);
		return NULL;
	}
	INIT_LIST_HEAD(&mrt->mfc_cache_list);
	INIT_LIST_HEAD(&mrt->mfc_unres_queue);

	timer_setup(&mrt->ipmr_expire_timer, expire_func, 0);

	mrt->mroute_reg_vif_num = -1;
	table_set(mrt, net);
	return mrt;
}
EXPORT_SYMBOL(mr_table_alloc);

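/* Look up an MFC cache entry by the family-specific @hasharg key,
 * optionally restricted to a given @parent vif (-1 matches any parent).
 */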
void *mr_mfc_find_parent(struct mr_table *mrt, void *hasharg, int parent)
{
	struct rhlist_head *tmp, *list;
	struct mr_mfc *c;

	list = rhltable_lookup(&mrt->mfc_hash, hasharg, *mrt->ops.rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode)
		if (parent == -1 || parent == c->mfc_parent)
			return c;

	return NULL;
}
EXPORT_SYMBOL(mr_mfc_find_parent);

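/* Look up the table's "any" entry (ops->cmparg_any) and return it if
 * @vifi is part of its forwarding set (ttl < 255), otherwise NULL.
 */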
void *mr_mfc_find_any_parent(struct mr_table *mrt, int vifi)
{
	struct rhlist_head *tmp, *list;
	struct mr_mfc *c;

	list = rhltable_lookup(&mrt->mfc_hash, mrt->ops.cmparg_any,
			       *mrt->ops.rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode)
		if (c->mfc_un.res.ttls[vifi] < 255)
			return c;

	return NULL;
}
EXPORT_SYMBOL(mr_mfc_find_any_parent);

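/* Find a cache entry matching @hasharg that forwards to @vifi, either
 * directly or through the "any" proxy entry; fall back to the "any"
 * entry itself when no specific match exists.
 */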
void *mr_mfc_find_any(struct mr_table *mrt, int vifi, void *hasharg)
{
	struct rhlist_head *tmp, *list;
	struct mr_mfc *c, *proxy;

	list = rhltable_lookup(&mrt->mfc_hash, hasharg, *mrt->ops.rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode) {
		if (c->mfc_un.res.ttls[vifi] < 255)
			return c;

		/* It's ok if the vifi is part of the static tree */
		proxy = mr_mfc_find_any_parent(mrt, c->mfc_parent);
		if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
			return c;
	}

	return mr_mfc_find_any_parent(mrt, vifi);
}
EXPORT_SYMBOL(mr_mfc_find_any);

#ifdef CONFIG_PROC_FS
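/* /proc seq_file helper: return the vif at position @pos in the iterator's
 * table, skipping unused slots, or NULL when past the end.
 */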
void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter, loff_t pos)
{
	struct mr_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif_table[iter->ct];
	}
	return NULL;
}
EXPORT_SYMBOL(mr_vif_seq_idx);

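/* /proc seq_file helper: advance the iterator to the next existing vif. */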
void *mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return mr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif_table[iter->ct];
	}
	return NULL;
}
EXPORT_SYMBOL(mr_vif_seq_next);

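/* /proc seq_file helper: return the MFC entry at position @pos, walking the
 * resolved list under RCU and then the unresolved queue under it->lock.
 * The corresponding lock is left held when an entry is returned.
 */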
void *mr_mfc_seq_idx(struct net *net,
		     struct mr_mfc_iter *it, loff_t pos)
{
	struct mr_table *mrt = it->mrt;
	struct mr_mfc *mfc;

	rcu_read_lock();
	it->cache = &mrt->mfc_cache_list;
	list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list)
		if (pos-- == 0)
			return mfc;
	rcu_read_unlock();

	spin_lock_bh(it->lock);
	it->cache = &mrt->mfc_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(it->lock);

	it->cache = NULL;
	return NULL;
}
EXPORT_SYMBOL(mr_mfc_seq_idx);

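/* /proc seq_file helper: advance to the next MFC entry, switching from the
 * RCU-protected resolved list to the spinlock-protected unresolved queue
 * once the former is exhausted.
 */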
void *mr_mfc_seq_next(struct seq_file *seq, void *v,
		      loff_t *pos)
{
	struct mr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = it->mrt;
	struct mr_mfc *c = v;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return mr_mfc_seq_idx(net, seq->private, 0);

	if (c->list.next != it->cache)
		return list_entry(c->list.next, struct mr_mfc, list);

	if (it->cache == &mrt->mfc_unres_queue)
		goto end_of_list;

	/* exhausted cache_array, show unresolved */
	rcu_read_unlock();
	it->cache = &mrt->mfc_unres_queue;

	spin_lock_bh(it->lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mr_mfc, list);

end_of_list:
	spin_unlock_bh(it->lock);
	it->cache = NULL;

	return NULL;
}
EXPORT_SYMBOL(mr_mfc_seq_next);
#endif

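/* Fill @skb with the routing data of cache entry @c: the input interface,
 * an RTA_MULTIPATH nest with one nexthop per output vif, and the
 * RTA_MFC_STATS/RTA_EXPIRES attributes. Returns 1 on success, -ENOENT for
 * unresolved entries and -EMSGSIZE when the skb runs out of room.
 */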
int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
		   struct mr_mfc *c, struct rtmsg *rtm)
{
	struct rta_mfc_stats mfcs;
	struct nlattr *mp_attr;
	struct rtnexthop *nhp;
	unsigned long lastuse;
	int ct;

	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mfc_parent >= MAXVIFS) {
		rtm->rtm_flags |= RTNH_F_UNRESOLVED;
		return -ENOENT;
	}

	if (VIF_EXISTS(mrt, c->mfc_parent) &&
	    nla_put_u32(skb, RTA_IIF,
			mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
		return -EMSGSIZE;

	if (c->mfc_flags & MFC_OFFLOAD)
		rtm->rtm_flags |= RTNH_F_OFFLOAD;

	mp_attr = nla_nest_start(skb, RTA_MULTIPATH);
	if (!mp_attr)
		return -EMSGSIZE;

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			struct vif_device *vif;

			nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
			if (!nhp) {
				nla_nest_cancel(skb, mp_attr);
				return -EMSGSIZE;
			}

			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			vif = &mrt->vif_table[ct];
			nhp->rtnh_ifindex = vif->dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}

	nla_nest_end(skb, mp_attr);

	lastuse = READ_ONCE(c->mfc_un.res.lastuse);
	lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;

	mfcs.mfcs_packets = c->mfc_un.res.pkt;
	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
	if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
	    nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
			      RTA_PAD))
		return -EMSGSIZE;

	rtm->rtm_type = RTN_MULTICAST;
	return 1;
}
EXPORT_SYMBOL(mr_fill_mroute);

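/* Common RTM_GETROUTE dump helper: walk every table via @iter, emitting
 * resolved entries under RCU and unresolved entries under @lock through the
 * family-specific @fill callback, and use cb->args[] to resume an
 * interrupted dump.
 */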
int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
		     struct mr_table *(*iter)(struct net *net,
					      struct mr_table *mrt),
		     int (*fill)(struct mr_table *mrt,
				 struct sk_buff *skb,
				 u32 portid, u32 seq, struct mr_mfc *c,
				 int cmd, int flags),
		     spinlock_t *lock)
{
	unsigned int t = 0, e = 0, s_t = cb->args[0], s_e = cb->args[1];
	struct net *net = sock_net(skb->sk);
	struct mr_table *mrt;
	struct mr_mfc *mfc;

	rcu_read_lock();
	for (mrt = iter(net, NULL); mrt; mrt = iter(net, mrt)) {
		if (t < s_t)
			goto next_table;
		list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list) {
			if (e < s_e)
				goto next_entry;
			if (fill(mrt, skb, NETLINK_CB(cb->skb).portid,
				 cb->nlh->nlmsg_seq, mfc,
				 RTM_NEWROUTE, NLM_F_MULTI) < 0)
				goto done;
next_entry:
			e++;
		}
		e = 0;
		s_e = 0;

		spin_lock_bh(lock);
		list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
			if (e < s_e)
				goto next_entry2;
			if (fill(mrt, skb, NETLINK_CB(cb->skb).portid,
				 cb->nlh->nlmsg_seq, mfc,
				 RTM_NEWROUTE, NLM_F_MULTI) < 0) {
				spin_unlock_bh(lock);
				goto done;
			}
next_entry2:
			e++;
		}
		spin_unlock_bh(lock);
		e = 0;
		s_e = 0;
next_table:
		t++;
	}
done:
	rcu_read_unlock();

	cb->args[1] = e;
	cb->args[0] = t;

	return skb->len;
}
EXPORT_SYMBOL(mr_rtm_dumproute);

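/* Replay the current multicast state to notifier block @nb: first the
 * policy rules via @rules_dump, then a FIB_EVENT_VIF_ADD for every vif and
 * a FIB_EVENT_ENTRY_ADD for every resolved MFC entry in each table.
 */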
int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family,
	    int (*rules_dump)(struct net *net,
			      struct notifier_block *nb),
	    struct mr_table *(*mr_iter)(struct net *net,
					struct mr_table *mrt),
	    rwlock_t *mrt_lock)
{
	struct mr_table *mrt;
	int err;

	err = rules_dump(net, nb);
	if (err)
		return err;

	for (mrt = mr_iter(net, NULL); mrt; mrt = mr_iter(net, mrt)) {
		struct vif_device *v = &mrt->vif_table[0];
		struct mr_mfc *mfc;
		int vifi;

		/* Notify on table VIF entries */
		read_lock(mrt_lock);
		for (vifi = 0; vifi < mrt->maxvif; vifi++, v++) {
			if (!v->dev)
				continue;

			mr_call_vif_notifier(nb, net, family,
					     FIB_EVENT_VIF_ADD,
					     v, vifi, mrt->id);
		}
		read_unlock(mrt_lock);

		/* Notify on table MFC entries */
		list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list)
			mr_call_mfc_notifier(nb, net, family,
					     FIB_EVENT_ENTRY_ADD,
					     mfc, mrt->id);
	}

	return 0;
}
EXPORT_SYMBOL(mr_dump);