1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
3
4#include <linux/bpf.h>
5#include <linux/filter.h>
6#include <linux/errno.h>
7#include <linux/file.h>
8#include <linux/net.h>
9#include <linux/workqueue.h>
10#include <linux/skmsg.h>
11#include <linux/list.h>
12#include <linux/jhash.h>
13
14struct bpf_stab {
15 struct bpf_map map;
16 struct sock **sks;
17 struct sk_psock_progs progs;
18 raw_spinlock_t lock;
19};
20
21#define SOCK_CREATE_FLAG_MASK \
22 (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
23
24static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
25{
26 struct bpf_stab *stab;
27 u64 cost;
28 int err;
29
30 if (!capable(CAP_NET_ADMIN))
31 return ERR_PTR(-EPERM);
32 if (attr->max_entries == 0 ||
33 attr->key_size != 4 ||
34 attr->value_size != 4 ||
35 attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
36 return ERR_PTR(-EINVAL);
37
38 stab = kzalloc(sizeof(*stab), GFP_USER);
39 if (!stab)
40 return ERR_PTR(-ENOMEM);
41
42 bpf_map_init_from_attr(&stab->map, attr);
43 raw_spin_lock_init(&stab->lock);
44
45 /* Make sure page count doesn't overflow. */
46 cost = (u64) stab->map.max_entries * sizeof(struct sock *);
47 err = bpf_map_charge_init(&stab->map.memory, cost);
48 if (err)
49 goto free_stab;
50
51 stab->sks = bpf_map_area_alloc(stab->map.max_entries *
52 sizeof(struct sock *),
53 stab->map.numa_node);
54 if (stab->sks)
55 return &stab->map;
56 err = -ENOMEM;
57 bpf_map_charge_finish(&stab->map.memory);
58free_stab:
59 kfree(stab);
60 return ERR_PTR(err);
61}
62
63int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
64{
65 u32 ufd = attr->target_fd;
66 struct bpf_map *map;
67 struct fd f;
68 int ret;
69
70 f = fdget(ufd);
71 map = __bpf_map_get(f);
72 if (IS_ERR(map))
73 return PTR_ERR(map);
74 ret = sock_map_prog_update(map, prog, attr->attach_type);
75 fdput(f);
76 return ret;
77}
78
79static void sock_map_sk_acquire(struct sock *sk)
80 __acquires(&sk->sk_lock.slock)
81{
82 lock_sock(sk);
83 preempt_disable();
84 rcu_read_lock();
85}
86
87static void sock_map_sk_release(struct sock *sk)
88 __releases(&sk->sk_lock.slock)
89{
90 rcu_read_unlock();
91 preempt_enable();
92 release_sock(sk);
93}
94
95static void sock_map_add_link(struct sk_psock *psock,
96 struct sk_psock_link *link,
97 struct bpf_map *map, void *link_raw)
98{
99 link->link_raw = link_raw;
100 link->map = map;
101 spin_lock_bh(&psock->link_lock);
102 list_add_tail(&link->list, &psock->link);
103 spin_unlock_bh(&psock->link_lock);
104}
105
106static void sock_map_del_link(struct sock *sk,
107 struct sk_psock *psock, void *link_raw)
108{
109 struct sk_psock_link *link, *tmp;
110 bool strp_stop = false;
111
112 spin_lock_bh(&psock->link_lock);
113 list_for_each_entry_safe(link, tmp, &psock->link, list) {
114 if (link->link_raw == link_raw) {
115 struct bpf_map *map = link->map;
116 struct bpf_stab *stab = container_of(map, struct bpf_stab,
117 map);
118 if (psock->parser.enabled && stab->progs.skb_parser)
119 strp_stop = true;
120 list_del(&link->list);
121 sk_psock_free_link(link);
122 }
123 }
124 spin_unlock_bh(&psock->link_lock);
125 if (strp_stop) {
126 write_lock_bh(&sk->sk_callback_lock);
127 sk_psock_stop_strp(sk, psock);
128 write_unlock_bh(&sk->sk_callback_lock);
129 }
130}
131
132static void sock_map_unref(struct sock *sk, void *link_raw)
133{
134 struct sk_psock *psock = sk_psock(sk);
135
136 if (likely(psock)) {
137 sock_map_del_link(sk, psock, link_raw);
138 sk_psock_put(sk, psock);
139 }
140}
141
142static int sock_map_link(struct bpf_map *map, struct sk_psock_progs *progs,
143 struct sock *sk)
144{
145 struct bpf_prog *msg_parser, *skb_parser, *skb_verdict;
146 bool skb_progs, sk_psock_is_new = false;
147 struct sk_psock *psock;
148 int ret;
149
150 skb_verdict = READ_ONCE(progs->skb_verdict);
151 skb_parser = READ_ONCE(progs->skb_parser);
152 skb_progs = skb_parser && skb_verdict;
153 if (skb_progs) {
154 skb_verdict = bpf_prog_inc_not_zero(skb_verdict);
155 if (IS_ERR(skb_verdict))
156 return PTR_ERR(skb_verdict);
157 skb_parser = bpf_prog_inc_not_zero(skb_parser);
158 if (IS_ERR(skb_parser)) {
159 bpf_prog_put(skb_verdict);
160 return PTR_ERR(skb_parser);
161 }
162 }
163
164 msg_parser = READ_ONCE(progs->msg_parser);
165 if (msg_parser) {
166 msg_parser = bpf_prog_inc_not_zero(msg_parser);
167 if (IS_ERR(msg_parser)) {
168 ret = PTR_ERR(msg_parser);
169 goto out;
170 }
171 }
172
173 psock = sk_psock_get_checked(sk);
174 if (IS_ERR(psock)) {
175 ret = PTR_ERR(psock);
176 goto out_progs;
177 }
178
179 if (psock) {
180 if ((msg_parser && READ_ONCE(psock->progs.msg_parser)) ||
181 (skb_progs && READ_ONCE(psock->progs.skb_parser))) {
182 sk_psock_put(sk, psock);
183 ret = -EBUSY;
184 goto out_progs;
185 }
186 } else {
187 psock = sk_psock_init(sk, map->numa_node);
188 if (!psock) {
189 ret = -ENOMEM;
190 goto out_progs;
191 }
192 sk_psock_is_new = true;
193 }
194
195 if (msg_parser)
196 psock_set_prog(&psock->progs.msg_parser, msg_parser);
197 if (sk_psock_is_new) {
198 ret = tcp_bpf_init(sk);
199 if (ret < 0)
200 goto out_drop;
201 } else {
202 tcp_bpf_reinit(sk);
203 }
204
205 write_lock_bh(&sk->sk_callback_lock);
206 if (skb_progs && !psock->parser.enabled) {
207 ret = sk_psock_init_strp(sk, psock);
208 if (ret) {
209 write_unlock_bh(&sk->sk_callback_lock);
210 goto out_drop;
211 }
212 psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
213 psock_set_prog(&psock->progs.skb_parser, skb_parser);
214 sk_psock_start_strp(sk, psock);
215 }
216 write_unlock_bh(&sk->sk_callback_lock);
217 return 0;
218out_drop:
219 sk_psock_put(sk, psock);
220out_progs:
221 if (msg_parser)
222 bpf_prog_put(msg_parser);
223out:
224 if (skb_progs) {
225 bpf_prog_put(skb_verdict);
226 bpf_prog_put(skb_parser);
227 }
228 return ret;
229}
230
231static void sock_map_free(struct bpf_map *map)
232{
233 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
234 int i;
235
236 synchronize_rcu();
237 rcu_read_lock();
238 raw_spin_lock_bh(&stab->lock);
239 for (i = 0; i < stab->map.max_entries; i++) {
240 struct sock **psk = &stab->sks[i];
241 struct sock *sk;
242
243 sk = xchg(psk, NULL);
244 if (sk)
245 sock_map_unref(sk, psk);
246 }
247 raw_spin_unlock_bh(&stab->lock);
248 rcu_read_unlock();
249
250 synchronize_rcu();
251
252 bpf_map_area_free(stab->sks);
253 kfree(stab);
254}
255
256static void sock_map_release_progs(struct bpf_map *map)
257{
258 psock_progs_drop(&container_of(map, struct bpf_stab, map)->progs);
259}
260
261static struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
262{
263 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
264
265 WARN_ON_ONCE(!rcu_read_lock_held());
266
267 if (unlikely(key >= map->max_entries))
268 return NULL;
269 return READ_ONCE(stab->sks[key]);
270}
271
272static void *sock_map_lookup(struct bpf_map *map, void *key)
273{
274 return ERR_PTR(-EOPNOTSUPP);
275}
276
277static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
278 struct sock **psk)
279{
280 struct sock *sk;
281 int err = 0;
282
283 raw_spin_lock_bh(&stab->lock);
284 sk = *psk;
285 if (!sk_test || sk_test == sk)
286 sk = xchg(psk, NULL);
287
288 if (likely(sk))
289 sock_map_unref(sk, psk);
290 else
291 err = -EINVAL;
292
293 raw_spin_unlock_bh(&stab->lock);
294 return err;
295}
296
297static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk,
298 void *link_raw)
299{
300 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
301
302 __sock_map_delete(stab, sk, link_raw);
303}
304
305static int sock_map_delete_elem(struct bpf_map *map, void *key)
306{
307 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
308 u32 i = *(u32 *)key;
309 struct sock **psk;
310
311 if (unlikely(i >= map->max_entries))
312 return -EINVAL;
313
314 psk = &stab->sks[i];
315 return __sock_map_delete(stab, NULL, psk);
316}
317
318static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next)
319{
320 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
321 u32 i = key ? *(u32 *)key : U32_MAX;
322 u32 *key_next = next;
323
324 if (i == stab->map.max_entries - 1)
325 return -ENOENT;
326 if (i >= stab->map.max_entries)
327 *key_next = 0;
328 else
329 *key_next = i + 1;
330 return 0;
331}
332
333static int sock_map_update_common(struct bpf_map *map, u32 idx,
334 struct sock *sk, u64 flags)
335{
336 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
337 struct inet_connection_sock *icsk = inet_csk(sk);
338 struct sk_psock_link *link;
339 struct sk_psock *psock;
340 struct sock *osk;
341 int ret;
342
343 WARN_ON_ONCE(!rcu_read_lock_held());
344 if (unlikely(flags > BPF_EXIST))
345 return -EINVAL;
346 if (unlikely(idx >= map->max_entries))
347 return -E2BIG;
348 if (unlikely(rcu_access_pointer(icsk->icsk_ulp_data)))
349 return -EINVAL;
350
351 link = sk_psock_init_link();
352 if (!link)
353 return -ENOMEM;
354
355 ret = sock_map_link(map, &stab->progs, sk);
356 if (ret < 0)
357 goto out_free;
358
359 psock = sk_psock(sk);
360 WARN_ON_ONCE(!psock);
361
362 raw_spin_lock_bh(&stab->lock);
363 osk = stab->sks[idx];
364 if (osk && flags == BPF_NOEXIST) {
365 ret = -EEXIST;
366 goto out_unlock;
367 } else if (!osk && flags == BPF_EXIST) {
368 ret = -ENOENT;
369 goto out_unlock;
370 }
371
372 sock_map_add_link(psock, link, map, &stab->sks[idx]);
373 stab->sks[idx] = sk;
374 if (osk)
375 sock_map_unref(osk, &stab->sks[idx]);
376 raw_spin_unlock_bh(&stab->lock);
377 return 0;
378out_unlock:
379 raw_spin_unlock_bh(&stab->lock);
380 if (psock)
381 sk_psock_put(sk, psock);
382out_free:
383 sk_psock_free_link(link);
384 return ret;
385}
386
387static bool sock_map_op_okay(const struct bpf_sock_ops_kern *ops)
388{
389 return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB ||
390 ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB;
391}
392
393static bool sock_map_sk_is_suitable(const struct sock *sk)
394{
395 return sk->sk_type == SOCK_STREAM &&
396 sk->sk_protocol == IPPROTO_TCP;
397}
398
399static int sock_map_update_elem(struct bpf_map *map, void *key,
400 void *value, u64 flags)
401{
402 u32 ufd = *(u32 *)value;
403 u32 idx = *(u32 *)key;
404 struct socket *sock;
405 struct sock *sk;
406 int ret;
407
408 sock = sockfd_lookup(ufd, &ret);
409 if (!sock)
410 return ret;
411 sk = sock->sk;
412 if (!sk) {
413 ret = -EINVAL;
414 goto out;
415 }
416 if (!sock_map_sk_is_suitable(sk) ||
417 sk->sk_state != TCP_ESTABLISHED) {
418 ret = -EOPNOTSUPP;
419 goto out;
420 }
421
422 sock_map_sk_acquire(sk);
423 ret = sock_map_update_common(map, idx, sk, flags);
424 sock_map_sk_release(sk);
425out:
426 fput(sock->file);
427 return ret;
428}
429
430BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, sops,
431 struct bpf_map *, map, void *, key, u64, flags)
432{
433 WARN_ON_ONCE(!rcu_read_lock_held());
434
435 if (likely(sock_map_sk_is_suitable(sops->sk) &&
436 sock_map_op_okay(sops)))
437 return sock_map_update_common(map, *(u32 *)key, sops->sk,
438 flags);
439 return -EOPNOTSUPP;
440}
441
442const struct bpf_func_proto bpf_sock_map_update_proto = {
443 .func = bpf_sock_map_update,
444 .gpl_only = false,
445 .pkt_access = true,
446 .ret_type = RET_INTEGER,
447 .arg1_type = ARG_PTR_TO_CTX,
448 .arg2_type = ARG_CONST_MAP_PTR,
449 .arg3_type = ARG_PTR_TO_MAP_KEY,
450 .arg4_type = ARG_ANYTHING,
451};
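
/*
 * Illustrative sketch (editor's addition, not part of this file): a minimal
 * BPF_PROG_TYPE_SOCK_OPS program that feeds established TCP sockets into a
 * sockmap through the bpf_sock_map_update() helper implemented above. The
 * map name, section name and index are assumptions; such a snippet would be
 * built separately with clang -target bpf against bpf_helpers.h.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_SOCKMAP);
 *		__uint(max_entries, 2);
 *		__type(key, __u32);
 *		__type(value, __u32);
 *	} sock_map SEC(".maps");
 *
 *	SEC("sockops")
 *	int add_to_sockmap(struct bpf_sock_ops *skops)
 *	{
 *		__u32 idx = 0;
 *
 *		switch (skops->op) {
 *		case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
 *		case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
 *			// sock_map_op_okay() above only admits these two ops
 *			bpf_sock_map_update(skops, &sock_map, &idx, BPF_NOEXIST);
 *			break;
 *		}
 *		return 0;
 *	}
 */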
452
453BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
454 struct bpf_map *, map, u32, key, u64, flags)
455{
456 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
457
458 if (unlikely(flags & ~(BPF_F_INGRESS)))
459 return SK_DROP;
460 tcb->bpf.flags = flags;
461 tcb->bpf.sk_redir = __sock_map_lookup_elem(map, key);
462 if (!tcb->bpf.sk_redir)
463 return SK_DROP;
464 return SK_PASS;
465}
466
467const struct bpf_func_proto bpf_sk_redirect_map_proto = {
468 .func = bpf_sk_redirect_map,
469 .gpl_only = false,
470 .ret_type = RET_INTEGER,
471 .arg1_type = ARG_PTR_TO_CTX,
472 .arg2_type = ARG_CONST_MAP_PTR,
473 .arg3_type = ARG_ANYTHING,
474 .arg4_type = ARG_ANYTHING,
475};
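
/*
 * Illustrative sketch (editor's addition): an sk_skb stream verdict program
 * using the bpf_sk_redirect_map() helper above to steer every parsed record
 * to the socket stored at index 0 of an assumed "sock_map"; the helper itself
 * returns SK_DROP when no socket is linked at that slot.
 *
 *	SEC("sk_skb/stream_verdict")
 *	int prog_stream_verdict(struct __sk_buff *skb)
 *	{
 *		__u32 idx = 0;
 *
 *		return bpf_sk_redirect_map(skb, &sock_map, idx, BPF_F_INGRESS);
 *	}
 */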
476
477BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
478 struct bpf_map *, map, u32, key, u64, flags)
479{
480 if (unlikely(flags & ~(BPF_F_INGRESS)))
481 return SK_DROP;
482 msg->flags = flags;
483 msg->sk_redir = __sock_map_lookup_elem(map, key);
484 if (!msg->sk_redir)
485 return SK_DROP;
486 return SK_PASS;
487}
488
489const struct bpf_func_proto bpf_msg_redirect_map_proto = {
490 .func = bpf_msg_redirect_map,
491 .gpl_only = false,
492 .ret_type = RET_INTEGER,
493 .arg1_type = ARG_PTR_TO_CTX,
494 .arg2_type = ARG_CONST_MAP_PTR,
495 .arg3_type = ARG_ANYTHING,
496 .arg4_type = ARG_ANYTHING,
497};
498
499const struct bpf_map_ops sock_map_ops = {
500 .map_alloc = sock_map_alloc,
501 .map_free = sock_map_free,
502 .map_get_next_key = sock_map_get_next_key,
503 .map_update_elem = sock_map_update_elem,
504 .map_delete_elem = sock_map_delete_elem,
505 .map_lookup_elem = sock_map_lookup,
506 .map_release_uref = sock_map_release_progs,
507 .map_check_btf = map_check_no_btf,
508};
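
/*
 * Illustrative sketch (editor's addition): sock_map_update_elem() above is
 * reached from user space through the regular BPF_MAP_UPDATE_ELEM syscall
 * path, with the value being the file descriptor of an already established
 * TCP socket. A hedged libbpf fragment, assuming map_fd and sock_fd already
 * exist and the socket is TCP_ESTABLISHED:
 *
 *	#include <bpf/bpf.h>
 *
 *	int add_established_sock(int map_fd, int sock_fd)
 *	{
 *		__u32 key = 0;
 *		__u32 value = sock_fd;
 *
 *		return bpf_map_update_elem(map_fd, &key, &value, BPF_ANY);
 *	}
 */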
509
510struct bpf_htab_elem {
511 struct rcu_head rcu;
512 u32 hash;
513 struct sock *sk;
514 struct hlist_node node;
515 u8 key[0];
516};
517
518struct bpf_htab_bucket {
519 struct hlist_head head;
520 raw_spinlock_t lock;
521};
522
523struct bpf_htab {
524 struct bpf_map map;
525 struct bpf_htab_bucket *buckets;
526 u32 buckets_num;
527 u32 elem_size;
528 struct sk_psock_progs progs;
529 atomic_t count;
530};
531
532static inline u32 sock_hash_bucket_hash(const void *key, u32 len)
533{
534 return jhash(key, len, 0);
535}
536
537static struct bpf_htab_bucket *sock_hash_select_bucket(struct bpf_htab *htab,
538 u32 hash)
539{
540 return &htab->buckets[hash & (htab->buckets_num - 1)];
541}
542
543static struct bpf_htab_elem *
544sock_hash_lookup_elem_raw(struct hlist_head *head, u32 hash, void *key,
545 u32 key_size)
546{
547 struct bpf_htab_elem *elem;
548
549 hlist_for_each_entry_rcu(elem, head, node) {
550 if (elem->hash == hash &&
551 !memcmp(&elem->key, key, key_size))
552 return elem;
553 }
554
555 return NULL;
556}
557
558static struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
559{
560 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
561 u32 key_size = map->key_size, hash;
562 struct bpf_htab_bucket *bucket;
563 struct bpf_htab_elem *elem;
564
565 WARN_ON_ONCE(!rcu_read_lock_held());
566
567 hash = sock_hash_bucket_hash(key, key_size);
568 bucket = sock_hash_select_bucket(htab, hash);
569 elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
570
571 return elem ? elem->sk : NULL;
572}
573
574static void sock_hash_free_elem(struct bpf_htab *htab,
575 struct bpf_htab_elem *elem)
576{
577 atomic_dec(&htab->count);
578 kfree_rcu(elem, rcu);
579}
580
581static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
582 void *link_raw)
583{
584 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
585 struct bpf_htab_elem *elem_probe, *elem = link_raw;
586 struct bpf_htab_bucket *bucket;
587
588 WARN_ON_ONCE(!rcu_read_lock_held());
589 bucket = sock_hash_select_bucket(htab, elem->hash);
590
591 /* elem may be deleted in parallel from the map, but access here
592 * is okay since it's going away only after RCU grace period.
593 * However, we need to check whether it's still present.
594 */
595 raw_spin_lock_bh(&bucket->lock);
596 elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash,
597 elem->key, map->key_size);
598 if (elem_probe && elem_probe == elem) {
599 hlist_del_rcu(&elem->node);
600 sock_map_unref(elem->sk, elem);
601 sock_hash_free_elem(htab, elem);
602 }
603 raw_spin_unlock_bh(&bucket->lock);
604}
605
606static int sock_hash_delete_elem(struct bpf_map *map, void *key)
607{
608 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
609 u32 hash, key_size = map->key_size;
610 struct bpf_htab_bucket *bucket;
611 struct bpf_htab_elem *elem;
612 int ret = -ENOENT;
613
614 hash = sock_hash_bucket_hash(key, key_size);
615 bucket = sock_hash_select_bucket(htab, hash);
616
617 raw_spin_lock_bh(&bucket->lock);
618 elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
619 if (elem) {
620 hlist_del_rcu(&elem->node);
621 sock_map_unref(elem->sk, elem);
622 sock_hash_free_elem(htab, elem);
623 ret = 0;
624 }
625 raw_spin_unlock_bh(&bucket->lock);
626 return ret;
627}
628
629static struct bpf_htab_elem *sock_hash_alloc_elem(struct bpf_htab *htab,
630 void *key, u32 key_size,
631 u32 hash, struct sock *sk,
632 struct bpf_htab_elem *old)
633{
634 struct bpf_htab_elem *new;
635
636 if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
637 if (!old) {
638 atomic_dec(&htab->count);
639 return ERR_PTR(-E2BIG);
640 }
641 }
642
643 new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
644 htab->map.numa_node);
645 if (!new) {
646 atomic_dec(&htab->count);
647 return ERR_PTR(-ENOMEM);
648 }
649 memcpy(new->key, key, key_size);
650 new->sk = sk;
651 new->hash = hash;
652 return new;
653}
654
655static int sock_hash_update_common(struct bpf_map *map, void *key,
656 struct sock *sk, u64 flags)
657{
658 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
659 struct inet_connection_sock *icsk = inet_csk(sk);
660 u32 key_size = map->key_size, hash;
661 struct bpf_htab_elem *elem, *elem_new;
662 struct bpf_htab_bucket *bucket;
663 struct sk_psock_link *link;
664 struct sk_psock *psock;
665 int ret;
666
667 WARN_ON_ONCE(!rcu_read_lock_held());
668 if (unlikely(flags > BPF_EXIST))
669 return -EINVAL;
670 if (unlikely(icsk->icsk_ulp_data))
671 return -EINVAL;
672
673 link = sk_psock_init_link();
674 if (!link)
675 return -ENOMEM;
676
677 ret = sock_map_link(map, &htab->progs, sk);
678 if (ret < 0)
679 goto out_free;
680
681 psock = sk_psock(sk);
682 WARN_ON_ONCE(!psock);
683
684 hash = sock_hash_bucket_hash(key, key_size);
685 bucket = sock_hash_select_bucket(htab, hash);
686
687 raw_spin_lock_bh(&bucket->lock);
688 elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
689 if (elem && flags == BPF_NOEXIST) {
690 ret = -EEXIST;
691 goto out_unlock;
692 } else if (!elem && flags == BPF_EXIST) {
693 ret = -ENOENT;
694 goto out_unlock;
695 }
696
697 elem_new = sock_hash_alloc_elem(htab, key, key_size, hash, sk, elem);
698 if (IS_ERR(elem_new)) {
699 ret = PTR_ERR(elem_new);
700 goto out_unlock;
701 }
702
703 sock_map_add_link(psock, link, map, elem_new);
704 /* Add new element to the head of the list, so that
705 * concurrent search will find it before old elem.
706 */
707 hlist_add_head_rcu(&elem_new->node, &bucket->head);
708 if (elem) {
709 hlist_del_rcu(&elem->node);
710 sock_map_unref(elem->sk, elem);
711 sock_hash_free_elem(htab, elem);
712 }
713 raw_spin_unlock_bh(&bucket->lock);
714 return 0;
715out_unlock:
716 raw_spin_unlock_bh(&bucket->lock);
717 sk_psock_put(sk, psock);
718out_free:
719 sk_psock_free_link(link);
720 return ret;
721}
722
723static int sock_hash_update_elem(struct bpf_map *map, void *key,
724 void *value, u64 flags)
725{
726 u32 ufd = *(u32 *)value;
727 struct socket *sock;
728 struct sock *sk;
729 int ret;
730
731 sock = sockfd_lookup(ufd, &ret);
732 if (!sock)
733 return ret;
734 sk = sock->sk;
735 if (!sk) {
736 ret = -EINVAL;
737 goto out;
738 }
739 if (!sock_map_sk_is_suitable(sk) ||
740 sk->sk_state != TCP_ESTABLISHED) {
741 ret = -EOPNOTSUPP;
742 goto out;
743 }
744
745 sock_map_sk_acquire(sk);
746 ret = sock_hash_update_common(map, key, sk, flags);
747 sock_map_sk_release(sk);
748out:
749 fput(sock->file);
750 return ret;
751}
752
753static int sock_hash_get_next_key(struct bpf_map *map, void *key,
754 void *key_next)
755{
756 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
757 struct bpf_htab_elem *elem, *elem_next;
758 u32 hash, key_size = map->key_size;
759 struct hlist_head *head;
760 int i = 0;
761
762 if (!key)
763 goto find_first_elem;
764 hash = sock_hash_bucket_hash(key, key_size);
765 head = &sock_hash_select_bucket(htab, hash)->head;
766 elem = sock_hash_lookup_elem_raw(head, hash, key, key_size);
767 if (!elem)
768 goto find_first_elem;
769
770 elem_next = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&elem->node)),
771 struct bpf_htab_elem, node);
772 if (elem_next) {
773 memcpy(key_next, elem_next->key, key_size);
774 return 0;
775 }
776
777 i = hash & (htab->buckets_num - 1);
778 i++;
779find_first_elem:
780 for (; i < htab->buckets_num; i++) {
781 head = &sock_hash_select_bucket(htab, i)->head;
782 elem_next = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
783 struct bpf_htab_elem, node);
784 if (elem_next) {
785 memcpy(key_next, elem_next->key, key_size);
786 return 0;
787 }
788 }
789
790 return -ENOENT;
791}
792
793static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
794{
795 struct bpf_htab *htab;
796 int i, err;
797 u64 cost;
798
799 if (!capable(CAP_NET_ADMIN))
800 return ERR_PTR(-EPERM);
801 if (attr->max_entries == 0 ||
802 attr->key_size == 0 ||
803 attr->value_size != 4 ||
804 attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
805 return ERR_PTR(-EINVAL);
806 if (attr->key_size > MAX_BPF_STACK)
807 return ERR_PTR(-E2BIG);
808
809 htab = kzalloc(sizeof(*htab), GFP_USER);
810 if (!htab)
811 return ERR_PTR(-ENOMEM);
812
813 bpf_map_init_from_attr(&htab->map, attr);
814
815 htab->buckets_num = roundup_pow_of_two(htab->map.max_entries);
816 htab->elem_size = sizeof(struct bpf_htab_elem) +
817 round_up(htab->map.key_size, 8);
818 if (htab->buckets_num == 0 ||
819 htab->buckets_num > U32_MAX / sizeof(struct bpf_htab_bucket)) {
820 err = -EINVAL;
821 goto free_htab;
822 }
823
824 cost = (u64) htab->buckets_num * sizeof(struct bpf_htab_bucket) +
825 (u64) htab->elem_size * htab->map.max_entries;
826 if (cost >= U32_MAX - PAGE_SIZE) {
827 err = -EINVAL;
828 goto free_htab;
829 }
830
831 htab->buckets = bpf_map_area_alloc(htab->buckets_num *
832 sizeof(struct bpf_htab_bucket),
833 htab->map.numa_node);
834 if (!htab->buckets) {
835 err = -ENOMEM;
836 goto free_htab;
837 }
838
839 for (i = 0; i < htab->buckets_num; i++) {
840 INIT_HLIST_HEAD(&htab->buckets[i].head);
841 raw_spin_lock_init(&htab->buckets[i].lock);
842 }
843
844 return &htab->map;
845free_htab:
846 kfree(htab);
847 return ERR_PTR(err);
848}
849
850static void sock_hash_free(struct bpf_map *map)
851{
852 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
853 struct bpf_htab_bucket *bucket;
854 struct bpf_htab_elem *elem;
855 struct hlist_node *node;
856 int i;
857
858 synchronize_rcu();
859 rcu_read_lock();
860 for (i = 0; i < htab->buckets_num; i++) {
861 bucket = sock_hash_select_bucket(htab, i);
862 raw_spin_lock_bh(&bucket->lock);
863 hlist_for_each_entry_safe(elem, node, &bucket->head, node) {
864 hlist_del_rcu(&elem->node);
865 sock_map_unref(elem->sk, elem);
866 }
867 raw_spin_unlock_bh(&bucket->lock);
868 }
869 rcu_read_unlock();
870
871 bpf_map_area_free(htab->buckets);
872 kfree(htab);
873}
874
875static void sock_hash_release_progs(struct bpf_map *map)
876{
877 psock_progs_drop(&container_of(map, struct bpf_htab, map)->progs);
878}
879
880BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, sops,
881 struct bpf_map *, map, void *, key, u64, flags)
882{
883 WARN_ON_ONCE(!rcu_read_lock_held());
884
885 if (likely(sock_map_sk_is_suitable(sops->sk) &&
886 sock_map_op_okay(sops)))
887 return sock_hash_update_common(map, key, sops->sk, flags);
888 return -EOPNOTSUPP;
889}
890
891const struct bpf_func_proto bpf_sock_hash_update_proto = {
892 .func = bpf_sock_hash_update,
893 .gpl_only = false,
894 .pkt_access = true,
895 .ret_type = RET_INTEGER,
896 .arg1_type = ARG_PTR_TO_CTX,
897 .arg2_type = ARG_CONST_MAP_PTR,
898 .arg3_type = ARG_PTR_TO_MAP_KEY,
899 .arg4_type = ARG_ANYTHING,
900};
901
902BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
903 struct bpf_map *, map, void *, key, u64, flags)
904{
905 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
906
907 if (unlikely(flags & ~(BPF_F_INGRESS)))
908 return SK_DROP;
909 tcb->bpf.flags = flags;
910 tcb->bpf.sk_redir = __sock_hash_lookup_elem(map, key);
911 if (!tcb->bpf.sk_redir)
912 return SK_DROP;
913 return SK_PASS;
914}
915
916const struct bpf_func_proto bpf_sk_redirect_hash_proto = {
917 .func = bpf_sk_redirect_hash,
918 .gpl_only = false,
919 .ret_type = RET_INTEGER,
920 .arg1_type = ARG_PTR_TO_CTX,
921 .arg2_type = ARG_CONST_MAP_PTR,
922 .arg3_type = ARG_PTR_TO_MAP_KEY,
923 .arg4_type = ARG_ANYTHING,
924};
925
926BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
927 struct bpf_map *, map, void *, key, u64, flags)
928{
929 if (unlikely(flags & ~(BPF_F_INGRESS)))
930 return SK_DROP;
931 msg->flags = flags;
932 msg->sk_redir = __sock_hash_lookup_elem(map, key);
933 if (!msg->sk_redir)
934 return SK_DROP;
935 return SK_PASS;
936}
937
938const struct bpf_func_proto bpf_msg_redirect_hash_proto = {
939 .func = bpf_msg_redirect_hash,
940 .gpl_only = false,
941 .ret_type = RET_INTEGER,
942 .arg1_type = ARG_PTR_TO_CTX,
943 .arg2_type = ARG_CONST_MAP_PTR,
944 .arg3_type = ARG_PTR_TO_MAP_KEY,
945 .arg4_type = ARG_ANYTHING,
946};
947
948const struct bpf_map_ops sock_hash_ops = {
949 .map_alloc = sock_hash_alloc,
950 .map_free = sock_hash_free,
951 .map_get_next_key = sock_hash_get_next_key,
952 .map_update_elem = sock_hash_update_elem,
953 .map_delete_elem = sock_hash_delete_elem,
954 .map_lookup_elem = sock_map_lookup,
955 .map_release_uref = sock_hash_release_progs,
956 .map_check_btf = map_check_no_btf,
957};
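
/*
 * Illustrative sketch (editor's addition): unlike sockmap, sockhash takes an
 * arbitrary fixed-size key (bounded by MAX_BPF_STACK in sock_hash_alloc()
 * above). A hedged BPF-C fragment keying sockets by a caller-defined 4-tuple
 * and inserting them from a sockops program with bpf_sock_hash_update();
 * the struct layout and map name are assumptions.
 *
 *	struct sock_key {
 *		__u32 sip4;
 *		__u32 dip4;
 *		__u32 sport;
 *		__u32 dport;
 *	};
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_SOCKHASH);
 *		__uint(max_entries, 1024);
 *		__type(key, struct sock_key);
 *		__type(value, __u32);
 *	} sock_hash SEC(".maps");
 *
 *	SEC("sockops")
 *	int add_to_sockhash(struct bpf_sock_ops *skops)
 *	{
 *		struct sock_key key = {
 *			.sip4  = skops->local_ip4,
 *			.dip4  = skops->remote_ip4,
 *			.sport = skops->local_port,
 *			.dport = bpf_ntohl(skops->remote_port),
 *		};
 *
 *		if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
 *		    skops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB)
 *			bpf_sock_hash_update(skops, &sock_hash, &key, BPF_NOEXIST);
 *		return 0;
 *	}
 */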
958
959static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
960{
961 switch (map->map_type) {
962 case BPF_MAP_TYPE_SOCKMAP:
963 return &container_of(map, struct bpf_stab, map)->progs;
964 case BPF_MAP_TYPE_SOCKHASH:
965 return &container_of(map, struct bpf_htab, map)->progs;
966 default:
967 break;
968 }
969
970 return NULL;
971}
972
973int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
974 u32 which)
975{
976 struct sk_psock_progs *progs = sock_map_progs(map);
977
978 if (!progs)
979 return -EOPNOTSUPP;
980
981 switch (which) {
982 case BPF_SK_MSG_VERDICT:
983 psock_set_prog(&progs->msg_parser, prog);
984 break;
985 case BPF_SK_SKB_STREAM_PARSER:
986 psock_set_prog(&progs->skb_parser, prog);
987 break;
988 case BPF_SK_SKB_STREAM_VERDICT:
989 psock_set_prog(&progs->skb_verdict, prog);
990 break;
991 default:
992 return -EOPNOTSUPP;
993 }
994
995 return 0;
996}
997
998void sk_psock_unlink(struct sock *sk, struct sk_psock_link *link)
999{
1000 switch (link->map->map_type) {
1001 case BPF_MAP_TYPE_SOCKMAP:
1002 return sock_map_delete_from_link(link->map, sk,
1003 link->link_raw);
1004 case BPF_MAP_TYPE_SOCKHASH:
1005 return sock_hash_delete_from_link(link->map, sk,
1006 link->link_raw);
1007 default:
1008 break;
1009 }
1010}
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
3
4#include <linux/bpf.h>
5#include <linux/btf_ids.h>
6#include <linux/filter.h>
7#include <linux/errno.h>
8#include <linux/file.h>
9#include <linux/net.h>
10#include <linux/workqueue.h>
11#include <linux/skmsg.h>
12#include <linux/list.h>
13#include <linux/jhash.h>
14#include <linux/sock_diag.h>
15#include <net/udp.h>
16
17struct bpf_stab {
18 struct bpf_map map;
19 struct sock **sks;
20 struct sk_psock_progs progs;
21 raw_spinlock_t lock;
22};
23
24#define SOCK_CREATE_FLAG_MASK \
25 (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
26
27static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
28 struct bpf_prog *old, u32 which);
29static struct sk_psock_progs *sock_map_progs(struct bpf_map *map);
30
31static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
32{
33 struct bpf_stab *stab;
34
35 if (!capable(CAP_NET_ADMIN))
36 return ERR_PTR(-EPERM);
37 if (attr->max_entries == 0 ||
38 attr->key_size != 4 ||
39 (attr->value_size != sizeof(u32) &&
40 attr->value_size != sizeof(u64)) ||
41 attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
42 return ERR_PTR(-EINVAL);
43
44 stab = bpf_map_area_alloc(sizeof(*stab), NUMA_NO_NODE);
45 if (!stab)
46 return ERR_PTR(-ENOMEM);
47
48 bpf_map_init_from_attr(&stab->map, attr);
49 raw_spin_lock_init(&stab->lock);
50
51 stab->sks = bpf_map_area_alloc((u64) stab->map.max_entries *
52 sizeof(struct sock *),
53 stab->map.numa_node);
54 if (!stab->sks) {
55 bpf_map_area_free(stab);
56 return ERR_PTR(-ENOMEM);
57 }
58
59 return &stab->map;
60}
61
62int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
63{
64 u32 ufd = attr->target_fd;
65 struct bpf_map *map;
66 struct fd f;
67 int ret;
68
69 if (attr->attach_flags || attr->replace_bpf_fd)
70 return -EINVAL;
71
72 f = fdget(ufd);
73 map = __bpf_map_get(f);
74 if (IS_ERR(map))
75 return PTR_ERR(map);
76 ret = sock_map_prog_update(map, prog, NULL, attr->attach_type);
77 fdput(f);
78 return ret;
79}
80
81int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
82{
83 u32 ufd = attr->target_fd;
84 struct bpf_prog *prog;
85 struct bpf_map *map;
86 struct fd f;
87 int ret;
88
89 if (attr->attach_flags || attr->replace_bpf_fd)
90 return -EINVAL;
91
92 f = fdget(ufd);
93 map = __bpf_map_get(f);
94 if (IS_ERR(map))
95 return PTR_ERR(map);
96
97 prog = bpf_prog_get(attr->attach_bpf_fd);
98 if (IS_ERR(prog)) {
99 ret = PTR_ERR(prog);
100 goto put_map;
101 }
102
103 if (prog->type != ptype) {
104 ret = -EINVAL;
105 goto put_prog;
106 }
107
108 ret = sock_map_prog_update(map, NULL, prog, attr->attach_type);
109put_prog:
110 bpf_prog_put(prog);
111put_map:
112 fdput(f);
113 return ret;
114}
115
116static void sock_map_sk_acquire(struct sock *sk)
117 __acquires(&sk->sk_lock.slock)
118{
119 lock_sock(sk);
120 preempt_disable();
121 rcu_read_lock();
122}
123
124static void sock_map_sk_release(struct sock *sk)
125 __releases(&sk->sk_lock.slock)
126{
127 rcu_read_unlock();
128 preempt_enable();
129 release_sock(sk);
130}
131
132static void sock_map_add_link(struct sk_psock *psock,
133 struct sk_psock_link *link,
134 struct bpf_map *map, void *link_raw)
135{
136 link->link_raw = link_raw;
137 link->map = map;
138 spin_lock_bh(&psock->link_lock);
139 list_add_tail(&link->list, &psock->link);
140 spin_unlock_bh(&psock->link_lock);
141}
142
143static void sock_map_del_link(struct sock *sk,
144 struct sk_psock *psock, void *link_raw)
145{
146 bool strp_stop = false, verdict_stop = false;
147 struct sk_psock_link *link, *tmp;
148
149 spin_lock_bh(&psock->link_lock);
150 list_for_each_entry_safe(link, tmp, &psock->link, list) {
151 if (link->link_raw == link_raw) {
152 struct bpf_map *map = link->map;
153 struct bpf_stab *stab = container_of(map, struct bpf_stab,
154 map);
155 if (psock->saved_data_ready && stab->progs.stream_parser)
156 strp_stop = true;
157 if (psock->saved_data_ready && stab->progs.stream_verdict)
158 verdict_stop = true;
159 if (psock->saved_data_ready && stab->progs.skb_verdict)
160 verdict_stop = true;
161 list_del(&link->list);
162 sk_psock_free_link(link);
163 }
164 }
165 spin_unlock_bh(&psock->link_lock);
166 if (strp_stop || verdict_stop) {
167 write_lock_bh(&sk->sk_callback_lock);
168 if (strp_stop)
169 sk_psock_stop_strp(sk, psock);
170 if (verdict_stop)
171 sk_psock_stop_verdict(sk, psock);
172
173 if (psock->psock_update_sk_prot)
174 psock->psock_update_sk_prot(sk, psock, false);
175 write_unlock_bh(&sk->sk_callback_lock);
176 }
177}
178
179static void sock_map_unref(struct sock *sk, void *link_raw)
180{
181 struct sk_psock *psock = sk_psock(sk);
182
183 if (likely(psock)) {
184 sock_map_del_link(sk, psock, link_raw);
185 sk_psock_put(sk, psock);
186 }
187}
188
189static int sock_map_init_proto(struct sock *sk, struct sk_psock *psock)
190{
191 if (!sk->sk_prot->psock_update_sk_prot)
192 return -EINVAL;
193 psock->psock_update_sk_prot = sk->sk_prot->psock_update_sk_prot;
194 return sk->sk_prot->psock_update_sk_prot(sk, psock, false);
195}
196
197static struct sk_psock *sock_map_psock_get_checked(struct sock *sk)
198{
199 struct sk_psock *psock;
200
201 rcu_read_lock();
202 psock = sk_psock(sk);
203 if (psock) {
204 if (sk->sk_prot->close != sock_map_close) {
205 psock = ERR_PTR(-EBUSY);
206 goto out;
207 }
208
209 if (!refcount_inc_not_zero(&psock->refcnt))
210 psock = ERR_PTR(-EBUSY);
211 }
212out:
213 rcu_read_unlock();
214 return psock;
215}
216
217static int sock_map_link(struct bpf_map *map, struct sock *sk)
218{
219 struct sk_psock_progs *progs = sock_map_progs(map);
220 struct bpf_prog *stream_verdict = NULL;
221 struct bpf_prog *stream_parser = NULL;
222 struct bpf_prog *skb_verdict = NULL;
223 struct bpf_prog *msg_parser = NULL;
224 struct sk_psock *psock;
225 int ret;
226
227 stream_verdict = READ_ONCE(progs->stream_verdict);
228 if (stream_verdict) {
229 stream_verdict = bpf_prog_inc_not_zero(stream_verdict);
230 if (IS_ERR(stream_verdict))
231 return PTR_ERR(stream_verdict);
232 }
233
234 stream_parser = READ_ONCE(progs->stream_parser);
235 if (stream_parser) {
236 stream_parser = bpf_prog_inc_not_zero(stream_parser);
237 if (IS_ERR(stream_parser)) {
238 ret = PTR_ERR(stream_parser);
239 goto out_put_stream_verdict;
240 }
241 }
242
243 msg_parser = READ_ONCE(progs->msg_parser);
244 if (msg_parser) {
245 msg_parser = bpf_prog_inc_not_zero(msg_parser);
246 if (IS_ERR(msg_parser)) {
247 ret = PTR_ERR(msg_parser);
248 goto out_put_stream_parser;
249 }
250 }
251
252 skb_verdict = READ_ONCE(progs->skb_verdict);
253 if (skb_verdict) {
254 skb_verdict = bpf_prog_inc_not_zero(skb_verdict);
255 if (IS_ERR(skb_verdict)) {
256 ret = PTR_ERR(skb_verdict);
257 goto out_put_msg_parser;
258 }
259 }
260
261 psock = sock_map_psock_get_checked(sk);
262 if (IS_ERR(psock)) {
263 ret = PTR_ERR(psock);
264 goto out_progs;
265 }
266
267 if (psock) {
268 if ((msg_parser && READ_ONCE(psock->progs.msg_parser)) ||
269 (stream_parser && READ_ONCE(psock->progs.stream_parser)) ||
270 (skb_verdict && READ_ONCE(psock->progs.skb_verdict)) ||
271 (skb_verdict && READ_ONCE(psock->progs.stream_verdict)) ||
272 (stream_verdict && READ_ONCE(psock->progs.skb_verdict)) ||
273 (stream_verdict && READ_ONCE(psock->progs.stream_verdict))) {
274 sk_psock_put(sk, psock);
275 ret = -EBUSY;
276 goto out_progs;
277 }
278 } else {
279 psock = sk_psock_init(sk, map->numa_node);
280 if (IS_ERR(psock)) {
281 ret = PTR_ERR(psock);
282 goto out_progs;
283 }
284 }
285
286 if (msg_parser)
287 psock_set_prog(&psock->progs.msg_parser, msg_parser);
288 if (stream_parser)
289 psock_set_prog(&psock->progs.stream_parser, stream_parser);
290 if (stream_verdict)
291 psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
292 if (skb_verdict)
293 psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
294
295 /* msg_* and stream_* programs references tracked in psock after this
296 * point. Reference dec and cleanup will occur through psock destructor
297 */
298 ret = sock_map_init_proto(sk, psock);
299 if (ret < 0) {
300 sk_psock_put(sk, psock);
301 goto out;
302 }
303
304 write_lock_bh(&sk->sk_callback_lock);
305 if (stream_parser && stream_verdict && !psock->saved_data_ready) {
306 ret = sk_psock_init_strp(sk, psock);
307 if (ret) {
308 write_unlock_bh(&sk->sk_callback_lock);
309 sk_psock_put(sk, psock);
310 goto out;
311 }
312 sk_psock_start_strp(sk, psock);
313 } else if (!stream_parser && stream_verdict && !psock->saved_data_ready) {
314 sk_psock_start_verdict(sk,psock);
315 } else if (!stream_verdict && skb_verdict && !psock->saved_data_ready) {
316 sk_psock_start_verdict(sk, psock);
317 }
318 write_unlock_bh(&sk->sk_callback_lock);
319 return 0;
320out_progs:
321 if (skb_verdict)
322 bpf_prog_put(skb_verdict);
323out_put_msg_parser:
324 if (msg_parser)
325 bpf_prog_put(msg_parser);
326out_put_stream_parser:
327 if (stream_parser)
328 bpf_prog_put(stream_parser);
329out_put_stream_verdict:
330 if (stream_verdict)
331 bpf_prog_put(stream_verdict);
332out:
333 return ret;
334}
335
336static void sock_map_free(struct bpf_map *map)
337{
338 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
339 int i;
340
341 /* After the sync no updates or deletes will be in-flight so it
342 * is safe to walk map and remove entries without risking a race
343 * in EEXIST update case.
344 */
345 synchronize_rcu();
346 for (i = 0; i < stab->map.max_entries; i++) {
347 struct sock **psk = &stab->sks[i];
348 struct sock *sk;
349
350 sk = xchg(psk, NULL);
351 if (sk) {
352 sock_hold(sk);
353 lock_sock(sk);
354 rcu_read_lock();
355 sock_map_unref(sk, psk);
356 rcu_read_unlock();
357 release_sock(sk);
358 sock_put(sk);
359 }
360 }
361
362 /* wait for psock readers accessing its map link */
363 synchronize_rcu();
364
365 bpf_map_area_free(stab->sks);
366 bpf_map_area_free(stab);
367}
368
369static void sock_map_release_progs(struct bpf_map *map)
370{
371 psock_progs_drop(&container_of(map, struct bpf_stab, map)->progs);
372}
373
374static struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
375{
376 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
377
378 WARN_ON_ONCE(!rcu_read_lock_held());
379
380 if (unlikely(key >= map->max_entries))
381 return NULL;
382 return READ_ONCE(stab->sks[key]);
383}
384
385static void *sock_map_lookup(struct bpf_map *map, void *key)
386{
387 struct sock *sk;
388
389 sk = __sock_map_lookup_elem(map, *(u32 *)key);
390 if (!sk)
391 return NULL;
392 if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
393 return NULL;
394 return sk;
395}
396
397static void *sock_map_lookup_sys(struct bpf_map *map, void *key)
398{
399 struct sock *sk;
400
401 if (map->value_size != sizeof(u64))
402 return ERR_PTR(-ENOSPC);
403
404 sk = __sock_map_lookup_elem(map, *(u32 *)key);
405 if (!sk)
406 return ERR_PTR(-ENOENT);
407
408 __sock_gen_cookie(sk);
409 return &sk->sk_cookie;
410}
411
412static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
413 struct sock **psk)
414{
415 struct sock *sk;
416 int err = 0;
417
418 raw_spin_lock_bh(&stab->lock);
419 sk = *psk;
420 if (!sk_test || sk_test == sk)
421 sk = xchg(psk, NULL);
422
423 if (likely(sk))
424 sock_map_unref(sk, psk);
425 else
426 err = -EINVAL;
427
428 raw_spin_unlock_bh(&stab->lock);
429 return err;
430}
431
432static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk,
433 void *link_raw)
434{
435 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
436
437 __sock_map_delete(stab, sk, link_raw);
438}
439
440static int sock_map_delete_elem(struct bpf_map *map, void *key)
441{
442 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
443 u32 i = *(u32 *)key;
444 struct sock **psk;
445
446 if (unlikely(i >= map->max_entries))
447 return -EINVAL;
448
449 psk = &stab->sks[i];
450 return __sock_map_delete(stab, NULL, psk);
451}
452
453static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next)
454{
455 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
456 u32 i = key ? *(u32 *)key : U32_MAX;
457 u32 *key_next = next;
458
459 if (i == stab->map.max_entries - 1)
460 return -ENOENT;
461 if (i >= stab->map.max_entries)
462 *key_next = 0;
463 else
464 *key_next = i + 1;
465 return 0;
466}
467
468static int sock_map_update_common(struct bpf_map *map, u32 idx,
469 struct sock *sk, u64 flags)
470{
471 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
472 struct sk_psock_link *link;
473 struct sk_psock *psock;
474 struct sock *osk;
475 int ret;
476
477 WARN_ON_ONCE(!rcu_read_lock_held());
478 if (unlikely(flags > BPF_EXIST))
479 return -EINVAL;
480 if (unlikely(idx >= map->max_entries))
481 return -E2BIG;
482
483 link = sk_psock_init_link();
484 if (!link)
485 return -ENOMEM;
486
487 ret = sock_map_link(map, sk);
488 if (ret < 0)
489 goto out_free;
490
491 psock = sk_psock(sk);
492 WARN_ON_ONCE(!psock);
493
494 raw_spin_lock_bh(&stab->lock);
495 osk = stab->sks[idx];
496 if (osk && flags == BPF_NOEXIST) {
497 ret = -EEXIST;
498 goto out_unlock;
499 } else if (!osk && flags == BPF_EXIST) {
500 ret = -ENOENT;
501 goto out_unlock;
502 }
503
504 sock_map_add_link(psock, link, map, &stab->sks[idx]);
505 stab->sks[idx] = sk;
506 if (osk)
507 sock_map_unref(osk, &stab->sks[idx]);
508 raw_spin_unlock_bh(&stab->lock);
509 return 0;
510out_unlock:
511 raw_spin_unlock_bh(&stab->lock);
512 if (psock)
513 sk_psock_put(sk, psock);
514out_free:
515 sk_psock_free_link(link);
516 return ret;
517}
518
519static bool sock_map_op_okay(const struct bpf_sock_ops_kern *ops)
520{
521 return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB ||
522 ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
523 ops->op == BPF_SOCK_OPS_TCP_LISTEN_CB;
524}
525
526static bool sock_map_redirect_allowed(const struct sock *sk)
527{
528 if (sk_is_tcp(sk))
529 return sk->sk_state != TCP_LISTEN;
530 else
531 return sk->sk_state == TCP_ESTABLISHED;
532}
533
534static bool sock_map_sk_is_suitable(const struct sock *sk)
535{
536 return !!sk->sk_prot->psock_update_sk_prot;
537}
538
539static bool sock_map_sk_state_allowed(const struct sock *sk)
540{
541 if (sk_is_tcp(sk))
542 return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN);
543 return true;
544}
545
546static int sock_hash_update_common(struct bpf_map *map, void *key,
547 struct sock *sk, u64 flags);
548
549int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
550 u64 flags)
551{
552 struct socket *sock;
553 struct sock *sk;
554 int ret;
555 u64 ufd;
556
557 if (map->value_size == sizeof(u64))
558 ufd = *(u64 *)value;
559 else
560 ufd = *(u32 *)value;
561 if (ufd > S32_MAX)
562 return -EINVAL;
563
564 sock = sockfd_lookup(ufd, &ret);
565 if (!sock)
566 return ret;
567 sk = sock->sk;
568 if (!sk) {
569 ret = -EINVAL;
570 goto out;
571 }
572 if (!sock_map_sk_is_suitable(sk)) {
573 ret = -EOPNOTSUPP;
574 goto out;
575 }
576
577 sock_map_sk_acquire(sk);
578 if (!sock_map_sk_state_allowed(sk))
579 ret = -EOPNOTSUPP;
580 else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
581 ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
582 else
583 ret = sock_hash_update_common(map, key, sk, flags);
584 sock_map_sk_release(sk);
585out:
586 sockfd_put(sock);
587 return ret;
588}
589
590static int sock_map_update_elem(struct bpf_map *map, void *key,
591 void *value, u64 flags)
592{
593 struct sock *sk = (struct sock *)value;
594 int ret;
595
596 if (unlikely(!sk || !sk_fullsock(sk)))
597 return -EINVAL;
598
599 if (!sock_map_sk_is_suitable(sk))
600 return -EOPNOTSUPP;
601
602 local_bh_disable();
603 bh_lock_sock(sk);
604 if (!sock_map_sk_state_allowed(sk))
605 ret = -EOPNOTSUPP;
606 else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
607 ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
608 else
609 ret = sock_hash_update_common(map, key, sk, flags);
610 bh_unlock_sock(sk);
611 local_bh_enable();
612 return ret;
613}
614
615BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, sops,
616 struct bpf_map *, map, void *, key, u64, flags)
617{
618 WARN_ON_ONCE(!rcu_read_lock_held());
619
620 if (likely(sock_map_sk_is_suitable(sops->sk) &&
621 sock_map_op_okay(sops)))
622 return sock_map_update_common(map, *(u32 *)key, sops->sk,
623 flags);
624 return -EOPNOTSUPP;
625}
626
627const struct bpf_func_proto bpf_sock_map_update_proto = {
628 .func = bpf_sock_map_update,
629 .gpl_only = false,
630 .pkt_access = true,
631 .ret_type = RET_INTEGER,
632 .arg1_type = ARG_PTR_TO_CTX,
633 .arg2_type = ARG_CONST_MAP_PTR,
634 .arg3_type = ARG_PTR_TO_MAP_KEY,
635 .arg4_type = ARG_ANYTHING,
636};
637
638BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
639 struct bpf_map *, map, u32, key, u64, flags)
640{
641 struct sock *sk;
642
643 if (unlikely(flags & ~(BPF_F_INGRESS)))
644 return SK_DROP;
645
646 sk = __sock_map_lookup_elem(map, key);
647 if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
648 return SK_DROP;
649
650 skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
651 return SK_PASS;
652}
653
654const struct bpf_func_proto bpf_sk_redirect_map_proto = {
655 .func = bpf_sk_redirect_map,
656 .gpl_only = false,
657 .ret_type = RET_INTEGER,
658 .arg1_type = ARG_PTR_TO_CTX,
659 .arg2_type = ARG_CONST_MAP_PTR,
660 .arg3_type = ARG_ANYTHING,
661 .arg4_type = ARG_ANYTHING,
662};
663
664BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
665 struct bpf_map *, map, u32, key, u64, flags)
666{
667 struct sock *sk;
668
669 if (unlikely(flags & ~(BPF_F_INGRESS)))
670 return SK_DROP;
671
672 sk = __sock_map_lookup_elem(map, key);
673 if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
674 return SK_DROP;
675
676 msg->flags = flags;
677 msg->sk_redir = sk;
678 return SK_PASS;
679}
680
681const struct bpf_func_proto bpf_msg_redirect_map_proto = {
682 .func = bpf_msg_redirect_map,
683 .gpl_only = false,
684 .ret_type = RET_INTEGER,
685 .arg1_type = ARG_PTR_TO_CTX,
686 .arg2_type = ARG_CONST_MAP_PTR,
687 .arg3_type = ARG_ANYTHING,
688 .arg4_type = ARG_ANYTHING,
689};
690
691struct sock_map_seq_info {
692 struct bpf_map *map;
693 struct sock *sk;
694 u32 index;
695};
696
697struct bpf_iter__sockmap {
698 __bpf_md_ptr(struct bpf_iter_meta *, meta);
699 __bpf_md_ptr(struct bpf_map *, map);
700 __bpf_md_ptr(void *, key);
701 __bpf_md_ptr(struct sock *, sk);
702};
703
704DEFINE_BPF_ITER_FUNC(sockmap, struct bpf_iter_meta *meta,
705 struct bpf_map *map, void *key,
706 struct sock *sk)
707
708static void *sock_map_seq_lookup_elem(struct sock_map_seq_info *info)
709{
710 if (unlikely(info->index >= info->map->max_entries))
711 return NULL;
712
713 info->sk = __sock_map_lookup_elem(info->map, info->index);
714
715 /* can't return sk directly, since that might be NULL */
716 return info;
717}
718
719static void *sock_map_seq_start(struct seq_file *seq, loff_t *pos)
720 __acquires(rcu)
721{
722 struct sock_map_seq_info *info = seq->private;
723
724 if (*pos == 0)
725 ++*pos;
726
727 /* pairs with sock_map_seq_stop */
728 rcu_read_lock();
729 return sock_map_seq_lookup_elem(info);
730}
731
732static void *sock_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
733 __must_hold(rcu)
734{
735 struct sock_map_seq_info *info = seq->private;
736
737 ++*pos;
738 ++info->index;
739
740 return sock_map_seq_lookup_elem(info);
741}
742
743static int sock_map_seq_show(struct seq_file *seq, void *v)
744 __must_hold(rcu)
745{
746 struct sock_map_seq_info *info = seq->private;
747 struct bpf_iter__sockmap ctx = {};
748 struct bpf_iter_meta meta;
749 struct bpf_prog *prog;
750
751 meta.seq = seq;
752 prog = bpf_iter_get_info(&meta, !v);
753 if (!prog)
754 return 0;
755
756 ctx.meta = &meta;
757 ctx.map = info->map;
758 if (v) {
759 ctx.key = &info->index;
760 ctx.sk = info->sk;
761 }
762
763 return bpf_iter_run_prog(prog, &ctx);
764}
765
766static void sock_map_seq_stop(struct seq_file *seq, void *v)
767 __releases(rcu)
768{
769 if (!v)
770 (void)sock_map_seq_show(seq, NULL);
771
772 /* pairs with sock_map_seq_start */
773 rcu_read_unlock();
774}
775
776static const struct seq_operations sock_map_seq_ops = {
777 .start = sock_map_seq_start,
778 .next = sock_map_seq_next,
779 .stop = sock_map_seq_stop,
780 .show = sock_map_seq_show,
781};
782
783static int sock_map_init_seq_private(void *priv_data,
784 struct bpf_iter_aux_info *aux)
785{
786 struct sock_map_seq_info *info = priv_data;
787
788 bpf_map_inc_with_uref(aux->map);
789 info->map = aux->map;
790 return 0;
791}
792
793static void sock_map_fini_seq_private(void *priv_data)
794{
795 struct sock_map_seq_info *info = priv_data;
796
797 bpf_map_put_with_uref(info->map);
798}
799
800static const struct bpf_iter_seq_info sock_map_iter_seq_info = {
801 .seq_ops = &sock_map_seq_ops,
802 .init_seq_private = sock_map_init_seq_private,
803 .fini_seq_private = sock_map_fini_seq_private,
804 .seq_priv_size = sizeof(struct sock_map_seq_info),
805};
806
807BTF_ID_LIST_SINGLE(sock_map_btf_ids, struct, bpf_stab)
808const struct bpf_map_ops sock_map_ops = {
809 .map_meta_equal = bpf_map_meta_equal,
810 .map_alloc = sock_map_alloc,
811 .map_free = sock_map_free,
812 .map_get_next_key = sock_map_get_next_key,
813 .map_lookup_elem_sys_only = sock_map_lookup_sys,
814 .map_update_elem = sock_map_update_elem,
815 .map_delete_elem = sock_map_delete_elem,
816 .map_lookup_elem = sock_map_lookup,
817 .map_release_uref = sock_map_release_progs,
818 .map_check_btf = map_check_no_btf,
819 .map_btf_id = &sock_map_btf_ids[0],
820 .iter_seq_info = &sock_map_iter_seq_info,
821};
822
823struct bpf_shtab_elem {
824 struct rcu_head rcu;
825 u32 hash;
826 struct sock *sk;
827 struct hlist_node node;
828 u8 key[];
829};
830
831struct bpf_shtab_bucket {
832 struct hlist_head head;
833 raw_spinlock_t lock;
834};
835
836struct bpf_shtab {
837 struct bpf_map map;
838 struct bpf_shtab_bucket *buckets;
839 u32 buckets_num;
840 u32 elem_size;
841 struct sk_psock_progs progs;
842 atomic_t count;
843};
844
845static inline u32 sock_hash_bucket_hash(const void *key, u32 len)
846{
847 return jhash(key, len, 0);
848}
849
850static struct bpf_shtab_bucket *sock_hash_select_bucket(struct bpf_shtab *htab,
851 u32 hash)
852{
853 return &htab->buckets[hash & (htab->buckets_num - 1)];
854}
855
856static struct bpf_shtab_elem *
857sock_hash_lookup_elem_raw(struct hlist_head *head, u32 hash, void *key,
858 u32 key_size)
859{
860 struct bpf_shtab_elem *elem;
861
862 hlist_for_each_entry_rcu(elem, head, node) {
863 if (elem->hash == hash &&
864 !memcmp(&elem->key, key, key_size))
865 return elem;
866 }
867
868 return NULL;
869}
870
871static struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
872{
873 struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
874 u32 key_size = map->key_size, hash;
875 struct bpf_shtab_bucket *bucket;
876 struct bpf_shtab_elem *elem;
877
878 WARN_ON_ONCE(!rcu_read_lock_held());
879
880 hash = sock_hash_bucket_hash(key, key_size);
881 bucket = sock_hash_select_bucket(htab, hash);
882 elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
883
884 return elem ? elem->sk : NULL;
885}
886
887static void sock_hash_free_elem(struct bpf_shtab *htab,
888 struct bpf_shtab_elem *elem)
889{
890 atomic_dec(&htab->count);
891 kfree_rcu(elem, rcu);
892}
893
894static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
895 void *link_raw)
896{
897 struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
898 struct bpf_shtab_elem *elem_probe, *elem = link_raw;
899 struct bpf_shtab_bucket *bucket;
900
901 WARN_ON_ONCE(!rcu_read_lock_held());
902 bucket = sock_hash_select_bucket(htab, elem->hash);
903
904 /* elem may be deleted in parallel from the map, but access here
905 * is okay since it's going away only after RCU grace period.
906 * However, we need to check whether it's still present.
907 */
908 raw_spin_lock_bh(&bucket->lock);
909 elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash,
910 elem->key, map->key_size);
911 if (elem_probe && elem_probe == elem) {
912 hlist_del_rcu(&elem->node);
913 sock_map_unref(elem->sk, elem);
914 sock_hash_free_elem(htab, elem);
915 }
916 raw_spin_unlock_bh(&bucket->lock);
917}
918
919static int sock_hash_delete_elem(struct bpf_map *map, void *key)
920{
921 struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
922 u32 hash, key_size = map->key_size;
923 struct bpf_shtab_bucket *bucket;
924 struct bpf_shtab_elem *elem;
925 int ret = -ENOENT;
926
927 hash = sock_hash_bucket_hash(key, key_size);
928 bucket = sock_hash_select_bucket(htab, hash);
929
930 raw_spin_lock_bh(&bucket->lock);
931 elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
932 if (elem) {
933 hlist_del_rcu(&elem->node);
934 sock_map_unref(elem->sk, elem);
935 sock_hash_free_elem(htab, elem);
936 ret = 0;
937 }
938 raw_spin_unlock_bh(&bucket->lock);
939 return ret;
940}
941
942static struct bpf_shtab_elem *sock_hash_alloc_elem(struct bpf_shtab *htab,
943 void *key, u32 key_size,
944 u32 hash, struct sock *sk,
945 struct bpf_shtab_elem *old)
946{
947 struct bpf_shtab_elem *new;
948
949 if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
950 if (!old) {
951 atomic_dec(&htab->count);
952 return ERR_PTR(-E2BIG);
953 }
954 }
955
956 new = bpf_map_kmalloc_node(&htab->map, htab->elem_size,
957 GFP_ATOMIC | __GFP_NOWARN,
958 htab->map.numa_node);
959 if (!new) {
960 atomic_dec(&htab->count);
961 return ERR_PTR(-ENOMEM);
962 }
963 memcpy(new->key, key, key_size);
964 new->sk = sk;
965 new->hash = hash;
966 return new;
967}
968
969static int sock_hash_update_common(struct bpf_map *map, void *key,
970 struct sock *sk, u64 flags)
971{
972 struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
973 u32 key_size = map->key_size, hash;
974 struct bpf_shtab_elem *elem, *elem_new;
975 struct bpf_shtab_bucket *bucket;
976 struct sk_psock_link *link;
977 struct sk_psock *psock;
978 int ret;
979
980 WARN_ON_ONCE(!rcu_read_lock_held());
981 if (unlikely(flags > BPF_EXIST))
982 return -EINVAL;
983
984 link = sk_psock_init_link();
985 if (!link)
986 return -ENOMEM;
987
988 ret = sock_map_link(map, sk);
989 if (ret < 0)
990 goto out_free;
991
992 psock = sk_psock(sk);
993 WARN_ON_ONCE(!psock);
994
995 hash = sock_hash_bucket_hash(key, key_size);
996 bucket = sock_hash_select_bucket(htab, hash);
997
998 raw_spin_lock_bh(&bucket->lock);
999 elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
1000 if (elem && flags == BPF_NOEXIST) {
1001 ret = -EEXIST;
1002 goto out_unlock;
1003 } else if (!elem && flags == BPF_EXIST) {
1004 ret = -ENOENT;
1005 goto out_unlock;
1006 }
1007
1008 elem_new = sock_hash_alloc_elem(htab, key, key_size, hash, sk, elem);
1009 if (IS_ERR(elem_new)) {
1010 ret = PTR_ERR(elem_new);
1011 goto out_unlock;
1012 }
1013
1014 sock_map_add_link(psock, link, map, elem_new);
1015 /* Add new element to the head of the list, so that
1016 * concurrent search will find it before old elem.
1017 */
1018 hlist_add_head_rcu(&elem_new->node, &bucket->head);
1019 if (elem) {
1020 hlist_del_rcu(&elem->node);
1021 sock_map_unref(elem->sk, elem);
1022 sock_hash_free_elem(htab, elem);
1023 }
1024 raw_spin_unlock_bh(&bucket->lock);
1025 return 0;
1026out_unlock:
1027 raw_spin_unlock_bh(&bucket->lock);
1028 sk_psock_put(sk, psock);
1029out_free:
1030 sk_psock_free_link(link);
1031 return ret;
1032}
1033
1034static int sock_hash_get_next_key(struct bpf_map *map, void *key,
1035 void *key_next)
1036{
1037 struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
1038 struct bpf_shtab_elem *elem, *elem_next;
1039 u32 hash, key_size = map->key_size;
1040 struct hlist_head *head;
1041 int i = 0;
1042
1043 if (!key)
1044 goto find_first_elem;
1045 hash = sock_hash_bucket_hash(key, key_size);
1046 head = &sock_hash_select_bucket(htab, hash)->head;
1047 elem = sock_hash_lookup_elem_raw(head, hash, key, key_size);
1048 if (!elem)
1049 goto find_first_elem;
1050
1051 elem_next = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&elem->node)),
1052 struct bpf_shtab_elem, node);
1053 if (elem_next) {
1054 memcpy(key_next, elem_next->key, key_size);
1055 return 0;
1056 }
1057
1058 i = hash & (htab->buckets_num - 1);
1059 i++;
1060find_first_elem:
1061 for (; i < htab->buckets_num; i++) {
1062 head = &sock_hash_select_bucket(htab, i)->head;
1063 elem_next = hlist_entry_safe(rcu_dereference(hlist_first_rcu(head)),
1064 struct bpf_shtab_elem, node);
1065 if (elem_next) {
1066 memcpy(key_next, elem_next->key, key_size);
1067 return 0;
1068 }
1069 }
1070
1071 return -ENOENT;
1072}
1073
1074static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
1075{
1076 struct bpf_shtab *htab;
1077 int i, err;
1078
1079 if (!capable(CAP_NET_ADMIN))
1080 return ERR_PTR(-EPERM);
1081 if (attr->max_entries == 0 ||
1082 attr->key_size == 0 ||
1083 (attr->value_size != sizeof(u32) &&
1084 attr->value_size != sizeof(u64)) ||
1085 attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
1086 return ERR_PTR(-EINVAL);
1087 if (attr->key_size > MAX_BPF_STACK)
1088 return ERR_PTR(-E2BIG);
1089
1090 htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE);
1091 if (!htab)
1092 return ERR_PTR(-ENOMEM);
1093
1094 bpf_map_init_from_attr(&htab->map, attr);
1095
1096 htab->buckets_num = roundup_pow_of_two(htab->map.max_entries);
1097 htab->elem_size = sizeof(struct bpf_shtab_elem) +
1098 round_up(htab->map.key_size, 8);
1099 if (htab->buckets_num == 0 ||
1100 htab->buckets_num > U32_MAX / sizeof(struct bpf_shtab_bucket)) {
1101 err = -EINVAL;
1102 goto free_htab;
1103 }
1104
1105 htab->buckets = bpf_map_area_alloc(htab->buckets_num *
1106 sizeof(struct bpf_shtab_bucket),
1107 htab->map.numa_node);
1108 if (!htab->buckets) {
1109 err = -ENOMEM;
1110 goto free_htab;
1111 }
1112
1113 for (i = 0; i < htab->buckets_num; i++) {
1114 INIT_HLIST_HEAD(&htab->buckets[i].head);
1115 raw_spin_lock_init(&htab->buckets[i].lock);
1116 }
1117
1118 return &htab->map;
1119free_htab:
1120 bpf_map_area_free(htab);
1121 return ERR_PTR(err);
1122}
1123
1124static void sock_hash_free(struct bpf_map *map)
1125{
1126 struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
1127 struct bpf_shtab_bucket *bucket;
1128 struct hlist_head unlink_list;
1129 struct bpf_shtab_elem *elem;
1130 struct hlist_node *node;
1131 int i;
1132
1133	/* After the sync no updates or deletes will be in-flight, so it
1134	 * is safe to walk the map and remove entries without risking a
1135	 * race in the EEXIST update case.
1136	 */
1137 synchronize_rcu();
1138 for (i = 0; i < htab->buckets_num; i++) {
1139 bucket = sock_hash_select_bucket(htab, i);
1140
1141		/* We are racing with sock_hash_delete_from_link to
1142		 * enter the spin-lock critical section. Every socket on
1143		 * the list is still linked to the sockhash. Since the link
1144		 * exists, the psock exists and holds a ref on the socket,
1145		 * which lets us grab a socket ref too.
1146		 */
1147 raw_spin_lock_bh(&bucket->lock);
1148 hlist_for_each_entry(elem, &bucket->head, node)
1149 sock_hold(elem->sk);
1150 hlist_move_list(&bucket->head, &unlink_list);
1151 raw_spin_unlock_bh(&bucket->lock);
1152
1153		/* Process removed entries out of atomic context so we
1154		 * can block on the socket lock before deleting the
1155		 * psock's link to the sockhash.
1156		 */
1157 hlist_for_each_entry_safe(elem, node, &unlink_list, node) {
1158 hlist_del(&elem->node);
1159 lock_sock(elem->sk);
1160 rcu_read_lock();
1161 sock_map_unref(elem->sk, elem);
1162 rcu_read_unlock();
1163 release_sock(elem->sk);
1164 sock_put(elem->sk);
1165 sock_hash_free_elem(htab, elem);
1166 }
1167 }
1168
1169	/* Wait for any psock readers still accessing their map link. */
1170 synchronize_rcu();
1171
1172 bpf_map_area_free(htab->buckets);
1173 bpf_map_area_free(htab);
1174}
1175
1176static void *sock_hash_lookup_sys(struct bpf_map *map, void *key)
1177{
1178 struct sock *sk;
1179
1180 if (map->value_size != sizeof(u64))
1181 return ERR_PTR(-ENOSPC);
1182
1183 sk = __sock_hash_lookup_elem(map, key);
1184 if (!sk)
1185 return ERR_PTR(-ENOENT);
1186
1187 __sock_gen_cookie(sk);
1188 return &sk->sk_cookie;
1189}
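/* Usage sketch (illustrative, not part of this file): a lookup through the
 * bpf() syscall goes through sock_hash_lookup_sys() above and therefore
 * returns the 8-byte socket cookie rather than a socket, and only when the
 * map was created with an 8-byte value (otherwise -ENOSPC).  map_fd and the
 * key value are assumptions made for the example.
 */
#include <stdio.h>
#include <bpf/bpf.h>

static void print_cookie(int map_fd, __u32 key)
{
	__u64 cookie;

	if (!bpf_map_lookup_elem(map_fd, &key, &cookie))
		printf("key %u -> socket cookie %llu\n", key,
		       (unsigned long long)cookie);
}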
1190
1191static void *sock_hash_lookup(struct bpf_map *map, void *key)
1192{
1193 struct sock *sk;
1194
1195 sk = __sock_hash_lookup_elem(map, key);
1196 if (!sk)
1197 return NULL;
1198 if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
1199 return NULL;
1200 return sk;
1201}
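/* Usage sketch (illustrative, not part of this file): a lookup from BPF
 * program context goes through sock_hash_lookup() above.  The verifier
 * treats the returned socket as a held reference, so the program must
 * release it with bpf_sk_release() before returning.  The map layout,
 * section name and key are assumptions made for the example.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_SOCKHASH);
	__uint(max_entries, 1024);
	__type(key, __u32);
	__type(value, __u64);
} sock_hash SEC(".maps");

SEC("sk_skb/stream_verdict")
int only_known_peers(struct __sk_buff *skb)
{
	__u32 key = 0;	/* key derivation is up to the program */
	struct bpf_sock *sk;

	sk = bpf_map_lookup_elem(&sock_hash, &key);
	if (!sk)
		return SK_DROP;
	/* ... inspect sk ... */
	bpf_sk_release(sk);
	return SK_PASS;
}

char _license[] SEC("license") = "GPL";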
1202
1203static void sock_hash_release_progs(struct bpf_map *map)
1204{
1205 psock_progs_drop(&container_of(map, struct bpf_shtab, map)->progs);
1206}
1207
1208BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, sops,
1209 struct bpf_map *, map, void *, key, u64, flags)
1210{
1211 WARN_ON_ONCE(!rcu_read_lock_held());
1212
1213 if (likely(sock_map_sk_is_suitable(sops->sk) &&
1214 sock_map_op_okay(sops)))
1215 return sock_hash_update_common(map, key, sops->sk, flags);
1216 return -EOPNOTSUPP;
1217}
1218
1219const struct bpf_func_proto bpf_sock_hash_update_proto = {
1220 .func = bpf_sock_hash_update,
1221 .gpl_only = false,
1222 .pkt_access = true,
1223 .ret_type = RET_INTEGER,
1224 .arg1_type = ARG_PTR_TO_CTX,
1225 .arg2_type = ARG_CONST_MAP_PTR,
1226 .arg3_type = ARG_PTR_TO_MAP_KEY,
1227 .arg4_type = ARG_ANYTHING,
1228};
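/* Usage sketch (illustrative, not part of this file): bpf_sock_hash_update()
 * is the BPF-side way to populate a sockhash, typically from a sockops
 * program once a TCP connection is established.  sock_map_sk_is_suitable()
 * and sock_map_op_okay() above decide which sockets and which sockops events
 * are accepted.  The map layout and key choice are assumptions made for the
 * example.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_SOCKHASH);
	__uint(max_entries, 1024);
	__type(key, __u32);
	__type(value, __u64);
} sock_hash SEC(".maps");

SEC("sockops")
int add_established(struct bpf_sock_ops *skops)
{
	__u32 key = skops->local_port;

	switch (skops->op) {
	case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
	case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
		bpf_sock_hash_update(skops, &sock_hash, &key, BPF_NOEXIST);
		break;
	}
	return 1;
}

char _license[] SEC("license") = "GPL";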
1229
1230BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
1231 struct bpf_map *, map, void *, key, u64, flags)
1232{
1233 struct sock *sk;
1234
1235 if (unlikely(flags & ~(BPF_F_INGRESS)))
1236 return SK_DROP;
1237
1238 sk = __sock_hash_lookup_elem(map, key);
1239 if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
1240 return SK_DROP;
1241
1242 skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
1243 return SK_PASS;
1244}
1245
1246const struct bpf_func_proto bpf_sk_redirect_hash_proto = {
1247 .func = bpf_sk_redirect_hash,
1248 .gpl_only = false,
1249 .ret_type = RET_INTEGER,
1250 .arg1_type = ARG_PTR_TO_CTX,
1251 .arg2_type = ARG_CONST_MAP_PTR,
1252 .arg3_type = ARG_PTR_TO_MAP_KEY,
1253 .arg4_type = ARG_ANYTHING,
1254};
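/* Usage sketch (illustrative, not part of this file): bpf_sk_redirect_hash()
 * is meant to be called from an sk_skb verdict program; its return value
 * (SK_PASS or SK_DROP) is returned as the verdict.  BPF_F_INGRESS is the
 * only accepted flag (see the check above); without it the skb is queued to
 * the target socket's egress path.  Map layout and key are assumptions made
 * for the example.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_SOCKHASH);
	__uint(max_entries, 1024);
	__type(key, __u32);
	__type(value, __u64);
} sock_hash SEC(".maps");

SEC("sk_skb/stream_verdict")
int redirect_skb(struct __sk_buff *skb)
{
	__u32 key = 0;	/* pick the destination socket */

	return bpf_sk_redirect_hash(skb, &sock_hash, &key, BPF_F_INGRESS);
}

char _license[] SEC("license") = "GPL";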
1255
1256BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
1257 struct bpf_map *, map, void *, key, u64, flags)
1258{
1259 struct sock *sk;
1260
1261 if (unlikely(flags & ~(BPF_F_INGRESS)))
1262 return SK_DROP;
1263
1264 sk = __sock_hash_lookup_elem(map, key);
1265 if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
1266 return SK_DROP;
1267
1268 msg->flags = flags;
1269 msg->sk_redir = sk;
1270 return SK_PASS;
1271}
1272
1273const struct bpf_func_proto bpf_msg_redirect_hash_proto = {
1274 .func = bpf_msg_redirect_hash,
1275 .gpl_only = false,
1276 .ret_type = RET_INTEGER,
1277 .arg1_type = ARG_PTR_TO_CTX,
1278 .arg2_type = ARG_CONST_MAP_PTR,
1279 .arg3_type = ARG_PTR_TO_MAP_KEY,
1280 .arg4_type = ARG_ANYTHING,
1281};
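/* Usage sketch (illustrative, not part of this file): the sk_msg counterpart
 * of the helper above, used from a program attached as BPF_SK_MSG_VERDICT to
 * steer sendmsg() data to another sockhash member.  Map layout and key are
 * assumptions made for the example.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_SOCKHASH);
	__uint(max_entries, 1024);
	__type(key, __u32);
	__type(value, __u64);
} sock_hash SEC(".maps");

SEC("sk_msg")
int redirect_msg(struct sk_msg_md *msg)
{
	__u32 key = 0;	/* pick the destination socket */

	return bpf_msg_redirect_hash(msg, &sock_hash, &key, BPF_F_INGRESS);
}

char _license[] SEC("license") = "GPL";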
1282
1283struct sock_hash_seq_info {
1284 struct bpf_map *map;
1285 struct bpf_shtab *htab;
1286 u32 bucket_id;
1287};
1288
1289static void *sock_hash_seq_find_next(struct sock_hash_seq_info *info,
1290 struct bpf_shtab_elem *prev_elem)
1291{
1292 const struct bpf_shtab *htab = info->htab;
1293 struct bpf_shtab_bucket *bucket;
1294 struct bpf_shtab_elem *elem;
1295 struct hlist_node *node;
1296
1297 /* try to find next elem in the same bucket */
1298 if (prev_elem) {
1299 node = rcu_dereference(hlist_next_rcu(&prev_elem->node));
1300 elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
1301 if (elem)
1302 return elem;
1303
1304 /* no more elements, continue in the next bucket */
1305 info->bucket_id++;
1306 }
1307
1308 for (; info->bucket_id < htab->buckets_num; info->bucket_id++) {
1309 bucket = &htab->buckets[info->bucket_id];
1310 node = rcu_dereference(hlist_first_rcu(&bucket->head));
1311 elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
1312 if (elem)
1313 return elem;
1314 }
1315
1316 return NULL;
1317}
1318
1319static void *sock_hash_seq_start(struct seq_file *seq, loff_t *pos)
1320 __acquires(rcu)
1321{
1322 struct sock_hash_seq_info *info = seq->private;
1323
1324 if (*pos == 0)
1325 ++*pos;
1326
1327 /* pairs with sock_hash_seq_stop */
1328 rcu_read_lock();
1329 return sock_hash_seq_find_next(info, NULL);
1330}
1331
1332static void *sock_hash_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1333 __must_hold(rcu)
1334{
1335 struct sock_hash_seq_info *info = seq->private;
1336
1337 ++*pos;
1338 return sock_hash_seq_find_next(info, v);
1339}
1340
1341static int sock_hash_seq_show(struct seq_file *seq, void *v)
1342 __must_hold(rcu)
1343{
1344 struct sock_hash_seq_info *info = seq->private;
1345 struct bpf_iter__sockmap ctx = {};
1346 struct bpf_shtab_elem *elem = v;
1347 struct bpf_iter_meta meta;
1348 struct bpf_prog *prog;
1349
1350 meta.seq = seq;
1351 prog = bpf_iter_get_info(&meta, !elem);
1352 if (!prog)
1353 return 0;
1354
1355 ctx.meta = &meta;
1356 ctx.map = info->map;
1357 if (elem) {
1358 ctx.key = elem->key;
1359 ctx.sk = elem->sk;
1360 }
1361
1362 return bpf_iter_run_prog(prog, &ctx);
1363}
1364
1365static void sock_hash_seq_stop(struct seq_file *seq, void *v)
1366 __releases(rcu)
1367{
1368 if (!v)
1369 (void)sock_hash_seq_show(seq, NULL);
1370
1371 /* pairs with sock_hash_seq_start */
1372 rcu_read_unlock();
1373}
1374
1375static const struct seq_operations sock_hash_seq_ops = {
1376 .start = sock_hash_seq_start,
1377 .next = sock_hash_seq_next,
1378 .stop = sock_hash_seq_stop,
1379 .show = sock_hash_seq_show,
1380};
1381
1382static int sock_hash_init_seq_private(void *priv_data,
1383 struct bpf_iter_aux_info *aux)
1384{
1385 struct sock_hash_seq_info *info = priv_data;
1386
1387 bpf_map_inc_with_uref(aux->map);
1388 info->map = aux->map;
1389 info->htab = container_of(aux->map, struct bpf_shtab, map);
1390 return 0;
1391}
1392
1393static void sock_hash_fini_seq_private(void *priv_data)
1394{
1395 struct sock_hash_seq_info *info = priv_data;
1396
1397 bpf_map_put_with_uref(info->map);
1398}
1399
1400static const struct bpf_iter_seq_info sock_hash_iter_seq_info = {
1401 .seq_ops = &sock_hash_seq_ops,
1402 .init_seq_private = sock_hash_init_seq_private,
1403 .fini_seq_private = sock_hash_fini_seq_private,
1404 .seq_priv_size = sizeof(struct sock_hash_seq_info),
1405};
1406
1407BTF_ID_LIST_SINGLE(sock_hash_map_btf_ids, struct, bpf_shtab)
1408const struct bpf_map_ops sock_hash_ops = {
1409 .map_meta_equal = bpf_map_meta_equal,
1410 .map_alloc = sock_hash_alloc,
1411 .map_free = sock_hash_free,
1412 .map_get_next_key = sock_hash_get_next_key,
1413 .map_update_elem = sock_map_update_elem,
1414 .map_delete_elem = sock_hash_delete_elem,
1415 .map_lookup_elem = sock_hash_lookup,
1416 .map_lookup_elem_sys_only = sock_hash_lookup_sys,
1417 .map_release_uref = sock_hash_release_progs,
1418 .map_check_btf = map_check_no_btf,
1419 .map_btf_id = &sock_hash_map_btf_ids[0],
1420 .iter_seq_info = &sock_hash_iter_seq_info,
1421};
1422
1423static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
1424{
1425 switch (map->map_type) {
1426 case BPF_MAP_TYPE_SOCKMAP:
1427 return &container_of(map, struct bpf_stab, map)->progs;
1428 case BPF_MAP_TYPE_SOCKHASH:
1429 return &container_of(map, struct bpf_shtab, map)->progs;
1430 default:
1431 break;
1432 }
1433
1434 return NULL;
1435}
1436
1437static int sock_map_prog_lookup(struct bpf_map *map, struct bpf_prog ***pprog,
1438 u32 which)
1439{
1440 struct sk_psock_progs *progs = sock_map_progs(map);
1441
1442 if (!progs)
1443 return -EOPNOTSUPP;
1444
1445 switch (which) {
1446 case BPF_SK_MSG_VERDICT:
1447 *pprog = &progs->msg_parser;
1448 break;
1449#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
1450 case BPF_SK_SKB_STREAM_PARSER:
1451 *pprog = &progs->stream_parser;
1452 break;
1453#endif
1454 case BPF_SK_SKB_STREAM_VERDICT:
1455 if (progs->skb_verdict)
1456 return -EBUSY;
1457 *pprog = &progs->stream_verdict;
1458 break;
1459 case BPF_SK_SKB_VERDICT:
1460 if (progs->stream_verdict)
1461 return -EBUSY;
1462 *pprog = &progs->skb_verdict;
1463 break;
1464 default:
1465 return -EOPNOTSUPP;
1466 }
1467
1468 return 0;
1469}
1470
1471static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
1472 struct bpf_prog *old, u32 which)
1473{
1474 struct bpf_prog **pprog;
1475 int ret;
1476
1477 ret = sock_map_prog_lookup(map, &pprog, which);
1478 if (ret)
1479 return ret;
1480
1481 if (old)
1482 return psock_replace_prog(pprog, prog, old);
1483
1484 psock_set_prog(pprog, prog);
1485 return 0;
1486}
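/* Usage sketch (illustrative, not part of this file): programs reach
 * sock_map_prog_update() via BPF_PROG_ATTACH with the map fd as the target,
 * which libbpf wraps as bpf_prog_attach().  Attaching a stream parser only
 * works when CONFIG_BPF_STREAM_PARSER is enabled (see sock_map_prog_lookup()
 * above), and attaching both a stream verdict and an skb verdict fails with
 * -EBUSY.  The fd parameters are assumptions made for the example.
 */
#include <bpf/bpf.h>

static int attach_sockmap_progs(int map_fd, int parser_fd, int verdict_fd)
{
	int err;

	err = bpf_prog_attach(parser_fd, map_fd, BPF_SK_SKB_STREAM_PARSER, 0);
	if (err)
		return err;
	return bpf_prog_attach(verdict_fd, map_fd, BPF_SK_SKB_STREAM_VERDICT, 0);
}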
1487
1488int sock_map_bpf_prog_query(const union bpf_attr *attr,
1489 union bpf_attr __user *uattr)
1490{
1491 __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
1492 u32 prog_cnt = 0, flags = 0, ufd = attr->target_fd;
1493 struct bpf_prog **pprog;
1494 struct bpf_prog *prog;
1495 struct bpf_map *map;
1496 struct fd f;
1497 u32 id = 0;
1498 int ret;
1499
1500 if (attr->query.query_flags)
1501 return -EINVAL;
1502
1503 f = fdget(ufd);
1504 map = __bpf_map_get(f);
1505 if (IS_ERR(map))
1506 return PTR_ERR(map);
1507
1508 rcu_read_lock();
1509
1510 ret = sock_map_prog_lookup(map, &pprog, attr->query.attach_type);
1511 if (ret)
1512 goto end;
1513
1514 prog = *pprog;
1515 prog_cnt = !prog ? 0 : 1;
1516
1517 if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)
1518 goto end;
1519
1520	/* We do not hold a refcnt on the prog; it may be released
1521	 * asynchronously, in which case its id is reset to 0.
1522	 */
1523 id = data_race(prog->aux->id);
1524 if (id == 0)
1525 prog_cnt = 0;
1526
1527end:
1528 rcu_read_unlock();
1529
1530 if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)) ||
1531 (id != 0 && copy_to_user(prog_ids, &id, sizeof(u32))) ||
1532 copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
1533 ret = -EFAULT;
1534
1535 fdput(f);
1536 return ret;
1537}
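/* Usage sketch (illustrative, not part of this file): BPF_PROG_QUERY against
 * the map fd reports the single program attached for a given attach type, as
 * implemented above; prog_cnt is both the caller-supplied capacity and the
 * returned count.  libbpf exposes this as bpf_prog_query().  The map_fd
 * parameter is an assumption made for the example.
 */
#include <stdio.h>
#include <bpf/bpf.h>

static void query_stream_verdict(int map_fd)
{
	__u32 prog_id = 0, prog_cnt = 1, attach_flags = 0;

	if (!bpf_prog_query(map_fd, BPF_SK_SKB_STREAM_VERDICT, 0,
			    &attach_flags, &prog_id, &prog_cnt) && prog_cnt)
		printf("attached prog id: %u\n", prog_id);
}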
1538
1539static void sock_map_unlink(struct sock *sk, struct sk_psock_link *link)
1540{
1541 switch (link->map->map_type) {
1542 case BPF_MAP_TYPE_SOCKMAP:
1543 return sock_map_delete_from_link(link->map, sk,
1544 link->link_raw);
1545 case BPF_MAP_TYPE_SOCKHASH:
1546 return sock_hash_delete_from_link(link->map, sk,
1547 link->link_raw);
1548 default:
1549 break;
1550 }
1551}
1552
1553static void sock_map_remove_links(struct sock *sk, struct sk_psock *psock)
1554{
1555 struct sk_psock_link *link;
1556
1557 while ((link = sk_psock_link_pop(psock))) {
1558 sock_map_unlink(sk, link);
1559 sk_psock_free_link(link);
1560 }
1561}
1562
1563void sock_map_unhash(struct sock *sk)
1564{
1565 void (*saved_unhash)(struct sock *sk);
1566 struct sk_psock *psock;
1567
1568 rcu_read_lock();
1569 psock = sk_psock(sk);
1570 if (unlikely(!psock)) {
1571 rcu_read_unlock();
1572 saved_unhash = READ_ONCE(sk->sk_prot)->unhash;
1573 } else {
1574 saved_unhash = psock->saved_unhash;
1575 sock_map_remove_links(sk, psock);
1576 rcu_read_unlock();
1577 }
1578 if (WARN_ON_ONCE(saved_unhash == sock_map_unhash))
1579 return;
1580 if (saved_unhash)
1581 saved_unhash(sk);
1582}
1583EXPORT_SYMBOL_GPL(sock_map_unhash);
1584
1585void sock_map_destroy(struct sock *sk)
1586{
1587 void (*saved_destroy)(struct sock *sk);
1588 struct sk_psock *psock;
1589
1590 rcu_read_lock();
1591 psock = sk_psock_get(sk);
1592 if (unlikely(!psock)) {
1593 rcu_read_unlock();
1594 saved_destroy = READ_ONCE(sk->sk_prot)->destroy;
1595 } else {
1596 saved_destroy = psock->saved_destroy;
1597 sock_map_remove_links(sk, psock);
1598 rcu_read_unlock();
1599 sk_psock_stop(psock);
1600 sk_psock_put(sk, psock);
1601 }
1602 if (WARN_ON_ONCE(saved_destroy == sock_map_destroy))
1603 return;
1604 if (saved_destroy)
1605 saved_destroy(sk);
1606}
1607EXPORT_SYMBOL_GPL(sock_map_destroy);
1608
1609void sock_map_close(struct sock *sk, long timeout)
1610{
1611 void (*saved_close)(struct sock *sk, long timeout);
1612 struct sk_psock *psock;
1613
1614 lock_sock(sk);
1615 rcu_read_lock();
1616 psock = sk_psock_get(sk);
1617 if (unlikely(!psock)) {
1618 rcu_read_unlock();
1619 release_sock(sk);
1620 saved_close = READ_ONCE(sk->sk_prot)->close;
1621 } else {
1622 saved_close = psock->saved_close;
1623 sock_map_remove_links(sk, psock);
1624 rcu_read_unlock();
1625 sk_psock_stop(psock);
1626 release_sock(sk);
1627 cancel_work_sync(&psock->work);
1628 sk_psock_put(sk, psock);
1629 }
1630	/* Make sure we do not recurse; recursing here would be a bug.
1631	 * Leak the socket instead of crashing on a stack overflow.
1632	 */
1633 if (WARN_ON_ONCE(saved_close == sock_map_close))
1634 return;
1635 saved_close(sk, timeout);
1636}
1637EXPORT_SYMBOL_GPL(sock_map_close);
1638
1639static int sock_map_iter_attach_target(struct bpf_prog *prog,
1640 union bpf_iter_link_info *linfo,
1641 struct bpf_iter_aux_info *aux)
1642{
1643 struct bpf_map *map;
1644 int err = -EINVAL;
1645
1646 if (!linfo->map.map_fd)
1647 return -EBADF;
1648
1649 map = bpf_map_get_with_uref(linfo->map.map_fd);
1650 if (IS_ERR(map))
1651 return PTR_ERR(map);
1652
1653 if (map->map_type != BPF_MAP_TYPE_SOCKMAP &&
1654 map->map_type != BPF_MAP_TYPE_SOCKHASH)
1655 goto put_map;
1656
1657 if (prog->aux->max_rdonly_access > map->key_size) {
1658 err = -EACCES;
1659 goto put_map;
1660 }
1661
1662 aux->map = map;
1663 return 0;
1664
1665put_map:
1666 bpf_map_put_with_uref(map);
1667 return err;
1668}
1669
1670static void sock_map_iter_detach_target(struct bpf_iter_aux_info *aux)
1671{
1672 bpf_map_put_with_uref(aux->map);
1673}
1674
1675static struct bpf_iter_reg sock_map_iter_reg = {
1676 .target = "sockmap",
1677 .attach_target = sock_map_iter_attach_target,
1678 .detach_target = sock_map_iter_detach_target,
1679 .show_fdinfo = bpf_iter_map_show_fdinfo,
1680 .fill_link_info = bpf_iter_map_fill_link_info,
1681 .ctx_arg_info_size = 2,
1682 .ctx_arg_info = {
1683 { offsetof(struct bpf_iter__sockmap, key),
1684 PTR_TO_BUF | PTR_MAYBE_NULL | MEM_RDONLY },
1685 { offsetof(struct bpf_iter__sockmap, sk),
1686 PTR_TO_BTF_ID_OR_NULL },
1687 },
1688};
1689
1690static int __init bpf_sockmap_iter_init(void)
1691{
1692 sock_map_iter_reg.ctx_arg_info[1].btf_id =
1693 btf_sock_ids[BTF_SOCK_TYPE_SOCK];
1694 return bpf_iter_reg_target(&sock_map_iter_reg);
1695}
1696late_initcall(bpf_sockmap_iter_init);
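/* Usage sketch (illustrative, not part of this file): a sockmap iterator is a
 * BPF program in an "iter/sockmap" section that receives the ctx described by
 * ctx_arg_info above (key and sk are NULL on the final invocation), attached
 * from user space with the map fd in bpf_iter_link_info.  The
 * sock_map_iter_attach_target() check above rejects programs whose read-only
 * key accesses exceed the map's key size.  Names below are assumptions made
 * for the example.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

__u32 elems;	/* e.g. read back by user space after iteration */

SEC("iter/sockmap")
int count_elems(struct bpf_iter__sockmap *ctx)
{
	/* key and sk are only set while there is a current element */
	if (ctx->key && ctx->sk)
		elems++;
	return 0;
}

char _license[] SEC("license") = "GPL";

/* User-space side: attach the iterator program to a specific map. */
#include <bpf/libbpf.h>

static struct bpf_link *attach_sockmap_iter(struct bpf_program *prog, int map_fd)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo = {};

	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	return bpf_program__attach_iter(prog, &opts);
}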