1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
3
4#include <linux/bpf.h>
5#include <linux/btf_ids.h>
6#include <linux/filter.h>
7#include <linux/errno.h>
8#include <linux/file.h>
9#include <linux/net.h>
10#include <linux/workqueue.h>
11#include <linux/skmsg.h>
12#include <linux/list.h>
13#include <linux/jhash.h>
14#include <linux/sock_diag.h>
15#include <net/udp.h>
16
17struct bpf_stab {
18 struct bpf_map map;
19 struct sock **sks;
20 struct sk_psock_progs progs;
21 spinlock_t lock;
22};
23
24#define SOCK_CREATE_FLAG_MASK \
25 (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
26
27static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
28 struct bpf_prog *old, u32 which);
29static struct sk_psock_progs *sock_map_progs(struct bpf_map *map);
30
31static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
32{
33 struct bpf_stab *stab;
34
35 if (attr->max_entries == 0 ||
36 attr->key_size != 4 ||
37 (attr->value_size != sizeof(u32) &&
38 attr->value_size != sizeof(u64)) ||
39 attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
40 return ERR_PTR(-EINVAL);
41
42 stab = bpf_map_area_alloc(sizeof(*stab), NUMA_NO_NODE);
43 if (!stab)
44 return ERR_PTR(-ENOMEM);
45
46 bpf_map_init_from_attr(&stab->map, attr);
47 spin_lock_init(&stab->lock);
48
49 stab->sks = bpf_map_area_alloc((u64) stab->map.max_entries *
50 sizeof(struct sock *),
51 stab->map.numa_node);
52 if (!stab->sks) {
53 bpf_map_area_free(stab);
54 return ERR_PTR(-ENOMEM);
55 }
56
57 return &stab->map;
58}
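/* Illustrative sketch, not part of this file: given the checks in
 * sock_map_alloc() above, a matching map could be created from user space
 * with a recent libbpf roughly as
 *
 *	int map_fd = bpf_map_create(BPF_MAP_TYPE_SOCKMAP, "sock_map",
 *				    sizeof(__u32), sizeof(__u64), 1024, NULL);
 *
 * key_size must be 4; value_size is 4 or 8 (a socket FD on update, a
 * socket cookie on syscall-side lookup). The map name and size here are
 * made up for the example.
 */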
59
60int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
61{
62 u32 ufd = attr->target_fd;
63 struct bpf_map *map;
64 struct fd f;
65 int ret;
66
67 if (attr->attach_flags || attr->replace_bpf_fd)
68 return -EINVAL;
69
70 f = fdget(ufd);
71 map = __bpf_map_get(f);
72 if (IS_ERR(map))
73 return PTR_ERR(map);
74 ret = sock_map_prog_update(map, prog, NULL, attr->attach_type);
75 fdput(f);
76 return ret;
77}
78
79int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
80{
81 u32 ufd = attr->target_fd;
82 struct bpf_prog *prog;
83 struct bpf_map *map;
84 struct fd f;
85 int ret;
86
87 if (attr->attach_flags || attr->replace_bpf_fd)
88 return -EINVAL;
89
90 f = fdget(ufd);
91 map = __bpf_map_get(f);
92 if (IS_ERR(map))
93 return PTR_ERR(map);
94
95 prog = bpf_prog_get(attr->attach_bpf_fd);
96 if (IS_ERR(prog)) {
97 ret = PTR_ERR(prog);
98 goto put_map;
99 }
100
101 if (prog->type != ptype) {
102 ret = -EINVAL;
103 goto put_prog;
104 }
105
106 ret = sock_map_prog_update(map, NULL, prog, attr->attach_type);
107put_prog:
108 bpf_prog_put(prog);
109put_map:
110 fdput(f);
111 return ret;
112}
113
114static void sock_map_sk_acquire(struct sock *sk)
115 __acquires(&sk->sk_lock.slock)
116{
117 lock_sock(sk);
118 rcu_read_lock();
119}
120
121static void sock_map_sk_release(struct sock *sk)
122 __releases(&sk->sk_lock.slock)
123{
124 rcu_read_unlock();
125 release_sock(sk);
126}
127
128static void sock_map_add_link(struct sk_psock *psock,
129 struct sk_psock_link *link,
130 struct bpf_map *map, void *link_raw)
131{
132 link->link_raw = link_raw;
133 link->map = map;
134 spin_lock_bh(&psock->link_lock);
135 list_add_tail(&link->list, &psock->link);
136 spin_unlock_bh(&psock->link_lock);
137}
138
139static void sock_map_del_link(struct sock *sk,
140 struct sk_psock *psock, void *link_raw)
141{
142 bool strp_stop = false, verdict_stop = false;
143 struct sk_psock_link *link, *tmp;
144
145 spin_lock_bh(&psock->link_lock);
146 list_for_each_entry_safe(link, tmp, &psock->link, list) {
147 if (link->link_raw == link_raw) {
148 struct bpf_map *map = link->map;
149 struct sk_psock_progs *progs = sock_map_progs(map);
150
151 if (psock->saved_data_ready && progs->stream_parser)
152 strp_stop = true;
153 if (psock->saved_data_ready && progs->stream_verdict)
154 verdict_stop = true;
155 if (psock->saved_data_ready && progs->skb_verdict)
156 verdict_stop = true;
157 list_del(&link->list);
158 sk_psock_free_link(link);
159 }
160 }
161 spin_unlock_bh(&psock->link_lock);
162 if (strp_stop || verdict_stop) {
163 write_lock_bh(&sk->sk_callback_lock);
164 if (strp_stop)
165 sk_psock_stop_strp(sk, psock);
166 if (verdict_stop)
167 sk_psock_stop_verdict(sk, psock);
168
169 if (psock->psock_update_sk_prot)
170 psock->psock_update_sk_prot(sk, psock, false);
171 write_unlock_bh(&sk->sk_callback_lock);
172 }
173}
174
175static void sock_map_unref(struct sock *sk, void *link_raw)
176{
177 struct sk_psock *psock = sk_psock(sk);
178
179 if (likely(psock)) {
180 sock_map_del_link(sk, psock, link_raw);
181 sk_psock_put(sk, psock);
182 }
183}
184
185static int sock_map_init_proto(struct sock *sk, struct sk_psock *psock)
186{
187 if (!sk->sk_prot->psock_update_sk_prot)
188 return -EINVAL;
189 psock->psock_update_sk_prot = sk->sk_prot->psock_update_sk_prot;
190 return sk->sk_prot->psock_update_sk_prot(sk, psock, false);
191}
192
193static struct sk_psock *sock_map_psock_get_checked(struct sock *sk)
194{
195 struct sk_psock *psock;
196
197 rcu_read_lock();
198 psock = sk_psock(sk);
199 if (psock) {
200 if (sk->sk_prot->close != sock_map_close) {
201 psock = ERR_PTR(-EBUSY);
202 goto out;
203 }
204
205 if (!refcount_inc_not_zero(&psock->refcnt))
206 psock = ERR_PTR(-EBUSY);
207 }
208out:
209 rcu_read_unlock();
210 return psock;
211}
212
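/* sock_map_link() takes a reference on each program currently attached to
 * the map, creates or reuses the socket's psock, installs the programs on
 * it, switches the socket to the psock-aware proto callbacks and, depending
 * on which programs are present, arms either the strparser or the verdict
 * data_ready path. On failure the program references are released, either
 * directly or through the psock destructor once they have been installed.
 */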
213static int sock_map_link(struct bpf_map *map, struct sock *sk)
214{
215 struct sk_psock_progs *progs = sock_map_progs(map);
216 struct bpf_prog *stream_verdict = NULL;
217 struct bpf_prog *stream_parser = NULL;
218 struct bpf_prog *skb_verdict = NULL;
219 struct bpf_prog *msg_parser = NULL;
220 struct sk_psock *psock;
221 int ret;
222
223 stream_verdict = READ_ONCE(progs->stream_verdict);
224 if (stream_verdict) {
225 stream_verdict = bpf_prog_inc_not_zero(stream_verdict);
226 if (IS_ERR(stream_verdict))
227 return PTR_ERR(stream_verdict);
228 }
229
230 stream_parser = READ_ONCE(progs->stream_parser);
231 if (stream_parser) {
232 stream_parser = bpf_prog_inc_not_zero(stream_parser);
233 if (IS_ERR(stream_parser)) {
234 ret = PTR_ERR(stream_parser);
235 goto out_put_stream_verdict;
236 }
237 }
238
239 msg_parser = READ_ONCE(progs->msg_parser);
240 if (msg_parser) {
241 msg_parser = bpf_prog_inc_not_zero(msg_parser);
242 if (IS_ERR(msg_parser)) {
243 ret = PTR_ERR(msg_parser);
244 goto out_put_stream_parser;
245 }
246 }
247
248 skb_verdict = READ_ONCE(progs->skb_verdict);
249 if (skb_verdict) {
250 skb_verdict = bpf_prog_inc_not_zero(skb_verdict);
251 if (IS_ERR(skb_verdict)) {
252 ret = PTR_ERR(skb_verdict);
253 goto out_put_msg_parser;
254 }
255 }
256
257 psock = sock_map_psock_get_checked(sk);
258 if (IS_ERR(psock)) {
259 ret = PTR_ERR(psock);
260 goto out_progs;
261 }
262
263 if (psock) {
264 if ((msg_parser && READ_ONCE(psock->progs.msg_parser)) ||
265 (stream_parser && READ_ONCE(psock->progs.stream_parser)) ||
266 (skb_verdict && READ_ONCE(psock->progs.skb_verdict)) ||
267 (skb_verdict && READ_ONCE(psock->progs.stream_verdict)) ||
268 (stream_verdict && READ_ONCE(psock->progs.skb_verdict)) ||
269 (stream_verdict && READ_ONCE(psock->progs.stream_verdict))) {
270 sk_psock_put(sk, psock);
271 ret = -EBUSY;
272 goto out_progs;
273 }
274 } else {
275 psock = sk_psock_init(sk, map->numa_node);
276 if (IS_ERR(psock)) {
277 ret = PTR_ERR(psock);
278 goto out_progs;
279 }
280 }
281
282 if (msg_parser)
283 psock_set_prog(&psock->progs.msg_parser, msg_parser);
284 if (stream_parser)
285 psock_set_prog(&psock->progs.stream_parser, stream_parser);
286 if (stream_verdict)
287 psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
288 if (skb_verdict)
289 psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
290
291 /* msg_* and stream_* program references are tracked in psock after this
292 * point. Reference drop and cleanup occur through the psock destructor.
293 */
294 ret = sock_map_init_proto(sk, psock);
295 if (ret < 0) {
296 sk_psock_put(sk, psock);
297 goto out;
298 }
299
300 write_lock_bh(&sk->sk_callback_lock);
301 if (stream_parser && stream_verdict && !psock->saved_data_ready) {
302 ret = sk_psock_init_strp(sk, psock);
303 if (ret) {
304 write_unlock_bh(&sk->sk_callback_lock);
305 sk_psock_put(sk, psock);
306 goto out;
307 }
308 sk_psock_start_strp(sk, psock);
309 } else if (!stream_parser && stream_verdict && !psock->saved_data_ready) {
310 sk_psock_start_verdict(sk, psock);
311 } else if (!stream_verdict && skb_verdict && !psock->saved_data_ready) {
312 sk_psock_start_verdict(sk, psock);
313 }
314 write_unlock_bh(&sk->sk_callback_lock);
315 return 0;
316out_progs:
317 if (skb_verdict)
318 bpf_prog_put(skb_verdict);
319out_put_msg_parser:
320 if (msg_parser)
321 bpf_prog_put(msg_parser);
322out_put_stream_parser:
323 if (stream_parser)
324 bpf_prog_put(stream_parser);
325out_put_stream_verdict:
326 if (stream_verdict)
327 bpf_prog_put(stream_verdict);
328out:
329 return ret;
330}
331
332static void sock_map_free(struct bpf_map *map)
333{
334 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
335 int i;
336
337 /* After the sync no updates or deletes will be in-flight, so it
338 * is safe to walk the map and remove entries without risking a race
339 * in the EEXIST update case.
340 */
341 synchronize_rcu();
342 for (i = 0; i < stab->map.max_entries; i++) {
343 struct sock **psk = &stab->sks[i];
344 struct sock *sk;
345
346 sk = xchg(psk, NULL);
347 if (sk) {
348 sock_hold(sk);
349 lock_sock(sk);
350 rcu_read_lock();
351 sock_map_unref(sk, psk);
352 rcu_read_unlock();
353 release_sock(sk);
354 sock_put(sk);
355 }
356 }
357
358 /* wait for psock readers accessing its map link */
359 synchronize_rcu();
360
361 bpf_map_area_free(stab->sks);
362 bpf_map_area_free(stab);
363}
364
365static void sock_map_release_progs(struct bpf_map *map)
366{
367 psock_progs_drop(&container_of(map, struct bpf_stab, map)->progs);
368}
369
370static struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
371{
372 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
373
374 WARN_ON_ONCE(!rcu_read_lock_held());
375
376 if (unlikely(key >= map->max_entries))
377 return NULL;
378 return READ_ONCE(stab->sks[key]);
379}
380
381static void *sock_map_lookup(struct bpf_map *map, void *key)
382{
383 struct sock *sk;
384
385 sk = __sock_map_lookup_elem(map, *(u32 *)key);
386 if (!sk)
387 return NULL;
388 if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
389 return NULL;
390 return sk;
391}
392
393static void *sock_map_lookup_sys(struct bpf_map *map, void *key)
394{
395 struct sock *sk;
396
397 if (map->value_size != sizeof(u64))
398 return ERR_PTR(-ENOSPC);
399
400 sk = __sock_map_lookup_elem(map, *(u32 *)key);
401 if (!sk)
402 return ERR_PTR(-ENOENT);
403
404 __sock_gen_cookie(sk);
405 return &sk->sk_cookie;
406}
407
408static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
409 struct sock **psk)
410{
411 struct sock *sk;
412 int err = 0;
413
414 if (irqs_disabled())
415 return -EOPNOTSUPP; /* locks here are hardirq-unsafe */
416
417 spin_lock_bh(&stab->lock);
418 sk = *psk;
419 if (!sk_test || sk_test == sk)
420 sk = xchg(psk, NULL);
421
422 if (likely(sk))
423 sock_map_unref(sk, psk);
424 else
425 err = -EINVAL;
426
427 spin_unlock_bh(&stab->lock);
428 return err;
429}
430
431static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk,
432 void *link_raw)
433{
434 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
435
436 __sock_map_delete(stab, sk, link_raw);
437}
438
439static long sock_map_delete_elem(struct bpf_map *map, void *key)
440{
441 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
442 u32 i = *(u32 *)key;
443 struct sock **psk;
444
445 if (unlikely(i >= map->max_entries))
446 return -EINVAL;
447
448 psk = &stab->sks[i];
449 return __sock_map_delete(stab, NULL, psk);
450}
451
452static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next)
453{
454 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
455 u32 i = key ? *(u32 *)key : U32_MAX;
456 u32 *key_next = next;
457
458 if (i == stab->map.max_entries - 1)
459 return -ENOENT;
460 if (i >= stab->map.max_entries)
461 *key_next = 0;
462 else
463 *key_next = i + 1;
464 return 0;
465}
466
467static int sock_map_update_common(struct bpf_map *map, u32 idx,
468 struct sock *sk, u64 flags)
469{
470 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
471 struct sk_psock_link *link;
472 struct sk_psock *psock;
473 struct sock *osk;
474 int ret;
475
476 WARN_ON_ONCE(!rcu_read_lock_held());
477 if (unlikely(flags > BPF_EXIST))
478 return -EINVAL;
479 if (unlikely(idx >= map->max_entries))
480 return -E2BIG;
481
482 link = sk_psock_init_link();
483 if (!link)
484 return -ENOMEM;
485
486 ret = sock_map_link(map, sk);
487 if (ret < 0)
488 goto out_free;
489
490 psock = sk_psock(sk);
491 WARN_ON_ONCE(!psock);
492
493 spin_lock_bh(&stab->lock);
494 osk = stab->sks[idx];
495 if (osk && flags == BPF_NOEXIST) {
496 ret = -EEXIST;
497 goto out_unlock;
498 } else if (!osk && flags == BPF_EXIST) {
499 ret = -ENOENT;
500 goto out_unlock;
501 }
502
503 sock_map_add_link(psock, link, map, &stab->sks[idx]);
504 stab->sks[idx] = sk;
505 if (osk)
506 sock_map_unref(osk, &stab->sks[idx]);
507 spin_unlock_bh(&stab->lock);
508 return 0;
509out_unlock:
510 spin_unlock_bh(&stab->lock);
511 if (psock)
512 sk_psock_put(sk, psock);
513out_free:
514 sk_psock_free_link(link);
515 return ret;
516}
517
518static bool sock_map_op_okay(const struct bpf_sock_ops_kern *ops)
519{
520 return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB ||
521 ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
522 ops->op == BPF_SOCK_OPS_TCP_LISTEN_CB;
523}
524
525static bool sock_map_redirect_allowed(const struct sock *sk)
526{
527 if (sk_is_tcp(sk))
528 return sk->sk_state != TCP_LISTEN;
529 else
530 return sk->sk_state == TCP_ESTABLISHED;
531}
532
533static bool sock_map_sk_is_suitable(const struct sock *sk)
534{
535 return !!sk->sk_prot->psock_update_sk_prot;
536}
537
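/* TCP sockets may only be added while ESTABLISHED or LISTEN, and stream
 * unix sockets only while connected (ESTABLISHED); other protocols such as
 * UDP are not restricted by state here.
 */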
538static bool sock_map_sk_state_allowed(const struct sock *sk)
539{
540 if (sk_is_tcp(sk))
541 return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN);
542 if (sk_is_stream_unix(sk))
543 return (1 << sk->sk_state) & TCPF_ESTABLISHED;
544 return true;
545}
546
547static int sock_hash_update_common(struct bpf_map *map, void *key,
548 struct sock *sk, u64 flags);
549
550int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
551 u64 flags)
552{
553 struct socket *sock;
554 struct sock *sk;
555 int ret;
556 u64 ufd;
557
558 if (map->value_size == sizeof(u64))
559 ufd = *(u64 *)value;
560 else
561 ufd = *(u32 *)value;
562 if (ufd > S32_MAX)
563 return -EINVAL;
564
565 sock = sockfd_lookup(ufd, &ret);
566 if (!sock)
567 return ret;
568 sk = sock->sk;
569 if (!sk) {
570 ret = -EINVAL;
571 goto out;
572 }
573 if (!sock_map_sk_is_suitable(sk)) {
574 ret = -EOPNOTSUPP;
575 goto out;
576 }
577
578 sock_map_sk_acquire(sk);
579 if (!sock_map_sk_state_allowed(sk))
580 ret = -EOPNOTSUPP;
581 else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
582 ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
583 else
584 ret = sock_hash_update_common(map, key, sk, flags);
585 sock_map_sk_release(sk);
586out:
587 sockfd_put(sock);
588 return ret;
589}
590
591static long sock_map_update_elem(struct bpf_map *map, void *key,
592 void *value, u64 flags)
593{
594 struct sock *sk = (struct sock *)value;
595 int ret;
596
597 if (unlikely(!sk || !sk_fullsock(sk)))
598 return -EINVAL;
599
600 if (!sock_map_sk_is_suitable(sk))
601 return -EOPNOTSUPP;
602
603 local_bh_disable();
604 bh_lock_sock(sk);
605 if (!sock_map_sk_state_allowed(sk))
606 ret = -EOPNOTSUPP;
607 else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
608 ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
609 else
610 ret = sock_hash_update_common(map, key, sk, flags);
611 bh_unlock_sock(sk);
612 local_bh_enable();
613 return ret;
614}
615
616BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, sops,
617 struct bpf_map *, map, void *, key, u64, flags)
618{
619 WARN_ON_ONCE(!rcu_read_lock_held());
620
621 if (likely(sock_map_sk_is_suitable(sops->sk) &&
622 sock_map_op_okay(sops)))
623 return sock_map_update_common(map, *(u32 *)key, sops->sk,
624 flags);
625 return -EOPNOTSUPP;
626}
627
628const struct bpf_func_proto bpf_sock_map_update_proto = {
629 .func = bpf_sock_map_update,
630 .gpl_only = false,
631 .pkt_access = true,
632 .ret_type = RET_INTEGER,
633 .arg1_type = ARG_PTR_TO_CTX,
634 .arg2_type = ARG_CONST_MAP_PTR,
635 .arg3_type = ARG_PTR_TO_MAP_KEY,
636 .arg4_type = ARG_ANYTHING,
637};
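/* Illustrative sketch, not part of this file: a sockops program (built
 * separately with clang/libbpf) would call this helper roughly as
 *
 *	SEC("sockops")
 *	int add_established(struct bpf_sock_ops *skops)
 *	{
 *		__u32 key = 0;
 *
 *		switch (skops->op) {
 *		case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
 *		case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
 *			bpf_sock_map_update(skops, &sock_map, &key, BPF_ANY);
 *		}
 *		return 0;
 *	}
 *
 * where "sock_map" is a BPF_MAP_TYPE_SOCKMAP declared in that program; the
 * names and key are made up for the example. Only the ops accepted by
 * sock_map_op_okay() will succeed.
 */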
638
639BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
640 struct bpf_map *, map, u32, key, u64, flags)
641{
642 struct sock *sk;
643
644 if (unlikely(flags & ~(BPF_F_INGRESS)))
645 return SK_DROP;
646
647 sk = __sock_map_lookup_elem(map, key);
648 if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
649 return SK_DROP;
650
651 skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
652 return SK_PASS;
653}
654
655const struct bpf_func_proto bpf_sk_redirect_map_proto = {
656 .func = bpf_sk_redirect_map,
657 .gpl_only = false,
658 .ret_type = RET_INTEGER,
659 .arg1_type = ARG_PTR_TO_CTX,
660 .arg2_type = ARG_CONST_MAP_PTR,
661 .arg3_type = ARG_ANYTHING,
662 .arg4_type = ARG_ANYTHING,
663};
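/* Illustrative sketch, not part of this file: a stream verdict program on
 * the receive path would typically end with
 *
 *	SEC("sk_skb/stream_verdict")
 *	int verdict(struct __sk_buff *skb)
 *	{
 *		return bpf_sk_redirect_map(skb, &sock_map, 0, BPF_F_INGRESS);
 *	}
 *
 * redirecting the skb to the socket stored at key 0 (map name and key are
 * chosen only for the example).
 */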
664
665BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
666 struct bpf_map *, map, u32, key, u64, flags)
667{
668 struct sock *sk;
669
670 if (unlikely(flags & ~(BPF_F_INGRESS)))
671 return SK_DROP;
672
673 sk = __sock_map_lookup_elem(map, key);
674 if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
675 return SK_DROP;
676 if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
677 return SK_DROP;
678
679 msg->flags = flags;
680 msg->sk_redir = sk;
681 return SK_PASS;
682}
683
684const struct bpf_func_proto bpf_msg_redirect_map_proto = {
685 .func = bpf_msg_redirect_map,
686 .gpl_only = false,
687 .ret_type = RET_INTEGER,
688 .arg1_type = ARG_PTR_TO_CTX,
689 .arg2_type = ARG_CONST_MAP_PTR,
690 .arg3_type = ARG_ANYTHING,
691 .arg4_type = ARG_ANYTHING,
692};
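/* Illustrative sketch, not part of this file: the sendmsg-path counterpart
 * is an sk_msg program, e.g.
 *
 *	SEC("sk_msg")
 *	int msg_redir(struct sk_msg_md *msg)
 *	{
 *		return bpf_msg_redirect_map(msg, &sock_map, 0, 0);
 *	}
 *
 * subject to the checks above: only BPF_F_INGRESS may be set, and an
 * egress redirect is limited to TCP sockets.
 */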
693
694struct sock_map_seq_info {
695 struct bpf_map *map;
696 struct sock *sk;
697 u32 index;
698};
699
700struct bpf_iter__sockmap {
701 __bpf_md_ptr(struct bpf_iter_meta *, meta);
702 __bpf_md_ptr(struct bpf_map *, map);
703 __bpf_md_ptr(void *, key);
704 __bpf_md_ptr(struct sock *, sk);
705};
706
707DEFINE_BPF_ITER_FUNC(sockmap, struct bpf_iter_meta *meta,
708 struct bpf_map *map, void *key,
709 struct sock *sk)
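/* Illustrative sketch, not part of this file: an iterator program attached
 * to this target receives the context above, roughly
 *
 *	SEC("iter/sockmap")
 *	int dump(struct bpf_iter__sockmap *ctx)
 *	{
 *		struct sock *sk = ctx->sk;
 *		__u32 *key = ctx->key;
 *
 *		if (!sk || !key)
 *			return 0;
 *		...
 *	}
 *
 * key and sk are NULL on the final call (see the seq_show/seq_stop handlers
 * below), so programs must check them before use.
 */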
710
711static void *sock_map_seq_lookup_elem(struct sock_map_seq_info *info)
712{
713 if (unlikely(info->index >= info->map->max_entries))
714 return NULL;
715
716 info->sk = __sock_map_lookup_elem(info->map, info->index);
717
718 /* can't return sk directly, since that might be NULL */
719 return info;
720}
721
722static void *sock_map_seq_start(struct seq_file *seq, loff_t *pos)
723 __acquires(rcu)
724{
725 struct sock_map_seq_info *info = seq->private;
726
727 if (*pos == 0)
728 ++*pos;
729
730 /* pairs with sock_map_seq_stop */
731 rcu_read_lock();
732 return sock_map_seq_lookup_elem(info);
733}
734
735static void *sock_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
736 __must_hold(rcu)
737{
738 struct sock_map_seq_info *info = seq->private;
739
740 ++*pos;
741 ++info->index;
742
743 return sock_map_seq_lookup_elem(info);
744}
745
746static int sock_map_seq_show(struct seq_file *seq, void *v)
747 __must_hold(rcu)
748{
749 struct sock_map_seq_info *info = seq->private;
750 struct bpf_iter__sockmap ctx = {};
751 struct bpf_iter_meta meta;
752 struct bpf_prog *prog;
753
754 meta.seq = seq;
755 prog = bpf_iter_get_info(&meta, !v);
756 if (!prog)
757 return 0;
758
759 ctx.meta = &meta;
760 ctx.map = info->map;
761 if (v) {
762 ctx.key = &info->index;
763 ctx.sk = info->sk;
764 }
765
766 return bpf_iter_run_prog(prog, &ctx);
767}
768
769static void sock_map_seq_stop(struct seq_file *seq, void *v)
770 __releases(rcu)
771{
772 if (!v)
773 (void)sock_map_seq_show(seq, NULL);
774
775 /* pairs with sock_map_seq_start */
776 rcu_read_unlock();
777}
778
779static const struct seq_operations sock_map_seq_ops = {
780 .start = sock_map_seq_start,
781 .next = sock_map_seq_next,
782 .stop = sock_map_seq_stop,
783 .show = sock_map_seq_show,
784};
785
786static int sock_map_init_seq_private(void *priv_data,
787 struct bpf_iter_aux_info *aux)
788{
789 struct sock_map_seq_info *info = priv_data;
790
791 bpf_map_inc_with_uref(aux->map);
792 info->map = aux->map;
793 return 0;
794}
795
796static void sock_map_fini_seq_private(void *priv_data)
797{
798 struct sock_map_seq_info *info = priv_data;
799
800 bpf_map_put_with_uref(info->map);
801}
802
803static u64 sock_map_mem_usage(const struct bpf_map *map)
804{
805 u64 usage = sizeof(struct bpf_stab);
806
807 usage += (u64)map->max_entries * sizeof(struct sock *);
808 return usage;
809}
810
811static const struct bpf_iter_seq_info sock_map_iter_seq_info = {
812 .seq_ops = &sock_map_seq_ops,
813 .init_seq_private = sock_map_init_seq_private,
814 .fini_seq_private = sock_map_fini_seq_private,
815 .seq_priv_size = sizeof(struct sock_map_seq_info),
816};
817
818BTF_ID_LIST_SINGLE(sock_map_btf_ids, struct, bpf_stab)
819const struct bpf_map_ops sock_map_ops = {
820 .map_meta_equal = bpf_map_meta_equal,
821 .map_alloc = sock_map_alloc,
822 .map_free = sock_map_free,
823 .map_get_next_key = sock_map_get_next_key,
824 .map_lookup_elem_sys_only = sock_map_lookup_sys,
825 .map_update_elem = sock_map_update_elem,
826 .map_delete_elem = sock_map_delete_elem,
827 .map_lookup_elem = sock_map_lookup,
828 .map_release_uref = sock_map_release_progs,
829 .map_check_btf = map_check_no_btf,
830 .map_mem_usage = sock_map_mem_usage,
831 .map_btf_id = &sock_map_btf_ids[0],
832 .iter_seq_info = &sock_map_iter_seq_info,
833};
834
835struct bpf_shtab_elem {
836 struct rcu_head rcu;
837 u32 hash;
838 struct sock *sk;
839 struct hlist_node node;
840 u8 key[];
841};
842
843struct bpf_shtab_bucket {
844 struct hlist_head head;
845 spinlock_t lock;
846};
847
848struct bpf_shtab {
849 struct bpf_map map;
850 struct bpf_shtab_bucket *buckets;
851 u32 buckets_num;
852 u32 elem_size;
853 struct sk_psock_progs progs;
854 atomic_t count;
855};
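/* buckets_num is the map's max_entries rounded up to a power of two, so a
 * bucket can be selected by masking the jhash of the key; elem_size covers
 * the element header plus the key rounded up to 8 bytes (see
 * sock_hash_alloc() below).
 */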
856
857static inline u32 sock_hash_bucket_hash(const void *key, u32 len)
858{
859 return jhash(key, len, 0);
860}
861
862static struct bpf_shtab_bucket *sock_hash_select_bucket(struct bpf_shtab *htab,
863 u32 hash)
864{
865 return &htab->buckets[hash & (htab->buckets_num - 1)];
866}
867
868static struct bpf_shtab_elem *
869sock_hash_lookup_elem_raw(struct hlist_head *head, u32 hash, void *key,
870 u32 key_size)
871{
872 struct bpf_shtab_elem *elem;
873
874 hlist_for_each_entry_rcu(elem, head, node) {
875 if (elem->hash == hash &&
876 !memcmp(&elem->key, key, key_size))
877 return elem;
878 }
879
880 return NULL;
881}
882
883static struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
884{
885 struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
886 u32 key_size = map->key_size, hash;
887 struct bpf_shtab_bucket *bucket;
888 struct bpf_shtab_elem *elem;
889
890 WARN_ON_ONCE(!rcu_read_lock_held());
891
892 hash = sock_hash_bucket_hash(key, key_size);
893 bucket = sock_hash_select_bucket(htab, hash);
894 elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
895
896 return elem ? elem->sk : NULL;
897}
898
899static void sock_hash_free_elem(struct bpf_shtab *htab,
900 struct bpf_shtab_elem *elem)
901{
902 atomic_dec(&htab->count);
903 kfree_rcu(elem, rcu);
904}
905
906static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
907 void *link_raw)
908{
909 struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
910 struct bpf_shtab_elem *elem_probe, *elem = link_raw;
911 struct bpf_shtab_bucket *bucket;
912
913 WARN_ON_ONCE(!rcu_read_lock_held());
914 bucket = sock_hash_select_bucket(htab, elem->hash);
915
916 /* elem may be deleted in parallel from the map, but access here
917 * is okay since it's going away only after RCU grace period.
918 * However, we need to check whether it's still present.
919 */
920 spin_lock_bh(&bucket->lock);
921 elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash,
922 elem->key, map->key_size);
923 if (elem_probe && elem_probe == elem) {
924 hlist_del_rcu(&elem->node);
925 sock_map_unref(elem->sk, elem);
926 sock_hash_free_elem(htab, elem);
927 }
928 spin_unlock_bh(&bucket->lock);
929}
930
931static long sock_hash_delete_elem(struct bpf_map *map, void *key)
932{
933 struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
934 u32 hash, key_size = map->key_size;
935 struct bpf_shtab_bucket *bucket;
936 struct bpf_shtab_elem *elem;
937 int ret = -ENOENT;
938
939 if (irqs_disabled())
940 return -EOPNOTSUPP; /* locks here are hardirq-unsafe */
941
942 hash = sock_hash_bucket_hash(key, key_size);
943 bucket = sock_hash_select_bucket(htab, hash);
944
945 spin_lock_bh(&bucket->lock);
946 elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
947 if (elem) {
948 hlist_del_rcu(&elem->node);
949 sock_map_unref(elem->sk, elem);
950 sock_hash_free_elem(htab, elem);
951 ret = 0;
952 }
953 spin_unlock_bh(&bucket->lock);
954 return ret;
955}
956
957static struct bpf_shtab_elem *sock_hash_alloc_elem(struct bpf_shtab *htab,
958 void *key, u32 key_size,
959 u32 hash, struct sock *sk,
960 struct bpf_shtab_elem *old)
961{
962 struct bpf_shtab_elem *new;
963
964 if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
965 if (!old) {
966 atomic_dec(&htab->count);
967 return ERR_PTR(-E2BIG);
968 }
969 }
970
971 new = bpf_map_kmalloc_node(&htab->map, htab->elem_size,
972 GFP_ATOMIC | __GFP_NOWARN,
973 htab->map.numa_node);
974 if (!new) {
975 atomic_dec(&htab->count);
976 return ERR_PTR(-ENOMEM);
977 }
978 memcpy(new->key, key, key_size);
979 new->sk = sk;
980 new->hash = hash;
981 return new;
982}
983
984static int sock_hash_update_common(struct bpf_map *map, void *key,
985 struct sock *sk, u64 flags)
986{
987 struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
988 u32 key_size = map->key_size, hash;
989 struct bpf_shtab_elem *elem, *elem_new;
990 struct bpf_shtab_bucket *bucket;
991 struct sk_psock_link *link;
992 struct sk_psock *psock;
993 int ret;
994
995 WARN_ON_ONCE(!rcu_read_lock_held());
996 if (unlikely(flags > BPF_EXIST))
997 return -EINVAL;
998
999 link = sk_psock_init_link();
1000 if (!link)
1001 return -ENOMEM;
1002
1003 ret = sock_map_link(map, sk);
1004 if (ret < 0)
1005 goto out_free;
1006
1007 psock = sk_psock(sk);
1008 WARN_ON_ONCE(!psock);
1009
1010 hash = sock_hash_bucket_hash(key, key_size);
1011 bucket = sock_hash_select_bucket(htab, hash);
1012
1013 spin_lock_bh(&bucket->lock);
1014 elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
1015 if (elem && flags == BPF_NOEXIST) {
1016 ret = -EEXIST;
1017 goto out_unlock;
1018 } else if (!elem && flags == BPF_EXIST) {
1019 ret = -ENOENT;
1020 goto out_unlock;
1021 }
1022
1023 elem_new = sock_hash_alloc_elem(htab, key, key_size, hash, sk, elem);
1024 if (IS_ERR(elem_new)) {
1025 ret = PTR_ERR(elem_new);
1026 goto out_unlock;
1027 }
1028
1029 sock_map_add_link(psock, link, map, elem_new);
1030 /* Add new element to the head of the list, so that
1031 * concurrent search will find it before old elem.
1032 */
1033 hlist_add_head_rcu(&elem_new->node, &bucket->head);
1034 if (elem) {
1035 hlist_del_rcu(&elem->node);
1036 sock_map_unref(elem->sk, elem);
1037 sock_hash_free_elem(htab, elem);
1038 }
1039 spin_unlock_bh(&bucket->lock);
1040 return 0;
1041out_unlock:
1042 spin_unlock_bh(&bucket->lock);
1043 sk_psock_put(sk, psock);
1044out_free:
1045 sk_psock_free_link(link);
1046 return ret;
1047}
1048
1049static int sock_hash_get_next_key(struct bpf_map *map, void *key,
1050 void *key_next)
1051{
1052 struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
1053 struct bpf_shtab_elem *elem, *elem_next;
1054 u32 hash, key_size = map->key_size;
1055 struct hlist_head *head;
1056 int i = 0;
1057
1058 if (!key)
1059 goto find_first_elem;
1060 hash = sock_hash_bucket_hash(key, key_size);
1061 head = &sock_hash_select_bucket(htab, hash)->head;
1062 elem = sock_hash_lookup_elem_raw(head, hash, key, key_size);
1063 if (!elem)
1064 goto find_first_elem;
1065
1066 elem_next = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&elem->node)),
1067 struct bpf_shtab_elem, node);
1068 if (elem_next) {
1069 memcpy(key_next, elem_next->key, key_size);
1070 return 0;
1071 }
1072
1073 i = hash & (htab->buckets_num - 1);
1074 i++;
1075find_first_elem:
1076 for (; i < htab->buckets_num; i++) {
1077 head = &sock_hash_select_bucket(htab, i)->head;
1078 elem_next = hlist_entry_safe(rcu_dereference(hlist_first_rcu(head)),
1079 struct bpf_shtab_elem, node);
1080 if (elem_next) {
1081 memcpy(key_next, elem_next->key, key_size);
1082 return 0;
1083 }
1084 }
1085
1086 return -ENOENT;
1087}
1088
1089static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
1090{
1091 struct bpf_shtab *htab;
1092 int i, err;
1093
1094 if (attr->max_entries == 0 ||
1095 attr->key_size == 0 ||
1096 (attr->value_size != sizeof(u32) &&
1097 attr->value_size != sizeof(u64)) ||
1098 attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
1099 return ERR_PTR(-EINVAL);
1100 if (attr->key_size > MAX_BPF_STACK)
1101 return ERR_PTR(-E2BIG);
1102
1103 htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE);
1104 if (!htab)
1105 return ERR_PTR(-ENOMEM);
1106
1107 bpf_map_init_from_attr(&htab->map, attr);
1108
1109 htab->buckets_num = roundup_pow_of_two(htab->map.max_entries);
1110 htab->elem_size = sizeof(struct bpf_shtab_elem) +
1111 round_up(htab->map.key_size, 8);
1112 if (htab->buckets_num == 0 ||
1113 htab->buckets_num > U32_MAX / sizeof(struct bpf_shtab_bucket)) {
1114 err = -EINVAL;
1115 goto free_htab;
1116 }
1117
1118 htab->buckets = bpf_map_area_alloc(htab->buckets_num *
1119 sizeof(struct bpf_shtab_bucket),
1120 htab->map.numa_node);
1121 if (!htab->buckets) {
1122 err = -ENOMEM;
1123 goto free_htab;
1124 }
1125
1126 for (i = 0; i < htab->buckets_num; i++) {
1127 INIT_HLIST_HEAD(&htab->buckets[i].head);
1128 spin_lock_init(&htab->buckets[i].lock);
1129 }
1130
1131 return &htab->map;
1132free_htab:
1133 bpf_map_area_free(htab);
1134 return ERR_PTR(err);
1135}
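/* Illustrative sketch, not part of this file: unlike sockmap, sockhash
 * accepts an arbitrary key of up to MAX_BPF_STACK bytes, e.g. keying
 * sockets by a (made-up) address tuple:
 *
 *	struct tuple_key {
 *		__be32 saddr, daddr;
 *		__be16 sport, dport;
 *	};
 *
 *	int fd = bpf_map_create(BPF_MAP_TYPE_SOCKHASH, "sock_hash",
 *				sizeof(struct tuple_key), sizeof(__u64),
 *				65535, NULL);
 */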
1136
1137static void sock_hash_free(struct bpf_map *map)
1138{
1139 struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
1140 struct bpf_shtab_bucket *bucket;
1141 struct hlist_head unlink_list;
1142 struct bpf_shtab_elem *elem;
1143 struct hlist_node *node;
1144 int i;
1145
1146 /* After the sync no updates or deletes will be in-flight, so it
1147 * is safe to walk the map and remove entries without risking a race
1148 * in the EEXIST update case.
1149 */
1150 synchronize_rcu();
1151 for (i = 0; i < htab->buckets_num; i++) {
1152 bucket = sock_hash_select_bucket(htab, i);
1153
1154 /* We are racing with sock_hash_delete_from_link to
1155 * enter the spin-lock critical section. Every socket on
1156 * the list is still linked to sockhash. Since link
1157 * exists, psock exists and holds a ref to socket. That
1158 * lets us grab a socket ref too.
1159 */
1160 spin_lock_bh(&bucket->lock);
1161 hlist_for_each_entry(elem, &bucket->head, node)
1162 sock_hold(elem->sk);
1163 hlist_move_list(&bucket->head, &unlink_list);
1164 spin_unlock_bh(&bucket->lock);
1165
1166 /* Process removed entries out of atomic context to
1167 * block for socket lock before deleting the psock's
1168 * link to sockhash.
1169 */
1170 hlist_for_each_entry_safe(elem, node, &unlink_list, node) {
1171 hlist_del(&elem->node);
1172 lock_sock(elem->sk);
1173 rcu_read_lock();
1174 sock_map_unref(elem->sk, elem);
1175 rcu_read_unlock();
1176 release_sock(elem->sk);
1177 sock_put(elem->sk);
1178 sock_hash_free_elem(htab, elem);
1179 }
1180 }
1181
1182 /* wait for psock readers accessing its map link */
1183 synchronize_rcu();
1184
1185 bpf_map_area_free(htab->buckets);
1186 bpf_map_area_free(htab);
1187}
1188
1189static void *sock_hash_lookup_sys(struct bpf_map *map, void *key)
1190{
1191 struct sock *sk;
1192
1193 if (map->value_size != sizeof(u64))
1194 return ERR_PTR(-ENOSPC);
1195
1196 sk = __sock_hash_lookup_elem(map, key);
1197 if (!sk)
1198 return ERR_PTR(-ENOENT);
1199
1200 __sock_gen_cookie(sk);
1201 return &sk->sk_cookie;
1202}
1203
1204static void *sock_hash_lookup(struct bpf_map *map, void *key)
1205{
1206 struct sock *sk;
1207
1208 sk = __sock_hash_lookup_elem(map, key);
1209 if (!sk)
1210 return NULL;
1211 if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
1212 return NULL;
1213 return sk;
1214}
1215
1216static void sock_hash_release_progs(struct bpf_map *map)
1217{
1218 psock_progs_drop(&container_of(map, struct bpf_shtab, map)->progs);
1219}
1220
1221BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, sops,
1222 struct bpf_map *, map, void *, key, u64, flags)
1223{
1224 WARN_ON_ONCE(!rcu_read_lock_held());
1225
1226 if (likely(sock_map_sk_is_suitable(sops->sk) &&
1227 sock_map_op_okay(sops)))
1228 return sock_hash_update_common(map, key, sops->sk, flags);
1229 return -EOPNOTSUPP;
1230}
1231
1232const struct bpf_func_proto bpf_sock_hash_update_proto = {
1233 .func = bpf_sock_hash_update,
1234 .gpl_only = false,
1235 .pkt_access = true,
1236 .ret_type = RET_INTEGER,
1237 .arg1_type = ARG_PTR_TO_CTX,
1238 .arg2_type = ARG_CONST_MAP_PTR,
1239 .arg3_type = ARG_PTR_TO_MAP_KEY,
1240 .arg4_type = ARG_ANYTHING,
1241};
1242
1243BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
1244 struct bpf_map *, map, void *, key, u64, flags)
1245{
1246 struct sock *sk;
1247
1248 if (unlikely(flags & ~(BPF_F_INGRESS)))
1249 return SK_DROP;
1250
1251 sk = __sock_hash_lookup_elem(map, key);
1252 if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
1253 return SK_DROP;
1254
1255 skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
1256 return SK_PASS;
1257}
1258
1259const struct bpf_func_proto bpf_sk_redirect_hash_proto = {
1260 .func = bpf_sk_redirect_hash,
1261 .gpl_only = false,
1262 .ret_type = RET_INTEGER,
1263 .arg1_type = ARG_PTR_TO_CTX,
1264 .arg2_type = ARG_CONST_MAP_PTR,
1265 .arg3_type = ARG_PTR_TO_MAP_KEY,
1266 .arg4_type = ARG_ANYTHING,
1267};
1268
1269BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
1270 struct bpf_map *, map, void *, key, u64, flags)
1271{
1272 struct sock *sk;
1273
1274 if (unlikely(flags & ~(BPF_F_INGRESS)))
1275 return SK_DROP;
1276
1277 sk = __sock_hash_lookup_elem(map, key);
1278 if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
1279 return SK_DROP;
1280 if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
1281 return SK_DROP;
1282
1283 msg->flags = flags;
1284 msg->sk_redir = sk;
1285 return SK_PASS;
1286}
1287
1288const struct bpf_func_proto bpf_msg_redirect_hash_proto = {
1289 .func = bpf_msg_redirect_hash,
1290 .gpl_only = false,
1291 .ret_type = RET_INTEGER,
1292 .arg1_type = ARG_PTR_TO_CTX,
1293 .arg2_type = ARG_CONST_MAP_PTR,
1294 .arg3_type = ARG_PTR_TO_MAP_KEY,
1295 .arg4_type = ARG_ANYTHING,
1296};
1297
1298struct sock_hash_seq_info {
1299 struct bpf_map *map;
1300 struct bpf_shtab *htab;
1301 u32 bucket_id;
1302};
1303
1304static void *sock_hash_seq_find_next(struct sock_hash_seq_info *info,
1305 struct bpf_shtab_elem *prev_elem)
1306{
1307 const struct bpf_shtab *htab = info->htab;
1308 struct bpf_shtab_bucket *bucket;
1309 struct bpf_shtab_elem *elem;
1310 struct hlist_node *node;
1311
1312 /* try to find next elem in the same bucket */
1313 if (prev_elem) {
1314 node = rcu_dereference(hlist_next_rcu(&prev_elem->node));
1315 elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
1316 if (elem)
1317 return elem;
1318
1319 /* no more elements, continue in the next bucket */
1320 info->bucket_id++;
1321 }
1322
1323 for (; info->bucket_id < htab->buckets_num; info->bucket_id++) {
1324 bucket = &htab->buckets[info->bucket_id];
1325 node = rcu_dereference(hlist_first_rcu(&bucket->head));
1326 elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
1327 if (elem)
1328 return elem;
1329 }
1330
1331 return NULL;
1332}
1333
1334static void *sock_hash_seq_start(struct seq_file *seq, loff_t *pos)
1335 __acquires(rcu)
1336{
1337 struct sock_hash_seq_info *info = seq->private;
1338
1339 if (*pos == 0)
1340 ++*pos;
1341
1342 /* pairs with sock_hash_seq_stop */
1343 rcu_read_lock();
1344 return sock_hash_seq_find_next(info, NULL);
1345}
1346
1347static void *sock_hash_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1348 __must_hold(rcu)
1349{
1350 struct sock_hash_seq_info *info = seq->private;
1351
1352 ++*pos;
1353 return sock_hash_seq_find_next(info, v);
1354}
1355
1356static int sock_hash_seq_show(struct seq_file *seq, void *v)
1357 __must_hold(rcu)
1358{
1359 struct sock_hash_seq_info *info = seq->private;
1360 struct bpf_iter__sockmap ctx = {};
1361 struct bpf_shtab_elem *elem = v;
1362 struct bpf_iter_meta meta;
1363 struct bpf_prog *prog;
1364
1365 meta.seq = seq;
1366 prog = bpf_iter_get_info(&meta, !elem);
1367 if (!prog)
1368 return 0;
1369
1370 ctx.meta = &meta;
1371 ctx.map = info->map;
1372 if (elem) {
1373 ctx.key = elem->key;
1374 ctx.sk = elem->sk;
1375 }
1376
1377 return bpf_iter_run_prog(prog, &ctx);
1378}
1379
1380static void sock_hash_seq_stop(struct seq_file *seq, void *v)
1381 __releases(rcu)
1382{
1383 if (!v)
1384 (void)sock_hash_seq_show(seq, NULL);
1385
1386 /* pairs with sock_hash_seq_start */
1387 rcu_read_unlock();
1388}
1389
1390static const struct seq_operations sock_hash_seq_ops = {
1391 .start = sock_hash_seq_start,
1392 .next = sock_hash_seq_next,
1393 .stop = sock_hash_seq_stop,
1394 .show = sock_hash_seq_show,
1395};
1396
1397static int sock_hash_init_seq_private(void *priv_data,
1398 struct bpf_iter_aux_info *aux)
1399{
1400 struct sock_hash_seq_info *info = priv_data;
1401
1402 bpf_map_inc_with_uref(aux->map);
1403 info->map = aux->map;
1404 info->htab = container_of(aux->map, struct bpf_shtab, map);
1405 return 0;
1406}
1407
1408static void sock_hash_fini_seq_private(void *priv_data)
1409{
1410 struct sock_hash_seq_info *info = priv_data;
1411
1412 bpf_map_put_with_uref(info->map);
1413}
1414
1415static u64 sock_hash_mem_usage(const struct bpf_map *map)
1416{
1417 struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
1418 u64 usage = sizeof(*htab);
1419
1420 usage += htab->buckets_num * sizeof(struct bpf_shtab_bucket);
1421 usage += atomic_read(&htab->count) * (u64)htab->elem_size;
1422 return usage;
1423}
1424
1425static const struct bpf_iter_seq_info sock_hash_iter_seq_info = {
1426 .seq_ops = &sock_hash_seq_ops,
1427 .init_seq_private = sock_hash_init_seq_private,
1428 .fini_seq_private = sock_hash_fini_seq_private,
1429 .seq_priv_size = sizeof(struct sock_hash_seq_info),
1430};
1431
1432BTF_ID_LIST_SINGLE(sock_hash_map_btf_ids, struct, bpf_shtab)
1433const struct bpf_map_ops sock_hash_ops = {
1434 .map_meta_equal = bpf_map_meta_equal,
1435 .map_alloc = sock_hash_alloc,
1436 .map_free = sock_hash_free,
1437 .map_get_next_key = sock_hash_get_next_key,
1438 .map_update_elem = sock_map_update_elem,
1439 .map_delete_elem = sock_hash_delete_elem,
1440 .map_lookup_elem = sock_hash_lookup,
1441 .map_lookup_elem_sys_only = sock_hash_lookup_sys,
1442 .map_release_uref = sock_hash_release_progs,
1443 .map_check_btf = map_check_no_btf,
1444 .map_mem_usage = sock_hash_mem_usage,
1445 .map_btf_id = &sock_hash_map_btf_ids[0],
1446 .iter_seq_info = &sock_hash_iter_seq_info,
1447};
1448
1449static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
1450{
1451 switch (map->map_type) {
1452 case BPF_MAP_TYPE_SOCKMAP:
1453 return &container_of(map, struct bpf_stab, map)->progs;
1454 case BPF_MAP_TYPE_SOCKHASH:
1455 return &container_of(map, struct bpf_shtab, map)->progs;
1456 default:
1457 break;
1458 }
1459
1460 return NULL;
1461}
1462
1463static int sock_map_prog_lookup(struct bpf_map *map, struct bpf_prog ***pprog,
1464 u32 which)
1465{
1466 struct sk_psock_progs *progs = sock_map_progs(map);
1467
1468 if (!progs)
1469 return -EOPNOTSUPP;
1470
1471 switch (which) {
1472 case BPF_SK_MSG_VERDICT:
1473 *pprog = &progs->msg_parser;
1474 break;
1475#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
1476 case BPF_SK_SKB_STREAM_PARSER:
1477 *pprog = &progs->stream_parser;
1478 break;
1479#endif
1480 case BPF_SK_SKB_STREAM_VERDICT:
1481 if (progs->skb_verdict)
1482 return -EBUSY;
1483 *pprog = &progs->stream_verdict;
1484 break;
1485 case BPF_SK_SKB_VERDICT:
1486 if (progs->stream_verdict)
1487 return -EBUSY;
1488 *pprog = &progs->skb_verdict;
1489 break;
1490 default:
1491 return -EOPNOTSUPP;
1492 }
1493
1494 return 0;
1495}
1496
1497static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
1498 struct bpf_prog *old, u32 which)
1499{
1500 struct bpf_prog **pprog;
1501 int ret;
1502
1503 ret = sock_map_prog_lookup(map, &pprog, which);
1504 if (ret)
1505 return ret;
1506
1507 if (old)
1508 return psock_replace_prog(pprog, prog, old);
1509
1510 psock_set_prog(pprog, prog);
1511 return 0;
1512}
1513
1514int sock_map_bpf_prog_query(const union bpf_attr *attr,
1515 union bpf_attr __user *uattr)
1516{
1517 __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
1518 u32 prog_cnt = 0, flags = 0, ufd = attr->target_fd;
1519 struct bpf_prog **pprog;
1520 struct bpf_prog *prog;
1521 struct bpf_map *map;
1522 struct fd f;
1523 u32 id = 0;
1524 int ret;
1525
1526 if (attr->query.query_flags)
1527 return -EINVAL;
1528
1529 f = fdget(ufd);
1530 map = __bpf_map_get(f);
1531 if (IS_ERR(map))
1532 return PTR_ERR(map);
1533
1534 rcu_read_lock();
1535
1536 ret = sock_map_prog_lookup(map, &pprog, attr->query.attach_type);
1537 if (ret)
1538 goto end;
1539
1540 prog = *pprog;
1541 prog_cnt = !prog ? 0 : 1;
1542
1543 if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)
1544 goto end;
1545
1546 /* We do not hold the refcnt; the bpf prog may be released
1547 * asynchronously, and the id would then be set to 0.
1548 */
1549 id = data_race(prog->aux->id);
1550 if (id == 0)
1551 prog_cnt = 0;
1552
1553end:
1554 rcu_read_unlock();
1555
1556 if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)) ||
1557 (id != 0 && copy_to_user(prog_ids, &id, sizeof(u32))) ||
1558 copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
1559 ret = -EFAULT;
1560
1561 fdput(f);
1562 return ret;
1563}
1564
1565static void sock_map_unlink(struct sock *sk, struct sk_psock_link *link)
1566{
1567 switch (link->map->map_type) {
1568 case BPF_MAP_TYPE_SOCKMAP:
1569 return sock_map_delete_from_link(link->map, sk,
1570 link->link_raw);
1571 case BPF_MAP_TYPE_SOCKHASH:
1572 return sock_hash_delete_from_link(link->map, sk,
1573 link->link_raw);
1574 default:
1575 break;
1576 }
1577}
1578
1579static void sock_map_remove_links(struct sock *sk, struct sk_psock *psock)
1580{
1581 struct sk_psock_link *link;
1582
1583 while ((link = sk_psock_link_pop(psock))) {
1584 sock_map_unlink(sk, link);
1585 sk_psock_free_link(link);
1586 }
1587}
1588
1589void sock_map_unhash(struct sock *sk)
1590{
1591 void (*saved_unhash)(struct sock *sk);
1592 struct sk_psock *psock;
1593
1594 rcu_read_lock();
1595 psock = sk_psock(sk);
1596 if (unlikely(!psock)) {
1597 rcu_read_unlock();
1598 saved_unhash = READ_ONCE(sk->sk_prot)->unhash;
1599 } else {
1600 saved_unhash = psock->saved_unhash;
1601 sock_map_remove_links(sk, psock);
1602 rcu_read_unlock();
1603 }
1604 if (WARN_ON_ONCE(saved_unhash == sock_map_unhash))
1605 return;
1606 if (saved_unhash)
1607 saved_unhash(sk);
1608}
1609EXPORT_SYMBOL_GPL(sock_map_unhash);
1610
1611void sock_map_destroy(struct sock *sk)
1612{
1613 void (*saved_destroy)(struct sock *sk);
1614 struct sk_psock *psock;
1615
1616 rcu_read_lock();
1617 psock = sk_psock_get(sk);
1618 if (unlikely(!psock)) {
1619 rcu_read_unlock();
1620 saved_destroy = READ_ONCE(sk->sk_prot)->destroy;
1621 } else {
1622 saved_destroy = psock->saved_destroy;
1623 sock_map_remove_links(sk, psock);
1624 rcu_read_unlock();
1625 sk_psock_stop(psock);
1626 sk_psock_put(sk, psock);
1627 }
1628 if (WARN_ON_ONCE(saved_destroy == sock_map_destroy))
1629 return;
1630 if (saved_destroy)
1631 saved_destroy(sk);
1632}
1633EXPORT_SYMBOL_GPL(sock_map_destroy);
1634
1635void sock_map_close(struct sock *sk, long timeout)
1636{
1637 void (*saved_close)(struct sock *sk, long timeout);
1638 struct sk_psock *psock;
1639
1640 lock_sock(sk);
1641 rcu_read_lock();
1642 psock = sk_psock_get(sk);
1643 if (unlikely(!psock)) {
1644 rcu_read_unlock();
1645 release_sock(sk);
1646 saved_close = READ_ONCE(sk->sk_prot)->close;
1647 } else {
1648 saved_close = psock->saved_close;
1649 sock_map_remove_links(sk, psock);
1650 rcu_read_unlock();
1651 sk_psock_stop(psock);
1652 release_sock(sk);
1653 cancel_delayed_work_sync(&psock->work);
1654 sk_psock_put(sk, psock);
1655 }
1656
1657 /* Make sure we do not recurse. This is a bug.
1658 * Leak the socket instead of crashing on a stack overflow.
1659 */
1660 if (WARN_ON_ONCE(saved_close == sock_map_close))
1661 return;
1662 saved_close(sk, timeout);
1663}
1664EXPORT_SYMBOL_GPL(sock_map_close);
1665
1666static int sock_map_iter_attach_target(struct bpf_prog *prog,
1667 union bpf_iter_link_info *linfo,
1668 struct bpf_iter_aux_info *aux)
1669{
1670 struct bpf_map *map;
1671 int err = -EINVAL;
1672
1673 if (!linfo->map.map_fd)
1674 return -EBADF;
1675
1676 map = bpf_map_get_with_uref(linfo->map.map_fd);
1677 if (IS_ERR(map))
1678 return PTR_ERR(map);
1679
1680 if (map->map_type != BPF_MAP_TYPE_SOCKMAP &&
1681 map->map_type != BPF_MAP_TYPE_SOCKHASH)
1682 goto put_map;
1683
1684 if (prog->aux->max_rdonly_access > map->key_size) {
1685 err = -EACCES;
1686 goto put_map;
1687 }
1688
1689 aux->map = map;
1690 return 0;
1691
1692put_map:
1693 bpf_map_put_with_uref(map);
1694 return err;
1695}
1696
1697static void sock_map_iter_detach_target(struct bpf_iter_aux_info *aux)
1698{
1699 bpf_map_put_with_uref(aux->map);
1700}
1701
1702static struct bpf_iter_reg sock_map_iter_reg = {
1703 .target = "sockmap",
1704 .attach_target = sock_map_iter_attach_target,
1705 .detach_target = sock_map_iter_detach_target,
1706 .show_fdinfo = bpf_iter_map_show_fdinfo,
1707 .fill_link_info = bpf_iter_map_fill_link_info,
1708 .ctx_arg_info_size = 2,
1709 .ctx_arg_info = {
1710 { offsetof(struct bpf_iter__sockmap, key),
1711 PTR_TO_BUF | PTR_MAYBE_NULL | MEM_RDONLY },
1712 { offsetof(struct bpf_iter__sockmap, sk),
1713 PTR_TO_BTF_ID_OR_NULL },
1714 },
1715};
1716
1717static int __init bpf_sockmap_iter_init(void)
1718{
1719 sock_map_iter_reg.ctx_arg_info[1].btf_id =
1720 btf_sock_ids[BTF_SOCK_TYPE_SOCK];
1721 return bpf_iter_reg_target(&sock_map_iter_reg);
1722}
1723late_initcall(bpf_sockmap_iter_init);
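/* Illustrative sketch, not part of this file: user space creates such an
 * iterator with libbpf roughly as
 *
 *	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
 *	union bpf_iter_link_info linfo = { .map.map_fd = map_fd };
 *	struct bpf_link *link;
 *	int iter_fd;
 *
 *	opts.link_info = &linfo;
 *	opts.link_info_len = sizeof(linfo);
 *	link = bpf_program__attach_iter(prog, &opts);
 *	iter_fd = bpf_iter_create(bpf_link__fd(link));
 *
 * and then read()s iter_fd to run the program over every map entry.
 */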
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
3
4#include <linux/bpf.h>
5#include <linux/btf_ids.h>
6#include <linux/filter.h>
7#include <linux/errno.h>
8#include <linux/file.h>
9#include <linux/net.h>
10#include <linux/workqueue.h>
11#include <linux/skmsg.h>
12#include <linux/list.h>
13#include <linux/jhash.h>
14#include <linux/sock_diag.h>
15#include <net/udp.h>
16
17struct bpf_stab {
18 struct bpf_map map;
19 struct sock **sks;
20 struct sk_psock_progs progs;
21 spinlock_t lock;
22};
23
24#define SOCK_CREATE_FLAG_MASK \
25 (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
26
27/* This mutex is used to
28 * - protect against races between prog/link attach/detach and link prog update, and
29 * - protect against races between releasing and accessing a map in bpf_link.
30 * A single global mutex is used since contention is expected to be low.
31 */
32static DEFINE_MUTEX(sockmap_mutex);
33
34static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
35 struct bpf_prog *old, struct bpf_link *link,
36 u32 which);
37static struct sk_psock_progs *sock_map_progs(struct bpf_map *map);
38
39static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
40{
41 struct bpf_stab *stab;
42
43 if (attr->max_entries == 0 ||
44 attr->key_size != 4 ||
45 (attr->value_size != sizeof(u32) &&
46 attr->value_size != sizeof(u64)) ||
47 attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
48 return ERR_PTR(-EINVAL);
49
50 stab = bpf_map_area_alloc(sizeof(*stab), NUMA_NO_NODE);
51 if (!stab)
52 return ERR_PTR(-ENOMEM);
53
54 bpf_map_init_from_attr(&stab->map, attr);
55 spin_lock_init(&stab->lock);
56
57 stab->sks = bpf_map_area_alloc((u64) stab->map.max_entries *
58 sizeof(struct sock *),
59 stab->map.numa_node);
60 if (!stab->sks) {
61 bpf_map_area_free(stab);
62 return ERR_PTR(-ENOMEM);
63 }
64
65 return &stab->map;
66}
67
68int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
69{
70 struct bpf_map *map;
71 int ret;
72
73 if (attr->attach_flags || attr->replace_bpf_fd)
74 return -EINVAL;
75
76 CLASS(fd, f)(attr->target_fd);
77 map = __bpf_map_get(f);
78 if (IS_ERR(map))
79 return PTR_ERR(map);
80 mutex_lock(&sockmap_mutex);
81 ret = sock_map_prog_update(map, prog, NULL, NULL, attr->attach_type);
82 mutex_unlock(&sockmap_mutex);
83 return ret;
84}
85
86int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
87{
88 struct bpf_prog *prog;
89 struct bpf_map *map;
90 int ret;
91
92 if (attr->attach_flags || attr->replace_bpf_fd)
93 return -EINVAL;
94
95 CLASS(fd, f)(attr->target_fd);
96 map = __bpf_map_get(f);
97 if (IS_ERR(map))
98 return PTR_ERR(map);
99
100 prog = bpf_prog_get(attr->attach_bpf_fd);
101 if (IS_ERR(prog))
102 return PTR_ERR(prog);
103
104 if (prog->type != ptype) {
105 ret = -EINVAL;
106 goto put_prog;
107 }
108
109 mutex_lock(&sockmap_mutex);
110 ret = sock_map_prog_update(map, NULL, prog, NULL, attr->attach_type);
111 mutex_unlock(&sockmap_mutex);
112put_prog:
113 bpf_prog_put(prog);
114 return ret;
115}
116
117static void sock_map_sk_acquire(struct sock *sk)
118 __acquires(&sk->sk_lock.slock)
119{
120 lock_sock(sk);
121 rcu_read_lock();
122}
123
124static void sock_map_sk_release(struct sock *sk)
125 __releases(&sk->sk_lock.slock)
126{
127 rcu_read_unlock();
128 release_sock(sk);
129}
130
131static void sock_map_add_link(struct sk_psock *psock,
132 struct sk_psock_link *link,
133 struct bpf_map *map, void *link_raw)
134{
135 link->link_raw = link_raw;
136 link->map = map;
137 spin_lock_bh(&psock->link_lock);
138 list_add_tail(&link->list, &psock->link);
139 spin_unlock_bh(&psock->link_lock);
140}
141
142static void sock_map_del_link(struct sock *sk,
143 struct sk_psock *psock, void *link_raw)
144{
145 bool strp_stop = false, verdict_stop = false;
146 struct sk_psock_link *link, *tmp;
147
148 spin_lock_bh(&psock->link_lock);
149 list_for_each_entry_safe(link, tmp, &psock->link, list) {
150 if (link->link_raw == link_raw) {
151 struct bpf_map *map = link->map;
152 struct sk_psock_progs *progs = sock_map_progs(map);
153
154 if (psock->saved_data_ready && progs->stream_parser)
155 strp_stop = true;
156 if (psock->saved_data_ready && progs->stream_verdict)
157 verdict_stop = true;
158 if (psock->saved_data_ready && progs->skb_verdict)
159 verdict_stop = true;
160 list_del(&link->list);
161 sk_psock_free_link(link);
162 break;
163 }
164 }
165 spin_unlock_bh(&psock->link_lock);
166 if (strp_stop || verdict_stop) {
167 write_lock_bh(&sk->sk_callback_lock);
168 if (strp_stop)
169 sk_psock_stop_strp(sk, psock);
170 if (verdict_stop)
171 sk_psock_stop_verdict(sk, psock);
172
173 if (psock->psock_update_sk_prot)
174 psock->psock_update_sk_prot(sk, psock, false);
175 write_unlock_bh(&sk->sk_callback_lock);
176 }
177}
178
179static void sock_map_unref(struct sock *sk, void *link_raw)
180{
181 struct sk_psock *psock = sk_psock(sk);
182
183 if (likely(psock)) {
184 sock_map_del_link(sk, psock, link_raw);
185 sk_psock_put(sk, psock);
186 }
187}
188
189static int sock_map_init_proto(struct sock *sk, struct sk_psock *psock)
190{
191 if (!sk->sk_prot->psock_update_sk_prot)
192 return -EINVAL;
193 psock->psock_update_sk_prot = sk->sk_prot->psock_update_sk_prot;
194 return sk->sk_prot->psock_update_sk_prot(sk, psock, false);
195}
196
197static struct sk_psock *sock_map_psock_get_checked(struct sock *sk)
198{
199 struct sk_psock *psock;
200
201 rcu_read_lock();
202 psock = sk_psock(sk);
203 if (psock) {
204 if (sk->sk_prot->close != sock_map_close) {
205 psock = ERR_PTR(-EBUSY);
206 goto out;
207 }
208
209 if (!refcount_inc_not_zero(&psock->refcnt))
210 psock = ERR_PTR(-EBUSY);
211 }
212out:
213 rcu_read_unlock();
214 return psock;
215}
216
217static int sock_map_link(struct bpf_map *map, struct sock *sk)
218{
219 struct sk_psock_progs *progs = sock_map_progs(map);
220 struct bpf_prog *stream_verdict = NULL;
221 struct bpf_prog *stream_parser = NULL;
222 struct bpf_prog *skb_verdict = NULL;
223 struct bpf_prog *msg_parser = NULL;
224 struct sk_psock *psock;
225 int ret;
226
227 stream_verdict = READ_ONCE(progs->stream_verdict);
228 if (stream_verdict) {
229 stream_verdict = bpf_prog_inc_not_zero(stream_verdict);
230 if (IS_ERR(stream_verdict))
231 return PTR_ERR(stream_verdict);
232 }
233
234 stream_parser = READ_ONCE(progs->stream_parser);
235 if (stream_parser) {
236 stream_parser = bpf_prog_inc_not_zero(stream_parser);
237 if (IS_ERR(stream_parser)) {
238 ret = PTR_ERR(stream_parser);
239 goto out_put_stream_verdict;
240 }
241 }
242
243 msg_parser = READ_ONCE(progs->msg_parser);
244 if (msg_parser) {
245 msg_parser = bpf_prog_inc_not_zero(msg_parser);
246 if (IS_ERR(msg_parser)) {
247 ret = PTR_ERR(msg_parser);
248 goto out_put_stream_parser;
249 }
250 }
251
252 skb_verdict = READ_ONCE(progs->skb_verdict);
253 if (skb_verdict) {
254 skb_verdict = bpf_prog_inc_not_zero(skb_verdict);
255 if (IS_ERR(skb_verdict)) {
256 ret = PTR_ERR(skb_verdict);
257 goto out_put_msg_parser;
258 }
259 }
260
261 psock = sock_map_psock_get_checked(sk);
262 if (IS_ERR(psock)) {
263 ret = PTR_ERR(psock);
264 goto out_progs;
265 }
266
267 if (psock) {
268 if ((msg_parser && READ_ONCE(psock->progs.msg_parser)) ||
269 (stream_parser && READ_ONCE(psock->progs.stream_parser)) ||
270 (skb_verdict && READ_ONCE(psock->progs.skb_verdict)) ||
271 (skb_verdict && READ_ONCE(psock->progs.stream_verdict)) ||
272 (stream_verdict && READ_ONCE(psock->progs.skb_verdict)) ||
273 (stream_verdict && READ_ONCE(psock->progs.stream_verdict))) {
274 sk_psock_put(sk, psock);
275 ret = -EBUSY;
276 goto out_progs;
277 }
278 } else {
279 psock = sk_psock_init(sk, map->numa_node);
280 if (IS_ERR(psock)) {
281 ret = PTR_ERR(psock);
282 goto out_progs;
283 }
284 }
285
286 if (msg_parser)
287 psock_set_prog(&psock->progs.msg_parser, msg_parser);
288 if (stream_parser)
289 psock_set_prog(&psock->progs.stream_parser, stream_parser);
290 if (stream_verdict)
291 psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
292 if (skb_verdict)
293 psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
294
295 /* msg_* and stream_* program references are tracked in psock after this
296 * point. Reference drop and cleanup occur through the psock destructor.
297 */
298 ret = sock_map_init_proto(sk, psock);
299 if (ret < 0) {
300 sk_psock_put(sk, psock);
301 goto out;
302 }
303
304 write_lock_bh(&sk->sk_callback_lock);
305 if (stream_parser && stream_verdict && !psock->saved_data_ready) {
306 if (sk_is_tcp(sk))
307 ret = sk_psock_init_strp(sk, psock);
308 else
309 ret = -EOPNOTSUPP;
310 if (ret) {
311 write_unlock_bh(&sk->sk_callback_lock);
312 sk_psock_put(sk, psock);
313 goto out;
314 }
315 sk_psock_start_strp(sk, psock);
316 } else if (!stream_parser && stream_verdict && !psock->saved_data_ready) {
317 sk_psock_start_verdict(sk, psock);
318 } else if (!stream_verdict && skb_verdict && !psock->saved_data_ready) {
319 sk_psock_start_verdict(sk, psock);
320 }
321 write_unlock_bh(&sk->sk_callback_lock);
322 return 0;
323out_progs:
324 if (skb_verdict)
325 bpf_prog_put(skb_verdict);
326out_put_msg_parser:
327 if (msg_parser)
328 bpf_prog_put(msg_parser);
329out_put_stream_parser:
330 if (stream_parser)
331 bpf_prog_put(stream_parser);
332out_put_stream_verdict:
333 if (stream_verdict)
334 bpf_prog_put(stream_verdict);
335out:
336 return ret;
337}
338
339static void sock_map_free(struct bpf_map *map)
340{
341 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
342 int i;
343
344 /* After the sync no updates or deletes will be in-flight, so it
345 * is safe to walk the map and remove entries without risking a race
346 * in the EEXIST update case.
347 */
348 synchronize_rcu();
349 for (i = 0; i < stab->map.max_entries; i++) {
350 struct sock **psk = &stab->sks[i];
351 struct sock *sk;
352
353 sk = xchg(psk, NULL);
354 if (sk) {
355 sock_hold(sk);
356 lock_sock(sk);
357 rcu_read_lock();
358 sock_map_unref(sk, psk);
359 rcu_read_unlock();
360 release_sock(sk);
361 sock_put(sk);
362 }
363 }
364
365 /* wait for psock readers accessing its map link */
366 synchronize_rcu();
367
368 bpf_map_area_free(stab->sks);
369 bpf_map_area_free(stab);
370}
371
372static void sock_map_release_progs(struct bpf_map *map)
373{
374 psock_progs_drop(&container_of(map, struct bpf_stab, map)->progs);
375}
376
377static struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
378{
379 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
380
381 WARN_ON_ONCE(!rcu_read_lock_held());
382
383 if (unlikely(key >= map->max_entries))
384 return NULL;
385 return READ_ONCE(stab->sks[key]);
386}
387
388static void *sock_map_lookup(struct bpf_map *map, void *key)
389{
390 struct sock *sk;
391
392 sk = __sock_map_lookup_elem(map, *(u32 *)key);
393 if (!sk)
394 return NULL;
395 if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
396 return NULL;
397 return sk;
398}
399
400static void *sock_map_lookup_sys(struct bpf_map *map, void *key)
401{
402 struct sock *sk;
403
404 if (map->value_size != sizeof(u64))
405 return ERR_PTR(-ENOSPC);
406
407 sk = __sock_map_lookup_elem(map, *(u32 *)key);
408 if (!sk)
409 return ERR_PTR(-ENOENT);
410
411 __sock_gen_cookie(sk);
412 return &sk->sk_cookie;
413}
414
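/*
 * A minimal user-space sketch of the lookup path above, assuming a
 * SOCKMAP created with value_size == sizeof(__u64) and a hypothetical
 * map_fd obtained elsewhere: a syscall-side lookup returns the socket
 * cookie, never a kernel pointer.
 *
 *   #include <bpf/bpf.h>
 *   #include <stdio.h>
 *
 *   static void dump_cookie(int map_fd, __u32 key)
 *   {
 *           __u64 cookie = 0;
 *
 *           // -ENOENT if the slot is empty; -ENOSPC if the map was
 *           // created with a 4-byte value_size.
 *           if (!bpf_map_lookup_elem(map_fd, &key, &cookie))
 *                   printf("slot %u -> cookie %llu\n", key,
 *                          (unsigned long long)cookie);
 *   }
 */
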
415static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
416 struct sock **psk)
417{
418 struct sock *sk = NULL;
419 int err = 0;
420
421 spin_lock_bh(&stab->lock);
422 if (!sk_test || sk_test == *psk)
423 sk = xchg(psk, NULL);
424
425 if (likely(sk))
426 sock_map_unref(sk, psk);
427 else
428 err = -EINVAL;
429
430 spin_unlock_bh(&stab->lock);
431 return err;
432}
433
434static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk,
435 void *link_raw)
436{
437 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
438
439 __sock_map_delete(stab, sk, link_raw);
440}
441
442static long sock_map_delete_elem(struct bpf_map *map, void *key)
443{
444 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
445 u32 i = *(u32 *)key;
446 struct sock **psk;
447
448 if (unlikely(i >= map->max_entries))
449 return -EINVAL;
450
451 psk = &stab->sks[i];
452 return __sock_map_delete(stab, NULL, psk);
453}
454
455static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next)
456{
457 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
458 u32 i = key ? *(u32 *)key : U32_MAX;
459 u32 *key_next = next;
460
461 if (i == stab->map.max_entries - 1)
462 return -ENOENT;
463 if (i >= stab->map.max_entries)
464 *key_next = 0;
465 else
466 *key_next = i + 1;
467 return 0;
468}
469
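/*
 * Hedged user-space sketch of walking a SOCKMAP with the get_next_key
 * operation above (hypothetical map_fd, errors ignored): passing NULL
 * as the current key yields the first index, and -ENOENT ends the walk.
 *
 *   #include <bpf/bpf.h>
 *
 *   static void walk_indices(int map_fd)
 *   {
 *           __u32 key, next;
 *           __u32 *cur = NULL;
 *
 *           while (!bpf_map_get_next_key(map_fd, cur, &next)) {
 *                   // use next ...
 *                   key = next;
 *                   cur = &key;
 *           }
 *   }
 */
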
470static int sock_map_update_common(struct bpf_map *map, u32 idx,
471 struct sock *sk, u64 flags)
472{
473 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
474 struct sk_psock_link *link;
475 struct sk_psock *psock;
476 struct sock *osk;
477 int ret;
478
479 WARN_ON_ONCE(!rcu_read_lock_held());
480 if (unlikely(flags > BPF_EXIST))
481 return -EINVAL;
482 if (unlikely(idx >= map->max_entries))
483 return -E2BIG;
484
485 link = sk_psock_init_link();
486 if (!link)
487 return -ENOMEM;
488
489 ret = sock_map_link(map, sk);
490 if (ret < 0)
491 goto out_free;
492
493 psock = sk_psock(sk);
494 WARN_ON_ONCE(!psock);
495
496 spin_lock_bh(&stab->lock);
497 osk = stab->sks[idx];
498 if (osk && flags == BPF_NOEXIST) {
499 ret = -EEXIST;
500 goto out_unlock;
501 } else if (!osk && flags == BPF_EXIST) {
502 ret = -ENOENT;
503 goto out_unlock;
504 }
505
506 sock_map_add_link(psock, link, map, &stab->sks[idx]);
507 stab->sks[idx] = sk;
508 if (osk)
509 sock_map_unref(osk, &stab->sks[idx]);
510 spin_unlock_bh(&stab->lock);
511 return 0;
512out_unlock:
513 spin_unlock_bh(&stab->lock);
514 if (psock)
515 sk_psock_put(sk, psock);
516out_free:
517 sk_psock_free_link(link);
518 return ret;
519}
520
521static bool sock_map_op_okay(const struct bpf_sock_ops_kern *ops)
522{
523 return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB ||
524 ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
525 ops->op == BPF_SOCK_OPS_TCP_LISTEN_CB;
526}
527
528static bool sock_map_redirect_allowed(const struct sock *sk)
529{
530 if (sk_is_tcp(sk))
531 return sk->sk_state != TCP_LISTEN;
532 else
533 return sk->sk_state == TCP_ESTABLISHED;
534}
535
536static bool sock_map_sk_is_suitable(const struct sock *sk)
537{
538 return !!sk->sk_prot->psock_update_sk_prot;
539}
540
541static bool sock_map_sk_state_allowed(const struct sock *sk)
542{
543 if (sk_is_tcp(sk))
544 return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN);
545 if (sk_is_stream_unix(sk))
546 return (1 << sk->sk_state) & TCPF_ESTABLISHED;
547 if (sk_is_vsock(sk) &&
548 (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET))
549 return (1 << sk->sk_state) & TCPF_ESTABLISHED;
550 return true;
551}
552
553static int sock_hash_update_common(struct bpf_map *map, void *key,
554 struct sock *sk, u64 flags);
555
556int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
557 u64 flags)
558{
559 struct socket *sock;
560 struct sock *sk;
561 int ret;
562 u64 ufd;
563
564 if (map->value_size == sizeof(u64))
565 ufd = *(u64 *)value;
566 else
567 ufd = *(u32 *)value;
568 if (ufd > S32_MAX)
569 return -EINVAL;
570
571 sock = sockfd_lookup(ufd, &ret);
572 if (!sock)
573 return ret;
574 sk = sock->sk;
575 if (!sk) {
576 ret = -EINVAL;
577 goto out;
578 }
579 if (!sock_map_sk_is_suitable(sk)) {
580 ret = -EOPNOTSUPP;
581 goto out;
582 }
583
584 sock_map_sk_acquire(sk);
585 if (!sock_map_sk_state_allowed(sk))
586 ret = -EOPNOTSUPP;
587 else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
588 ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
589 else
590 ret = sock_hash_update_common(map, key, sk, flags);
591 sock_map_sk_release(sk);
592out:
593 sockfd_put(sock);
594 return ret;
595}
596
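/*
 * A user-space sketch of the syscall update path above (hypothetical
 * fds, no error handling): the value written into the map is a socket
 * file descriptor, and the socket must be in an allowed state, e.g. an
 * established or listening TCP socket.
 *
 *   #include <bpf/bpf.h>
 *
 *   static int add_sock(int map_fd, int tcp_fd, __u32 idx)
 *   {
 *           __u32 val = tcp_fd;   // pass a u64 if value_size == 8
 *
 *           return bpf_map_update_elem(map_fd, &idx, &val, BPF_ANY);
 *   }
 */
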
597static long sock_map_update_elem(struct bpf_map *map, void *key,
598 void *value, u64 flags)
599{
600 struct sock *sk = (struct sock *)value;
601 int ret;
602
603 if (unlikely(!sk || !sk_fullsock(sk)))
604 return -EINVAL;
605
606 if (!sock_map_sk_is_suitable(sk))
607 return -EOPNOTSUPP;
608
609 local_bh_disable();
610 bh_lock_sock(sk);
611 if (!sock_map_sk_state_allowed(sk))
612 ret = -EOPNOTSUPP;
613 else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
614 ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
615 else
616 ret = sock_hash_update_common(map, key, sk, flags);
617 bh_unlock_sock(sk);
618 local_bh_enable();
619 return ret;
620}
621
622BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, sops,
623 struct bpf_map *, map, void *, key, u64, flags)
624{
625 WARN_ON_ONCE(!rcu_read_lock_held());
626
627 if (likely(sock_map_sk_is_suitable(sops->sk) &&
628 sock_map_op_okay(sops)))
629 return sock_map_update_common(map, *(u32 *)key, sops->sk,
630 flags);
631 return -EOPNOTSUPP;
632}
633
634const struct bpf_func_proto bpf_sock_map_update_proto = {
635 .func = bpf_sock_map_update,
636 .gpl_only = false,
637 .pkt_access = true,
638 .ret_type = RET_INTEGER,
639 .arg1_type = ARG_PTR_TO_CTX,
640 .arg2_type = ARG_CONST_MAP_PTR,
641 .arg3_type = ARG_PTR_TO_MAP_KEY,
642 .arg4_type = ARG_ANYTHING,
643};
644
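/*
 * Illustrative BPF-side sketch (assumed section name and map
 * definition, needs the usual libbpf helper headers; not taken from
 * this file) of a sock_ops program feeding established sockets into
 * the map through the helper above. Only the *_ESTABLISHED_CB and
 * TCP_LISTEN_CB ops pass sock_map_op_okay().
 *
 *   struct {
 *           __uint(type, BPF_MAP_TYPE_SOCKMAP);
 *           __uint(max_entries, 2);
 *           __type(key, __u32);
 *           __type(value, __u64);
 *   } sock_map SEC(".maps");
 *
 *   SEC("sockops")
 *   int add_established(struct bpf_sock_ops *skops)
 *   {
 *           __u32 key = 0;
 *
 *           switch (skops->op) {
 *           case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
 *           case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
 *                   bpf_sock_map_update(skops, &sock_map, &key, BPF_NOEXIST);
 *                   break;
 *           }
 *           return 0;
 *   }
 */
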
645BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
646 struct bpf_map *, map, u32, key, u64, flags)
647{
648 struct sock *sk;
649
650 if (unlikely(flags & ~(BPF_F_INGRESS)))
651 return SK_DROP;
652
653 sk = __sock_map_lookup_elem(map, key);
654 if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
655 return SK_DROP;
656 if ((flags & BPF_F_INGRESS) && sk_is_vsock(sk))
657 return SK_DROP;
658
659 skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
660 return SK_PASS;
661}
662
663const struct bpf_func_proto bpf_sk_redirect_map_proto = {
664 .func = bpf_sk_redirect_map,
665 .gpl_only = false,
666 .ret_type = RET_INTEGER,
667 .arg1_type = ARG_PTR_TO_CTX,
668 .arg2_type = ARG_CONST_MAP_PTR,
669 .arg3_type = ARG_ANYTHING,
670 .arg4_type = ARG_ANYTHING,
671};
672
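/*
 * Hedged BPF-side sketch of an sk_skb verdict program using the helper
 * above (assumed section name; sock_map as defined in the earlier
 * sketch). BPF_F_INGRESS queues the skb to the target socket's receive
 * path instead of its transmit path.
 *
 *   SEC("sk_skb/stream_verdict")
 *   int redirect_skb(struct __sk_buff *skb)
 *   {
 *           __u32 key = 0;
 *
 *           return bpf_sk_redirect_map(skb, &sock_map, key, BPF_F_INGRESS);
 *   }
 */
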
673BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
674 struct bpf_map *, map, u32, key, u64, flags)
675{
676 struct sock *sk;
677
678 if (unlikely(flags & ~(BPF_F_INGRESS)))
679 return SK_DROP;
680
681 sk = __sock_map_lookup_elem(map, key);
682 if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
683 return SK_DROP;
684 if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
685 return SK_DROP;
686 if (sk_is_vsock(sk))
687 return SK_DROP;
688
689 msg->flags = flags;
690 msg->sk_redir = sk;
691 return SK_PASS;
692}
693
694const struct bpf_func_proto bpf_msg_redirect_map_proto = {
695 .func = bpf_msg_redirect_map,
696 .gpl_only = false,
697 .ret_type = RET_INTEGER,
698 .arg1_type = ARG_PTR_TO_CTX,
699 .arg2_type = ARG_CONST_MAP_PTR,
700 .arg3_type = ARG_ANYTHING,
701 .arg4_type = ARG_ANYTHING,
702};
703
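/*
 * Similar sketch for the sendmsg path: an sk_msg program (assumed
 * section name, same hypothetical map) redirecting message data to
 * another socket in the map. Note the extra checks above: a
 * non-ingress redirect requires a TCP target, and vsock targets are
 * rejected.
 *
 *   SEC("sk_msg")
 *   int redirect_msg(struct sk_msg_md *msg)
 *   {
 *           __u32 key = 0;
 *
 *           return bpf_msg_redirect_map(msg, &sock_map, key, 0);
 *   }
 */
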
704struct sock_map_seq_info {
705 struct bpf_map *map;
706 struct sock *sk;
707 u32 index;
708};
709
710struct bpf_iter__sockmap {
711 __bpf_md_ptr(struct bpf_iter_meta *, meta);
712 __bpf_md_ptr(struct bpf_map *, map);
713 __bpf_md_ptr(void *, key);
714 __bpf_md_ptr(struct sock *, sk);
715};
716
717DEFINE_BPF_ITER_FUNC(sockmap, struct bpf_iter_meta *meta,
718 struct bpf_map *map, void *key,
719 struct sock *sk)
720
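/*
 * Hedged sketch of an iterator program consuming the context above
 * (assumed section name; BPF_SEQ_PRINTF comes from libbpf's helper
 * headers). For SOCKMAP the key is the u32 index; for SOCKHASH it
 * points at the raw key bytes.
 *
 *   SEC("iter/sockmap")
 *   int dump(struct bpf_iter__sockmap *ctx)
 *   {
 *           struct sock *sk = ctx->sk;
 *           __u32 *key = ctx->key;
 *
 *           if (!key || !sk)
 *                   return 0;
 *
 *           BPF_SEQ_PRINTF(ctx->meta->seq, "%u: state %d\n",
 *                          *key, sk->sk_state);
 *           return 0;
 *   }
 */
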
721static void *sock_map_seq_lookup_elem(struct sock_map_seq_info *info)
722{
723 if (unlikely(info->index >= info->map->max_entries))
724 return NULL;
725
726 info->sk = __sock_map_lookup_elem(info->map, info->index);
727
728 /* can't return sk directly, since that might be NULL */
729 return info;
730}
731
732static void *sock_map_seq_start(struct seq_file *seq, loff_t *pos)
733 __acquires(rcu)
734{
735 struct sock_map_seq_info *info = seq->private;
736
737 if (*pos == 0)
738 ++*pos;
739
740 /* pairs with sock_map_seq_stop */
741 rcu_read_lock();
742 return sock_map_seq_lookup_elem(info);
743}
744
745static void *sock_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
746 __must_hold(rcu)
747{
748 struct sock_map_seq_info *info = seq->private;
749
750 ++*pos;
751 ++info->index;
752
753 return sock_map_seq_lookup_elem(info);
754}
755
756static int sock_map_seq_show(struct seq_file *seq, void *v)
757 __must_hold(rcu)
758{
759 struct sock_map_seq_info *info = seq->private;
760 struct bpf_iter__sockmap ctx = {};
761 struct bpf_iter_meta meta;
762 struct bpf_prog *prog;
763
764 meta.seq = seq;
765 prog = bpf_iter_get_info(&meta, !v);
766 if (!prog)
767 return 0;
768
769 ctx.meta = &meta;
770 ctx.map = info->map;
771 if (v) {
772 ctx.key = &info->index;
773 ctx.sk = info->sk;
774 }
775
776 return bpf_iter_run_prog(prog, &ctx);
777}
778
779static void sock_map_seq_stop(struct seq_file *seq, void *v)
780 __releases(rcu)
781{
782 if (!v)
783 (void)sock_map_seq_show(seq, NULL);
784
785 /* pairs with sock_map_seq_start */
786 rcu_read_unlock();
787}
788
789static const struct seq_operations sock_map_seq_ops = {
790 .start = sock_map_seq_start,
791 .next = sock_map_seq_next,
792 .stop = sock_map_seq_stop,
793 .show = sock_map_seq_show,
794};
795
796static int sock_map_init_seq_private(void *priv_data,
797 struct bpf_iter_aux_info *aux)
798{
799 struct sock_map_seq_info *info = priv_data;
800
801 bpf_map_inc_with_uref(aux->map);
802 info->map = aux->map;
803 return 0;
804}
805
806static void sock_map_fini_seq_private(void *priv_data)
807{
808 struct sock_map_seq_info *info = priv_data;
809
810 bpf_map_put_with_uref(info->map);
811}
812
813static u64 sock_map_mem_usage(const struct bpf_map *map)
814{
815 u64 usage = sizeof(struct bpf_stab);
816
817 usage += (u64)map->max_entries * sizeof(struct sock *);
818 return usage;
819}
820
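/*
 * Worked example (64-bit kernel assumed): a SOCKMAP with
 * max_entries = 65536 is accounted as sizeof(struct bpf_stab) plus
 * 65536 * sizeof(struct sock *) = 65536 * 8 bytes = 512 KiB,
 * regardless of how many slots currently hold sockets.
 */
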
821static const struct bpf_iter_seq_info sock_map_iter_seq_info = {
822 .seq_ops = &sock_map_seq_ops,
823 .init_seq_private = sock_map_init_seq_private,
824 .fini_seq_private = sock_map_fini_seq_private,
825 .seq_priv_size = sizeof(struct sock_map_seq_info),
826};
827
828BTF_ID_LIST_SINGLE(sock_map_btf_ids, struct, bpf_stab)
829const struct bpf_map_ops sock_map_ops = {
830 .map_meta_equal = bpf_map_meta_equal,
831 .map_alloc = sock_map_alloc,
832 .map_free = sock_map_free,
833 .map_get_next_key = sock_map_get_next_key,
834 .map_lookup_elem_sys_only = sock_map_lookup_sys,
835 .map_update_elem = sock_map_update_elem,
836 .map_delete_elem = sock_map_delete_elem,
837 .map_lookup_elem = sock_map_lookup,
838 .map_release_uref = sock_map_release_progs,
839 .map_check_btf = map_check_no_btf,
840 .map_mem_usage = sock_map_mem_usage,
841 .map_btf_id = &sock_map_btf_ids[0],
842 .iter_seq_info = &sock_map_iter_seq_info,
843};
844
845struct bpf_shtab_elem {
846 struct rcu_head rcu;
847 u32 hash;
848 struct sock *sk;
849 struct hlist_node node;
850 u8 key[];
851};
852
853struct bpf_shtab_bucket {
854 struct hlist_head head;
855 spinlock_t lock;
856};
857
858struct bpf_shtab {
859 struct bpf_map map;
860 struct bpf_shtab_bucket *buckets;
861 u32 buckets_num;
862 u32 elem_size;
863 struct sk_psock_progs progs;
864 atomic_t count;
865};
866
867static inline u32 sock_hash_bucket_hash(const void *key, u32 len)
868{
869 return jhash(key, len, 0);
870}
871
872static struct bpf_shtab_bucket *sock_hash_select_bucket(struct bpf_shtab *htab,
873 u32 hash)
874{
875 return &htab->buckets[hash & (htab->buckets_num - 1)];
876}
877
878static struct bpf_shtab_elem *
879sock_hash_lookup_elem_raw(struct hlist_head *head, u32 hash, void *key,
880 u32 key_size)
881{
882 struct bpf_shtab_elem *elem;
883
884 hlist_for_each_entry_rcu(elem, head, node) {
885 if (elem->hash == hash &&
886 !memcmp(&elem->key, key, key_size))
887 return elem;
888 }
889
890 return NULL;
891}
892
893static struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
894{
895 struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
896 u32 key_size = map->key_size, hash;
897 struct bpf_shtab_bucket *bucket;
898 struct bpf_shtab_elem *elem;
899
900 WARN_ON_ONCE(!rcu_read_lock_held());
901
902 hash = sock_hash_bucket_hash(key, key_size);
903 bucket = sock_hash_select_bucket(htab, hash);
904 elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
905
906 return elem ? elem->sk : NULL;
907}
908
909static void sock_hash_free_elem(struct bpf_shtab *htab,
910 struct bpf_shtab_elem *elem)
911{
912 atomic_dec(&htab->count);
913 kfree_rcu(elem, rcu);
914}
915
916static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
917 void *link_raw)
918{
919 struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
920 struct bpf_shtab_elem *elem_probe, *elem = link_raw;
921 struct bpf_shtab_bucket *bucket;
922
923 WARN_ON_ONCE(!rcu_read_lock_held());
924 bucket = sock_hash_select_bucket(htab, elem->hash);
925
926	/* elem may be deleted from the map in parallel, but accessing it here
927	 * is okay since it only goes away after an RCU grace period.
928	 * However, we need to check whether it is still present.
929 */
930 spin_lock_bh(&bucket->lock);
931 elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash,
932 elem->key, map->key_size);
933 if (elem_probe && elem_probe == elem) {
934 hlist_del_rcu(&elem->node);
935 sock_map_unref(elem->sk, elem);
936 sock_hash_free_elem(htab, elem);
937 }
938 spin_unlock_bh(&bucket->lock);
939}
940
941static long sock_hash_delete_elem(struct bpf_map *map, void *key)
942{
943 struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
944 u32 hash, key_size = map->key_size;
945 struct bpf_shtab_bucket *bucket;
946 struct bpf_shtab_elem *elem;
947 int ret = -ENOENT;
948
949 hash = sock_hash_bucket_hash(key, key_size);
950 bucket = sock_hash_select_bucket(htab, hash);
951
952 spin_lock_bh(&bucket->lock);
953 elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
954 if (elem) {
955 hlist_del_rcu(&elem->node);
956 sock_map_unref(elem->sk, elem);
957 sock_hash_free_elem(htab, elem);
958 ret = 0;
959 }
960 spin_unlock_bh(&bucket->lock);
961 return ret;
962}
963
964static struct bpf_shtab_elem *sock_hash_alloc_elem(struct bpf_shtab *htab,
965 void *key, u32 key_size,
966 u32 hash, struct sock *sk,
967 struct bpf_shtab_elem *old)
968{
969 struct bpf_shtab_elem *new;
970
971 if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
972 if (!old) {
973 atomic_dec(&htab->count);
974 return ERR_PTR(-E2BIG);
975 }
976 }
977
978 new = bpf_map_kmalloc_node(&htab->map, htab->elem_size,
979 GFP_ATOMIC | __GFP_NOWARN,
980 htab->map.numa_node);
981 if (!new) {
982 atomic_dec(&htab->count);
983 return ERR_PTR(-ENOMEM);
984 }
985 memcpy(new->key, key, key_size);
986 new->sk = sk;
987 new->hash = hash;
988 return new;
989}
990
991static int sock_hash_update_common(struct bpf_map *map, void *key,
992 struct sock *sk, u64 flags)
993{
994 struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
995 u32 key_size = map->key_size, hash;
996 struct bpf_shtab_elem *elem, *elem_new;
997 struct bpf_shtab_bucket *bucket;
998 struct sk_psock_link *link;
999 struct sk_psock *psock;
1000 int ret;
1001
1002 WARN_ON_ONCE(!rcu_read_lock_held());
1003 if (unlikely(flags > BPF_EXIST))
1004 return -EINVAL;
1005
1006 link = sk_psock_init_link();
1007 if (!link)
1008 return -ENOMEM;
1009
1010 ret = sock_map_link(map, sk);
1011 if (ret < 0)
1012 goto out_free;
1013
1014 psock = sk_psock(sk);
1015 WARN_ON_ONCE(!psock);
1016
1017 hash = sock_hash_bucket_hash(key, key_size);
1018 bucket = sock_hash_select_bucket(htab, hash);
1019
1020 spin_lock_bh(&bucket->lock);
1021 elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
1022 if (elem && flags == BPF_NOEXIST) {
1023 ret = -EEXIST;
1024 goto out_unlock;
1025 } else if (!elem && flags == BPF_EXIST) {
1026 ret = -ENOENT;
1027 goto out_unlock;
1028 }
1029
1030 elem_new = sock_hash_alloc_elem(htab, key, key_size, hash, sk, elem);
1031 if (IS_ERR(elem_new)) {
1032 ret = PTR_ERR(elem_new);
1033 goto out_unlock;
1034 }
1035
1036 sock_map_add_link(psock, link, map, elem_new);
1037	/* Add the new element to the head of the list, so that a
1038	 * concurrent search finds it before the old element.
1039 */
1040 hlist_add_head_rcu(&elem_new->node, &bucket->head);
1041 if (elem) {
1042 hlist_del_rcu(&elem->node);
1043 sock_map_unref(elem->sk, elem);
1044 sock_hash_free_elem(htab, elem);
1045 }
1046 spin_unlock_bh(&bucket->lock);
1047 return 0;
1048out_unlock:
1049 spin_unlock_bh(&bucket->lock);
1050 sk_psock_put(sk, psock);
1051out_free:
1052 sk_psock_free_link(link);
1053 return ret;
1054}
1055
1056static int sock_hash_get_next_key(struct bpf_map *map, void *key,
1057 void *key_next)
1058{
1059 struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
1060 struct bpf_shtab_elem *elem, *elem_next;
1061 u32 hash, key_size = map->key_size;
1062 struct hlist_head *head;
1063 int i = 0;
1064
1065 if (!key)
1066 goto find_first_elem;
1067 hash = sock_hash_bucket_hash(key, key_size);
1068 head = &sock_hash_select_bucket(htab, hash)->head;
1069 elem = sock_hash_lookup_elem_raw(head, hash, key, key_size);
1070 if (!elem)
1071 goto find_first_elem;
1072
1073 elem_next = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&elem->node)),
1074 struct bpf_shtab_elem, node);
1075 if (elem_next) {
1076 memcpy(key_next, elem_next->key, key_size);
1077 return 0;
1078 }
1079
1080 i = hash & (htab->buckets_num - 1);
1081 i++;
1082find_first_elem:
1083 for (; i < htab->buckets_num; i++) {
1084 head = &sock_hash_select_bucket(htab, i)->head;
1085 elem_next = hlist_entry_safe(rcu_dereference(hlist_first_rcu(head)),
1086 struct bpf_shtab_elem, node);
1087 if (elem_next) {
1088 memcpy(key_next, elem_next->key, key_size);
1089 return 0;
1090 }
1091 }
1092
1093 return -ENOENT;
1094}
1095
1096static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
1097{
1098 struct bpf_shtab *htab;
1099 int i, err;
1100
1101 if (attr->max_entries == 0 ||
1102 attr->key_size == 0 ||
1103 (attr->value_size != sizeof(u32) &&
1104 attr->value_size != sizeof(u64)) ||
1105 attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
1106 return ERR_PTR(-EINVAL);
1107 if (attr->key_size > MAX_BPF_STACK)
1108 return ERR_PTR(-E2BIG);
1109
1110 htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE);
1111 if (!htab)
1112 return ERR_PTR(-ENOMEM);
1113
1114 bpf_map_init_from_attr(&htab->map, attr);
1115
1116 htab->buckets_num = roundup_pow_of_two(htab->map.max_entries);
1117 htab->elem_size = sizeof(struct bpf_shtab_elem) +
1118 round_up(htab->map.key_size, 8);
1119 if (htab->buckets_num == 0 ||
1120 htab->buckets_num > U32_MAX / sizeof(struct bpf_shtab_bucket)) {
1121 err = -EINVAL;
1122 goto free_htab;
1123 }
1124
1125 htab->buckets = bpf_map_area_alloc(htab->buckets_num *
1126 sizeof(struct bpf_shtab_bucket),
1127 htab->map.numa_node);
1128 if (!htab->buckets) {
1129 err = -ENOMEM;
1130 goto free_htab;
1131 }
1132
1133 for (i = 0; i < htab->buckets_num; i++) {
1134 INIT_HLIST_HEAD(&htab->buckets[i].head);
1135 spin_lock_init(&htab->buckets[i].lock);
1136 }
1137
1138 return &htab->map;
1139free_htab:
1140 bpf_map_area_free(htab);
1141 return ERR_PTR(err);
1142}
1143
1144static void sock_hash_free(struct bpf_map *map)
1145{
1146 struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
1147 struct bpf_shtab_bucket *bucket;
1148 struct hlist_head unlink_list;
1149 struct bpf_shtab_elem *elem;
1150 struct hlist_node *node;
1151 int i;
1152
1153	/* After the sync no updates or deletes will be in-flight, so it
1154	 * is safe to walk the map and remove entries without risking a race
1155	 * in the EEXIST update case.
1156 */
1157 synchronize_rcu();
1158 for (i = 0; i < htab->buckets_num; i++) {
1159 bucket = sock_hash_select_bucket(htab, i);
1160
1161 /* We are racing with sock_hash_delete_from_link to
1162 * enter the spin-lock critical section. Every socket on
1163		 * the list is still linked to the sockhash. Since the link
1164		 * exists, the psock exists and holds a ref to the socket.
1165		 * That lets us grab a socket ref too.
1166 */
1167 spin_lock_bh(&bucket->lock);
1168 hlist_for_each_entry(elem, &bucket->head, node)
1169 sock_hold(elem->sk);
1170 hlist_move_list(&bucket->head, &unlink_list);
1171 spin_unlock_bh(&bucket->lock);
1172
1173		/* Process removed entries out of atomic context so we
1174		 * can block on the socket lock before deleting the psock's
1175		 * link to the sockhash.
1176 */
1177 hlist_for_each_entry_safe(elem, node, &unlink_list, node) {
1178 hlist_del(&elem->node);
1179 lock_sock(elem->sk);
1180 rcu_read_lock();
1181 sock_map_unref(elem->sk, elem);
1182 rcu_read_unlock();
1183 release_sock(elem->sk);
1184 sock_put(elem->sk);
1185 sock_hash_free_elem(htab, elem);
1186 }
1187 cond_resched();
1188 }
1189
1190 /* wait for psock readers accessing its map link */
1191 synchronize_rcu();
1192
1193 bpf_map_area_free(htab->buckets);
1194 bpf_map_area_free(htab);
1195}
1196
1197static void *sock_hash_lookup_sys(struct bpf_map *map, void *key)
1198{
1199 struct sock *sk;
1200
1201 if (map->value_size != sizeof(u64))
1202 return ERR_PTR(-ENOSPC);
1203
1204 sk = __sock_hash_lookup_elem(map, key);
1205 if (!sk)
1206 return ERR_PTR(-ENOENT);
1207
1208 __sock_gen_cookie(sk);
1209 return &sk->sk_cookie;
1210}
1211
1212static void *sock_hash_lookup(struct bpf_map *map, void *key)
1213{
1214 struct sock *sk;
1215
1216 sk = __sock_hash_lookup_elem(map, key);
1217 if (!sk)
1218 return NULL;
1219 if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
1220 return NULL;
1221 return sk;
1222}
1223
1224static void sock_hash_release_progs(struct bpf_map *map)
1225{
1226 psock_progs_drop(&container_of(map, struct bpf_shtab, map)->progs);
1227}
1228
1229BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, sops,
1230 struct bpf_map *, map, void *, key, u64, flags)
1231{
1232 WARN_ON_ONCE(!rcu_read_lock_held());
1233
1234 if (likely(sock_map_sk_is_suitable(sops->sk) &&
1235 sock_map_op_okay(sops)))
1236 return sock_hash_update_common(map, key, sops->sk, flags);
1237 return -EOPNOTSUPP;
1238}
1239
1240const struct bpf_func_proto bpf_sock_hash_update_proto = {
1241 .func = bpf_sock_hash_update,
1242 .gpl_only = false,
1243 .pkt_access = true,
1244 .ret_type = RET_INTEGER,
1245 .arg1_type = ARG_PTR_TO_CTX,
1246 .arg2_type = ARG_CONST_MAP_PTR,
1247 .arg3_type = ARG_PTR_TO_MAP_KEY,
1248 .arg4_type = ARG_ANYTHING,
1249};
1250
1251BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
1252 struct bpf_map *, map, void *, key, u64, flags)
1253{
1254 struct sock *sk;
1255
1256 if (unlikely(flags & ~(BPF_F_INGRESS)))
1257 return SK_DROP;
1258
1259 sk = __sock_hash_lookup_elem(map, key);
1260 if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
1261 return SK_DROP;
1262 if ((flags & BPF_F_INGRESS) && sk_is_vsock(sk))
1263 return SK_DROP;
1264
1265 skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
1266 return SK_PASS;
1267}
1268
1269const struct bpf_func_proto bpf_sk_redirect_hash_proto = {
1270 .func = bpf_sk_redirect_hash,
1271 .gpl_only = false,
1272 .ret_type = RET_INTEGER,
1273 .arg1_type = ARG_PTR_TO_CTX,
1274 .arg2_type = ARG_CONST_MAP_PTR,
1275 .arg3_type = ARG_PTR_TO_MAP_KEY,
1276 .arg4_type = ARG_ANYTHING,
1277};
1278
1279BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
1280 struct bpf_map *, map, void *, key, u64, flags)
1281{
1282 struct sock *sk;
1283
1284 if (unlikely(flags & ~(BPF_F_INGRESS)))
1285 return SK_DROP;
1286
1287 sk = __sock_hash_lookup_elem(map, key);
1288 if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
1289 return SK_DROP;
1290 if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
1291 return SK_DROP;
1292 if (sk_is_vsock(sk))
1293 return SK_DROP;
1294
1295 msg->flags = flags;
1296 msg->sk_redir = sk;
1297 return SK_PASS;
1298}
1299
1300const struct bpf_func_proto bpf_msg_redirect_hash_proto = {
1301 .func = bpf_msg_redirect_hash,
1302 .gpl_only = false,
1303 .ret_type = RET_INTEGER,
1304 .arg1_type = ARG_PTR_TO_CTX,
1305 .arg2_type = ARG_CONST_MAP_PTR,
1306 .arg3_type = ARG_PTR_TO_MAP_KEY,
1307 .arg4_type = ARG_ANYTHING,
1308};
1309
1310struct sock_hash_seq_info {
1311 struct bpf_map *map;
1312 struct bpf_shtab *htab;
1313 u32 bucket_id;
1314};
1315
1316static void *sock_hash_seq_find_next(struct sock_hash_seq_info *info,
1317 struct bpf_shtab_elem *prev_elem)
1318{
1319 const struct bpf_shtab *htab = info->htab;
1320 struct bpf_shtab_bucket *bucket;
1321 struct bpf_shtab_elem *elem;
1322 struct hlist_node *node;
1323
1324 /* try to find next elem in the same bucket */
1325 if (prev_elem) {
1326 node = rcu_dereference(hlist_next_rcu(&prev_elem->node));
1327 elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
1328 if (elem)
1329 return elem;
1330
1331 /* no more elements, continue in the next bucket */
1332 info->bucket_id++;
1333 }
1334
1335 for (; info->bucket_id < htab->buckets_num; info->bucket_id++) {
1336 bucket = &htab->buckets[info->bucket_id];
1337 node = rcu_dereference(hlist_first_rcu(&bucket->head));
1338 elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
1339 if (elem)
1340 return elem;
1341 }
1342
1343 return NULL;
1344}
1345
1346static void *sock_hash_seq_start(struct seq_file *seq, loff_t *pos)
1347 __acquires(rcu)
1348{
1349 struct sock_hash_seq_info *info = seq->private;
1350
1351 if (*pos == 0)
1352 ++*pos;
1353
1354 /* pairs with sock_hash_seq_stop */
1355 rcu_read_lock();
1356 return sock_hash_seq_find_next(info, NULL);
1357}
1358
1359static void *sock_hash_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1360 __must_hold(rcu)
1361{
1362 struct sock_hash_seq_info *info = seq->private;
1363
1364 ++*pos;
1365 return sock_hash_seq_find_next(info, v);
1366}
1367
1368static int sock_hash_seq_show(struct seq_file *seq, void *v)
1369 __must_hold(rcu)
1370{
1371 struct sock_hash_seq_info *info = seq->private;
1372 struct bpf_iter__sockmap ctx = {};
1373 struct bpf_shtab_elem *elem = v;
1374 struct bpf_iter_meta meta;
1375 struct bpf_prog *prog;
1376
1377 meta.seq = seq;
1378 prog = bpf_iter_get_info(&meta, !elem);
1379 if (!prog)
1380 return 0;
1381
1382 ctx.meta = &meta;
1383 ctx.map = info->map;
1384 if (elem) {
1385 ctx.key = elem->key;
1386 ctx.sk = elem->sk;
1387 }
1388
1389 return bpf_iter_run_prog(prog, &ctx);
1390}
1391
1392static void sock_hash_seq_stop(struct seq_file *seq, void *v)
1393 __releases(rcu)
1394{
1395 if (!v)
1396 (void)sock_hash_seq_show(seq, NULL);
1397
1398 /* pairs with sock_hash_seq_start */
1399 rcu_read_unlock();
1400}
1401
1402static const struct seq_operations sock_hash_seq_ops = {
1403 .start = sock_hash_seq_start,
1404 .next = sock_hash_seq_next,
1405 .stop = sock_hash_seq_stop,
1406 .show = sock_hash_seq_show,
1407};
1408
1409static int sock_hash_init_seq_private(void *priv_data,
1410 struct bpf_iter_aux_info *aux)
1411{
1412 struct sock_hash_seq_info *info = priv_data;
1413
1414 bpf_map_inc_with_uref(aux->map);
1415 info->map = aux->map;
1416 info->htab = container_of(aux->map, struct bpf_shtab, map);
1417 return 0;
1418}
1419
1420static void sock_hash_fini_seq_private(void *priv_data)
1421{
1422 struct sock_hash_seq_info *info = priv_data;
1423
1424 bpf_map_put_with_uref(info->map);
1425}
1426
1427static u64 sock_hash_mem_usage(const struct bpf_map *map)
1428{
1429 struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
1430 u64 usage = sizeof(*htab);
1431
1432 usage += htab->buckets_num * sizeof(struct bpf_shtab_bucket);
1433 usage += atomic_read(&htab->count) * (u64)htab->elem_size;
1434 return usage;
1435}
1436
1437static const struct bpf_iter_seq_info sock_hash_iter_seq_info = {
1438 .seq_ops = &sock_hash_seq_ops,
1439 .init_seq_private = sock_hash_init_seq_private,
1440 .fini_seq_private = sock_hash_fini_seq_private,
1441 .seq_priv_size = sizeof(struct sock_hash_seq_info),
1442};
1443
1444BTF_ID_LIST_SINGLE(sock_hash_map_btf_ids, struct, bpf_shtab)
1445const struct bpf_map_ops sock_hash_ops = {
1446 .map_meta_equal = bpf_map_meta_equal,
1447 .map_alloc = sock_hash_alloc,
1448 .map_free = sock_hash_free,
1449 .map_get_next_key = sock_hash_get_next_key,
1450 .map_update_elem = sock_map_update_elem,
1451 .map_delete_elem = sock_hash_delete_elem,
1452 .map_lookup_elem = sock_hash_lookup,
1453 .map_lookup_elem_sys_only = sock_hash_lookup_sys,
1454 .map_release_uref = sock_hash_release_progs,
1455 .map_check_btf = map_check_no_btf,
1456 .map_mem_usage = sock_hash_mem_usage,
1457 .map_btf_id = &sock_hash_map_btf_ids[0],
1458 .iter_seq_info = &sock_hash_iter_seq_info,
1459};
1460
1461static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
1462{
1463 switch (map->map_type) {
1464 case BPF_MAP_TYPE_SOCKMAP:
1465 return &container_of(map, struct bpf_stab, map)->progs;
1466 case BPF_MAP_TYPE_SOCKHASH:
1467 return &container_of(map, struct bpf_shtab, map)->progs;
1468 default:
1469 break;
1470 }
1471
1472 return NULL;
1473}
1474
1475static int sock_map_prog_link_lookup(struct bpf_map *map, struct bpf_prog ***pprog,
1476 struct bpf_link ***plink, u32 which)
1477{
1478 struct sk_psock_progs *progs = sock_map_progs(map);
1479 struct bpf_prog **cur_pprog;
1480 struct bpf_link **cur_plink;
1481
1482 if (!progs)
1483 return -EOPNOTSUPP;
1484
1485 switch (which) {
1486 case BPF_SK_MSG_VERDICT:
1487 cur_pprog = &progs->msg_parser;
1488 cur_plink = &progs->msg_parser_link;
1489 break;
1490#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
1491 case BPF_SK_SKB_STREAM_PARSER:
1492 cur_pprog = &progs->stream_parser;
1493 cur_plink = &progs->stream_parser_link;
1494 break;
1495#endif
1496 case BPF_SK_SKB_STREAM_VERDICT:
1497 if (progs->skb_verdict)
1498 return -EBUSY;
1499 cur_pprog = &progs->stream_verdict;
1500 cur_plink = &progs->stream_verdict_link;
1501 break;
1502 case BPF_SK_SKB_VERDICT:
1503 if (progs->stream_verdict)
1504 return -EBUSY;
1505 cur_pprog = &progs->skb_verdict;
1506 cur_plink = &progs->skb_verdict_link;
1507 break;
1508 default:
1509 return -EOPNOTSUPP;
1510 }
1511
1512 *pprog = cur_pprog;
1513 if (plink)
1514 *plink = cur_plink;
1515 return 0;
1516}
1517
1518/* Handle the following four cases:
1519 * prog_attach: prog != NULL, old == NULL, link == NULL
1520 * prog_detach: prog == NULL, old != NULL, link == NULL
1521 * link_attach: prog != NULL, old == NULL, link != NULL
1522 * link_detach: prog == NULL, old != NULL, link != NULL
1523 */
1524static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
1525 struct bpf_prog *old, struct bpf_link *link,
1526 u32 which)
1527{
1528 struct bpf_prog **pprog;
1529 struct bpf_link **plink;
1530 int ret;
1531
1532 ret = sock_map_prog_link_lookup(map, &pprog, &plink, which);
1533 if (ret)
1534 return ret;
1535
1536	/* For prog_attach/prog_detach/link_attach, return an error if a
1537	 * bpf_link is already attached at that attach point.
1538	 */
1539 if ((!link || prog) && *plink)
1540 return -EBUSY;
1541
1542 if (old) {
1543 ret = psock_replace_prog(pprog, prog, old);
1544 if (!ret)
1545 *plink = NULL;
1546 } else {
1547 psock_set_prog(pprog, prog);
1548 if (link)
1549 *plink = link;
1550 }
1551
1552 return ret;
1553}
1554
1555int sock_map_bpf_prog_query(const union bpf_attr *attr,
1556 union bpf_attr __user *uattr)
1557{
1558 __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
1559 u32 prog_cnt = 0, flags = 0;
1560 struct bpf_prog **pprog;
1561 struct bpf_prog *prog;
1562 struct bpf_map *map;
1563 u32 id = 0;
1564 int ret;
1565
1566 if (attr->query.query_flags)
1567 return -EINVAL;
1568
1569 CLASS(fd, f)(attr->target_fd);
1570 map = __bpf_map_get(f);
1571 if (IS_ERR(map))
1572 return PTR_ERR(map);
1573
1574 rcu_read_lock();
1575
1576 ret = sock_map_prog_link_lookup(map, &pprog, NULL, attr->query.attach_type);
1577 if (ret)
1578 goto end;
1579
1580 prog = *pprog;
1581 prog_cnt = !prog ? 0 : 1;
1582
1583 if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)
1584 goto end;
1585
1586	/* We do not hold the refcnt; the bpf prog may be released
1587	 * asynchronously, in which case its id is set to 0.
1588	 */
1589 id = data_race(prog->aux->id);
1590 if (id == 0)
1591 prog_cnt = 0;
1592
1593end:
1594 rcu_read_unlock();
1595
1596 if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)) ||
1597 (id != 0 && copy_to_user(prog_ids, &id, sizeof(u32))) ||
1598 copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
1599 ret = -EFAULT;
1600
1601 return ret;
1602}
1603
1604static void sock_map_unlink(struct sock *sk, struct sk_psock_link *link)
1605{
1606 switch (link->map->map_type) {
1607 case BPF_MAP_TYPE_SOCKMAP:
1608 return sock_map_delete_from_link(link->map, sk,
1609 link->link_raw);
1610 case BPF_MAP_TYPE_SOCKHASH:
1611 return sock_hash_delete_from_link(link->map, sk,
1612 link->link_raw);
1613 default:
1614 break;
1615 }
1616}
1617
1618static void sock_map_remove_links(struct sock *sk, struct sk_psock *psock)
1619{
1620 struct sk_psock_link *link;
1621
1622 while ((link = sk_psock_link_pop(psock))) {
1623 sock_map_unlink(sk, link);
1624 sk_psock_free_link(link);
1625 }
1626}
1627
1628void sock_map_unhash(struct sock *sk)
1629{
1630 void (*saved_unhash)(struct sock *sk);
1631 struct sk_psock *psock;
1632
1633 rcu_read_lock();
1634 psock = sk_psock(sk);
1635 if (unlikely(!psock)) {
1636 rcu_read_unlock();
1637 saved_unhash = READ_ONCE(sk->sk_prot)->unhash;
1638 } else {
1639 saved_unhash = psock->saved_unhash;
1640 sock_map_remove_links(sk, psock);
1641 rcu_read_unlock();
1642 }
1643 if (WARN_ON_ONCE(saved_unhash == sock_map_unhash))
1644 return;
1645 if (saved_unhash)
1646 saved_unhash(sk);
1647}
1648EXPORT_SYMBOL_GPL(sock_map_unhash);
1649
1650void sock_map_destroy(struct sock *sk)
1651{
1652 void (*saved_destroy)(struct sock *sk);
1653 struct sk_psock *psock;
1654
1655 rcu_read_lock();
1656 psock = sk_psock_get(sk);
1657 if (unlikely(!psock)) {
1658 rcu_read_unlock();
1659 saved_destroy = READ_ONCE(sk->sk_prot)->destroy;
1660 } else {
1661 saved_destroy = psock->saved_destroy;
1662 sock_map_remove_links(sk, psock);
1663 rcu_read_unlock();
1664 sk_psock_stop(psock);
1665 sk_psock_put(sk, psock);
1666 }
1667 if (WARN_ON_ONCE(saved_destroy == sock_map_destroy))
1668 return;
1669 if (saved_destroy)
1670 saved_destroy(sk);
1671}
1672EXPORT_SYMBOL_GPL(sock_map_destroy);
1673
1674void sock_map_close(struct sock *sk, long timeout)
1675{
1676 void (*saved_close)(struct sock *sk, long timeout);
1677 struct sk_psock *psock;
1678
1679 lock_sock(sk);
1680 rcu_read_lock();
1681 psock = sk_psock(sk);
1682 if (likely(psock)) {
1683 saved_close = psock->saved_close;
1684 sock_map_remove_links(sk, psock);
1685 psock = sk_psock_get(sk);
1686 if (unlikely(!psock))
1687 goto no_psock;
1688 rcu_read_unlock();
1689 sk_psock_stop(psock);
1690 release_sock(sk);
1691 cancel_delayed_work_sync(&psock->work);
1692 sk_psock_put(sk, psock);
1693 } else {
1694 saved_close = READ_ONCE(sk->sk_prot)->close;
1695no_psock:
1696 rcu_read_unlock();
1697 release_sock(sk);
1698 }
1699
1700 /* Make sure we do not recurse. This is a bug.
1701 * Leak the socket instead of crashing on a stack overflow.
1702 */
1703 if (WARN_ON_ONCE(saved_close == sock_map_close))
1704 return;
1705 saved_close(sk, timeout);
1706}
1707EXPORT_SYMBOL_GPL(sock_map_close);
1708
1709struct sockmap_link {
1710 struct bpf_link link;
1711 struct bpf_map *map;
1712 enum bpf_attach_type attach_type;
1713};
1714
1715static void sock_map_link_release(struct bpf_link *link)
1716{
1717 struct sockmap_link *sockmap_link = container_of(link, struct sockmap_link, link);
1718
1719 mutex_lock(&sockmap_mutex);
1720 if (!sockmap_link->map)
1721 goto out;
1722
1723 WARN_ON_ONCE(sock_map_prog_update(sockmap_link->map, NULL, link->prog, link,
1724 sockmap_link->attach_type));
1725
1726 bpf_map_put_with_uref(sockmap_link->map);
1727 sockmap_link->map = NULL;
1728out:
1729 mutex_unlock(&sockmap_mutex);
1730}
1731
1732static int sock_map_link_detach(struct bpf_link *link)
1733{
1734 sock_map_link_release(link);
1735 return 0;
1736}
1737
1738static void sock_map_link_dealloc(struct bpf_link *link)
1739{
1740 kfree(link);
1741}
1742
1743/* Handle the following two cases:
1744 * case 1: link != NULL, prog != NULL, old != NULL
1745 * case 2: link != NULL, prog != NULL, old == NULL
1746 */
1747static int sock_map_link_update_prog(struct bpf_link *link,
1748 struct bpf_prog *prog,
1749 struct bpf_prog *old)
1750{
1751 const struct sockmap_link *sockmap_link = container_of(link, struct sockmap_link, link);
1752 struct bpf_prog **pprog, *old_link_prog;
1753 struct bpf_link **plink;
1754 int ret = 0;
1755
1756 mutex_lock(&sockmap_mutex);
1757
1758 /* If old prog is not NULL, ensure old prog is the same as link->prog. */
1759 if (old && link->prog != old) {
1760 ret = -EPERM;
1761 goto out;
1762 }
1763 /* Ensure link->prog has the same type/attach_type as the new prog. */
1764 if (link->prog->type != prog->type ||
1765 link->prog->expected_attach_type != prog->expected_attach_type) {
1766 ret = -EINVAL;
1767 goto out;
1768 }
1769 if (!sockmap_link->map) {
1770 ret = -ENOLINK;
1771 goto out;
1772 }
1773
1774 ret = sock_map_prog_link_lookup(sockmap_link->map, &pprog, &plink,
1775 sockmap_link->attach_type);
1776 if (ret)
1777 goto out;
1778
1779 /* return error if the stored bpf_link does not match the incoming bpf_link. */
1780 if (link != *plink) {
1781 ret = -EBUSY;
1782 goto out;
1783 }
1784
1785 if (old) {
1786 ret = psock_replace_prog(pprog, prog, old);
1787 if (ret)
1788 goto out;
1789 } else {
1790 psock_set_prog(pprog, prog);
1791 }
1792
1793 bpf_prog_inc(prog);
1794 old_link_prog = xchg(&link->prog, prog);
1795 bpf_prog_put(old_link_prog);
1796
1797out:
1798 mutex_unlock(&sockmap_mutex);
1799 return ret;
1800}
1801
1802static u32 sock_map_link_get_map_id(const struct sockmap_link *sockmap_link)
1803{
1804 u32 map_id = 0;
1805
1806 mutex_lock(&sockmap_mutex);
1807 if (sockmap_link->map)
1808 map_id = sockmap_link->map->id;
1809 mutex_unlock(&sockmap_mutex);
1810 return map_id;
1811}
1812
1813static int sock_map_link_fill_info(const struct bpf_link *link,
1814 struct bpf_link_info *info)
1815{
1816 const struct sockmap_link *sockmap_link = container_of(link, struct sockmap_link, link);
1817 u32 map_id = sock_map_link_get_map_id(sockmap_link);
1818
1819 info->sockmap.map_id = map_id;
1820 info->sockmap.attach_type = sockmap_link->attach_type;
1821 return 0;
1822}
1823
1824static void sock_map_link_show_fdinfo(const struct bpf_link *link,
1825 struct seq_file *seq)
1826{
1827 const struct sockmap_link *sockmap_link = container_of(link, struct sockmap_link, link);
1828 u32 map_id = sock_map_link_get_map_id(sockmap_link);
1829
1830 seq_printf(seq, "map_id:\t%u\n", map_id);
1831 seq_printf(seq, "attach_type:\t%u\n", sockmap_link->attach_type);
1832}
1833
1834static const struct bpf_link_ops sock_map_link_ops = {
1835 .release = sock_map_link_release,
1836 .dealloc = sock_map_link_dealloc,
1837 .detach = sock_map_link_detach,
1838 .update_prog = sock_map_link_update_prog,
1839 .fill_link_info = sock_map_link_fill_info,
1840 .show_fdinfo = sock_map_link_show_fdinfo,
1841};
1842
1843int sock_map_link_create(const union bpf_attr *attr, struct bpf_prog *prog)
1844{
1845 struct bpf_link_primer link_primer;
1846 struct sockmap_link *sockmap_link;
1847 enum bpf_attach_type attach_type;
1848 struct bpf_map *map;
1849 int ret;
1850
1851 if (attr->link_create.flags)
1852 return -EINVAL;
1853
1854 map = bpf_map_get_with_uref(attr->link_create.target_fd);
1855 if (IS_ERR(map))
1856 return PTR_ERR(map);
1857 if (map->map_type != BPF_MAP_TYPE_SOCKMAP && map->map_type != BPF_MAP_TYPE_SOCKHASH) {
1858 ret = -EINVAL;
1859 goto out;
1860 }
1861
1862 sockmap_link = kzalloc(sizeof(*sockmap_link), GFP_USER);
1863 if (!sockmap_link) {
1864 ret = -ENOMEM;
1865 goto out;
1866 }
1867
1868 attach_type = attr->link_create.attach_type;
1869 bpf_link_init(&sockmap_link->link, BPF_LINK_TYPE_SOCKMAP, &sock_map_link_ops, prog);
1870 sockmap_link->map = map;
1871 sockmap_link->attach_type = attach_type;
1872
1873 ret = bpf_link_prime(&sockmap_link->link, &link_primer);
1874 if (ret) {
1875 kfree(sockmap_link);
1876 goto out;
1877 }
1878
1879 mutex_lock(&sockmap_mutex);
1880 ret = sock_map_prog_update(map, prog, NULL, &sockmap_link->link, attach_type);
1881 mutex_unlock(&sockmap_mutex);
1882 if (ret) {
1883 bpf_link_cleanup(&link_primer);
1884 goto out;
1885 }
1886
1887	/* Take an extra refcnt on the prog because psock_replace_prog() and
1888	 * psock_set_prog() drop a reference when the old prog is replaced.
1889	 *
1890	 * Strictly speaking this is not required since the bpf_link already
1891	 * holds a reference, but the extra refcnt keeps the replace/set
1892	 * paths simple.
1893	 */
1894 bpf_prog_inc(prog);
1895
1896 return bpf_link_settle(&link_primer);
1897
1898out:
1899 bpf_map_put_with_uref(map);
1900 return ret;
1901}
1902
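/*
 * A hedged user-space sketch of attaching through a bpf_link (assumed
 * libbpf API usage; prog_fd and map_fd are hypothetical): the link
 * target is the map itself, and closing or detaching the link clears
 * the attached program via sock_map_link_release().
 *
 *   #include <bpf/bpf.h>
 *
 *   static int attach_verdict(int prog_fd, int map_fd)
 *   {
 *           // Returns a link fd on success, a negative error otherwise.
 *           return bpf_link_create(prog_fd, map_fd,
 *                                  BPF_SK_SKB_STREAM_VERDICT, NULL);
 *   }
 */
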
1903static int sock_map_iter_attach_target(struct bpf_prog *prog,
1904 union bpf_iter_link_info *linfo,
1905 struct bpf_iter_aux_info *aux)
1906{
1907 struct bpf_map *map;
1908 int err = -EINVAL;
1909
1910 if (!linfo->map.map_fd)
1911 return -EBADF;
1912
1913 map = bpf_map_get_with_uref(linfo->map.map_fd);
1914 if (IS_ERR(map))
1915 return PTR_ERR(map);
1916
1917 if (map->map_type != BPF_MAP_TYPE_SOCKMAP &&
1918 map->map_type != BPF_MAP_TYPE_SOCKHASH)
1919 goto put_map;
1920
1921 if (prog->aux->max_rdonly_access > map->key_size) {
1922 err = -EACCES;
1923 goto put_map;
1924 }
1925
1926 aux->map = map;
1927 return 0;
1928
1929put_map:
1930 bpf_map_put_with_uref(map);
1931 return err;
1932}
1933
1934static void sock_map_iter_detach_target(struct bpf_iter_aux_info *aux)
1935{
1936 bpf_map_put_with_uref(aux->map);
1937}
1938
1939static struct bpf_iter_reg sock_map_iter_reg = {
1940 .target = "sockmap",
1941 .attach_target = sock_map_iter_attach_target,
1942 .detach_target = sock_map_iter_detach_target,
1943 .show_fdinfo = bpf_iter_map_show_fdinfo,
1944 .fill_link_info = bpf_iter_map_fill_link_info,
1945 .ctx_arg_info_size = 2,
1946 .ctx_arg_info = {
1947 { offsetof(struct bpf_iter__sockmap, key),
1948 PTR_TO_BUF | PTR_MAYBE_NULL | MEM_RDONLY },
1949 { offsetof(struct bpf_iter__sockmap, sk),
1950 PTR_TO_BTF_ID_OR_NULL },
1951 },
1952};
1953
1954static int __init bpf_sockmap_iter_init(void)
1955{
1956 sock_map_iter_reg.ctx_arg_info[1].btf_id =
1957 btf_sock_ids[BTF_SOCK_TYPE_SOCK];
1958 return bpf_iter_reg_target(&sock_map_iter_reg);
1959}
1960late_initcall(bpf_sockmap_iter_init);