// SPDX-License-Identifier: GPL-2.0-only
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/sunrpc/gss_api.h>
#include <linux/sunrpc/addr.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include <linux/hash.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <linux/kernel.h>
#include <linux/user_namespace.h>
#include <trace/events/sunrpc.h>

#define RPCDBG_FACILITY RPCDBG_AUTH

#include "netns.h"

/*
 * AUTHUNIX and AUTHNULL credentials are both handled here.
 * AUTHNULL is treated just like AUTHUNIX except that the uid/gid
 * are always nobody (-2). i.e. we do the same IP address checks for
 * AUTHNULL as for AUTHUNIX, and that is done here.
 */


struct unix_domain {
	struct auth_domain h;
	/* other stuff later */
};

extern struct auth_ops svcauth_null;
extern struct auth_ops svcauth_unix;
extern struct auth_ops svcauth_tls;

static void svcauth_unix_domain_release_rcu(struct rcu_head *head)
{
	struct auth_domain *dom = container_of(head, struct auth_domain, rcu_head);
	struct unix_domain *ud = container_of(dom, struct unix_domain, h);

	kfree(dom->name);
	kfree(ud);
}

static void svcauth_unix_domain_release(struct auth_domain *dom)
{
	call_rcu(&dom->rcu_head, svcauth_unix_domain_release_rcu);
}

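/**
 * unix_domain_find - find or create an AUTH_UNIX auth_domain by name
 * @name: name of the domain to look up
 *
 * Returns a referenced auth_domain on success.  Returns NULL if the
 * name is already bound to a different flavour or if allocation fails.
 */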
struct auth_domain *unix_domain_find(char *name)
{
	struct auth_domain *rv;
	struct unix_domain *new = NULL;

	rv = auth_domain_find(name);
	while(1) {
		if (rv) {
			if (new && rv != &new->h)
				svcauth_unix_domain_release(&new->h);

			if (rv->flavour != &svcauth_unix) {
				auth_domain_put(rv);
				return NULL;
			}
			return rv;
		}

		new = kmalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL)
			return NULL;
		kref_init(&new->h.ref);
		new->h.name = kstrdup(name, GFP_KERNEL);
		if (new->h.name == NULL) {
			kfree(new);
			return NULL;
		}
		new->h.flavour = &svcauth_unix;
		rv = auth_domain_lookup(name, &new->h);
	}
}
EXPORT_SYMBOL_GPL(unix_domain_find);


/**************************************************
 * cache for IP address to unix_domain
 * as needed by AUTH_UNIX
 */
#define IP_HASHBITS 8
#define IP_HASHMAX (1<<IP_HASHBITS)

struct ip_map {
	struct cache_head h;
	char m_class[8]; /* e.g. "nfsd" */
	struct in6_addr m_addr;
	struct unix_domain *m_client;
	struct rcu_head m_rcu;
};

static void ip_map_put(struct kref *kref)
{
	struct cache_head *item = container_of(kref, struct cache_head, ref);
	struct ip_map *im = container_of(item, struct ip_map, h);

	if (test_bit(CACHE_VALID, &item->flags) &&
	    !test_bit(CACHE_NEGATIVE, &item->flags))
		auth_domain_put(&im->m_client->h);
	kfree_rcu(im, m_rcu);
}

static inline int hash_ip6(const struct in6_addr *ip)
{
	return hash_32(ipv6_addr_hash(ip), IP_HASHBITS);
}
static int ip_map_match(struct cache_head *corig, struct cache_head *cnew)
{
	struct ip_map *orig = container_of(corig, struct ip_map, h);
	struct ip_map *new = container_of(cnew, struct ip_map, h);
	return strcmp(orig->m_class, new->m_class) == 0 &&
	       ipv6_addr_equal(&orig->m_addr, &new->m_addr);
}
static void ip_map_init(struct cache_head *cnew, struct cache_head *citem)
{
	struct ip_map *new = container_of(cnew, struct ip_map, h);
	struct ip_map *item = container_of(citem, struct ip_map, h);

	strcpy(new->m_class, item->m_class);
	new->m_addr = item->m_addr;
}
static void update(struct cache_head *cnew, struct cache_head *citem)
{
	struct ip_map *new = container_of(cnew, struct ip_map, h);
	struct ip_map *item = container_of(citem, struct ip_map, h);

	kref_get(&item->m_client->h.ref);
	new->m_client = item->m_client;
}
static struct cache_head *ip_map_alloc(void)
{
	struct ip_map *i = kmalloc(sizeof(*i), GFP_KERNEL);
	if (i)
		return &i->h;
	else
		return NULL;
}

static int ip_map_upcall(struct cache_detail *cd, struct cache_head *h)
{
	return sunrpc_cache_pipe_upcall(cd, h);
}

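/*
 * Compose the upcall request written to the cache channel:
 * "<class> <address>\n".  A v4-mapped address is rendered in IPv4
 * dotted-quad form, anything else in IPv6 presentation form.
 */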
static void ip_map_request(struct cache_detail *cd,
			   struct cache_head *h,
			   char **bpp, int *blen)
{
	char text_addr[40];
	struct ip_map *im = container_of(h, struct ip_map, h);

	if (ipv6_addr_v4mapped(&(im->m_addr))) {
		snprintf(text_addr, 20, "%pI4", &im->m_addr.s6_addr32[3]);
	} else {
		snprintf(text_addr, 40, "%pI6", &im->m_addr);
	}
	qword_add(bpp, blen, im->m_class);
	qword_add(bpp, blen, text_addr);
	(*bpp)[-1] = '\n';
}

static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class, struct in6_addr *addr);
static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm, struct unix_domain *udom, time64_t expiry);

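/*
 * Parse a reply written to the auth.unix.ip channel file.  The line
 * has the form
 *
 *	<class> <address> <expiry> [<domainname>]
 *
 * for example (illustrative values only):
 *
 *	nfsd 192.0.2.1 1700000000 example.com
 *
 * An empty domainname marks the entry as NEGATIVE.
 */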
static int ip_map_parse(struct cache_detail *cd,
			char *mesg, int mlen)
{
	/* class ipaddress [domainname] */
	/* should be safe just to use the start of the input buffer
	 * for scratch: */
	char *buf = mesg;
	int len;
	char class[8];
	union {
		struct sockaddr sa;
		struct sockaddr_in s4;
		struct sockaddr_in6 s6;
	} address;
	struct sockaddr_in6 sin6;
	int err;

	struct ip_map *ipmp;
	struct auth_domain *dom;
	time64_t expiry;

	if (mesg[mlen-1] != '\n')
		return -EINVAL;
	mesg[mlen-1] = 0;

	/* class */
	len = qword_get(&mesg, class, sizeof(class));
	if (len <= 0) return -EINVAL;

	/* ip address */
	len = qword_get(&mesg, buf, mlen);
	if (len <= 0) return -EINVAL;

	if (rpc_pton(cd->net, buf, len, &address.sa, sizeof(address)) == 0)
		return -EINVAL;
	switch (address.sa.sa_family) {
	case AF_INET:
		/* Form a mapped IPv4 address in sin6 */
		sin6.sin6_family = AF_INET6;
		ipv6_addr_set_v4mapped(address.s4.sin_addr.s_addr,
				       &sin6.sin6_addr);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		memcpy(&sin6, &address.s6, sizeof(sin6));
		break;
#endif
	default:
		return -EINVAL;
	}

	err = get_expiry(&mesg, &expiry);
	if (err)
		return err;

	/* domainname, or empty for NEGATIVE */
	len = qword_get(&mesg, buf, mlen);
	if (len < 0) return -EINVAL;

	if (len) {
		dom = unix_domain_find(buf);
		if (dom == NULL)
			return -ENOENT;
	} else
		dom = NULL;

	/* IPv6 scope IDs are ignored for now */
	ipmp = __ip_map_lookup(cd, class, &sin6.sin6_addr);
	if (ipmp) {
		err = __ip_map_update(cd, ipmp,
			     container_of(dom, struct unix_domain, h),
			     expiry);
	} else
		err = -ENOMEM;

	if (dom)
		auth_domain_put(dom);

	cache_flush();
	return err;
}

static int ip_map_show(struct seq_file *m,
		       struct cache_detail *cd,
		       struct cache_head *h)
{
	struct ip_map *im;
	struct in6_addr addr;
	char *dom = "-no-domain-";

	if (h == NULL) {
		seq_puts(m, "#class IP domain\n");
		return 0;
	}
	im = container_of(h, struct ip_map, h);
	/* class addr domain */
	addr = im->m_addr;

	if (test_bit(CACHE_VALID, &h->flags) &&
	    !test_bit(CACHE_NEGATIVE, &h->flags))
		dom = im->m_client->h.name;

	if (ipv6_addr_v4mapped(&addr)) {
		seq_printf(m, "%s %pI4 %s\n",
			im->m_class, &addr.s6_addr32[3], dom);
	} else {
		seq_printf(m, "%s %pI6 %s\n", im->m_class, &addr, dom);
	}
	return 0;
}


static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class,
		struct in6_addr *addr)
{
	struct ip_map ip;
	struct cache_head *ch;

	strcpy(ip.m_class, class);
	ip.m_addr = *addr;
	ch = sunrpc_cache_lookup_rcu(cd, &ip.h,
				     hash_str(class, IP_HASHBITS) ^
				     hash_ip6(addr));

	if (ch)
		return container_of(ch, struct ip_map, h);
	else
		return NULL;
}

static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm,
		struct unix_domain *udom, time64_t expiry)
{
	struct ip_map ip;
	struct cache_head *ch;

	ip.m_client = udom;
	ip.h.flags = 0;
	if (!udom)
		set_bit(CACHE_NEGATIVE, &ip.h.flags);
	ip.h.expiry_time = expiry;
	ch = sunrpc_cache_update(cd, &ip.h, &ipm->h,
				 hash_str(ipm->m_class, IP_HASHBITS) ^
				 hash_ip6(&ipm->m_addr));
	if (!ch)
		return -ENOMEM;
	cache_put(ch, cd);
	return 0;
}

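/**
 * svcauth_unix_purge - flush the entire auth.unix.ip cache
 * @net: network namespace whose ip_map cache should be purged
 */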
void svcauth_unix_purge(struct net *net)
{
	struct sunrpc_net *sn;

	sn = net_generic(net, sunrpc_net_id);
	cache_purge(sn->ip_map_cache);
}
EXPORT_SYMBOL_GPL(svcauth_unix_purge);

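/*
 * Each transport can remember the last ip_map it resolved in
 * xpt_auth_cache so that repeated requests from the same connection
 * avoid a full cache lookup.  ip_map_cached_get() returns that entry
 * with a reference held (or NULL if it is missing or expired);
 * ip_map_cached_put() either installs the entry on the transport or
 * drops the reference.
 */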
static inline struct ip_map *
ip_map_cached_get(struct svc_xprt *xprt)
{
	struct ip_map *ipm = NULL;
	struct sunrpc_net *sn;

	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
		spin_lock(&xprt->xpt_lock);
		ipm = xprt->xpt_auth_cache;
		if (ipm != NULL) {
			sn = net_generic(xprt->xpt_net, sunrpc_net_id);
			if (cache_is_expired(sn->ip_map_cache, &ipm->h)) {
				/*
				 * The entry has been invalidated since it was
				 * remembered, e.g. by a second mount from the
				 * same IP address.
				 */
				xprt->xpt_auth_cache = NULL;
				spin_unlock(&xprt->xpt_lock);
				cache_put(&ipm->h, sn->ip_map_cache);
				return NULL;
			}
			cache_get(&ipm->h);
		}
		spin_unlock(&xprt->xpt_lock);
	}
	return ipm;
}

static inline void
ip_map_cached_put(struct svc_xprt *xprt, struct ip_map *ipm)
{
	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
		spin_lock(&xprt->xpt_lock);
		if (xprt->xpt_auth_cache == NULL) {
			/* newly cached, keep the reference */
			xprt->xpt_auth_cache = ipm;
			ipm = NULL;
		}
		spin_unlock(&xprt->xpt_lock);
	}
	if (ipm) {
		struct sunrpc_net *sn;

		sn = net_generic(xprt->xpt_net, sunrpc_net_id);
		cache_put(&ipm->h, sn->ip_map_cache);
	}
}

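/**
 * svcauth_unix_info_release - release a transport's cached ip_map
 * @xpt: transport whose cached authentication entry is being released
 *
 * Drops the reference held by @xpt->xpt_auth_cache, if any.
 */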
void
svcauth_unix_info_release(struct svc_xprt *xpt)
{
	struct ip_map *ipm;

	ipm = xpt->xpt_auth_cache;
	if (ipm != NULL) {
		struct sunrpc_net *sn;

		sn = net_generic(xpt->xpt_net, sunrpc_net_id);
		cache_put(&ipm->h, sn->ip_map_cache);
	}
}

/****************************************************************************
 * auth.unix.gid cache
 * simple cache to map a UID to a list of GIDs
 * because AUTH_UNIX aka AUTH_SYS has a max of UNX_NGROUPS
 */
#define GID_HASHBITS 8
#define GID_HASHMAX (1<<GID_HASHBITS)

struct unix_gid {
	struct cache_head h;
	kuid_t uid;
	struct group_info *gi;
	struct rcu_head rcu;
};

static int unix_gid_hash(kuid_t uid)
{
	return hash_long(from_kuid(&init_user_ns, uid), GID_HASHBITS);
}

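/*
 * unix_gid entries are freed only after an RCU grace period so that
 * lock-less readers found via sunrpc_cache_lookup_rcu() can finish
 * with them; the group_info reference is dropped at the same point.
 */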
static void unix_gid_free(struct rcu_head *rcu)
{
	struct unix_gid *ug = container_of(rcu, struct unix_gid, rcu);
	struct cache_head *item = &ug->h;

	if (test_bit(CACHE_VALID, &item->flags) &&
	    !test_bit(CACHE_NEGATIVE, &item->flags))
		put_group_info(ug->gi);
	kfree(ug);
}

static void unix_gid_put(struct kref *kref)
{
	struct cache_head *item = container_of(kref, struct cache_head, ref);
	struct unix_gid *ug = container_of(item, struct unix_gid, h);

	call_rcu(&ug->rcu, unix_gid_free);
}

static int unix_gid_match(struct cache_head *corig, struct cache_head *cnew)
{
	struct unix_gid *orig = container_of(corig, struct unix_gid, h);
	struct unix_gid *new = container_of(cnew, struct unix_gid, h);
	return uid_eq(orig->uid, new->uid);
}
static void unix_gid_init(struct cache_head *cnew, struct cache_head *citem)
{
	struct unix_gid *new = container_of(cnew, struct unix_gid, h);
	struct unix_gid *item = container_of(citem, struct unix_gid, h);
	new->uid = item->uid;
}
static void unix_gid_update(struct cache_head *cnew, struct cache_head *citem)
{
	struct unix_gid *new = container_of(cnew, struct unix_gid, h);
	struct unix_gid *item = container_of(citem, struct unix_gid, h);

	get_group_info(item->gi);
	new->gi = item->gi;
}
static struct cache_head *unix_gid_alloc(void)
{
	struct unix_gid *g = kmalloc(sizeof(*g), GFP_KERNEL);
	if (g)
		return &g->h;
	else
		return NULL;
}

static int unix_gid_upcall(struct cache_detail *cd, struct cache_head *h)
{
	return sunrpc_cache_pipe_upcall_timeout(cd, h);
}

static void unix_gid_request(struct cache_detail *cd,
			     struct cache_head *h,
			     char **bpp, int *blen)
{
	char tuid[20];
	struct unix_gid *ug = container_of(h, struct unix_gid, h);

	snprintf(tuid, 20, "%u", from_kuid(&init_user_ns, ug->uid));
	qword_add(bpp, blen, tuid);
	(*bpp)[-1] = '\n';
}

static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, kuid_t uid);

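/*
 * Parse a reply written to the auth.unix.gid channel file.  The line
 * has the form
 *
 *	<uid> <expiry> <Ngids> <gid0> <gid1> ... <gidN-1>
 *
 * for example (illustrative values only):
 *
 *	1000 1700000000 2 100 101
 */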
static int unix_gid_parse(struct cache_detail *cd,
			char *mesg, int mlen)
{
	/* uid expiry Ngid gid0 gid1 ... gidN-1 */
	int id;
	kuid_t uid;
	int gids;
	int rv;
	int i;
	int err;
	time64_t expiry;
	struct unix_gid ug, *ugp;

	if (mesg[mlen - 1] != '\n')
		return -EINVAL;
	mesg[mlen-1] = 0;

	rv = get_int(&mesg, &id);
	if (rv)
		return -EINVAL;
	uid = make_kuid(current_user_ns(), id);
	ug.uid = uid;

	err = get_expiry(&mesg, &expiry);
	if (err)
		return err;

	rv = get_int(&mesg, &gids);
	if (rv || gids < 0 || gids > 8192)
		return -EINVAL;

	ug.gi = groups_alloc(gids);
	if (!ug.gi)
		return -ENOMEM;

	for (i = 0 ; i < gids ; i++) {
		int gid;
		kgid_t kgid;
		rv = get_int(&mesg, &gid);
		err = -EINVAL;
		if (rv)
			goto out;
		kgid = make_kgid(current_user_ns(), gid);
		if (!gid_valid(kgid))
			goto out;
		ug.gi->gid[i] = kgid;
	}

	groups_sort(ug.gi);
	ugp = unix_gid_lookup(cd, uid);
	if (ugp) {
		struct cache_head *ch;
		ug.h.flags = 0;
		ug.h.expiry_time = expiry;
		ch = sunrpc_cache_update(cd,
					 &ug.h, &ugp->h,
					 unix_gid_hash(uid));
		if (!ch)
			err = -ENOMEM;
		else {
			err = 0;
			cache_put(ch, cd);
		}
	} else
		err = -ENOMEM;
 out:
	if (ug.gi)
		put_group_info(ug.gi);
	return err;
}

static int unix_gid_show(struct seq_file *m,
			 struct cache_detail *cd,
			 struct cache_head *h)
{
	struct user_namespace *user_ns = m->file->f_cred->user_ns;
	struct unix_gid *ug;
	int i;
	int glen;

	if (h == NULL) {
		seq_puts(m, "#uid cnt: gids...\n");
		return 0;
	}
	ug = container_of(h, struct unix_gid, h);
	if (test_bit(CACHE_VALID, &h->flags) &&
	    !test_bit(CACHE_NEGATIVE, &h->flags))
		glen = ug->gi->ngroups;
	else
		glen = 0;

	seq_printf(m, "%u %d:", from_kuid_munged(user_ns, ug->uid), glen);
	for (i = 0; i < glen; i++)
		seq_printf(m, " %d", from_kgid_munged(user_ns, ug->gi->gid[i]));
	seq_printf(m, "\n");
	return 0;
}

static const struct cache_detail unix_gid_cache_template = {
	.owner = THIS_MODULE,
	.hash_size = GID_HASHMAX,
	.name = "auth.unix.gid",
	.cache_put = unix_gid_put,
	.cache_upcall = unix_gid_upcall,
	.cache_request = unix_gid_request,
	.cache_parse = unix_gid_parse,
	.cache_show = unix_gid_show,
	.match = unix_gid_match,
	.init = unix_gid_init,
	.update = unix_gid_update,
	.alloc = unix_gid_alloc,
};

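/**
 * unix_gid_cache_create - create and register a per-net auth.unix.gid cache
 * @net: network namespace to attach the cache to
 *
 * Returns zero on success or a negative errno.
 */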
int unix_gid_cache_create(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd;
	int err;

	cd = cache_create_net(&unix_gid_cache_template, net);
	if (IS_ERR(cd))
		return PTR_ERR(cd);
	err = cache_register_net(cd, net);
	if (err) {
		cache_destroy_net(cd, net);
		return err;
	}
	sn->unix_gid_cache = cd;
	return 0;
}

void unix_gid_cache_destroy(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd = sn->unix_gid_cache;

	sn->unix_gid_cache = NULL;
	cache_purge(cd);
	cache_unregister_net(cd, net);
	cache_destroy_net(cd, net);
}

static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, kuid_t uid)
{
	struct unix_gid ug;
	struct cache_head *ch;

	ug.uid = uid;
	ch = sunrpc_cache_lookup_rcu(cd, &ug.h, unix_gid_hash(uid));
	if (ch)
		return container_of(ch, struct unix_gid, h);
	else
		return NULL;
}

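/*
 * Resolve the supplementary group list for @uid via the auth.unix.gid
 * cache.  Returns a referenced group_info on success, or an ERR_PTR:
 * -EAGAIN when the entry is not yet available, -ESHUTDOWN when the
 * cache upcall timed out, or -ENOENT when no mapping exists.
 */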
static struct group_info *unix_gid_find(kuid_t uid, struct svc_rqst *rqstp)
{
	struct unix_gid *ug;
	struct group_info *gi;
	int ret;
	struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net,
					    sunrpc_net_id);

	ug = unix_gid_lookup(sn->unix_gid_cache, uid);
	if (!ug)
		return ERR_PTR(-EAGAIN);
	ret = cache_check(sn->unix_gid_cache, &ug->h, &rqstp->rq_chandle);
	switch (ret) {
	case -ENOENT:
		return ERR_PTR(-ENOENT);
	case -ETIMEDOUT:
		return ERR_PTR(-ESHUTDOWN);
	case 0:
		gi = get_group_info(ug->gi);
		cache_put(&ug->h, sn->unix_gid_cache);
		return gi;
	default:
		return ERR_PTR(-EAGAIN);
	}
}

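/**
 * svcauth_unix_set_client - map an incoming request to an auth_domain
 * @rqstp: RPC transaction to authorize
 *
 * Looks up the client's IP address in the auth.unix.ip cache to set
 * rqstp->rq_client and, when a mapping is available, replaces the
 * request's supplementary groups with the list from the auth.unix.gid
 * cache.  NULL procedure calls are accepted without a client mapping.
 *
 * Return values:
 *	%SVC_OK: a client mapping was established
 *	%SVC_DENIED: no matching client was found
 *	%SVC_DROP: a cache upcall has not completed yet
 *	%SVC_CLOSE: a cache upcall timed out
 */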
enum svc_auth_status
svcauth_unix_set_client(struct svc_rqst *rqstp)
{
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6, sin6_storage;
	struct ip_map *ipm;
	struct group_info *gi;
	struct svc_cred *cred = &rqstp->rq_cred;
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct net *net = xprt->xpt_net;
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	switch (rqstp->rq_addr.ss_family) {
	case AF_INET:
		sin = svc_addr_in(rqstp);
		sin6 = &sin6_storage;
		ipv6_addr_set_v4mapped(sin->sin_addr.s_addr, &sin6->sin6_addr);
		break;
	case AF_INET6:
		sin6 = svc_addr_in6(rqstp);
		break;
	default:
		BUG();
	}

	rqstp->rq_client = NULL;
	if (rqstp->rq_proc == 0)
		goto out;

	rqstp->rq_auth_stat = rpc_autherr_badcred;
	ipm = ip_map_cached_get(xprt);
	if (ipm == NULL)
		ipm = __ip_map_lookup(sn->ip_map_cache,
				      rqstp->rq_server->sv_programs->pg_class,
				      &sin6->sin6_addr);

	if (ipm == NULL)
		return SVC_DENIED;

	switch (cache_check(sn->ip_map_cache, &ipm->h, &rqstp->rq_chandle)) {
	default:
		BUG();
	case -ETIMEDOUT:
		return SVC_CLOSE;
	case -EAGAIN:
		return SVC_DROP;
	case -ENOENT:
		return SVC_DENIED;
	case 0:
		rqstp->rq_client = &ipm->m_client->h;
		kref_get(&rqstp->rq_client->ref);
		ip_map_cached_put(xprt, ipm);
		break;
	}

	gi = unix_gid_find(cred->cr_uid, rqstp);
	switch (PTR_ERR(gi)) {
	case -EAGAIN:
		return SVC_DROP;
	case -ESHUTDOWN:
		return SVC_CLOSE;
	case -ENOENT:
		break;
	default:
		put_group_info(cred->cr_group_info);
		cred->cr_group_info = gi;
	}

out:
	rqstp->rq_auth_stat = rpc_auth_ok;
	return SVC_OK;
}
EXPORT_SYMBOL_GPL(svcauth_unix_set_client);

/**
 * svcauth_null_accept - Decode and validate incoming RPC_AUTH_NULL credential
 * @rqstp: RPC transaction
 *
 * Return values:
 *	%SVC_OK: Both credential and verifier are valid
 *	%SVC_DENIED: Credential or verifier is not valid
 *	%SVC_GARBAGE: Failed to decode credential or verifier
 *	%SVC_CLOSE: Temporary failure
 *
 * rqstp->rq_auth_stat is set as mandated by RFC 5531.
 */
static enum svc_auth_status
svcauth_null_accept(struct svc_rqst *rqstp)
{
	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
	struct svc_cred *cred = &rqstp->rq_cred;
	u32 flavor, len;
	void *body;

	/* Length of Call's credential body field: */
	if (xdr_stream_decode_u32(xdr, &len) < 0)
		return SVC_GARBAGE;
	if (len != 0) {
		rqstp->rq_auth_stat = rpc_autherr_badcred;
		return SVC_DENIED;
	}

	/* Call's verf field: */
	if (xdr_stream_decode_opaque_auth(xdr, &flavor, &body, &len) < 0)
		return SVC_GARBAGE;
	if (flavor != RPC_AUTH_NULL || len != 0) {
		rqstp->rq_auth_stat = rpc_autherr_badverf;
		return SVC_DENIED;
	}

	/* Signal that mapping to nobody uid/gid is required */
	cred->cr_uid = INVALID_UID;
	cred->cr_gid = INVALID_GID;
	cred->cr_group_info = groups_alloc(0);
	if (cred->cr_group_info == NULL)
		return SVC_CLOSE; /* kmalloc failure - client must retry */

	if (xdr_stream_encode_opaque_auth(&rqstp->rq_res_stream,
					  RPC_AUTH_NULL, NULL, 0) < 0)
		return SVC_CLOSE;
	if (!svcxdr_set_accept_stat(rqstp))
		return SVC_CLOSE;

	rqstp->rq_cred.cr_flavor = RPC_AUTH_NULL;
	return SVC_OK;
}

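/*
 * Drop the per-request state taken in ->accept and ->set_client.
 * Shared by the AUTH_NULL and AUTH_TLS flavours; returning zero tells
 * the caller not to drop the reply.
 */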
static int
svcauth_null_release(struct svc_rqst *rqstp)
{
	if (rqstp->rq_client)
		auth_domain_put(rqstp->rq_client);
	rqstp->rq_client = NULL;
	if (rqstp->rq_cred.cr_group_info)
		put_group_info(rqstp->rq_cred.cr_group_info);
	rqstp->rq_cred.cr_group_info = NULL;

	return 0; /* don't drop */
}


struct auth_ops svcauth_null = {
	.name = "null",
	.owner = THIS_MODULE,
	.flavour = RPC_AUTH_NULL,
	.accept = svcauth_null_accept,
	.release = svcauth_null_release,
	.set_client = svcauth_unix_set_client,
};


/**
 * svcauth_tls_accept - Decode and validate incoming RPC_AUTH_TLS credential
 * @rqstp: RPC transaction
 *
 * Return values:
 *	%SVC_OK: Both credential and verifier are valid
 *	%SVC_DENIED: Credential or verifier is not valid
 *	%SVC_GARBAGE: Failed to decode credential or verifier
 *	%SVC_CLOSE: Temporary failure
 *
 * rqstp->rq_auth_stat is set as mandated by RFC 5531.
 */
static enum svc_auth_status
svcauth_tls_accept(struct svc_rqst *rqstp)
{
	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
	struct svc_cred *cred = &rqstp->rq_cred;
	struct svc_xprt *xprt = rqstp->rq_xprt;
	u32 flavor, len;
	void *body;
	__be32 *p;

	/* Length of Call's credential body field: */
	if (xdr_stream_decode_u32(xdr, &len) < 0)
		return SVC_GARBAGE;
	if (len != 0) {
		rqstp->rq_auth_stat = rpc_autherr_badcred;
		return SVC_DENIED;
	}

	/* Call's verf field: */
	if (xdr_stream_decode_opaque_auth(xdr, &flavor, &body, &len) < 0)
		return SVC_GARBAGE;
	if (flavor != RPC_AUTH_NULL || len != 0) {
		rqstp->rq_auth_stat = rpc_autherr_badverf;
		return SVC_DENIED;
	}

	/* AUTH_TLS is not valid on non-NULL procedures */
	if (rqstp->rq_proc != 0) {
		rqstp->rq_auth_stat = rpc_autherr_badcred;
		return SVC_DENIED;
	}

	/* Signal that mapping to nobody uid/gid is required */
	cred->cr_uid = INVALID_UID;
	cred->cr_gid = INVALID_GID;
	cred->cr_group_info = groups_alloc(0);
	if (cred->cr_group_info == NULL)
		return SVC_CLOSE;

	if (xprt->xpt_ops->xpo_handshake) {
		p = xdr_reserve_space(&rqstp->rq_res_stream, XDR_UNIT * 2 + 8);
		if (!p)
			return SVC_CLOSE;
		trace_svc_tls_start(xprt);
		*p++ = rpc_auth_null;
		*p++ = cpu_to_be32(8);
		memcpy(p, "STARTTLS", 8);

		set_bit(XPT_HANDSHAKE, &xprt->xpt_flags);
		svc_xprt_enqueue(xprt);
	} else {
		trace_svc_tls_unavailable(xprt);
		if (xdr_stream_encode_opaque_auth(&rqstp->rq_res_stream,
						  RPC_AUTH_NULL, NULL, 0) < 0)
			return SVC_CLOSE;
	}
	if (!svcxdr_set_accept_stat(rqstp))
		return SVC_CLOSE;

	rqstp->rq_cred.cr_flavor = RPC_AUTH_TLS;
	return SVC_OK;
}

struct auth_ops svcauth_tls = {
	.name = "tls",
	.owner = THIS_MODULE,
	.flavour = RPC_AUTH_TLS,
	.accept = svcauth_tls_accept,
	.release = svcauth_null_release,
	.set_client = svcauth_unix_set_client,
};


/**
 * svcauth_unix_accept - Decode and validate incoming RPC_AUTH_SYS credential
 * @rqstp: RPC transaction
 *
 * Return values:
 *	%SVC_OK: Both credential and verifier are valid
 *	%SVC_DENIED: Credential or verifier is not valid
 *	%SVC_GARBAGE: Failed to decode credential or verifier
 *	%SVC_CLOSE: Temporary failure
 *
 * rqstp->rq_auth_stat is set as mandated by RFC 5531.
 */
static enum svc_auth_status
svcauth_unix_accept(struct svc_rqst *rqstp)
{
	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
	struct svc_cred *cred = &rqstp->rq_cred;
	struct user_namespace *userns;
	u32 flavor, len, i;
	void *body;
	__be32 *p;

	/*
	 * This implementation ignores the length of the Call's
	 * credential body field and the timestamp and machinename
	 * fields.
	 */
	p = xdr_inline_decode(xdr, XDR_UNIT * 3);
	if (!p)
		return SVC_GARBAGE;
	len = be32_to_cpup(p + 2);
	if (len > RPC_MAX_MACHINENAME)
		return SVC_GARBAGE;
	if (!xdr_inline_decode(xdr, len))
		return SVC_GARBAGE;

	/*
	 * Note: we skip uid_valid()/gid_valid() checks here for
	 * backwards compatibility with clients that use -1 id's.
	 * Instead, -1 uid or gid is later mapped to the
	 * (export-specific) anonymous id by nfsd_setuser.
	 * Supplementary gid's will be left alone.
	 */
	userns = (rqstp->rq_xprt && rqstp->rq_xprt->xpt_cred) ?
		rqstp->rq_xprt->xpt_cred->user_ns : &init_user_ns;
	if (xdr_stream_decode_u32(xdr, &i) < 0)
		return SVC_GARBAGE;
	cred->cr_uid = make_kuid(userns, i);
	if (xdr_stream_decode_u32(xdr, &i) < 0)
		return SVC_GARBAGE;
	cred->cr_gid = make_kgid(userns, i);

	if (xdr_stream_decode_u32(xdr, &len) < 0)
		return SVC_GARBAGE;
	if (len > UNX_NGROUPS)
		goto badcred;
	p = xdr_inline_decode(xdr, XDR_UNIT * len);
	if (!p)
		return SVC_GARBAGE;
	cred->cr_group_info = groups_alloc(len);
	if (cred->cr_group_info == NULL)
		return SVC_CLOSE;
	for (i = 0; i < len; i++) {
		kgid_t kgid = make_kgid(userns, be32_to_cpup(p++));
		cred->cr_group_info->gid[i] = kgid;
	}
	groups_sort(cred->cr_group_info);

	/* Call's verf field: */
	if (xdr_stream_decode_opaque_auth(xdr, &flavor, &body, &len) < 0)
		return SVC_GARBAGE;
	if (flavor != RPC_AUTH_NULL || len != 0) {
		rqstp->rq_auth_stat = rpc_autherr_badverf;
		return SVC_DENIED;
	}

	if (xdr_stream_encode_opaque_auth(&rqstp->rq_res_stream,
					  RPC_AUTH_NULL, NULL, 0) < 0)
		return SVC_CLOSE;
	if (!svcxdr_set_accept_stat(rqstp))
		return SVC_CLOSE;

	rqstp->rq_cred.cr_flavor = RPC_AUTH_UNIX;
	return SVC_OK;

badcred:
	rqstp->rq_auth_stat = rpc_autherr_badcred;
	return SVC_DENIED;
}

static int
svcauth_unix_release(struct svc_rqst *rqstp)
{
	/* Verifier (such as it is) is already in place.
	 */
	if (rqstp->rq_client)
		auth_domain_put(rqstp->rq_client);
	rqstp->rq_client = NULL;
	if (rqstp->rq_cred.cr_group_info)
		put_group_info(rqstp->rq_cred.cr_group_info);
	rqstp->rq_cred.cr_group_info = NULL;

	return 0;
}


struct auth_ops svcauth_unix = {
	.name = "unix",
	.owner = THIS_MODULE,
	.flavour = RPC_AUTH_UNIX,
	.accept = svcauth_unix_accept,
	.release = svcauth_unix_release,
	.domain_release = svcauth_unix_domain_release,
	.set_client = svcauth_unix_set_client,
};

static const struct cache_detail ip_map_cache_template = {
	.owner = THIS_MODULE,
	.hash_size = IP_HASHMAX,
	.name = "auth.unix.ip",
	.cache_put = ip_map_put,
	.cache_upcall = ip_map_upcall,
	.cache_request = ip_map_request,
	.cache_parse = ip_map_parse,
	.cache_show = ip_map_show,
	.match = ip_map_match,
	.init = ip_map_init,
	.update = update,
	.alloc = ip_map_alloc,
};

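/**
 * ip_map_cache_create - create and register a per-net auth.unix.ip cache
 * @net: network namespace to attach the cache to
 *
 * Returns zero on success or a negative errno.
 */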
int ip_map_cache_create(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd;
	int err;

	cd = cache_create_net(&ip_map_cache_template, net);
	if (IS_ERR(cd))
		return PTR_ERR(cd);
	err = cache_register_net(cd, net);
	if (err) {
		cache_destroy_net(cd, net);
		return err;
	}
	sn->ip_map_cache = cd;
	return 0;
}

void ip_map_cache_destroy(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd = sn->ip_map_cache;

	sn->ip_map_cache = NULL;
	cache_purge(cd);
	cache_unregister_net(cd, net);
	cache_destroy_net(cd, net);
}