1#include <linux/ceph/ceph_debug.h>
2
3#include "super.h"
4#include "mds_client.h"
5
6#include <linux/ceph/decode.h>
7
8#include <linux/xattr.h>
9#include <linux/slab.h>
10
11static bool ceph_is_valid_xattr(const char *name)
12{
13 return !strncmp(name, "ceph.", 5) ||
14 !strncmp(name, XATTR_SECURITY_PREFIX,
15 XATTR_SECURITY_PREFIX_LEN) ||
16 !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
17 !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
18}
19
20/*
21 * These define virtual xattrs exposing the recursive directory
22 * statistics and layout metadata.
23 */
24struct ceph_vxattr_cb {
25 bool readonly;
26 char *name;
27 size_t (*getxattr_cb)(struct ceph_inode_info *ci, char *val,
28 size_t size);
29};
30
31/* directories */
32
33static size_t ceph_vxattrcb_entries(struct ceph_inode_info *ci, char *val,
34 size_t size)
35{
36 return snprintf(val, size, "%lld", ci->i_files + ci->i_subdirs);
37}
38
39static size_t ceph_vxattrcb_files(struct ceph_inode_info *ci, char *val,
40 size_t size)
41{
42 return snprintf(val, size, "%lld", ci->i_files);
43}
44
45static size_t ceph_vxattrcb_subdirs(struct ceph_inode_info *ci, char *val,
46 size_t size)
47{
48 return snprintf(val, size, "%lld", ci->i_subdirs);
49}
50
51static size_t ceph_vxattrcb_rentries(struct ceph_inode_info *ci, char *val,
52 size_t size)
53{
54 return snprintf(val, size, "%lld", ci->i_rfiles + ci->i_rsubdirs);
55}
56
57static size_t ceph_vxattrcb_rfiles(struct ceph_inode_info *ci, char *val,
58 size_t size)
59{
60 return snprintf(val, size, "%lld", ci->i_rfiles);
61}
62
63static size_t ceph_vxattrcb_rsubdirs(struct ceph_inode_info *ci, char *val,
64 size_t size)
65{
66 return snprintf(val, size, "%lld", ci->i_rsubdirs);
67}
68
69static size_t ceph_vxattrcb_rbytes(struct ceph_inode_info *ci, char *val,
70 size_t size)
71{
72 return snprintf(val, size, "%lld", ci->i_rbytes);
73}
74
75static size_t ceph_vxattrcb_rctime(struct ceph_inode_info *ci, char *val,
76 size_t size)
77{
78 return snprintf(val, size, "%ld.%ld", (long)ci->i_rctime.tv_sec,
79 (long)ci->i_rctime.tv_nsec);
80}
81
82static struct ceph_vxattr_cb ceph_dir_vxattrs[] = {
83 { true, "ceph.dir.entries", ceph_vxattrcb_entries},
84 { true, "ceph.dir.files", ceph_vxattrcb_files},
85 { true, "ceph.dir.subdirs", ceph_vxattrcb_subdirs},
86 { true, "ceph.dir.rentries", ceph_vxattrcb_rentries},
87 { true, "ceph.dir.rfiles", ceph_vxattrcb_rfiles},
88 { true, "ceph.dir.rsubdirs", ceph_vxattrcb_rsubdirs},
89 { true, "ceph.dir.rbytes", ceph_vxattrcb_rbytes},
90 { true, "ceph.dir.rctime", ceph_vxattrcb_rctime},
91 { true, NULL, NULL }
92};
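/*
 * Illustrative sketch (not part of this file): from userspace these
 * virtual xattrs are read with the ordinary xattr syscalls, e.g.
 *
 *	char buf[64];
 *	ssize_t n = getxattr("/mnt/cephfs/somedir", "ceph.dir.rbytes",
 *			     buf, sizeof(buf));
 *	if (n >= 0)
 *		printf("recursive bytes: %.*s\n", (int)n, buf);
 *
 * or equivalently "getfattr -n ceph.dir.rbytes <dir>"; the mount point
 * and path above are only examples.
 */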
93
94/* files */
95
96static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
97 size_t size)
98{
99 int ret;
100
101 ret = snprintf(val, size,
102 "chunk_bytes=%llu\nstripe_count=%llu\nobject_size=%llu\n",
103 (unsigned long long)ceph_file_layout_su(ci->i_layout),
104 (unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
105 (unsigned long long)ceph_file_layout_object_size(ci->i_layout));
106 if (ceph_file_layout_pg_preferred(ci->i_layout))
107 ret += snprintf(val + ret, size > (size_t)ret ? size - ret : 0,
108 "preferred_osd=%llu\n",
109 (unsigned long long)ceph_file_layout_pg_preferred(ci->i_layout));
110 return ret;
111}
112
113static struct ceph_vxattr_cb ceph_file_vxattrs[] = {
114 { true, "ceph.layout", ceph_vxattrcb_layout},
115 { true, NULL, NULL }
116};
117
118static struct ceph_vxattr_cb *ceph_inode_vxattrs(struct inode *inode)
119{
120 if (S_ISDIR(inode->i_mode))
121 return ceph_dir_vxattrs;
122 else if (S_ISREG(inode->i_mode))
123 return ceph_file_vxattrs;
124 return NULL;
125}
126
127static struct ceph_vxattr_cb *ceph_match_vxattr(struct ceph_vxattr_cb *vxattr,
128 const char *name)
129{
130 do {
131 if (strcmp(vxattr->name, name) == 0)
132 return vxattr;
133 vxattr++;
134 } while (vxattr->name);
135 return NULL;
136}
137
138static int __set_xattr(struct ceph_inode_info *ci,
139 const char *name, int name_len,
140 const char *val, int val_len,
141 int dirty,
142 int should_free_name, int should_free_val,
143 struct ceph_inode_xattr **newxattr)
144{
145 struct rb_node **p;
146 struct rb_node *parent = NULL;
147 struct ceph_inode_xattr *xattr = NULL;
148 int c;
149 int new = 0;
150
151 p = &ci->i_xattrs.index.rb_node;
152 while (*p) {
153 parent = *p;
154 xattr = rb_entry(parent, struct ceph_inode_xattr, node);
155 c = strncmp(name, xattr->name, min(name_len, xattr->name_len));
156 if (c < 0)
157 p = &(*p)->rb_left;
158 else if (c > 0)
159 p = &(*p)->rb_right;
160 else {
161 if (name_len == xattr->name_len)
162 break;
163 else if (name_len < xattr->name_len)
164 p = &(*p)->rb_left;
165 else
166 p = &(*p)->rb_right;
167 }
168 xattr = NULL;
169 }
170
171 if (!xattr) {
172 new = 1;
173 xattr = *newxattr;
174 xattr->name = name;
175 xattr->name_len = name_len;
176 xattr->should_free_name = should_free_name;
177
178 ci->i_xattrs.count++;
179 dout("__set_xattr count=%d\n", ci->i_xattrs.count);
180 } else {
181 kfree(*newxattr);
182 *newxattr = NULL;
183 if (xattr->should_free_val)
184 kfree((void *)xattr->val);
185
186 if (should_free_name) {
187 kfree((void *)name);
188 name = xattr->name;
189 }
190 ci->i_xattrs.names_size -= xattr->name_len;
191 ci->i_xattrs.vals_size -= xattr->val_len;
192 }
193 ci->i_xattrs.names_size += name_len;
194 ci->i_xattrs.vals_size += val_len;
195 if (val)
196 xattr->val = val;
197 else
198 xattr->val = "";
199
200 xattr->val_len = val_len;
201 xattr->dirty = dirty;
202 xattr->should_free_val = (val && should_free_val);
203
204 if (new) {
205 rb_link_node(&xattr->node, parent, p);
206 rb_insert_color(&xattr->node, &ci->i_xattrs.index);
207 dout("__set_xattr_val p=%p\n", p);
208 }
209
210 dout("__set_xattr_val added %llx.%llx xattr %p %s=%.*s\n",
211 ceph_vinop(&ci->vfs_inode), xattr, name, val_len, val);
212
213 return 0;
214}
215
216static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
217 const char *name)
218{
219 struct rb_node **p;
220 struct rb_node *parent = NULL;
221 struct ceph_inode_xattr *xattr = NULL;
222 int name_len = strlen(name);
223 int c;
224
225 p = &ci->i_xattrs.index.rb_node;
226 while (*p) {
227 parent = *p;
228 xattr = rb_entry(parent, struct ceph_inode_xattr, node);
229 c = strncmp(name, xattr->name, xattr->name_len);
230 if (c == 0 && name_len > xattr->name_len)
231 c = 1;
232 if (c < 0)
233 p = &(*p)->rb_left;
234 else if (c > 0)
235 p = &(*p)->rb_right;
236 else {
237 dout("__get_xattr %s: found %.*s\n", name,
238 xattr->val_len, xattr->val);
239 return xattr;
240 }
241 }
242
243 dout("__get_xattr %s: not found\n", name);
244
245 return NULL;
246}
247
248static void __free_xattr(struct ceph_inode_xattr *xattr)
249{
250 BUG_ON(!xattr);
251
252 if (xattr->should_free_name)
253 kfree((void *)xattr->name);
254 if (xattr->should_free_val)
255 kfree((void *)xattr->val);
256
257 kfree(xattr);
258}
259
260static int __remove_xattr(struct ceph_inode_info *ci,
261 struct ceph_inode_xattr *xattr)
262{
263 if (!xattr)
264 return -EOPNOTSUPP;
265
266 rb_erase(&xattr->node, &ci->i_xattrs.index);
267
268 if (xattr->should_free_name)
269 kfree((void *)xattr->name);
270 if (xattr->should_free_val)
271 kfree((void *)xattr->val);
272
273 ci->i_xattrs.names_size -= xattr->name_len;
274 ci->i_xattrs.vals_size -= xattr->val_len;
275 ci->i_xattrs.count--;
276 kfree(xattr);
277
278 return 0;
279}
280
281static int __remove_xattr_by_name(struct ceph_inode_info *ci,
282 const char *name)
283{
284 struct ceph_inode_xattr *xattr;
285 int err;
286
289 xattr = __get_xattr(ci, name);
290 err = __remove_xattr(ci, xattr);
291 return err;
292}
293
294static char *__copy_xattr_names(struct ceph_inode_info *ci,
295 char *dest)
296{
297 struct rb_node *p;
298 struct ceph_inode_xattr *xattr = NULL;
299
300 p = rb_first(&ci->i_xattrs.index);
301 dout("__copy_xattr_names count=%d\n", ci->i_xattrs.count);
302
303 while (p) {
304 xattr = rb_entry(p, struct ceph_inode_xattr, node);
305 memcpy(dest, xattr->name, xattr->name_len);
306 dest[xattr->name_len] = '\0';
307
308 dout("dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
309 xattr->name_len, ci->i_xattrs.names_size);
310
311 dest += xattr->name_len + 1;
312 p = rb_next(p);
313 }
314
315 return dest;
316}
317
318void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
319{
320 struct rb_node *p, *tmp;
321 struct ceph_inode_xattr *xattr = NULL;
322
323 p = rb_first(&ci->i_xattrs.index);
324
325 dout("__ceph_destroy_xattrs p=%p\n", p);
326
327 while (p) {
328 xattr = rb_entry(p, struct ceph_inode_xattr, node);
329 tmp = p;
330 p = rb_next(tmp);
331 dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p,
332 xattr->name_len, xattr->name);
333 rb_erase(tmp, &ci->i_xattrs.index);
334
335 __free_xattr(xattr);
336 }
337
338 ci->i_xattrs.names_size = 0;
339 ci->i_xattrs.vals_size = 0;
340 ci->i_xattrs.index_version = 0;
341 ci->i_xattrs.count = 0;
342 ci->i_xattrs.index = RB_ROOT;
343}
344
345static int __build_xattrs(struct inode *inode)
346 __releases(inode->i_lock)
347 __acquires(inode->i_lock)
348{
349 u32 namelen;
350 u32 numattr = 0;
351 void *p, *end;
352 u32 len;
353 const char *name, *val;
354 struct ceph_inode_info *ci = ceph_inode(inode);
355 int xattr_version;
356 struct ceph_inode_xattr **xattrs = NULL;
357 int err = 0;
358 int i;
359
360 dout("__build_xattrs() len=%d\n",
361 ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);
362
363 if (ci->i_xattrs.index_version >= ci->i_xattrs.version)
364 return 0; /* already built */
365
366 __ceph_destroy_xattrs(ci);
367
368start:
369 /* updated internal xattr rb tree */
370 if (ci->i_xattrs.blob && ci->i_xattrs.blob->vec.iov_len > 4) {
371 p = ci->i_xattrs.blob->vec.iov_base;
372 end = p + ci->i_xattrs.blob->vec.iov_len;
373 ceph_decode_32_safe(&p, end, numattr, bad);
374 xattr_version = ci->i_xattrs.version;
375 spin_unlock(&inode->i_lock);
376
377 xattrs = kcalloc(numattr, sizeof(struct ceph_inode_xattr *),
378 GFP_NOFS);
379 err = -ENOMEM;
380 if (!xattrs)
381 goto bad_lock;
383 for (i = 0; i < numattr; i++) {
384 xattrs[i] = kmalloc(sizeof(struct ceph_inode_xattr),
385 GFP_NOFS);
386 if (!xattrs[i])
387 goto bad_lock;
388 }
389
390 spin_lock(&inode->i_lock);
391 if (ci->i_xattrs.version != xattr_version) {
392 /* lost a race, retry */
393 for (i = 0; i < numattr; i++)
394 kfree(xattrs[i]);
395 kfree(xattrs);
396 goto start;
397 }
398 err = -EIO;
399 while (numattr--) {
400 ceph_decode_32_safe(&p, end, len, bad);
401 namelen = len;
402 name = p;
403 p += len;
404 ceph_decode_32_safe(&p, end, len, bad);
405 val = p;
406 p += len;
407
408 err = __set_xattr(ci, name, namelen, val, len,
409 0, 0, 0, &xattrs[numattr]);
410
411 if (err < 0)
412 goto bad;
413 }
414 kfree(xattrs);
415 }
416 ci->i_xattrs.index_version = ci->i_xattrs.version;
417 ci->i_xattrs.dirty = false;
418
419 return err;
420bad_lock:
421 spin_lock(&inode->i_lock);
422bad:
423 if (xattrs) {
424 for (i = 0; i < numattr; i++)
425 kfree(xattrs[i]);
426 kfree(xattrs);
427 }
428 ci->i_xattrs.names_size = 0;
429 return err;
430}
431
432static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
433 int val_size)
434{
435 /*
436 * 4 bytes for the length, and additional 4 bytes per each xattr name,
437 * 4 bytes per each value
438 */
439 int size = 4 + ci->i_xattrs.count*(4 + 4) +
440 ci->i_xattrs.names_size +
441 ci->i_xattrs.vals_size;
442 dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n",
443 ci->i_xattrs.count, ci->i_xattrs.names_size,
444 ci->i_xattrs.vals_size);
445
446 if (name_size)
447 size += 4 + 4 + name_size + val_size;
448
449 return size;
450}
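/*
 * Worked example of the formula above (illustrative values only): with
 * two xattrs "user.a"="x" and "user.bb"="yy" we have count=2,
 * names_size=6+7=13 and vals_size=1+2=3, so the blob needs
 * 4 + 2*(4+4) + 13 + 3 = 36 bytes; adding one more xattr with an
 * 8-byte name and a 5-byte value adds 4 + 4 + 8 + 5 = 21 bytes.
 */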
451
452/*
453 * If there are dirty xattrs, reencode xattrs into the prealloc_blob
454 * and swap into place.
455 */
456void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
457{
458 struct rb_node *p;
459 struct ceph_inode_xattr *xattr = NULL;
460 void *dest;
461
462 dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
463 if (ci->i_xattrs.dirty) {
464 int need = __get_required_blob_size(ci, 0, 0);
465
466 BUG_ON(need > ci->i_xattrs.prealloc_blob->alloc_len);
467
468 p = rb_first(&ci->i_xattrs.index);
469 dest = ci->i_xattrs.prealloc_blob->vec.iov_base;
470
471 ceph_encode_32(&dest, ci->i_xattrs.count);
472 while (p) {
473 xattr = rb_entry(p, struct ceph_inode_xattr, node);
474
475 ceph_encode_32(&dest, xattr->name_len);
476 memcpy(dest, xattr->name, xattr->name_len);
477 dest += xattr->name_len;
478 ceph_encode_32(&dest, xattr->val_len);
479 memcpy(dest, xattr->val, xattr->val_len);
480 dest += xattr->val_len;
481
482 p = rb_next(p);
483 }
484
485 /* adjust buffer len; it may be larger than we need */
486 ci->i_xattrs.prealloc_blob->vec.iov_len =
487 dest - ci->i_xattrs.prealloc_blob->vec.iov_base;
488
489 if (ci->i_xattrs.blob)
490 ceph_buffer_put(ci->i_xattrs.blob);
491 ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
492 ci->i_xattrs.prealloc_blob = NULL;
493 ci->i_xattrs.dirty = false;
494 ci->i_xattrs.version++;
495 }
496}
497
498ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
499 size_t size)
500{
501 struct inode *inode = dentry->d_inode;
502 struct ceph_inode_info *ci = ceph_inode(inode);
503 struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
504 int err;
505 struct ceph_inode_xattr *xattr;
506 struct ceph_vxattr_cb *vxattr = NULL;
507
508 if (!ceph_is_valid_xattr(name))
509 return -ENODATA;
510
511 /* let's see if a virtual xattr was requested */
512 if (vxattrs)
513 vxattr = ceph_match_vxattr(vxattrs, name);
514
515 spin_lock(&inode->i_lock);
516 dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
517 ci->i_xattrs.version, ci->i_xattrs.index_version);
518
519 if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
520 (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
521 goto get_xattr;
522 } else {
523 spin_unlock(&inode->i_lock);
524 /* get xattrs from mds (if we don't already have them) */
525 err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
526 if (err)
527 return err;
528 }
529
530 spin_lock(&inode->i_lock);
531
532 if (vxattr && vxattr->readonly) {
533 err = vxattr->getxattr_cb(ci, value, size);
534 goto out;
535 }
536
537 err = __build_xattrs(inode);
538 if (err < 0)
539 goto out;
540
541get_xattr:
542 err = -ENODATA; /* == ENOATTR */
543 xattr = __get_xattr(ci, name);
544 if (!xattr) {
545 if (vxattr)
546 err = vxattr->getxattr_cb(ci, value, size);
547 goto out;
548 }
549
550 err = -ERANGE;
551 if (size && size < xattr->val_len)
552 goto out;
553
554 err = xattr->val_len;
555 if (size == 0)
556 goto out;
557
558 memcpy(value, xattr->val, xattr->val_len);
559
560out:
561 spin_unlock(&inode->i_lock);
562 return err;
563}
564
565ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
566{
567 struct inode *inode = dentry->d_inode;
568 struct ceph_inode_info *ci = ceph_inode(inode);
569 struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
570 u32 vir_namelen = 0;
571 u32 namelen;
572 int err;
573 u32 len;
574 int i;
575
576 spin_lock(&inode->i_lock);
577 dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
578 ci->i_xattrs.version, ci->i_xattrs.index_version);
579
580 if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
581 (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
582 goto list_xattr;
583 } else {
584 spin_unlock(&inode->i_lock);
585 err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
586 if (err)
587 return err;
588 }
589
590 spin_lock(&inode->i_lock);
591
592 err = __build_xattrs(inode);
593 if (err < 0)
594 goto out;
595
596list_xattr:
597 vir_namelen = 0;
598 /* include virtual dir xattrs */
599 if (vxattrs)
600 for (i = 0; vxattrs[i].name; i++)
601 vir_namelen += strlen(vxattrs[i].name) + 1;
602 /* add 1 byte per xattr name for its null terminator */
603 namelen = vir_namelen + ci->i_xattrs.names_size + ci->i_xattrs.count;
604 err = -ERANGE;
605 if (size && namelen > size)
606 goto out;
607
608 err = namelen;
609 if (size == 0)
610 goto out;
611
612 names = __copy_xattr_names(ci, names);
613
614 /* virtual xattr names, too */
615 if (vxattrs)
616 for (i = 0; vxattrs[i].name; i++) {
617 len = sprintf(names, "%s", vxattrs[i].name);
618 names += len + 1;
619 }
620
621out:
622 spin_unlock(&inode->i_lock);
623 return err;
624}
625
626static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
627 const char *value, size_t size, int flags)
628{
629 struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
630 struct inode *inode = dentry->d_inode;
631 struct ceph_inode_info *ci = ceph_inode(inode);
632 struct inode *parent_inode;
633 struct ceph_mds_request *req;
634 struct ceph_mds_client *mdsc = fsc->mdsc;
635 int err;
636 int i, nr_pages;
637 struct page **pages = NULL;
638 void *kaddr;
639
640 /* copy value into some pages */
641 nr_pages = calc_pages_for(0, size);
642 if (nr_pages) {
643 pages = kmalloc(sizeof(pages[0])*nr_pages, GFP_NOFS);
644 if (!pages)
645 return -ENOMEM;
646 err = -ENOMEM;
647 for (i = 0; i < nr_pages; i++) {
648 pages[i] = __page_cache_alloc(GFP_NOFS);
649 if (!pages[i]) {
650 nr_pages = i;
651 goto out;
652 }
653 kaddr = kmap(pages[i]);
654 memcpy(kaddr, value + i*PAGE_CACHE_SIZE,
655 min(PAGE_CACHE_SIZE, size-i*PAGE_CACHE_SIZE));
 kunmap(pages[i]);
656 }
657 }
658
659 dout("setxattr value=%.*s\n", (int)size, value);
660
661 /* do request */
662 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETXATTR,
663 USE_AUTH_MDS);
664 if (IS_ERR(req)) {
665 err = PTR_ERR(req);
666 goto out;
667 }
668 req->r_inode = inode;
669 ihold(inode);
670 req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
671 req->r_num_caps = 1;
672 req->r_args.setxattr.flags = cpu_to_le32(flags);
673 req->r_path2 = kstrdup(name, GFP_NOFS);
674
675 req->r_pages = pages;
676 req->r_num_pages = nr_pages;
677 req->r_data_len = size;
678
679 dout("xattr.ver (before): %lld\n", ci->i_xattrs.version);
680 parent_inode = ceph_get_dentry_parent_inode(dentry);
681 err = ceph_mdsc_do_request(mdsc, parent_inode, req);
682 iput(parent_inode);
683 ceph_mdsc_put_request(req);
684 dout("xattr.ver (after): %lld\n", ci->i_xattrs.version);
685
686out:
687 if (pages) {
688 for (i = 0; i < nr_pages; i++)
689 __free_page(pages[i]);
690 kfree(pages);
691 }
692 return err;
693}
694
695int ceph_setxattr(struct dentry *dentry, const char *name,
696 const void *value, size_t size, int flags)
697{
698 struct inode *inode = dentry->d_inode;
699 struct ceph_inode_info *ci = ceph_inode(inode);
700 struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
701 int err;
702 int name_len = strlen(name);
703 int val_len = size;
704 char *newname = NULL;
705 char *newval = NULL;
706 struct ceph_inode_xattr *xattr = NULL;
707 int issued;
708 int required_blob_size;
709 int dirty;
710
711 if (ceph_snap(inode) != CEPH_NOSNAP)
712 return -EROFS;
713
714 if (!ceph_is_valid_xattr(name))
715 return -EOPNOTSUPP;
716
717 if (vxattrs) {
718 struct ceph_vxattr_cb *vxattr =
719 ceph_match_vxattr(vxattrs, name);
720 if (vxattr && vxattr->readonly)
721 return -EOPNOTSUPP;
722 }
723
724 /* preallocate memory for xattr name, value, index node */
725 err = -ENOMEM;
726 newname = kmemdup(name, name_len + 1, GFP_NOFS);
727 if (!newname)
728 goto out;
729
730 if (val_len) {
731 newval = kmalloc(val_len + 1, GFP_NOFS);
732 if (!newval)
733 goto out;
734 memcpy(newval, value, val_len);
735 newval[val_len] = '\0';
736 }
737
738 xattr = kmalloc(sizeof(struct ceph_inode_xattr), GFP_NOFS);
739 if (!xattr)
740 goto out;
741
742 spin_lock(&inode->i_lock);
743retry:
744 issued = __ceph_caps_issued(ci, NULL);
745 if (!(issued & CEPH_CAP_XATTR_EXCL))
746 goto do_sync;
747 __build_xattrs(inode);
748
749 required_blob_size = __get_required_blob_size(ci, name_len, val_len);
750
751 if (!ci->i_xattrs.prealloc_blob ||
752 required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
753 struct ceph_buffer *blob = NULL;
754
755 spin_unlock(&inode->i_lock);
756 dout(" preaallocating new blob size=%d\n", required_blob_size);
757 blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
758 if (!blob)
759 goto out;
760 spin_lock(&inode->i_lock);
761 if (ci->i_xattrs.prealloc_blob)
762 ceph_buffer_put(ci->i_xattrs.prealloc_blob);
763 ci->i_xattrs.prealloc_blob = blob;
764 goto retry;
765 }
766
767 dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued));
768 err = __set_xattr(ci, newname, name_len, newval,
769 val_len, 1, 1, 1, &xattr);
770 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
771 ci->i_xattrs.dirty = true;
772 inode->i_ctime = CURRENT_TIME;
773 spin_unlock(&inode->i_lock);
774 if (dirty)
775 __mark_inode_dirty(inode, dirty);
776 return err;
777
778do_sync:
779 spin_unlock(&inode->i_lock);
780 err = ceph_sync_setxattr(dentry, name, value, size, flags);
781out:
782 kfree(newname);
783 kfree(newval);
784 kfree(xattr);
785 return err;
786}
787
788static int ceph_send_removexattr(struct dentry *dentry, const char *name)
789{
790 struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
791 struct ceph_mds_client *mdsc = fsc->mdsc;
792 struct inode *inode = dentry->d_inode;
793 struct inode *parent_inode;
794 struct ceph_mds_request *req;
795 int err;
796
797 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RMXATTR,
798 USE_AUTH_MDS);
799 if (IS_ERR(req))
800 return PTR_ERR(req);
801 req->r_inode = inode;
802 ihold(inode);
803 req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
804 req->r_num_caps = 1;
805 req->r_path2 = kstrdup(name, GFP_NOFS);
806
807 parent_inode = ceph_get_dentry_parent_inode(dentry);
808 err = ceph_mdsc_do_request(mdsc, parent_inode, req);
809 iput(parent_inode);
810 ceph_mdsc_put_request(req);
811 return err;
812}
813
814int ceph_removexattr(struct dentry *dentry, const char *name)
815{
816 struct inode *inode = dentry->d_inode;
817 struct ceph_inode_info *ci = ceph_inode(inode);
818 struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
819 int issued;
820 int err;
821 int dirty;
822
823 if (ceph_snap(inode) != CEPH_NOSNAP)
824 return -EROFS;
825
826 if (!ceph_is_valid_xattr(name))
827 return -EOPNOTSUPP;
828
829 if (vxattrs) {
830 struct ceph_vxattr_cb *vxattr =
831 ceph_match_vxattr(vxattrs, name);
832 if (vxattr && vxattr->readonly)
833 return -EOPNOTSUPP;
834 }
835
836 spin_lock(&inode->i_lock);
837 __build_xattrs(inode);
838 issued = __ceph_caps_issued(ci, NULL);
839 dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued));
840
841 if (!(issued & CEPH_CAP_XATTR_EXCL))
842 goto do_sync;
843
844 err = __remove_xattr_by_name(ceph_inode(inode), name);
845 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
846 ci->i_xattrs.dirty = true;
847 inode->i_ctime = CURRENT_TIME;
848
849 spin_unlock(&inode->i_lock);
850 if (dirty)
851 __mark_inode_dirty(inode, dirty);
852 return err;
853do_sync:
854 spin_unlock(&inode->i_lock);
855 err = ceph_send_removexattr(dentry, name);
856 return err;
857}
858
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/ceph/ceph_debug.h>
3#include <linux/ceph/pagelist.h>
4
5#include "super.h"
6#include "mds_client.h"
7
8#include <linux/ceph/decode.h>
9
10#include <linux/xattr.h>
11#include <linux/security.h>
12#include <linux/posix_acl_xattr.h>
13#include <linux/slab.h>
14
15#define XATTR_CEPH_PREFIX "ceph."
16#define XATTR_CEPH_PREFIX_LEN (sizeof (XATTR_CEPH_PREFIX) - 1)
17
18static int __remove_xattr(struct ceph_inode_info *ci,
19 struct ceph_inode_xattr *xattr);
20
21static bool ceph_is_valid_xattr(const char *name)
22{
23 return !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) ||
24 !strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN) ||
25 !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
26 !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
27}
28
29/*
30 * These define virtual xattrs exposing the recursive directory
31 * statistics and layout metadata.
32 */
33struct ceph_vxattr {
34 char *name;
35 size_t name_size; /* strlen(name) + 1 (for '\0') */
36 ssize_t (*getxattr_cb)(struct ceph_inode_info *ci, char *val,
37 size_t size);
38 bool (*exists_cb)(struct ceph_inode_info *ci);
39 unsigned int flags;
40};
41
42#define VXATTR_FLAG_READONLY (1<<0)
43#define VXATTR_FLAG_HIDDEN (1<<1)
44#define VXATTR_FLAG_RSTAT (1<<2)
45#define VXATTR_FLAG_DIRSTAT (1<<3)
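/*
 * READONLY entries reject __ceph_setxattr(); RSTAT and DIRSTAT mark
 * values derived from the MDS recursive-stat / dirstat data, so
 * __ceph_getxattr() adds CEPH_STAT_RSTAT or CEPH_CAP_FILE_SHARED to
 * the getattr mask before invoking the callback.
 */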
46
47/* layouts */
48
49static bool ceph_vxattrcb_layout_exists(struct ceph_inode_info *ci)
50{
51 struct ceph_file_layout *fl = &ci->i_layout;
52 return (fl->stripe_unit > 0 || fl->stripe_count > 0 ||
53 fl->object_size > 0 || fl->pool_id >= 0 ||
54 rcu_dereference_raw(fl->pool_ns) != NULL);
55}
56
57static ssize_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
58 size_t size)
59{
60 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
61 struct ceph_client *cl = fsc->client;
62 struct ceph_osd_client *osdc = &fsc->client->osdc;
63 struct ceph_string *pool_ns;
64 s64 pool = ci->i_layout.pool_id;
65 const char *pool_name;
66 const char *ns_field = " pool_namespace=";
67 char buf[128];
68 size_t len, total_len = 0;
69 ssize_t ret;
70
71 pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
72
73 doutc(cl, "%p\n", &ci->netfs.inode);
74 down_read(&osdc->lock);
75 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
76 if (pool_name) {
77 len = snprintf(buf, sizeof(buf),
78 "stripe_unit=%u stripe_count=%u object_size=%u pool=",
79 ci->i_layout.stripe_unit, ci->i_layout.stripe_count,
80 ci->i_layout.object_size);
81 total_len = len + strlen(pool_name);
82 } else {
83 len = snprintf(buf, sizeof(buf),
84 "stripe_unit=%u stripe_count=%u object_size=%u pool=%lld",
85 ci->i_layout.stripe_unit, ci->i_layout.stripe_count,
86 ci->i_layout.object_size, pool);
87 total_len = len;
88 }
89
90 if (pool_ns)
91 total_len += strlen(ns_field) + pool_ns->len;
92
93 ret = total_len;
94 if (size >= total_len) {
95 memcpy(val, buf, len);
96 ret = len;
97 if (pool_name) {
98 len = strlen(pool_name);
99 memcpy(val + ret, pool_name, len);
100 ret += len;
101 }
102 if (pool_ns) {
103 len = strlen(ns_field);
104 memcpy(val + ret, ns_field, len);
105 ret += len;
106 memcpy(val + ret, pool_ns->str, pool_ns->len);
107 ret += pool_ns->len;
108 }
109 }
110 up_read(&osdc->lock);
111 ceph_put_string(pool_ns);
112 return ret;
113}
114
115/*
116 * The convention with strings in xattrs is that they should not be NULL
117 * terminated, since we're returning the length with them. snprintf always
118 * NULL terminates however, so call it on a temporary buffer and then memcpy
119 * the result into place.
120 */
121static __printf(3, 4)
122int ceph_fmt_xattr(char *val, size_t size, const char *fmt, ...)
123{
124 int ret;
125 va_list args;
126 char buf[96]; /* NB: reevaluate size if new vxattrs are added */
127
128 va_start(args, fmt);
129 ret = vsnprintf(buf, size ? sizeof(buf) : 0, fmt, args);
130 va_end(args);
131
132 /* Sanity check */
133 if (size && ret + 1 > sizeof(buf)) {
134 WARN_ONCE(true, "Returned length too big (%d)", ret);
135 return -E2BIG;
136 }
137
138 if (ret <= size)
139 memcpy(val, buf, ret);
140 return ret;
141}
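/*
 * Callers follow the usual xattr probe-then-fetch pattern; a userspace
 * sketch (path and name are examples only):
 *
 *	ssize_t n = getxattr(path, "ceph.file.layout.pool", NULL, 0);
 *	if (n > 0) {
 *		char *buf = malloc(n);
 *		if (buf)
 *			n = getxattr(path, "ceph.file.layout.pool", buf, n);
 *	}
 *
 * so returning the full length when size == 0, and the bytes without a
 * trailing NUL otherwise, is exactly what the VFS expects.
 */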
142
143static ssize_t ceph_vxattrcb_layout_stripe_unit(struct ceph_inode_info *ci,
144 char *val, size_t size)
145{
146 return ceph_fmt_xattr(val, size, "%u", ci->i_layout.stripe_unit);
147}
148
149static ssize_t ceph_vxattrcb_layout_stripe_count(struct ceph_inode_info *ci,
150 char *val, size_t size)
151{
152 return ceph_fmt_xattr(val, size, "%u", ci->i_layout.stripe_count);
153}
154
155static ssize_t ceph_vxattrcb_layout_object_size(struct ceph_inode_info *ci,
156 char *val, size_t size)
157{
158 return ceph_fmt_xattr(val, size, "%u", ci->i_layout.object_size);
159}
160
161static ssize_t ceph_vxattrcb_layout_pool(struct ceph_inode_info *ci,
162 char *val, size_t size)
163{
164 ssize_t ret;
165 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
166 struct ceph_osd_client *osdc = &fsc->client->osdc;
167 s64 pool = ci->i_layout.pool_id;
168 const char *pool_name;
169
170 down_read(&osdc->lock);
171 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
172 if (pool_name) {
173 ret = strlen(pool_name);
174 if (ret <= size)
175 memcpy(val, pool_name, ret);
176 } else {
177 ret = ceph_fmt_xattr(val, size, "%lld", pool);
178 }
179 up_read(&osdc->lock);
180 return ret;
181}
182
183static ssize_t ceph_vxattrcb_layout_pool_namespace(struct ceph_inode_info *ci,
184 char *val, size_t size)
185{
186 ssize_t ret = 0;
187 struct ceph_string *ns = ceph_try_get_string(ci->i_layout.pool_ns);
188
189 if (ns) {
190 ret = ns->len;
191 if (ret <= size)
192 memcpy(val, ns->str, ret);
193 ceph_put_string(ns);
194 }
195 return ret;
196}
197
198/* directories */
199
200static ssize_t ceph_vxattrcb_dir_entries(struct ceph_inode_info *ci, char *val,
201 size_t size)
202{
203 return ceph_fmt_xattr(val, size, "%lld", ci->i_files + ci->i_subdirs);
204}
205
206static ssize_t ceph_vxattrcb_dir_files(struct ceph_inode_info *ci, char *val,
207 size_t size)
208{
209 return ceph_fmt_xattr(val, size, "%lld", ci->i_files);
210}
211
212static ssize_t ceph_vxattrcb_dir_subdirs(struct ceph_inode_info *ci, char *val,
213 size_t size)
214{
215 return ceph_fmt_xattr(val, size, "%lld", ci->i_subdirs);
216}
217
218static ssize_t ceph_vxattrcb_dir_rentries(struct ceph_inode_info *ci, char *val,
219 size_t size)
220{
221 return ceph_fmt_xattr(val, size, "%lld",
222 ci->i_rfiles + ci->i_rsubdirs);
223}
224
225static ssize_t ceph_vxattrcb_dir_rfiles(struct ceph_inode_info *ci, char *val,
226 size_t size)
227{
228 return ceph_fmt_xattr(val, size, "%lld", ci->i_rfiles);
229}
230
231static ssize_t ceph_vxattrcb_dir_rsubdirs(struct ceph_inode_info *ci, char *val,
232 size_t size)
233{
234 return ceph_fmt_xattr(val, size, "%lld", ci->i_rsubdirs);
235}
236
237static ssize_t ceph_vxattrcb_dir_rsnaps(struct ceph_inode_info *ci, char *val,
238 size_t size)
239{
240 return ceph_fmt_xattr(val, size, "%lld", ci->i_rsnaps);
241}
242
243static ssize_t ceph_vxattrcb_dir_rbytes(struct ceph_inode_info *ci, char *val,
244 size_t size)
245{
246 return ceph_fmt_xattr(val, size, "%lld", ci->i_rbytes);
247}
248
249static ssize_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val,
250 size_t size)
251{
252 return ceph_fmt_xattr(val, size, "%lld.%09ld", ci->i_rctime.tv_sec,
253 ci->i_rctime.tv_nsec);
254}
255
256/* dir pin */
257static bool ceph_vxattrcb_dir_pin_exists(struct ceph_inode_info *ci)
258{
259 return ci->i_dir_pin != -ENODATA;
260}
261
262static ssize_t ceph_vxattrcb_dir_pin(struct ceph_inode_info *ci, char *val,
263 size_t size)
264{
265 return ceph_fmt_xattr(val, size, "%d", (int)ci->i_dir_pin);
266}
267
268/* quotas */
269static bool ceph_vxattrcb_quota_exists(struct ceph_inode_info *ci)
270{
271 bool ret = false;
272 spin_lock(&ci->i_ceph_lock);
273 if ((ci->i_max_files || ci->i_max_bytes) &&
274 ci->i_vino.snap == CEPH_NOSNAP &&
275 ci->i_snap_realm &&
276 ci->i_snap_realm->ino == ci->i_vino.ino)
277 ret = true;
278 spin_unlock(&ci->i_ceph_lock);
279 return ret;
280}
281
282static ssize_t ceph_vxattrcb_quota(struct ceph_inode_info *ci, char *val,
283 size_t size)
284{
285 return ceph_fmt_xattr(val, size, "max_bytes=%llu max_files=%llu",
286 ci->i_max_bytes, ci->i_max_files);
287}
288
289static ssize_t ceph_vxattrcb_quota_max_bytes(struct ceph_inode_info *ci,
290 char *val, size_t size)
291{
292 return ceph_fmt_xattr(val, size, "%llu", ci->i_max_bytes);
293}
294
295static ssize_t ceph_vxattrcb_quota_max_files(struct ceph_inode_info *ci,
296 char *val, size_t size)
297{
298 return ceph_fmt_xattr(val, size, "%llu", ci->i_max_files);
299}
300
301/* snapshots */
302static bool ceph_vxattrcb_snap_btime_exists(struct ceph_inode_info *ci)
303{
304 return (ci->i_snap_btime.tv_sec != 0 || ci->i_snap_btime.tv_nsec != 0);
305}
306
307static ssize_t ceph_vxattrcb_snap_btime(struct ceph_inode_info *ci, char *val,
308 size_t size)
309{
310 return ceph_fmt_xattr(val, size, "%lld.%09ld", ci->i_snap_btime.tv_sec,
311 ci->i_snap_btime.tv_nsec);
312}
313
314static ssize_t ceph_vxattrcb_cluster_fsid(struct ceph_inode_info *ci,
315 char *val, size_t size)
316{
317 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
318
319 return ceph_fmt_xattr(val, size, "%pU", &fsc->client->fsid);
320}
321
322static ssize_t ceph_vxattrcb_client_id(struct ceph_inode_info *ci,
323 char *val, size_t size)
324{
325 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
326
327 return ceph_fmt_xattr(val, size, "client%lld",
328 ceph_client_gid(fsc->client));
329}
330
331static ssize_t ceph_vxattrcb_caps(struct ceph_inode_info *ci, char *val,
332 size_t size)
333{
334 int issued;
335
336 spin_lock(&ci->i_ceph_lock);
337 issued = __ceph_caps_issued(ci, NULL);
338 spin_unlock(&ci->i_ceph_lock);
339
340 return ceph_fmt_xattr(val, size, "%s/0x%x",
341 ceph_cap_string(issued), issued);
342}
343
344static ssize_t ceph_vxattrcb_auth_mds(struct ceph_inode_info *ci,
345 char *val, size_t size)
346{
347 int ret;
348
349 spin_lock(&ci->i_ceph_lock);
350 ret = ceph_fmt_xattr(val, size, "%d",
351 ci->i_auth_cap ? ci->i_auth_cap->session->s_mds : -1);
352 spin_unlock(&ci->i_ceph_lock);
353 return ret;
354}
355
356#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
357static bool ceph_vxattrcb_fscrypt_auth_exists(struct ceph_inode_info *ci)
358{
359 return ci->fscrypt_auth_len;
360}
361
362static ssize_t ceph_vxattrcb_fscrypt_auth(struct ceph_inode_info *ci,
363 char *val, size_t size)
364{
365 if (size) {
366 if (size < ci->fscrypt_auth_len)
367 return -ERANGE;
368 memcpy(val, ci->fscrypt_auth, ci->fscrypt_auth_len);
369 }
370 return ci->fscrypt_auth_len;
371}
372#endif /* CONFIG_FS_ENCRYPTION */
373
374#define CEPH_XATTR_NAME(_type, _name) XATTR_CEPH_PREFIX #_type "." #_name
375#define CEPH_XATTR_NAME2(_type, _name, _name2) \
376 XATTR_CEPH_PREFIX #_type "." #_name "." #_name2
377
378#define XATTR_NAME_CEPH(_type, _name, _flags) \
379 { \
380 .name = CEPH_XATTR_NAME(_type, _name), \
381 .name_size = sizeof (CEPH_XATTR_NAME(_type, _name)), \
382 .getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name, \
383 .exists_cb = NULL, \
384 .flags = (VXATTR_FLAG_READONLY | _flags), \
385 }
386#define XATTR_RSTAT_FIELD(_type, _name) \
387 XATTR_NAME_CEPH(_type, _name, VXATTR_FLAG_RSTAT)
388#define XATTR_RSTAT_FIELD_UPDATABLE(_type, _name) \
389 { \
390 .name = CEPH_XATTR_NAME(_type, _name), \
391 .name_size = sizeof (CEPH_XATTR_NAME(_type, _name)), \
392 .getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name, \
393 .exists_cb = NULL, \
394 .flags = VXATTR_FLAG_RSTAT, \
395 }
396#define XATTR_LAYOUT_FIELD(_type, _name, _field) \
397 { \
398 .name = CEPH_XATTR_NAME2(_type, _name, _field), \
399 .name_size = sizeof (CEPH_XATTR_NAME2(_type, _name, _field)), \
400 .getxattr_cb = ceph_vxattrcb_ ## _name ## _ ## _field, \
401 .exists_cb = ceph_vxattrcb_layout_exists, \
402 .flags = VXATTR_FLAG_HIDDEN, \
403 }
404#define XATTR_QUOTA_FIELD(_type, _name) \
405 { \
406 .name = CEPH_XATTR_NAME(_type, _name), \
407 .name_size = sizeof(CEPH_XATTR_NAME(_type, _name)), \
408 .getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name, \
409 .exists_cb = ceph_vxattrcb_quota_exists, \
410 .flags = VXATTR_FLAG_HIDDEN, \
411 }
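/*
 * For reference, XATTR_RSTAT_FIELD(dir, rfiles) expands to roughly:
 *
 *	{
 *		.name = "ceph.dir.rfiles",
 *		.name_size = sizeof("ceph.dir.rfiles"),
 *		.getxattr_cb = ceph_vxattrcb_dir_rfiles,
 *		.exists_cb = NULL,
 *		.flags = VXATTR_FLAG_READONLY | VXATTR_FLAG_RSTAT,
 *	}
 *
 * where name_size (16 here) includes the trailing NUL.
 */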
412
413static struct ceph_vxattr ceph_dir_vxattrs[] = {
414 {
415 .name = "ceph.dir.layout",
416 .name_size = sizeof("ceph.dir.layout"),
417 .getxattr_cb = ceph_vxattrcb_layout,
418 .exists_cb = ceph_vxattrcb_layout_exists,
419 .flags = VXATTR_FLAG_HIDDEN,
420 },
421 XATTR_LAYOUT_FIELD(dir, layout, stripe_unit),
422 XATTR_LAYOUT_FIELD(dir, layout, stripe_count),
423 XATTR_LAYOUT_FIELD(dir, layout, object_size),
424 XATTR_LAYOUT_FIELD(dir, layout, pool),
425 XATTR_LAYOUT_FIELD(dir, layout, pool_namespace),
426 XATTR_NAME_CEPH(dir, entries, VXATTR_FLAG_DIRSTAT),
427 XATTR_NAME_CEPH(dir, files, VXATTR_FLAG_DIRSTAT),
428 XATTR_NAME_CEPH(dir, subdirs, VXATTR_FLAG_DIRSTAT),
429 XATTR_RSTAT_FIELD(dir, rentries),
430 XATTR_RSTAT_FIELD(dir, rfiles),
431 XATTR_RSTAT_FIELD(dir, rsubdirs),
432 XATTR_RSTAT_FIELD(dir, rsnaps),
433 XATTR_RSTAT_FIELD(dir, rbytes),
434 XATTR_RSTAT_FIELD_UPDATABLE(dir, rctime),
435 {
436 .name = "ceph.dir.pin",
437 .name_size = sizeof("ceph.dir.pin"),
438 .getxattr_cb = ceph_vxattrcb_dir_pin,
439 .exists_cb = ceph_vxattrcb_dir_pin_exists,
440 .flags = VXATTR_FLAG_HIDDEN,
441 },
442 {
443 .name = "ceph.quota",
444 .name_size = sizeof("ceph.quota"),
445 .getxattr_cb = ceph_vxattrcb_quota,
446 .exists_cb = ceph_vxattrcb_quota_exists,
447 .flags = VXATTR_FLAG_HIDDEN,
448 },
449 XATTR_QUOTA_FIELD(quota, max_bytes),
450 XATTR_QUOTA_FIELD(quota, max_files),
451 {
452 .name = "ceph.snap.btime",
453 .name_size = sizeof("ceph.snap.btime"),
454 .getxattr_cb = ceph_vxattrcb_snap_btime,
455 .exists_cb = ceph_vxattrcb_snap_btime_exists,
456 .flags = VXATTR_FLAG_READONLY,
457 },
458 {
459 .name = "ceph.caps",
460 .name_size = sizeof("ceph.caps"),
461 .getxattr_cb = ceph_vxattrcb_caps,
462 .exists_cb = NULL,
463 .flags = VXATTR_FLAG_HIDDEN,
464 },
465 { .name = NULL, 0 } /* Required table terminator */
466};
467
468/* files */
469
470static struct ceph_vxattr ceph_file_vxattrs[] = {
471 {
472 .name = "ceph.file.layout",
473 .name_size = sizeof("ceph.file.layout"),
474 .getxattr_cb = ceph_vxattrcb_layout,
475 .exists_cb = ceph_vxattrcb_layout_exists,
476 .flags = VXATTR_FLAG_HIDDEN,
477 },
478 XATTR_LAYOUT_FIELD(file, layout, stripe_unit),
479 XATTR_LAYOUT_FIELD(file, layout, stripe_count),
480 XATTR_LAYOUT_FIELD(file, layout, object_size),
481 XATTR_LAYOUT_FIELD(file, layout, pool),
482 XATTR_LAYOUT_FIELD(file, layout, pool_namespace),
483 {
484 .name = "ceph.snap.btime",
485 .name_size = sizeof("ceph.snap.btime"),
486 .getxattr_cb = ceph_vxattrcb_snap_btime,
487 .exists_cb = ceph_vxattrcb_snap_btime_exists,
488 .flags = VXATTR_FLAG_READONLY,
489 },
490 {
491 .name = "ceph.caps",
492 .name_size = sizeof("ceph.caps"),
493 .getxattr_cb = ceph_vxattrcb_caps,
494 .exists_cb = NULL,
495 .flags = VXATTR_FLAG_HIDDEN,
496 },
497 { .name = NULL, 0 } /* Required table terminator */
498};
499
500static struct ceph_vxattr ceph_common_vxattrs[] = {
501 {
502 .name = "ceph.cluster_fsid",
503 .name_size = sizeof("ceph.cluster_fsid"),
504 .getxattr_cb = ceph_vxattrcb_cluster_fsid,
505 .exists_cb = NULL,
506 .flags = VXATTR_FLAG_READONLY,
507 },
508 {
509 .name = "ceph.client_id",
510 .name_size = sizeof("ceph.client_id"),
511 .getxattr_cb = ceph_vxattrcb_client_id,
512 .exists_cb = NULL,
513 .flags = VXATTR_FLAG_READONLY,
514 },
515 {
516 .name = "ceph.auth_mds",
517 .name_size = sizeof("ceph.auth_mds"),
518 .getxattr_cb = ceph_vxattrcb_auth_mds,
519 .exists_cb = NULL,
520 .flags = VXATTR_FLAG_READONLY,
521 },
522#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
523 {
524 .name = "ceph.fscrypt.auth",
525 .name_size = sizeof("ceph.fscrypt.auth"),
526 .getxattr_cb = ceph_vxattrcb_fscrypt_auth,
527 .exists_cb = ceph_vxattrcb_fscrypt_auth_exists,
528 .flags = VXATTR_FLAG_READONLY,
529 },
530#endif /* CONFIG_FS_ENCRYPTION */
531 { .name = NULL, 0 } /* Required table terminator */
532};
533
534static struct ceph_vxattr *ceph_inode_vxattrs(struct inode *inode)
535{
536 if (S_ISDIR(inode->i_mode))
537 return ceph_dir_vxattrs;
538 else if (S_ISREG(inode->i_mode))
539 return ceph_file_vxattrs;
540 return NULL;
541}
542
543static struct ceph_vxattr *ceph_match_vxattr(struct inode *inode,
544 const char *name)
545{
546 struct ceph_vxattr *vxattr = ceph_inode_vxattrs(inode);
547
548 if (vxattr) {
549 while (vxattr->name) {
550 if (!strcmp(vxattr->name, name))
551 return vxattr;
552 vxattr++;
553 }
554 }
555
556 vxattr = ceph_common_vxattrs;
557 while (vxattr->name) {
558 if (!strcmp(vxattr->name, name))
559 return vxattr;
560 vxattr++;
561 }
562
563 return NULL;
564}
565
566#define MAX_XATTR_VAL_PRINT_LEN 256
567
568static int __set_xattr(struct ceph_inode_info *ci,
569 const char *name, int name_len,
570 const char *val, int val_len,
571 int flags, int update_xattr,
572 struct ceph_inode_xattr **newxattr)
573{
574 struct inode *inode = &ci->netfs.inode;
575 struct ceph_client *cl = ceph_inode_to_client(inode);
576 struct rb_node **p;
577 struct rb_node *parent = NULL;
578 struct ceph_inode_xattr *xattr = NULL;
579 int c;
580 int new = 0;
581
582 p = &ci->i_xattrs.index.rb_node;
583 while (*p) {
584 parent = *p;
585 xattr = rb_entry(parent, struct ceph_inode_xattr, node);
586 c = strncmp(name, xattr->name, min(name_len, xattr->name_len));
587 if (c < 0)
588 p = &(*p)->rb_left;
589 else if (c > 0)
590 p = &(*p)->rb_right;
591 else {
592 if (name_len == xattr->name_len)
593 break;
594 else if (name_len < xattr->name_len)
595 p = &(*p)->rb_left;
596 else
597 p = &(*p)->rb_right;
598 }
599 xattr = NULL;
600 }
601
602 if (update_xattr) {
603 int err = 0;
604
605 if (xattr && (flags & XATTR_CREATE))
606 err = -EEXIST;
607 else if (!xattr && (flags & XATTR_REPLACE))
608 err = -ENODATA;
609 if (err) {
610 kfree(name);
611 kfree(val);
612 kfree(*newxattr);
613 return err;
614 }
615 if (update_xattr < 0) {
616 if (xattr)
617 __remove_xattr(ci, xattr);
618 kfree(name);
619 kfree(*newxattr);
620 return 0;
621 }
622 }
623
624 if (!xattr) {
625 new = 1;
626 xattr = *newxattr;
627 xattr->name = name;
628 xattr->name_len = name_len;
629 xattr->should_free_name = update_xattr;
630
631 ci->i_xattrs.count++;
632 doutc(cl, "count=%d\n", ci->i_xattrs.count);
633 } else {
634 kfree(*newxattr);
635 *newxattr = NULL;
636 if (xattr->should_free_val)
637 kfree(xattr->val);
638
639 if (update_xattr) {
640 kfree(name);
641 name = xattr->name;
642 }
643 ci->i_xattrs.names_size -= xattr->name_len;
644 ci->i_xattrs.vals_size -= xattr->val_len;
645 }
646 ci->i_xattrs.names_size += name_len;
647 ci->i_xattrs.vals_size += val_len;
648 if (val)
649 xattr->val = val;
650 else
651 xattr->val = "";
652
653 xattr->val_len = val_len;
654 xattr->dirty = update_xattr;
655 xattr->should_free_val = (val && update_xattr);
656
657 if (new) {
658 rb_link_node(&xattr->node, parent, p);
659 rb_insert_color(&xattr->node, &ci->i_xattrs.index);
660 doutc(cl, "p=%p\n", p);
661 }
662
663 doutc(cl, "added %p %llx.%llx xattr %p %.*s=%.*s%s\n", inode,
664 ceph_vinop(inode), xattr, name_len, name, min(val_len,
665 MAX_XATTR_VAL_PRINT_LEN), val,
666 val_len > MAX_XATTR_VAL_PRINT_LEN ? "..." : "");
667
668 return 0;
669}
670
671static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
672 const char *name)
673{
674 struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode);
675 struct rb_node **p;
676 struct rb_node *parent = NULL;
677 struct ceph_inode_xattr *xattr = NULL;
678 int name_len = strlen(name);
679 int c;
680
681 p = &ci->i_xattrs.index.rb_node;
682 while (*p) {
683 parent = *p;
684 xattr = rb_entry(parent, struct ceph_inode_xattr, node);
685 c = strncmp(name, xattr->name, xattr->name_len);
686 if (c == 0 && name_len > xattr->name_len)
687 c = 1;
688 if (c < 0)
689 p = &(*p)->rb_left;
690 else if (c > 0)
691 p = &(*p)->rb_right;
692 else {
693 int len = min(xattr->val_len, MAX_XATTR_VAL_PRINT_LEN);
694
695 doutc(cl, "%s found %.*s%s\n", name, len, xattr->val,
696 xattr->val_len > len ? "..." : "");
697 return xattr;
698 }
699 }
700
701 doutc(cl, "%s not found\n", name);
702
703 return NULL;
704}
705
706static void __free_xattr(struct ceph_inode_xattr *xattr)
707{
708 BUG_ON(!xattr);
709
710 if (xattr->should_free_name)
711 kfree(xattr->name);
712 if (xattr->should_free_val)
713 kfree(xattr->val);
714
715 kfree(xattr);
716}
717
718static int __remove_xattr(struct ceph_inode_info *ci,
719 struct ceph_inode_xattr *xattr)
720{
721 if (!xattr)
722 return -ENODATA;
723
724 rb_erase(&xattr->node, &ci->i_xattrs.index);
725
726 if (xattr->should_free_name)
727 kfree(xattr->name);
728 if (xattr->should_free_val)
729 kfree(xattr->val);
730
731 ci->i_xattrs.names_size -= xattr->name_len;
732 ci->i_xattrs.vals_size -= xattr->val_len;
733 ci->i_xattrs.count--;
734 kfree(xattr);
735
736 return 0;
737}
738
739static char *__copy_xattr_names(struct ceph_inode_info *ci,
740 char *dest)
741{
742 struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode);
743 struct rb_node *p;
744 struct ceph_inode_xattr *xattr = NULL;
745
746 p = rb_first(&ci->i_xattrs.index);
747 doutc(cl, "count=%d\n", ci->i_xattrs.count);
748
749 while (p) {
750 xattr = rb_entry(p, struct ceph_inode_xattr, node);
751 memcpy(dest, xattr->name, xattr->name_len);
752 dest[xattr->name_len] = '\0';
753
754 doutc(cl, "dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
755 xattr->name_len, ci->i_xattrs.names_size);
756
757 dest += xattr->name_len + 1;
758 p = rb_next(p);
759 }
760
761 return dest;
762}
763
764void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
765{
766 struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode);
767 struct rb_node *p, *tmp;
768 struct ceph_inode_xattr *xattr = NULL;
769
770 p = rb_first(&ci->i_xattrs.index);
771
772 doutc(cl, "p=%p\n", p);
773
774 while (p) {
775 xattr = rb_entry(p, struct ceph_inode_xattr, node);
776 tmp = p;
777 p = rb_next(tmp);
778 doutc(cl, "next p=%p (%.*s)\n", p, xattr->name_len, xattr->name);
779 rb_erase(tmp, &ci->i_xattrs.index);
780
781 __free_xattr(xattr);
782 }
783
784 ci->i_xattrs.names_size = 0;
785 ci->i_xattrs.vals_size = 0;
786 ci->i_xattrs.index_version = 0;
787 ci->i_xattrs.count = 0;
788 ci->i_xattrs.index = RB_ROOT;
789}
790
791static int __build_xattrs(struct inode *inode)
792 __releases(ci->i_ceph_lock)
793 __acquires(ci->i_ceph_lock)
794{
795 struct ceph_client *cl = ceph_inode_to_client(inode);
796 u32 namelen;
797 u32 numattr = 0;
798 void *p, *end;
799 u32 len;
800 const char *name, *val;
801 struct ceph_inode_info *ci = ceph_inode(inode);
802 u64 xattr_version;
803 struct ceph_inode_xattr **xattrs = NULL;
804 int err = 0;
805 int i;
806
807 doutc(cl, "len=%d\n",
808 ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);
809
810 if (ci->i_xattrs.index_version >= ci->i_xattrs.version)
811 return 0; /* already built */
812
813 __ceph_destroy_xattrs(ci);
814
815start:
816 /* updated internal xattr rb tree */
817 if (ci->i_xattrs.blob && ci->i_xattrs.blob->vec.iov_len > 4) {
818 p = ci->i_xattrs.blob->vec.iov_base;
819 end = p + ci->i_xattrs.blob->vec.iov_len;
820 ceph_decode_32_safe(&p, end, numattr, bad);
821 xattr_version = ci->i_xattrs.version;
822 spin_unlock(&ci->i_ceph_lock);
823
824 xattrs = kcalloc(numattr, sizeof(struct ceph_inode_xattr *),
825 GFP_NOFS);
826 err = -ENOMEM;
827 if (!xattrs)
828 goto bad_lock;
829
830 for (i = 0; i < numattr; i++) {
831 xattrs[i] = kmalloc(sizeof(struct ceph_inode_xattr),
832 GFP_NOFS);
833 if (!xattrs[i])
834 goto bad_lock;
835 }
836
837 spin_lock(&ci->i_ceph_lock);
838 if (ci->i_xattrs.version != xattr_version) {
839 /* lost a race, retry */
840 for (i = 0; i < numattr; i++)
841 kfree(xattrs[i]);
842 kfree(xattrs);
843 xattrs = NULL;
844 goto start;
845 }
846 err = -EIO;
847 while (numattr--) {
848 ceph_decode_32_safe(&p, end, len, bad);
849 namelen = len;
850 name = p;
851 p += len;
852 ceph_decode_32_safe(&p, end, len, bad);
853 val = p;
854 p += len;
855
856 err = __set_xattr(ci, name, namelen, val, len,
857 0, 0, &xattrs[numattr]);
858
859 if (err < 0)
860 goto bad;
861 }
862 kfree(xattrs);
863 }
864 ci->i_xattrs.index_version = ci->i_xattrs.version;
865 ci->i_xattrs.dirty = false;
866
867 return err;
868bad_lock:
869 spin_lock(&ci->i_ceph_lock);
870bad:
871 if (xattrs) {
872 for (i = 0; i < numattr; i++)
873 kfree(xattrs[i]);
874 kfree(xattrs);
875 }
876 ci->i_xattrs.names_size = 0;
877 return err;
878}
879
880static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
881 int val_size)
882{
883 struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode);
884
885 /*
886 * 4 bytes for the length, and additional 4 bytes per each xattr name,
887 * 4 bytes per each value
888 */
889 int size = 4 + ci->i_xattrs.count*(4 + 4) +
890 ci->i_xattrs.names_size +
891 ci->i_xattrs.vals_size;
892 doutc(cl, "c=%d names.size=%d vals.size=%d\n", ci->i_xattrs.count,
893 ci->i_xattrs.names_size, ci->i_xattrs.vals_size);
894
895 if (name_size)
896 size += 4 + 4 + name_size + val_size;
897
898 return size;
899}
900
901/*
902 * If there are dirty xattrs, re-encode xattrs into the prealloc_blob
903 * and swap into place. It returns the old i_xattrs.blob (or NULL) so
904 * that it can be freed by the caller as the i_ceph_lock is likely to be
905 * held.
906 */
907struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci)
908{
909 struct inode *inode = &ci->netfs.inode;
910 struct ceph_client *cl = ceph_inode_to_client(inode);
911 struct rb_node *p;
912 struct ceph_inode_xattr *xattr = NULL;
913 struct ceph_buffer *old_blob = NULL;
914 void *dest;
915
916 doutc(cl, "%p %llx.%llx\n", inode, ceph_vinop(inode));
917 if (ci->i_xattrs.dirty) {
918 int need = __get_required_blob_size(ci, 0, 0);
919
920 BUG_ON(need > ci->i_xattrs.prealloc_blob->alloc_len);
921
922 p = rb_first(&ci->i_xattrs.index);
923 dest = ci->i_xattrs.prealloc_blob->vec.iov_base;
924
925 ceph_encode_32(&dest, ci->i_xattrs.count);
926 while (p) {
927 xattr = rb_entry(p, struct ceph_inode_xattr, node);
928
929 ceph_encode_32(&dest, xattr->name_len);
930 memcpy(dest, xattr->name, xattr->name_len);
931 dest += xattr->name_len;
932 ceph_encode_32(&dest, xattr->val_len);
933 memcpy(dest, xattr->val, xattr->val_len);
934 dest += xattr->val_len;
935
936 p = rb_next(p);
937 }
938
939 /* adjust buffer len; it may be larger than we need */
940 ci->i_xattrs.prealloc_blob->vec.iov_len =
941 dest - ci->i_xattrs.prealloc_blob->vec.iov_base;
942
943 if (ci->i_xattrs.blob)
944 old_blob = ci->i_xattrs.blob;
945 ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
946 ci->i_xattrs.prealloc_blob = NULL;
947 ci->i_xattrs.dirty = false;
948 ci->i_xattrs.version++;
949 }
950
951 return old_blob;
952}
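/*
 * Layout of the encoded blob produced above (all lengths little-endian,
 * as written by ceph_encode_32):
 *
 *	u32 count
 *	count times:
 *		u32 name_len, name bytes (no NUL)
 *		u32 val_len,  val bytes
 *
 * which is the same format __build_xattrs() decodes when a blob arrives
 * from the MDS.
 */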
953
954static inline int __get_request_mask(struct inode *in) {
955 struct ceph_mds_request *req = current->journal_info;
956 int mask = 0;
957 if (req && req->r_target_inode == in) {
958 if (req->r_op == CEPH_MDS_OP_LOOKUP ||
959 req->r_op == CEPH_MDS_OP_LOOKUPINO ||
960 req->r_op == CEPH_MDS_OP_LOOKUPPARENT ||
961 req->r_op == CEPH_MDS_OP_GETATTR) {
962 mask = le32_to_cpu(req->r_args.getattr.mask);
963 } else if (req->r_op == CEPH_MDS_OP_OPEN ||
964 req->r_op == CEPH_MDS_OP_CREATE) {
965 mask = le32_to_cpu(req->r_args.open.mask);
966 }
967 }
968 return mask;
969}
970
971ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value,
972 size_t size)
973{
974 struct ceph_client *cl = ceph_inode_to_client(inode);
975 struct ceph_inode_info *ci = ceph_inode(inode);
976 struct ceph_inode_xattr *xattr;
977 struct ceph_vxattr *vxattr;
978 int req_mask;
979 ssize_t err;
980
981 if (strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN))
982 goto handle_non_vxattrs;
983
984 /* let's see if a virtual xattr was requested */
985 vxattr = ceph_match_vxattr(inode, name);
986 if (vxattr) {
987 int mask = 0;
988 if (vxattr->flags & VXATTR_FLAG_RSTAT)
989 mask |= CEPH_STAT_RSTAT;
990 if (vxattr->flags & VXATTR_FLAG_DIRSTAT)
991 mask |= CEPH_CAP_FILE_SHARED;
992 err = ceph_do_getattr(inode, mask, true);
993 if (err)
994 return err;
995 err = -ENODATA;
996 if (!(vxattr->exists_cb && !vxattr->exists_cb(ci))) {
997 err = vxattr->getxattr_cb(ci, value, size);
998 if (size && size < err)
999 err = -ERANGE;
1000 }
1001 return err;
1002 } else {
1003 err = ceph_do_getvxattr(inode, name, value, size);
1004 /* this would happen with a new client and old server combo */
1005 if (err == -EOPNOTSUPP)
1006 err = -ENODATA;
1007 return err;
1008 }
1009handle_non_vxattrs:
1010 req_mask = __get_request_mask(inode);
1011
1012 spin_lock(&ci->i_ceph_lock);
1013 doutc(cl, "%p %llx.%llx name '%s' ver=%lld index_ver=%lld\n", inode,
1014 ceph_vinop(inode), name, ci->i_xattrs.version,
1015 ci->i_xattrs.index_version);
1016
1017 if (ci->i_xattrs.version == 0 ||
1018 !((req_mask & CEPH_CAP_XATTR_SHARED) ||
1019 __ceph_caps_issued_mask_metric(ci, CEPH_CAP_XATTR_SHARED, 1))) {
1020 spin_unlock(&ci->i_ceph_lock);
1021
1022 /* security module gets xattr while filling trace */
1023 if (current->journal_info) {
1024 pr_warn_ratelimited_client(cl,
1025 "sync %p %llx.%llx during filling trace\n",
1026 inode, ceph_vinop(inode));
1027 return -EBUSY;
1028 }
1029
1030 /* get xattrs from mds (if we don't already have them) */
1031 err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR, true);
1032 if (err)
1033 return err;
1034 spin_lock(&ci->i_ceph_lock);
1035 }
1036
1037 err = __build_xattrs(inode);
1038 if (err < 0)
1039 goto out;
1040
1041 err = -ENODATA; /* == ENOATTR */
1042 xattr = __get_xattr(ci, name);
1043 if (!xattr)
1044 goto out;
1045
1046 err = -ERANGE;
1047 if (size && size < xattr->val_len)
1048 goto out;
1049
1050 err = xattr->val_len;
1051 if (size == 0)
1052 goto out;
1053
1054 memcpy(value, xattr->val, xattr->val_len);
1055
1056 if (current->journal_info &&
1057 !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) &&
1058 security_ismaclabel(name + XATTR_SECURITY_PREFIX_LEN))
1059 ci->i_ceph_flags |= CEPH_I_SEC_INITED;
1060out:
1061 spin_unlock(&ci->i_ceph_lock);
1062 return err;
1063}
1064
1065ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
1066{
1067 struct inode *inode = d_inode(dentry);
1068 struct ceph_client *cl = ceph_inode_to_client(inode);
1069 struct ceph_inode_info *ci = ceph_inode(inode);
1070 bool len_only = (size == 0);
1071 u32 namelen;
1072 int err;
1073
1074 spin_lock(&ci->i_ceph_lock);
1075 doutc(cl, "%p %llx.%llx ver=%lld index_ver=%lld\n", inode,
1076 ceph_vinop(inode), ci->i_xattrs.version,
1077 ci->i_xattrs.index_version);
1078
1079 if (ci->i_xattrs.version == 0 ||
1080 !__ceph_caps_issued_mask_metric(ci, CEPH_CAP_XATTR_SHARED, 1)) {
1081 spin_unlock(&ci->i_ceph_lock);
1082 err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR, true);
1083 if (err)
1084 return err;
1085 spin_lock(&ci->i_ceph_lock);
1086 }
1087
1088 err = __build_xattrs(inode);
1089 if (err < 0)
1090 goto out;
1091
1092 /* add 1 byte for each xattr due to the null termination */
1093 namelen = ci->i_xattrs.names_size + ci->i_xattrs.count;
1094 if (!len_only) {
1095 if (namelen > size) {
1096 err = -ERANGE;
1097 goto out;
1098 }
1099 names = __copy_xattr_names(ci, names);
1100 size -= namelen;
1101 }
1102 err = namelen;
1103out:
1104 spin_unlock(&ci->i_ceph_lock);
1105 return err;
1106}
1107
1108static int ceph_sync_setxattr(struct inode *inode, const char *name,
1109 const char *value, size_t size, int flags)
1110{
1111 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
1112 struct ceph_client *cl = ceph_inode_to_client(inode);
1113 struct ceph_inode_info *ci = ceph_inode(inode);
1114 struct ceph_mds_request *req;
1115 struct ceph_mds_client *mdsc = fsc->mdsc;
1116 struct ceph_osd_client *osdc = &fsc->client->osdc;
1117 struct ceph_pagelist *pagelist = NULL;
1118 int op = CEPH_MDS_OP_SETXATTR;
1119 int err;
1120
1121 if (size > 0) {
1122 /* copy value into pagelist */
1123 pagelist = ceph_pagelist_alloc(GFP_NOFS);
1124 if (!pagelist)
1125 return -ENOMEM;
1126
1127 err = ceph_pagelist_append(pagelist, value, size);
1128 if (err)
1129 goto out;
1130 } else if (!value) {
1131 if (flags & CEPH_XATTR_REPLACE)
1132 op = CEPH_MDS_OP_RMXATTR;
1133 else
1134 flags |= CEPH_XATTR_REMOVE;
1135 }
1136
1137 doutc(cl, "name %s value size %zu\n", name, size);
1138
1139 /* do request */
1140 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
1141 if (IS_ERR(req)) {
1142 err = PTR_ERR(req);
1143 goto out;
1144 }
1145
1146 req->r_path2 = kstrdup(name, GFP_NOFS);
1147 if (!req->r_path2) {
1148 ceph_mdsc_put_request(req);
1149 err = -ENOMEM;
1150 goto out;
1151 }
1152
1153 if (op == CEPH_MDS_OP_SETXATTR) {
1154 req->r_args.setxattr.flags = cpu_to_le32(flags);
1155 req->r_args.setxattr.osdmap_epoch =
1156 cpu_to_le32(osdc->osdmap->epoch);
1157 req->r_pagelist = pagelist;
1158 pagelist = NULL;
1159 }
1160
1161 req->r_inode = inode;
1162 ihold(inode);
1163 req->r_num_caps = 1;
1164 req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
1165
1166 doutc(cl, "xattr.ver (before): %lld\n", ci->i_xattrs.version);
1167 err = ceph_mdsc_do_request(mdsc, NULL, req);
1168 ceph_mdsc_put_request(req);
1169 doutc(cl, "xattr.ver (after): %lld\n", ci->i_xattrs.version);
1170
1171out:
1172 if (pagelist)
1173 ceph_pagelist_release(pagelist);
1174 return err;
1175}
1176
int __ceph_setxattr(struct inode *inode, const char *name,
		    const void *value, size_t size, int flags)
{
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_vxattr *vxattr;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
	struct ceph_cap_flush *prealloc_cf = NULL;
	struct ceph_buffer *old_blob = NULL;
	int issued;
	int err;
	int dirty = 0;
	int name_len = strlen(name);
	int val_len = size;
	char *newname = NULL;
	char *newval = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int required_blob_size;
	bool check_realm = false;
	bool lock_snap_rwsem = false;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	vxattr = ceph_match_vxattr(inode, name);
	if (vxattr) {
		if (vxattr->flags & VXATTR_FLAG_READONLY)
			return -EOPNOTSUPP;
		if (value && !strncmp(vxattr->name, "ceph.quota", 10))
			check_realm = true;
	}

	/* pass any unhandled ceph.* xattrs through to the MDS */
	if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN))
		goto do_sync_unlocked;

	/* preallocate memory for xattr name, value, index node */
	err = -ENOMEM;
	newname = kmemdup(name, name_len + 1, GFP_NOFS);
	if (!newname)
		goto out;

	if (val_len) {
		newval = kmemdup(value, val_len, GFP_NOFS);
		if (!newval)
			goto out;
	}

	xattr = kmalloc(sizeof(struct ceph_inode_xattr), GFP_NOFS);
	if (!xattr)
		goto out;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		goto out;

	spin_lock(&ci->i_ceph_lock);
retry:
	issued = __ceph_caps_issued(ci, NULL);
	required_blob_size = __get_required_blob_size(ci, name_len, val_len);
	if ((ci->i_xattrs.version == 0) || !(issued & CEPH_CAP_XATTR_EXCL) ||
	    (required_blob_size > mdsc->mdsmap->m_max_xattr_size)) {
		doutc(cl, "sync version: %llu size: %d max: %llu\n",
		      ci->i_xattrs.version, required_blob_size,
		      mdsc->mdsmap->m_max_xattr_size);
		goto do_sync;
	}

	if (!lock_snap_rwsem && !ci->i_head_snapc) {
		lock_snap_rwsem = true;
		if (!down_read_trylock(&mdsc->snap_rwsem)) {
			spin_unlock(&ci->i_ceph_lock);
			down_read(&mdsc->snap_rwsem);
			spin_lock(&ci->i_ceph_lock);
			goto retry;
		}
	}

	doutc(cl, "%p %llx.%llx name '%s' issued %s\n", inode,
	      ceph_vinop(inode), name, ceph_cap_string(issued));
	__build_xattrs(inode);

	if (!ci->i_xattrs.prealloc_blob ||
	    required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
		struct ceph_buffer *blob;

		spin_unlock(&ci->i_ceph_lock);
		ceph_buffer_put(old_blob); /* Shouldn't be required */
		doutc(cl, " pre-allocating new blob size=%d\n",
		      required_blob_size);
		blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
		if (!blob)
			goto do_sync_unlocked;
		spin_lock(&ci->i_ceph_lock);
		/* prealloc_blob can't be released while holding i_ceph_lock */
		if (ci->i_xattrs.prealloc_blob)
			old_blob = ci->i_xattrs.prealloc_blob;
		ci->i_xattrs.prealloc_blob = blob;
		goto retry;
	}

	err = __set_xattr(ci, newname, name_len, newval, val_len,
			  flags, value ? 1 : -1, &xattr);

	if (!err) {
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL,
					       &prealloc_cf);
		ci->i_xattrs.dirty = true;
		inode_set_ctime_current(inode);
	}

	spin_unlock(&ci->i_ceph_lock);
	ceph_buffer_put(old_blob);
	if (lock_snap_rwsem)
		up_read(&mdsc->snap_rwsem);
	if (dirty)
		__mark_inode_dirty(inode, dirty);
	ceph_free_cap_flush(prealloc_cf);
	return err;

do_sync:
	spin_unlock(&ci->i_ceph_lock);
do_sync_unlocked:
	if (lock_snap_rwsem)
		up_read(&mdsc->snap_rwsem);

	/*
	 * A security module is setting a xattr while we are still
	 * filling the MDS reply trace; we cannot issue another
	 * synchronous request from this context, so bail out.
	 */
	if (current->journal_info) {
		pr_warn_ratelimited_client(cl,
				"sync %p %llx.%llx during filling trace\n",
				inode, ceph_vinop(inode));
		err = -EBUSY;
	} else {
		err = ceph_sync_setxattr(inode, name, value, size, flags);
		if (err >= 0 && check_realm) {
			/* check if snaprealm was created for quota inode */
			spin_lock(&ci->i_ceph_lock);
			if ((ci->i_max_files || ci->i_max_bytes) &&
			    !(ci->i_snap_realm &&
			      ci->i_snap_realm->ino == ci->i_vino.ino))
				err = -EOPNOTSUPP;
			spin_unlock(&ci->i_ceph_lock);
		}
	}
out:
	ceph_free_cap_flush(prealloc_cf);
	kfree(newname);
	kfree(newval);
	kfree(xattr);
	return err;
}

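/*
 * VFS xattr_handler entry points.  ceph_other_xattr_handler below uses
 * an empty prefix, so these callbacks receive the full xattr name
 * (e.g. "user.foo" or "ceph.dir.rbytes") and only need to validate it
 * before handing off to __ceph_getxattr()/__ceph_setxattr().
 */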
static int ceph_get_xattr_handler(const struct xattr_handler *handler,
				  struct dentry *dentry, struct inode *inode,
				  const char *name, void *value, size_t size)
{
	if (!ceph_is_valid_xattr(name))
		return -EOPNOTSUPP;
	return __ceph_getxattr(inode, name, value, size);
}

static int ceph_set_xattr_handler(const struct xattr_handler *handler,
				  struct mnt_idmap *idmap,
				  struct dentry *unused, struct inode *inode,
				  const char *name, const void *value,
				  size_t size, int flags)
{
	if (!ceph_is_valid_xattr(name))
		return -EOPNOTSUPP;
	return __ceph_setxattr(inode, name, value, size, flags);
}

static const struct xattr_handler ceph_other_xattr_handler = {
	.prefix = "",  /* match any name => handlers called with full name */
	.get = ceph_get_xattr_handler,
	.set = ceph_set_xattr_handler,
};

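/*
 * LSM integration helpers (CONFIG_SECURITY).  Security xattrs are only
 * worth fetching when the LSM has attached state to the inode, i.e.
 * i_security is non-NULL.
 */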
#ifdef CONFIG_SECURITY
bool ceph_security_xattr_wanted(struct inode *in)
{
	return in->i_security != NULL;
}

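/*
 * Return true if the security xattrs for this inode are not yet
 * available locally (security init not done and no cached xattrs
 * covered by CEPH_CAP_XATTR_SHARED), meaning a fetch from the MDS
 * would be required.
 */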
bool ceph_security_xattr_deadlock(struct inode *in)
{
	struct ceph_inode_info *ci;
	bool ret;

	if (!in->i_security)
		return false;
	ci = ceph_inode(in);
	spin_lock(&ci->i_ceph_lock);
	ret = !(ci->i_ceph_flags & CEPH_I_SEC_INITED) &&
	      !(ci->i_xattrs.version > 0 &&
		__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 0));
	spin_unlock(&ci->i_ceph_lock);
	return ret;
}

#ifdef CONFIG_CEPH_FS_SECURITY_LABEL
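/*
 * Obtain the security context for a new inode from the LSM and append
 * it as a name/value pair to the xattr pagelist in as_ctx, bumping the
 * pair count if a pagelist already exists.  The caller is expected to
 * attach this pagelist to the MDS request that creates the inode.
 */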
int ceph_security_init_secctx(struct dentry *dentry, umode_t mode,
			      struct ceph_acl_sec_ctx *as_ctx)
{
	struct ceph_pagelist *pagelist = as_ctx->pagelist;
	const char *name;
	size_t name_len;
	int err;

	err = security_dentry_init_security(dentry, mode, &dentry->d_name,
					    &name, &as_ctx->sec_ctx,
					    &as_ctx->sec_ctxlen);
	if (err < 0) {
		WARN_ON_ONCE(err != -EOPNOTSUPP);
		err = 0; /* do nothing */
		goto out;
	}

	err = -ENOMEM;
	if (!pagelist) {
		pagelist = ceph_pagelist_alloc(GFP_KERNEL);
		if (!pagelist)
			goto out;
		err = ceph_pagelist_reserve(pagelist, PAGE_SIZE);
		if (err)
			goto out;
		ceph_pagelist_encode_32(pagelist, 1);
	}

	/*
	 * FIXME: make security_dentry_init_security() generic.  It
	 * currently supports only a single security module, and only
	 * SELinux implements the dentry_init_security hook.
	 */
	name_len = strlen(name);
	err = ceph_pagelist_reserve(pagelist,
				    4 * 2 + name_len + as_ctx->sec_ctxlen);
	if (err)
		goto out;

	if (as_ctx->pagelist) {
		/* update count of KV pairs */
		BUG_ON(pagelist->length <= sizeof(__le32));
		if (list_is_singular(&pagelist->head)) {
			le32_add_cpu((__le32 *)pagelist->mapped_tail, 1);
		} else {
			struct page *page = list_first_entry(&pagelist->head,
							     struct page, lru);
			void *addr = kmap_atomic(page);

			le32_add_cpu((__le32 *)addr, 1);
			kunmap_atomic(addr);
		}
	} else {
		as_ctx->pagelist = pagelist;
	}

	ceph_pagelist_encode_32(pagelist, name_len);
	ceph_pagelist_append(pagelist, name, name_len);

	ceph_pagelist_encode_32(pagelist, as_ctx->sec_ctxlen);
	ceph_pagelist_append(pagelist, as_ctx->sec_ctx, as_ctx->sec_ctxlen);

	err = 0;
out:
	if (pagelist && !as_ctx->pagelist)
		ceph_pagelist_release(pagelist);
	return err;
}
#endif /* CONFIG_CEPH_FS_SECURITY_LABEL */
#endif /* CONFIG_SECURITY */

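/*
 * Release everything a ceph_acl_sec_ctx may reference: POSIX ACLs, the
 * LSM security context, the fscrypt auth blob and the xattr pagelist.
 */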
void ceph_release_acl_sec_ctx(struct ceph_acl_sec_ctx *as_ctx)
{
#ifdef CONFIG_CEPH_FS_POSIX_ACL
	posix_acl_release(as_ctx->acl);
	posix_acl_release(as_ctx->default_acl);
#endif
#ifdef CONFIG_CEPH_FS_SECURITY_LABEL
	security_release_secctx(as_ctx->sec_ctx, as_ctx->sec_ctxlen);
#endif
#ifdef CONFIG_FS_ENCRYPTION
	kfree(as_ctx->fscrypt_auth);
#endif
	if (as_ctx->pagelist)
		ceph_pagelist_release(as_ctx->pagelist);
}

/*
 * List of xattr handlers.  A single catch-all handler (empty prefix)
 * routes every xattr name through the ceph get/set handlers above.
 */
const struct xattr_handler * const ceph_xattr_handlers[] = {
	&ceph_other_xattr_handler,
	NULL,
};