1#include <linux/ceph/ceph_debug.h>
2
3#include "super.h"
4#include "mds_client.h"
5
6#include <linux/ceph/decode.h>
7
8#include <linux/xattr.h>
9#include <linux/slab.h>
10
11static bool ceph_is_valid_xattr(const char *name)
12{
13 return !strncmp(name, "ceph.", 5) ||
14 !strncmp(name, XATTR_SECURITY_PREFIX,
15 XATTR_SECURITY_PREFIX_LEN) ||
16 !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
17 !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
18}
19
20/*
21 * These define virtual xattrs exposing the recursive directory
22 * statistics and layout metadata.
23 */
24struct ceph_vxattr_cb {
25 bool readonly;
26 char *name;
27 size_t (*getxattr_cb)(struct ceph_inode_info *ci, char *val,
28 size_t size);
29};
30
31/* directories */
32
33static size_t ceph_vxattrcb_entries(struct ceph_inode_info *ci, char *val,
34 size_t size)
35{
36 return snprintf(val, size, "%lld", ci->i_files + ci->i_subdirs);
37}
38
39static size_t ceph_vxattrcb_files(struct ceph_inode_info *ci, char *val,
40 size_t size)
41{
42 return snprintf(val, size, "%lld", ci->i_files);
43}
44
45static size_t ceph_vxattrcb_subdirs(struct ceph_inode_info *ci, char *val,
46 size_t size)
47{
48 return snprintf(val, size, "%lld", ci->i_subdirs);
49}
50
51static size_t ceph_vxattrcb_rentries(struct ceph_inode_info *ci, char *val,
52 size_t size)
53{
54 return snprintf(val, size, "%lld", ci->i_rfiles + ci->i_rsubdirs);
55}
56
57static size_t ceph_vxattrcb_rfiles(struct ceph_inode_info *ci, char *val,
58 size_t size)
59{
60 return snprintf(val, size, "%lld", ci->i_rfiles);
61}
62
63static size_t ceph_vxattrcb_rsubdirs(struct ceph_inode_info *ci, char *val,
64 size_t size)
65{
66 return snprintf(val, size, "%lld", ci->i_rsubdirs);
67}
68
69static size_t ceph_vxattrcb_rbytes(struct ceph_inode_info *ci, char *val,
70 size_t size)
71{
72 return snprintf(val, size, "%lld", ci->i_rbytes);
73}
74
75static size_t ceph_vxattrcb_rctime(struct ceph_inode_info *ci, char *val,
76 size_t size)
77{
78 return snprintf(val, size, "%ld.%09ld", (long)ci->i_rctime.tv_sec,
79 (long)ci->i_rctime.tv_nsec);
80}
81
82static struct ceph_vxattr_cb ceph_dir_vxattrs[] = {
83 { true, "ceph.dir.entries", ceph_vxattrcb_entries},
84 { true, "ceph.dir.files", ceph_vxattrcb_files},
85 { true, "ceph.dir.subdirs", ceph_vxattrcb_subdirs},
86 { true, "ceph.dir.rentries", ceph_vxattrcb_rentries},
87 { true, "ceph.dir.rfiles", ceph_vxattrcb_rfiles},
88 { true, "ceph.dir.rsubdirs", ceph_vxattrcb_rsubdirs},
89 { true, "ceph.dir.rbytes", ceph_vxattrcb_rbytes},
90 { true, "ceph.dir.rctime", ceph_vxattrcb_rctime},
91 { true, NULL, NULL }
92};
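/*
 * Illustrative use from userspace (path and value are made up):
 *
 *   $ getfattr -n ceph.dir.rbytes /mnt/cephfs/some/dir
 *   # file: mnt/cephfs/some/dir
 *   ceph.dir.rbytes="1048576"
 *
 * The request reaches ceph_getxattr(), which matches the name against
 * this table and formats ci->i_rbytes via ceph_vxattrcb_rbytes().
 */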
93
94/* files */
95
96static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
97 size_t size)
98{
99 int ret;
100
101 ret = snprintf(val, size,
102 "chunk_bytes=%lld\nstripe_count=%lld\nobject_size=%lld\n",
103 (unsigned long long)ceph_file_layout_su(ci->i_layout),
104 (unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
105 (unsigned long long)ceph_file_layout_object_size(ci->i_layout));
106 if (ceph_file_layout_pg_preferred(ci->i_layout))
107 ret += snprintf(val + ret, size > ret ? size - ret : 0, "preferred_osd=%lld\n",
108 (unsigned long long)ceph_file_layout_pg_preferred(
109 ci->i_layout));
110 return ret;
111}
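/*
 * Example of the string built above (values are illustrative):
 *
 *   chunk_bytes=4194304
 *   stripe_count=1
 *   object_size=4194304
 *   preferred_osd=2
 *
 * The preferred_osd line only appears when a preferred placement is
 * set in the file layout.
 */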
112
113static struct ceph_vxattr_cb ceph_file_vxattrs[] = {
114 { true, "ceph.layout", ceph_vxattrcb_layout},
115 { true, NULL, NULL }
116};
117
118static struct ceph_vxattr_cb *ceph_inode_vxattrs(struct inode *inode)
119{
120 if (S_ISDIR(inode->i_mode))
121 return ceph_dir_vxattrs;
122 else if (S_ISREG(inode->i_mode))
123 return ceph_file_vxattrs;
124 return NULL;
125}
126
127static struct ceph_vxattr_cb *ceph_match_vxattr(struct ceph_vxattr_cb *vxattr,
128 const char *name)
129{
130 do {
131 if (strcmp(vxattr->name, name) == 0)
132 return vxattr;
133 vxattr++;
134 } while (vxattr->name);
135 return NULL;
136}
137
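/*
 * Insert or update an entry in the per-inode xattr rb-tree.  The
 * caller supplies a preallocated node via *newxattr; if the name is
 * already present, the existing node is updated in place and the
 * spare node is freed.  names_size/vals_size accounting is adjusted
 * either way.  Called with the inode lock held.
 */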
138static int __set_xattr(struct ceph_inode_info *ci,
139 const char *name, int name_len,
140 const char *val, int val_len,
141 int dirty,
142 int should_free_name, int should_free_val,
143 struct ceph_inode_xattr **newxattr)
144{
145 struct rb_node **p;
146 struct rb_node *parent = NULL;
147 struct ceph_inode_xattr *xattr = NULL;
148 int c;
149 int new = 0;
150
151 p = &ci->i_xattrs.index.rb_node;
152 while (*p) {
153 parent = *p;
154 xattr = rb_entry(parent, struct ceph_inode_xattr, node);
155 c = strncmp(name, xattr->name, min(name_len, xattr->name_len));
156 if (c < 0)
157 p = &(*p)->rb_left;
158 else if (c > 0)
159 p = &(*p)->rb_right;
160 else {
161 if (name_len == xattr->name_len)
162 break;
163 else if (name_len < xattr->name_len)
164 p = &(*p)->rb_left;
165 else
166 p = &(*p)->rb_right;
167 }
168 xattr = NULL;
169 }
170
171 if (!xattr) {
172 new = 1;
173 xattr = *newxattr;
174 xattr->name = name;
175 xattr->name_len = name_len;
176 xattr->should_free_name = should_free_name;
177
178 ci->i_xattrs.count++;
179 dout("__set_xattr count=%d\n", ci->i_xattrs.count);
180 } else {
181 kfree(*newxattr);
182 *newxattr = NULL;
183 if (xattr->should_free_val)
184 kfree((void *)xattr->val);
185
186 if (should_free_name) {
187 kfree((void *)name);
188 name = xattr->name;
189 }
190 ci->i_xattrs.names_size -= xattr->name_len;
191 ci->i_xattrs.vals_size -= xattr->val_len;
192 }
193 ci->i_xattrs.names_size += name_len;
194 ci->i_xattrs.vals_size += val_len;
195 if (val)
196 xattr->val = val;
197 else
198 xattr->val = "";
199
200 xattr->val_len = val_len;
201 xattr->dirty = dirty;
202 xattr->should_free_val = (val && should_free_val);
203
204 if (new) {
205 rb_link_node(&xattr->node, parent, p);
206 rb_insert_color(&xattr->node, &ci->i_xattrs.index);
207 dout("__set_xattr_val p=%p\n", p);
208 }
209
210 dout("__set_xattr_val added %llx.%llx xattr %p %s=%.*s\n",
211 ceph_vinop(&ci->vfs_inode), xattr, name, val_len, val);
212
213 return 0;
214}
215
216static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
217 const char *name)
218{
219 struct rb_node **p;
220 struct rb_node *parent = NULL;
221 struct ceph_inode_xattr *xattr = NULL;
222 int name_len = strlen(name);
223 int c;
224
225 p = &ci->i_xattrs.index.rb_node;
226 while (*p) {
227 parent = *p;
228 xattr = rb_entry(parent, struct ceph_inode_xattr, node);
229 c = strncmp(name, xattr->name, xattr->name_len);
230 if (c == 0 && name_len > xattr->name_len)
231 c = 1;
232 if (c < 0)
233 p = &(*p)->rb_left;
234 else if (c > 0)
235 p = &(*p)->rb_right;
236 else {
237 dout("__get_xattr %s: found %.*s\n", name,
238 xattr->val_len, xattr->val);
239 return xattr;
240 }
241 }
242
243 dout("__get_xattr %s: not found\n", name);
244
245 return NULL;
246}
247
248static void __free_xattr(struct ceph_inode_xattr *xattr)
249{
250 BUG_ON(!xattr);
251
252 if (xattr->should_free_name)
253 kfree((void *)xattr->name);
254 if (xattr->should_free_val)
255 kfree((void *)xattr->val);
256
257 kfree(xattr);
258}
259
260static int __remove_xattr(struct ceph_inode_info *ci,
261 struct ceph_inode_xattr *xattr)
262{
263 if (!xattr)
264 return -EOPNOTSUPP;
265
266 rb_erase(&xattr->node, &ci->i_xattrs.index);
267
268 if (xattr->should_free_name)
269 kfree((void *)xattr->name);
270 if (xattr->should_free_val)
271 kfree((void *)xattr->val);
272
273 ci->i_xattrs.names_size -= xattr->name_len;
274 ci->i_xattrs.vals_size -= xattr->val_len;
275 ci->i_xattrs.count--;
276 kfree(xattr);
277
278 return 0;
279}
280
281static int __remove_xattr_by_name(struct ceph_inode_info *ci,
282 const char *name)
283{
284 struct rb_node **p;
285 struct ceph_inode_xattr *xattr;
286 int err;
287
288 p = &ci->i_xattrs.index.rb_node;
289 xattr = __get_xattr(ci, name);
290 err = __remove_xattr(ci, xattr);
291 return err;
292}
293
294static char *__copy_xattr_names(struct ceph_inode_info *ci,
295 char *dest)
296{
297 struct rb_node *p;
298 struct ceph_inode_xattr *xattr = NULL;
299
300 p = rb_first(&ci->i_xattrs.index);
301 dout("__copy_xattr_names count=%d\n", ci->i_xattrs.count);
302
303 while (p) {
304 xattr = rb_entry(p, struct ceph_inode_xattr, node);
305 memcpy(dest, xattr->name, xattr->name_len);
306 dest[xattr->name_len] = '\0';
307
308 dout("dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
309 xattr->name_len, ci->i_xattrs.names_size);
310
311 dest += xattr->name_len + 1;
312 p = rb_next(p);
313 }
314
315 return dest;
316}
317
318void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
319{
320 struct rb_node *p, *tmp;
321 struct ceph_inode_xattr *xattr = NULL;
322
323 p = rb_first(&ci->i_xattrs.index);
324
325 dout("__ceph_destroy_xattrs p=%p\n", p);
326
327 while (p) {
328 xattr = rb_entry(p, struct ceph_inode_xattr, node);
329 tmp = p;
330 p = rb_next(tmp);
331 dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p,
332 xattr->name_len, xattr->name);
333 rb_erase(tmp, &ci->i_xattrs.index);
334
335 __free_xattr(xattr);
336 }
337
338 ci->i_xattrs.names_size = 0;
339 ci->i_xattrs.vals_size = 0;
340 ci->i_xattrs.index_version = 0;
341 ci->i_xattrs.count = 0;
342 ci->i_xattrs.index = RB_ROOT;
343}
344
345static int __build_xattrs(struct inode *inode)
346 __releases(inode->i_lock)
347 __acquires(inode->i_lock)
348{
349 u32 namelen;
350 u32 numattr = 0;
351 void *p, *end;
352 u32 len;
353 const char *name, *val;
354 struct ceph_inode_info *ci = ceph_inode(inode);
355 int xattr_version;
356 struct ceph_inode_xattr **xattrs = NULL;
357 int err = 0;
358 int i;
359
360 dout("__build_xattrs() len=%d\n",
361 ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);
362
363 if (ci->i_xattrs.index_version >= ci->i_xattrs.version)
364 return 0; /* already built */
365
366 __ceph_destroy_xattrs(ci);
367
368start:
369 /* update the internal xattr rb tree */
370 if (ci->i_xattrs.blob && ci->i_xattrs.blob->vec.iov_len > 4) {
371 p = ci->i_xattrs.blob->vec.iov_base;
372 end = p + ci->i_xattrs.blob->vec.iov_len;
373 ceph_decode_32_safe(&p, end, numattr, bad);
374 xattr_version = ci->i_xattrs.version;
375 spin_unlock(&inode->i_lock);
376
377 xattrs = kcalloc(numattr, sizeof(*xattrs),
378 GFP_NOFS);
379 err = -ENOMEM;
380 if (!xattrs)
381 goto bad_lock;
382 /* kcalloc() already zeroed the array; no memset is needed */
383 for (i = 0; i < numattr; i++) {
384 xattrs[i] = kmalloc(sizeof(struct ceph_inode_xattr),
385 GFP_NOFS);
386 if (!xattrs[i])
387 goto bad_lock;
388 }
389
390 spin_lock(&inode->i_lock);
391 if (ci->i_xattrs.version != xattr_version) {
392 /* lost a race, retry */
393 for (i = 0; i < numattr; i++)
394 kfree(xattrs[i]);
395 kfree(xattrs);
396 goto start;
397 }
398 err = -EIO;
399 while (numattr--) {
400 ceph_decode_32_safe(&p, end, len, bad);
401 namelen = len;
402 name = p;
403 p += len;
404 ceph_decode_32_safe(&p, end, len, bad);
405 val = p;
406 p += len;
407
408 err = __set_xattr(ci, name, namelen, val, len,
409 0, 0, 0, &xattrs[numattr]);
410
411 if (err < 0)
412 goto bad;
413 }
414 kfree(xattrs);
415 }
416 ci->i_xattrs.index_version = ci->i_xattrs.version;
417 ci->i_xattrs.dirty = false;
418
419 return err;
420bad_lock:
421 spin_lock(&inode->i_lock);
422bad:
423 if (xattrs) {
424 for (i = 0; i < numattr; i++)
425 kfree(xattrs[i]);
426 kfree(xattrs);
427 }
428 ci->i_xattrs.names_size = 0;
429 return err;
430}
431
432static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
433 int val_size)
434{
435 /*
436 * 4 bytes for the xattr count, plus 4 bytes for each name length
437 * and 4 bytes for each value length
438 */
439 int size = 4 + ci->i_xattrs.count*(4 + 4) +
440 ci->i_xattrs.names_size +
441 ci->i_xattrs.vals_size;
442 dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n",
443 ci->i_xattrs.count, ci->i_xattrs.names_size,
444 ci->i_xattrs.vals_size);
445
446 if (name_size)
447 size += 4 + 4 + name_size + val_size;
448
449 return size;
450}
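/*
 * Worked example: an inode holding two xattrs, "user.a" (name_len 6,
 * val_len 3) and "user.b" (name_len 6, val_len 5), needs
 *
 *   4 + 2 * (4 + 4) + (6 + 6) + (3 + 5) = 40 bytes
 *
 * before accounting for any new name/value passed in name_size and
 * val_size.
 */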
451
452/*
453 * If there are dirty xattrs, reencode xattrs into the prealloc_blob
454 * and swap into place.
455 */
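/*
 * The resulting blob layout is (all integers are little-endian u32):
 *
 *   count
 *   count times:  name_len, name bytes (no '\0'), val_len, value bytes
 */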
456void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
457{
458 struct rb_node *p;
459 struct ceph_inode_xattr *xattr = NULL;
460 void *dest;
461
462 dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
463 if (ci->i_xattrs.dirty) {
464 int need = __get_required_blob_size(ci, 0, 0);
465
466 BUG_ON(need > ci->i_xattrs.prealloc_blob->alloc_len);
467
468 p = rb_first(&ci->i_xattrs.index);
469 dest = ci->i_xattrs.prealloc_blob->vec.iov_base;
470
471 ceph_encode_32(&dest, ci->i_xattrs.count);
472 while (p) {
473 xattr = rb_entry(p, struct ceph_inode_xattr, node);
474
475 ceph_encode_32(&dest, xattr->name_len);
476 memcpy(dest, xattr->name, xattr->name_len);
477 dest += xattr->name_len;
478 ceph_encode_32(&dest, xattr->val_len);
479 memcpy(dest, xattr->val, xattr->val_len);
480 dest += xattr->val_len;
481
482 p = rb_next(p);
483 }
484
485 /* adjust buffer len; it may be larger than we need */
486 ci->i_xattrs.prealloc_blob->vec.iov_len =
487 dest - ci->i_xattrs.prealloc_blob->vec.iov_base;
488
489 if (ci->i_xattrs.blob)
490 ceph_buffer_put(ci->i_xattrs.blob);
491 ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
492 ci->i_xattrs.prealloc_blob = NULL;
493 ci->i_xattrs.dirty = false;
494 ci->i_xattrs.version++;
495 }
496}
497
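/*
 * If we hold the XATTR_SHARED cap and our rb-tree index is at least
 * as new as the xattr blob, the value is served locally; otherwise
 * the xattrs are first fetched from the MDS via ceph_do_getattr().
 */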
498ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
499 size_t size)
500{
501 struct inode *inode = dentry->d_inode;
502 struct ceph_inode_info *ci = ceph_inode(inode);
503 struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
504 int err;
505 struct ceph_inode_xattr *xattr;
506 struct ceph_vxattr_cb *vxattr = NULL;
507
508 if (!ceph_is_valid_xattr(name))
509 return -ENODATA;
510
511 /* let's see if a virtual xattr was requested */
512 if (vxattrs)
513 vxattr = ceph_match_vxattr(vxattrs, name);
514
515 spin_lock(&inode->i_lock);
516 dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
517 ci->i_xattrs.version, ci->i_xattrs.index_version);
518
519 if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
520 (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
521 goto get_xattr;
522 } else {
523 spin_unlock(&inode->i_lock);
524 /* get xattrs from mds (if we don't already have them) */
525 err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
526 if (err)
527 return err;
528 }
529
530 spin_lock(&inode->i_lock);
531
532 if (vxattr && vxattr->readonly) {
533 err = vxattr->getxattr_cb(ci, value, size);
534 goto out;
535 }
536
537 err = __build_xattrs(inode);
538 if (err < 0)
539 goto out;
540
541get_xattr:
542 err = -ENODATA; /* == ENOATTR */
543 xattr = __get_xattr(ci, name);
544 if (!xattr) {
545 if (vxattr)
546 err = vxattr->getxattr_cb(ci, value, size);
547 goto out;
548 }
549
550 err = -ERANGE;
551 if (size && size < xattr->val_len)
552 goto out;
553
554 err = xattr->val_len;
555 if (size == 0)
556 goto out;
557
558 memcpy(value, xattr->val, xattr->val_len);
559
560out:
561 spin_unlock(&inode->i_lock);
562 return err;
563}
564
565ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
566{
567 struct inode *inode = dentry->d_inode;
568 struct ceph_inode_info *ci = ceph_inode(inode);
569 struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
570 u32 vir_namelen = 0;
571 u32 namelen;
572 int err;
573 u32 len;
574 int i;
575
576 spin_lock(&inode->i_lock);
577 dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
578 ci->i_xattrs.version, ci->i_xattrs.index_version);
579
580 if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
581 (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
582 goto list_xattr;
583 } else {
584 spin_unlock(&inode->i_lock);
585 err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
586 if (err)
587 return err;
588 }
589
590 spin_lock(&inode->i_lock);
591
592 err = __build_xattrs(inode);
593 if (err < 0)
594 goto out;
595
596list_xattr:
597 vir_namelen = 0;
598 /* include virtual dir xattrs */
599 if (vxattrs)
600 for (i = 0; vxattrs[i].name; i++)
601 vir_namelen += strlen(vxattrs[i].name) + 1;
602 /* plus one byte per stored xattr name for its trailing '\0' */
603 namelen = vir_namelen + ci->i_xattrs.names_size + ci->i_xattrs.count;
604 err = -ERANGE;
605 if (size && namelen > size)
606 goto out;
607
608 err = namelen;
609 if (size == 0)
610 goto out;
611
612 names = __copy_xattr_names(ci, names);
613
614 /* virtual xattr names, too */
615 if (vxattrs)
616 for (i = 0; vxattrs[i].name; i++) {
617 len = sprintf(names, "%s", vxattrs[i].name);
618 names += len + 1;
619 }
620
621out:
622 spin_unlock(&inode->i_lock);
623 return err;
624}
625
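/*
 * Synchronously set an xattr on the MDS: the value is copied into
 * pages attached to a CEPH_MDS_OP_SETXATTR request, which is sent to
 * the authoritative MDS and waited on.
 */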
626static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
627 const char *value, size_t size, int flags)
628{
629 struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
630 struct inode *inode = dentry->d_inode;
631 struct ceph_inode_info *ci = ceph_inode(inode);
632 struct inode *parent_inode;
633 struct ceph_mds_request *req;
634 struct ceph_mds_client *mdsc = fsc->mdsc;
635 int err;
636 int i, nr_pages;
637 struct page **pages = NULL;
638 void *kaddr;
639
640 /* copy value into some pages */
641 nr_pages = calc_pages_for(0, size);
642 if (nr_pages) {
643 pages = kmalloc(sizeof(pages[0])*nr_pages, GFP_NOFS);
644 if (!pages)
645 return -ENOMEM;
646 err = -ENOMEM;
647 for (i = 0; i < nr_pages; i++) {
648 pages[i] = __page_cache_alloc(GFP_NOFS);
649 if (!pages[i]) {
650 nr_pages = i;
651 goto out;
652 }
653 kaddr = kmap(pages[i]);
654 memcpy(kaddr, value + i*PAGE_CACHE_SIZE,
655 min(PAGE_CACHE_SIZE, size-i*PAGE_CACHE_SIZE));
656 }
657 }
658
659 dout("setxattr value=%.*s\n", (int)size, value);
660
661 /* do request */
662 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETXATTR,
663 USE_AUTH_MDS);
664 if (IS_ERR(req)) {
665 err = PTR_ERR(req);
666 goto out;
667 }
668 req->r_inode = inode;
669 ihold(inode);
670 req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
671 req->r_num_caps = 1;
672 req->r_args.setxattr.flags = cpu_to_le32(flags);
673 req->r_path2 = kstrdup(name, GFP_NOFS);
674
675 req->r_pages = pages;
676 req->r_num_pages = nr_pages;
677 req->r_data_len = size;
678
679 dout("xattr.ver (before): %lld\n", ci->i_xattrs.version);
680 parent_inode = ceph_get_dentry_parent_inode(dentry);
681 err = ceph_mdsc_do_request(mdsc, parent_inode, req);
682 iput(parent_inode);
683 ceph_mdsc_put_request(req);
684 dout("xattr.ver (after): %lld\n", ci->i_xattrs.version);
685
686out:
687 if (pages) {
688 for (i = 0; i < nr_pages; i++)
689 __free_page(pages[i]);
690 kfree(pages);
691 }
692 return err;
693}
694
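/*
 * If we hold the XATTR_EXCL cap, the xattr is applied to the local
 * rb-tree and the inode is marked dirty so the change is flushed with
 * the caps; otherwise we fall back to a synchronous MDS request via
 * ceph_sync_setxattr().
 */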
695int ceph_setxattr(struct dentry *dentry, const char *name,
696 const void *value, size_t size, int flags)
697{
698 struct inode *inode = dentry->d_inode;
699 struct ceph_inode_info *ci = ceph_inode(inode);
700 struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
701 int err;
702 int name_len = strlen(name);
703 int val_len = size;
704 char *newname = NULL;
705 char *newval = NULL;
706 struct ceph_inode_xattr *xattr = NULL;
707 int issued;
708 int required_blob_size;
709 int dirty;
710
711 if (ceph_snap(inode) != CEPH_NOSNAP)
712 return -EROFS;
713
714 if (!ceph_is_valid_xattr(name))
715 return -EOPNOTSUPP;
716
717 if (vxattrs) {
718 struct ceph_vxattr_cb *vxattr =
719 ceph_match_vxattr(vxattrs, name);
720 if (vxattr && vxattr->readonly)
721 return -EOPNOTSUPP;
722 }
723
724 /* preallocate memory for xattr name, value, index node */
725 err = -ENOMEM;
726 newname = kmemdup(name, name_len + 1, GFP_NOFS);
727 if (!newname)
728 goto out;
729
730 if (val_len) {
731 newval = kmalloc(val_len + 1, GFP_NOFS);
732 if (!newval)
733 goto out;
734 memcpy(newval, value, val_len);
735 newval[val_len] = '\0';
736 }
737
738 xattr = kmalloc(sizeof(struct ceph_inode_xattr), GFP_NOFS);
739 if (!xattr)
740 goto out;
741
742 spin_lock(&inode->i_lock);
743retry:
744 issued = __ceph_caps_issued(ci, NULL);
745 if (!(issued & CEPH_CAP_XATTR_EXCL))
746 goto do_sync;
747 __build_xattrs(inode);
748
749 required_blob_size = __get_required_blob_size(ci, name_len, val_len);
750
751 if (!ci->i_xattrs.prealloc_blob ||
752 required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
753 struct ceph_buffer *blob = NULL;
754
755 spin_unlock(&inode->i_lock);
756 dout(" preaallocating new blob size=%d\n", required_blob_size);
757 blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
758 if (!blob)
759 goto out;
760 spin_lock(&inode->i_lock);
761 if (ci->i_xattrs.prealloc_blob)
762 ceph_buffer_put(ci->i_xattrs.prealloc_blob);
763 ci->i_xattrs.prealloc_blob = blob;
764 goto retry;
765 }
766
767 dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued));
768 err = __set_xattr(ci, newname, name_len, newval,
769 val_len, 1, 1, 1, &xattr);
770 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
771 ci->i_xattrs.dirty = true;
772 inode->i_ctime = CURRENT_TIME;
773 spin_unlock(&inode->i_lock);
774 if (dirty)
775 __mark_inode_dirty(inode, dirty);
776 return err;
777
778do_sync:
779 spin_unlock(&inode->i_lock);
780 err = ceph_sync_setxattr(dentry, name, value, size, flags);
781out:
782 kfree(newname);
783 kfree(newval);
784 kfree(xattr);
785 return err;
786}
787
788static int ceph_send_removexattr(struct dentry *dentry, const char *name)
789{
790 struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
791 struct ceph_mds_client *mdsc = fsc->mdsc;
792 struct inode *inode = dentry->d_inode;
793 struct inode *parent_inode;
794 struct ceph_mds_request *req;
795 int err;
796
797 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RMXATTR,
798 USE_AUTH_MDS);
799 if (IS_ERR(req))
800 return PTR_ERR(req);
801 req->r_inode = inode;
802 ihold(inode);
803 req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
804 req->r_num_caps = 1;
805 req->r_path2 = kstrdup(name, GFP_NOFS);
806
807 parent_inode = ceph_get_dentry_parent_inode(dentry);
808 err = ceph_mdsc_do_request(mdsc, parent_inode, req);
809 iput(parent_inode);
810 ceph_mdsc_put_request(req);
811 return err;
812}
813
814int ceph_removexattr(struct dentry *dentry, const char *name)
815{
816 struct inode *inode = dentry->d_inode;
817 struct ceph_inode_info *ci = ceph_inode(inode);
818 struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
819 int issued;
820 int err;
821 int dirty;
822
823 if (ceph_snap(inode) != CEPH_NOSNAP)
824 return -EROFS;
825
826 if (!ceph_is_valid_xattr(name))
827 return -EOPNOTSUPP;
828
829 if (vxattrs) {
830 struct ceph_vxattr_cb *vxattr =
831 ceph_match_vxattr(vxattrs, name);
832 if (vxattr && vxattr->readonly)
833 return -EOPNOTSUPP;
834 }
835
836 spin_lock(&inode->i_lock);
837 __build_xattrs(inode);
838 issued = __ceph_caps_issued(ci, NULL);
839 dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued));
840
841 if (!(issued & CEPH_CAP_XATTR_EXCL))
842 goto do_sync;
843
844 err = __remove_xattr_by_name(ceph_inode(inode), name);
845 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
846 ci->i_xattrs.dirty = true;
847 inode->i_ctime = CURRENT_TIME;
848
849 spin_unlock(&inode->i_lock);
850 if (dirty)
851 __mark_inode_dirty(inode, dirty);
852 return err;
853do_sync:
854 spin_unlock(&inode->i_lock);
855 err = ceph_send_removexattr(dentry, name);
856 return err;
857}
858
1#include <linux/ceph/ceph_debug.h>
2
3#include "super.h"
4#include "mds_client.h"
5
6#include <linux/ceph/decode.h>
7
8#include <linux/xattr.h>
9#include <linux/slab.h>
10
11#define XATTR_CEPH_PREFIX "ceph."
12#define XATTR_CEPH_PREFIX_LEN (sizeof (XATTR_CEPH_PREFIX) - 1)
13
14static bool ceph_is_valid_xattr(const char *name)
15{
16 return !strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN) ||
17 !strncmp(name, XATTR_SECURITY_PREFIX,
18 XATTR_SECURITY_PREFIX_LEN) ||
19 !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
20 !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
21}
22
23/*
24 * These define virtual xattrs exposing the recursive directory
25 * statistics and layout metadata.
26 */
27struct ceph_vxattr {
28 char *name;
29 size_t name_size; /* strlen(name) + 1 (for '\0') */
30 size_t (*getxattr_cb)(struct ceph_inode_info *ci, char *val,
31 size_t size);
32 bool readonly;
33};
34
35/* directories */
36
37static size_t ceph_vxattrcb_dir_entries(struct ceph_inode_info *ci, char *val,
38 size_t size)
39{
40 return snprintf(val, size, "%lld", ci->i_files + ci->i_subdirs);
41}
42
43static size_t ceph_vxattrcb_dir_files(struct ceph_inode_info *ci, char *val,
44 size_t size)
45{
46 return snprintf(val, size, "%lld", ci->i_files);
47}
48
49static size_t ceph_vxattrcb_dir_subdirs(struct ceph_inode_info *ci, char *val,
50 size_t size)
51{
52 return snprintf(val, size, "%lld", ci->i_subdirs);
53}
54
55static size_t ceph_vxattrcb_dir_rentries(struct ceph_inode_info *ci, char *val,
56 size_t size)
57{
58 return snprintf(val, size, "%lld", ci->i_rfiles + ci->i_rsubdirs);
59}
60
61static size_t ceph_vxattrcb_dir_rfiles(struct ceph_inode_info *ci, char *val,
62 size_t size)
63{
64 return snprintf(val, size, "%lld", ci->i_rfiles);
65}
66
67static size_t ceph_vxattrcb_dir_rsubdirs(struct ceph_inode_info *ci, char *val,
68 size_t size)
69{
70 return snprintf(val, size, "%lld", ci->i_rsubdirs);
71}
72
73static size_t ceph_vxattrcb_dir_rbytes(struct ceph_inode_info *ci, char *val,
74 size_t size)
75{
76 return snprintf(val, size, "%lld", ci->i_rbytes);
77}
78
79static size_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val,
80 size_t size)
81{
82 return snprintf(val, size, "%ld.%09ld", (long)ci->i_rctime.tv_sec,
83 (long)ci->i_rctime.tv_nsec);
84}
85
86#define CEPH_XATTR_NAME(_type, _name) XATTR_CEPH_PREFIX #_type "." #_name
87
88#define XATTR_NAME_CEPH(_type, _name) \
89 { \
90 .name = CEPH_XATTR_NAME(_type, _name), \
91 .name_size = sizeof (CEPH_XATTR_NAME(_type, _name)), \
92 .getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name, \
93 .readonly = true, \
94 }
95
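/*
 * For example, XATTR_NAME_CEPH(dir, entries) expands to an entry with
 *
 *   .name        = "ceph.dir.entries"
 *   .name_size   = sizeof("ceph.dir.entries")  (17, counting the '\0')
 *   .getxattr_cb = ceph_vxattrcb_dir_entries
 *   .readonly    = true
 */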
96static struct ceph_vxattr ceph_dir_vxattrs[] = {
97 XATTR_NAME_CEPH(dir, entries),
98 XATTR_NAME_CEPH(dir, files),
99 XATTR_NAME_CEPH(dir, subdirs),
100 XATTR_NAME_CEPH(dir, rentries),
101 XATTR_NAME_CEPH(dir, rfiles),
102 XATTR_NAME_CEPH(dir, rsubdirs),
103 XATTR_NAME_CEPH(dir, rbytes),
104 XATTR_NAME_CEPH(dir, rctime),
105 { 0 } /* Required table terminator */
106};
107static size_t ceph_dir_vxattrs_name_size; /* total size of all names */
108
109/* files */
110
111static size_t ceph_vxattrcb_file_layout(struct ceph_inode_info *ci, char *val,
112 size_t size)
113{
114 int ret;
115
116 ret = snprintf(val, size,
117 "chunk_bytes=%lld\nstripe_count=%lld\nobject_size=%lld\n",
118 (unsigned long long)ceph_file_layout_su(ci->i_layout),
119 (unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
120 (unsigned long long)ceph_file_layout_object_size(ci->i_layout));
121 return ret;
122}
123
124static struct ceph_vxattr ceph_file_vxattrs[] = {
125 XATTR_NAME_CEPH(file, layout),
126 /* The following extended attribute name is deprecated */
127 {
128 .name = XATTR_CEPH_PREFIX "layout",
129 .name_size = sizeof (XATTR_CEPH_PREFIX "layout"),
130 .getxattr_cb = ceph_vxattrcb_file_layout,
131 .readonly = true,
132 },
133 { 0 } /* Required table terminator */
134};
135static size_t ceph_file_vxattrs_name_size; /* total size of all names */
136
137static struct ceph_vxattr *ceph_inode_vxattrs(struct inode *inode)
138{
139 if (S_ISDIR(inode->i_mode))
140 return ceph_dir_vxattrs;
141 else if (S_ISREG(inode->i_mode))
142 return ceph_file_vxattrs;
143 return NULL;
144}
145
146static size_t ceph_vxattrs_name_size(struct ceph_vxattr *vxattrs)
147{
148 if (vxattrs == ceph_dir_vxattrs)
149 return ceph_dir_vxattrs_name_size;
150 if (vxattrs == ceph_file_vxattrs)
151 return ceph_file_vxattrs_name_size;
152 BUG();
153
154 return 0;
155}
156
157/*
158 * Compute the aggregate size (including terminating '\0') of all
159 * virtual extended attribute names in the given vxattr table.
160 */
161static size_t __init vxattrs_name_size(struct ceph_vxattr *vxattrs)
162{
163 struct ceph_vxattr *vxattr;
164 size_t size = 0;
165
166 for (vxattr = vxattrs; vxattr->name; vxattr++)
167 size += vxattr->name_size;
168
169 return size;
170}
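/*
 * For the ceph_dir_vxattrs table below this sums the sizeof() of each
 * "ceph.dir.*" name (terminating '\0' included), e.g. 17 bytes for
 * "ceph.dir.entries", for a total of 133 bytes across the eight names.
 */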
171
172/* Routines called at initialization and exit time */
173
174void __init ceph_xattr_init(void)
175{
176 ceph_dir_vxattrs_name_size = vxattrs_name_size(ceph_dir_vxattrs);
177 ceph_file_vxattrs_name_size = vxattrs_name_size(ceph_file_vxattrs);
178}
179
180void ceph_xattr_exit(void)
181{
182 ceph_dir_vxattrs_name_size = 0;
183 ceph_file_vxattrs_name_size = 0;
184}
185
186static struct ceph_vxattr *ceph_match_vxattr(struct inode *inode,
187 const char *name)
188{
189 struct ceph_vxattr *vxattr = ceph_inode_vxattrs(inode);
190
191 if (vxattr) {
192 while (vxattr->name) {
193 if (!strcmp(vxattr->name, name))
194 return vxattr;
195 vxattr++;
196 }
197 }
198
199 return NULL;
200}
201
202static int __set_xattr(struct ceph_inode_info *ci,
203 const char *name, int name_len,
204 const char *val, int val_len,
205 int dirty,
206 int should_free_name, int should_free_val,
207 struct ceph_inode_xattr **newxattr)
208{
209 struct rb_node **p;
210 struct rb_node *parent = NULL;
211 struct ceph_inode_xattr *xattr = NULL;
212 int c;
213 int new = 0;
214
215 p = &ci->i_xattrs.index.rb_node;
216 while (*p) {
217 parent = *p;
218 xattr = rb_entry(parent, struct ceph_inode_xattr, node);
219 c = strncmp(name, xattr->name, min(name_len, xattr->name_len));
220 if (c < 0)
221 p = &(*p)->rb_left;
222 else if (c > 0)
223 p = &(*p)->rb_right;
224 else {
225 if (name_len == xattr->name_len)
226 break;
227 else if (name_len < xattr->name_len)
228 p = &(*p)->rb_left;
229 else
230 p = &(*p)->rb_right;
231 }
232 xattr = NULL;
233 }
234
235 if (!xattr) {
236 new = 1;
237 xattr = *newxattr;
238 xattr->name = name;
239 xattr->name_len = name_len;
240 xattr->should_free_name = should_free_name;
241
242 ci->i_xattrs.count++;
243 dout("__set_xattr count=%d\n", ci->i_xattrs.count);
244 } else {
245 kfree(*newxattr);
246 *newxattr = NULL;
247 if (xattr->should_free_val)
248 kfree((void *)xattr->val);
249
250 if (should_free_name) {
251 kfree((void *)name);
252 name = xattr->name;
253 }
254 ci->i_xattrs.names_size -= xattr->name_len;
255 ci->i_xattrs.vals_size -= xattr->val_len;
256 }
257 ci->i_xattrs.names_size += name_len;
258 ci->i_xattrs.vals_size += val_len;
259 if (val)
260 xattr->val = val;
261 else
262 xattr->val = "";
263
264 xattr->val_len = val_len;
265 xattr->dirty = dirty;
266 xattr->should_free_val = (val && should_free_val);
267
268 if (new) {
269 rb_link_node(&xattr->node, parent, p);
270 rb_insert_color(&xattr->node, &ci->i_xattrs.index);
271 dout("__set_xattr_val p=%p\n", p);
272 }
273
274 dout("__set_xattr_val added %llx.%llx xattr %p %s=%.*s\n",
275 ceph_vinop(&ci->vfs_inode), xattr, name, val_len, val);
276
277 return 0;
278}
279
280static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
281 const char *name)
282{
283 struct rb_node **p;
284 struct rb_node *parent = NULL;
285 struct ceph_inode_xattr *xattr = NULL;
286 int name_len = strlen(name);
287 int c;
288
289 p = &ci->i_xattrs.index.rb_node;
290 while (*p) {
291 parent = *p;
292 xattr = rb_entry(parent, struct ceph_inode_xattr, node);
293 c = strncmp(name, xattr->name, xattr->name_len);
294 if (c == 0 && name_len > xattr->name_len)
295 c = 1;
296 if (c < 0)
297 p = &(*p)->rb_left;
298 else if (c > 0)
299 p = &(*p)->rb_right;
300 else {
301 dout("__get_xattr %s: found %.*s\n", name,
302 xattr->val_len, xattr->val);
303 return xattr;
304 }
305 }
306
307 dout("__get_xattr %s: not found\n", name);
308
309 return NULL;
310}
311
312static void __free_xattr(struct ceph_inode_xattr *xattr)
313{
314 BUG_ON(!xattr);
315
316 if (xattr->should_free_name)
317 kfree((void *)xattr->name);
318 if (xattr->should_free_val)
319 kfree((void *)xattr->val);
320
321 kfree(xattr);
322}
323
324static int __remove_xattr(struct ceph_inode_info *ci,
325 struct ceph_inode_xattr *xattr)
326{
327 if (!xattr)
328 return -EOPNOTSUPP;
329
330 rb_erase(&xattr->node, &ci->i_xattrs.index);
331
332 if (xattr->should_free_name)
333 kfree((void *)xattr->name);
334 if (xattr->should_free_val)
335 kfree((void *)xattr->val);
336
337 ci->i_xattrs.names_size -= xattr->name_len;
338 ci->i_xattrs.vals_size -= xattr->val_len;
339 ci->i_xattrs.count--;
340 kfree(xattr);
341
342 return 0;
343}
344
345static int __remove_xattr_by_name(struct ceph_inode_info *ci,
346 const char *name)
347{
348 struct rb_node **p;
349 struct ceph_inode_xattr *xattr;
350 int err;
351
352 p = &ci->i_xattrs.index.rb_node;
353 xattr = __get_xattr(ci, name);
354 err = __remove_xattr(ci, xattr);
355 return err;
356}
357
358static char *__copy_xattr_names(struct ceph_inode_info *ci,
359 char *dest)
360{
361 struct rb_node *p;
362 struct ceph_inode_xattr *xattr = NULL;
363
364 p = rb_first(&ci->i_xattrs.index);
365 dout("__copy_xattr_names count=%d\n", ci->i_xattrs.count);
366
367 while (p) {
368 xattr = rb_entry(p, struct ceph_inode_xattr, node);
369 memcpy(dest, xattr->name, xattr->name_len);
370 dest[xattr->name_len] = '\0';
371
372 dout("dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
373 xattr->name_len, ci->i_xattrs.names_size);
374
375 dest += xattr->name_len + 1;
376 p = rb_next(p);
377 }
378
379 return dest;
380}
381
382void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
383{
384 struct rb_node *p, *tmp;
385 struct ceph_inode_xattr *xattr = NULL;
386
387 p = rb_first(&ci->i_xattrs.index);
388
389 dout("__ceph_destroy_xattrs p=%p\n", p);
390
391 while (p) {
392 xattr = rb_entry(p, struct ceph_inode_xattr, node);
393 tmp = p;
394 p = rb_next(tmp);
395 dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p,
396 xattr->name_len, xattr->name);
397 rb_erase(tmp, &ci->i_xattrs.index);
398
399 __free_xattr(xattr);
400 }
401
402 ci->i_xattrs.names_size = 0;
403 ci->i_xattrs.vals_size = 0;
404 ci->i_xattrs.index_version = 0;
405 ci->i_xattrs.count = 0;
406 ci->i_xattrs.index = RB_ROOT;
407}
408
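/*
 * Decode the raw xattr blob received from the MDS into the per-inode
 * rb-tree.  The lock is dropped while the node array is allocated; if
 * the xattr version changed in the meantime, the decode is retried.
 */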
409static int __build_xattrs(struct inode *inode)
410 __releases(ci->i_ceph_lock)
411 __acquires(ci->i_ceph_lock)
412{
413 u32 namelen;
414 u32 numattr = 0;
415 void *p, *end;
416 u32 len;
417 const char *name, *val;
418 struct ceph_inode_info *ci = ceph_inode(inode);
419 int xattr_version;
420 struct ceph_inode_xattr **xattrs = NULL;
421 int err = 0;
422 int i;
423
424 dout("__build_xattrs() len=%d\n",
425 ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);
426
427 if (ci->i_xattrs.index_version >= ci->i_xattrs.version)
428 return 0; /* already built */
429
430 __ceph_destroy_xattrs(ci);
431
432start:
433 /* update the internal xattr rb tree */
434 if (ci->i_xattrs.blob && ci->i_xattrs.blob->vec.iov_len > 4) {
435 p = ci->i_xattrs.blob->vec.iov_base;
436 end = p + ci->i_xattrs.blob->vec.iov_len;
437 ceph_decode_32_safe(&p, end, numattr, bad);
438 xattr_version = ci->i_xattrs.version;
439 spin_unlock(&ci->i_ceph_lock);
440
441 xattrs = kcalloc(numattr, sizeof(*xattrs),
442 GFP_NOFS);
443 err = -ENOMEM;
444 if (!xattrs)
445 goto bad_lock;
446 /* kcalloc() already zeroed the array; no memset is needed */
447 for (i = 0; i < numattr; i++) {
448 xattrs[i] = kmalloc(sizeof(struct ceph_inode_xattr),
449 GFP_NOFS);
450 if (!xattrs[i])
451 goto bad_lock;
452 }
453
454 spin_lock(&ci->i_ceph_lock);
455 if (ci->i_xattrs.version != xattr_version) {
456 /* lost a race, retry */
457 for (i = 0; i < numattr; i++)
458 kfree(xattrs[i]);
459 kfree(xattrs);
460 goto start;
461 }
462 err = -EIO;
463 while (numattr--) {
464 ceph_decode_32_safe(&p, end, len, bad);
465 namelen = len;
466 name = p;
467 p += len;
468 ceph_decode_32_safe(&p, end, len, bad);
469 val = p;
470 p += len;
471
472 err = __set_xattr(ci, name, namelen, val, len,
473 0, 0, 0, &xattrs[numattr]);
474
475 if (err < 0)
476 goto bad;
477 }
478 kfree(xattrs);
479 }
480 ci->i_xattrs.index_version = ci->i_xattrs.version;
481 ci->i_xattrs.dirty = false;
482
483 return err;
484bad_lock:
485 spin_lock(&ci->i_ceph_lock);
486bad:
487 if (xattrs) {
488 for (i = 0; i < numattr; i++)
489 kfree(xattrs[i]);
490 kfree(xattrs);
491 }
492 ci->i_xattrs.names_size = 0;
493 return err;
494}
495
496static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
497 int val_size)
498{
499 /*
500 * 4 bytes for the xattr count, plus 4 bytes for each name length
501 * and 4 bytes for each value length
502 */
503 int size = 4 + ci->i_xattrs.count*(4 + 4) +
504 ci->i_xattrs.names_size +
505 ci->i_xattrs.vals_size;
506 dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n",
507 ci->i_xattrs.count, ci->i_xattrs.names_size,
508 ci->i_xattrs.vals_size);
509
510 if (name_size)
511 size += 4 + 4 + name_size + val_size;
512
513 return size;
514}
515
516/*
517 * If there are dirty xattrs, reencode xattrs into the prealloc_blob
518 * and swap into place.
519 */
520void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
521{
522 struct rb_node *p;
523 struct ceph_inode_xattr *xattr = NULL;
524 void *dest;
525
526 dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
527 if (ci->i_xattrs.dirty) {
528 int need = __get_required_blob_size(ci, 0, 0);
529
530 BUG_ON(need > ci->i_xattrs.prealloc_blob->alloc_len);
531
532 p = rb_first(&ci->i_xattrs.index);
533 dest = ci->i_xattrs.prealloc_blob->vec.iov_base;
534
535 ceph_encode_32(&dest, ci->i_xattrs.count);
536 while (p) {
537 xattr = rb_entry(p, struct ceph_inode_xattr, node);
538
539 ceph_encode_32(&dest, xattr->name_len);
540 memcpy(dest, xattr->name, xattr->name_len);
541 dest += xattr->name_len;
542 ceph_encode_32(&dest, xattr->val_len);
543 memcpy(dest, xattr->val, xattr->val_len);
544 dest += xattr->val_len;
545
546 p = rb_next(p);
547 }
548
549 /* adjust buffer len; it may be larger than we need */
550 ci->i_xattrs.prealloc_blob->vec.iov_len =
551 dest - ci->i_xattrs.prealloc_blob->vec.iov_base;
552
553 if (ci->i_xattrs.blob)
554 ceph_buffer_put(ci->i_xattrs.blob);
555 ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
556 ci->i_xattrs.prealloc_blob = NULL;
557 ci->i_xattrs.dirty = false;
558 ci->i_xattrs.version++;
559 }
560}
561
562ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
563 size_t size)
564{
565 struct inode *inode = dentry->d_inode;
566 struct ceph_inode_info *ci = ceph_inode(inode);
567 int err;
568 struct ceph_inode_xattr *xattr;
569 struct ceph_vxattr *vxattr = NULL;
570
571 if (!ceph_is_valid_xattr(name))
572 return -ENODATA;
573
574 /* let's see if a virtual xattr was requested */
575 vxattr = ceph_match_vxattr(inode, name);
576
577 spin_lock(&ci->i_ceph_lock);
578 dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
579 ci->i_xattrs.version, ci->i_xattrs.index_version);
580
581 if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
582 (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
583 goto get_xattr;
584 } else {
585 spin_unlock(&ci->i_ceph_lock);
586 /* get xattrs from mds (if we don't already have them) */
587 err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
588 if (err)
589 return err;
590 }
591
592 spin_lock(&ci->i_ceph_lock);
593
594 if (vxattr && vxattr->readonly) {
595 err = vxattr->getxattr_cb(ci, value, size);
596 goto out;
597 }
598
599 err = __build_xattrs(inode);
600 if (err < 0)
601 goto out;
602
603get_xattr:
604 err = -ENODATA; /* == ENOATTR */
605 xattr = __get_xattr(ci, name);
606 if (!xattr) {
607 if (vxattr)
608 err = vxattr->getxattr_cb(ci, value, size);
609 goto out;
610 }
611
612 err = -ERANGE;
613 if (size && size < xattr->val_len)
614 goto out;
615
616 err = xattr->val_len;
617 if (size == 0)
618 goto out;
619
620 memcpy(value, xattr->val, xattr->val_len);
621
622out:
623 spin_unlock(&ci->i_ceph_lock);
624 return err;
625}
626
627ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
628{
629 struct inode *inode = dentry->d_inode;
630 struct ceph_inode_info *ci = ceph_inode(inode);
631 struct ceph_vxattr *vxattrs = ceph_inode_vxattrs(inode);
632 u32 vir_namelen = 0;
633 u32 namelen;
634 int err;
635 u32 len;
636 int i;
637
638 spin_lock(&ci->i_ceph_lock);
639 dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
640 ci->i_xattrs.version, ci->i_xattrs.index_version);
641
642 if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
643 (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
644 goto list_xattr;
645 } else {
646 spin_unlock(&ci->i_ceph_lock);
647 err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
648 if (err)
649 return err;
650 }
651
652 spin_lock(&ci->i_ceph_lock);
653
654 err = __build_xattrs(inode);
655 if (err < 0)
656 goto out;
657
658list_xattr:
659 /*
660 * Start with virtual dir xattr names (if any) (including
661 * terminating '\0' characters for each).
662 */
663 vir_namelen = ceph_vxattrs_name_size(vxattrs);
664
665 /* plus one byte per stored xattr name for its trailing '\0' */
666 namelen = vir_namelen + ci->i_xattrs.names_size + ci->i_xattrs.count;
667 err = -ERANGE;
668 if (size && namelen > size)
669 goto out;
670
671 err = namelen;
672 if (size == 0)
673 goto out;
674
675 names = __copy_xattr_names(ci, names);
676
677 /* virtual xattr names, too */
678 if (vxattrs)
679 for (i = 0; vxattrs[i].name; i++) {
680 len = sprintf(names, "%s", vxattrs[i].name);
681 names += len + 1;
682 }
683
684out:
685 spin_unlock(&ci->i_ceph_lock);
686 return err;
687}
688
689static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
690 const char *value, size_t size, int flags)
691{
692 struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
693 struct inode *inode = dentry->d_inode;
694 struct ceph_inode_info *ci = ceph_inode(inode);
695 struct inode *parent_inode;
696 struct ceph_mds_request *req;
697 struct ceph_mds_client *mdsc = fsc->mdsc;
698 int err;
699 int i, nr_pages;
700 struct page **pages = NULL;
701 void *kaddr;
702
703 /* copy value into some pages */
704 nr_pages = calc_pages_for(0, size);
705 if (nr_pages) {
706 pages = kmalloc(sizeof(pages[0])*nr_pages, GFP_NOFS);
707 if (!pages)
708 return -ENOMEM;
709 err = -ENOMEM;
710 for (i = 0; i < nr_pages; i++) {
711 pages[i] = __page_cache_alloc(GFP_NOFS);
712 if (!pages[i]) {
713 nr_pages = i;
714 goto out;
715 }
716 kaddr = kmap(pages[i]);
717 memcpy(kaddr, value + i*PAGE_CACHE_SIZE,
718 min(PAGE_CACHE_SIZE, size-i*PAGE_CACHE_SIZE));
719 }
720 }
721
722 dout("setxattr value=%.*s\n", (int)size, value);
723
724 /* do request */
725 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETXATTR,
726 USE_AUTH_MDS);
727 if (IS_ERR(req)) {
728 err = PTR_ERR(req);
729 goto out;
730 }
731 req->r_inode = inode;
732 ihold(inode);
733 req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
734 req->r_num_caps = 1;
735 req->r_args.setxattr.flags = cpu_to_le32(flags);
736 req->r_path2 = kstrdup(name, GFP_NOFS);
737
738 req->r_pages = pages;
739 req->r_num_pages = nr_pages;
740 req->r_data_len = size;
741
742 dout("xattr.ver (before): %lld\n", ci->i_xattrs.version);
743 parent_inode = ceph_get_dentry_parent_inode(dentry);
744 err = ceph_mdsc_do_request(mdsc, parent_inode, req);
745 iput(parent_inode);
746 ceph_mdsc_put_request(req);
747 dout("xattr.ver (after): %lld\n", ci->i_xattrs.version);
748
749out:
750 if (pages) {
751 for (i = 0; i < nr_pages; i++)
752 __free_page(pages[i]);
753 kfree(pages);
754 }
755 return err;
756}
757
758int ceph_setxattr(struct dentry *dentry, const char *name,
759 const void *value, size_t size, int flags)
760{
761 struct inode *inode = dentry->d_inode;
762 struct ceph_vxattr *vxattr;
763 struct ceph_inode_info *ci = ceph_inode(inode);
764 int issued;
765 int err;
766 int dirty;
767 int name_len = strlen(name);
768 int val_len = size;
769 char *newname = NULL;
770 char *newval = NULL;
771 struct ceph_inode_xattr *xattr = NULL;
772 int required_blob_size;
773
774 if (ceph_snap(inode) != CEPH_NOSNAP)
775 return -EROFS;
776
777 if (!ceph_is_valid_xattr(name))
778 return -EOPNOTSUPP;
779
780 vxattr = ceph_match_vxattr(inode, name);
781 if (vxattr && vxattr->readonly)
782 return -EOPNOTSUPP;
783
784 /* preallocate memory for xattr name, value, index node */
785 err = -ENOMEM;
786 newname = kmemdup(name, name_len + 1, GFP_NOFS);
787 if (!newname)
788 goto out;
789
790 if (val_len) {
791 newval = kmemdup(value, val_len, GFP_NOFS);
792 if (!newval)
793 goto out;
794 }
795
796 xattr = kmalloc(sizeof(struct ceph_inode_xattr), GFP_NOFS);
797 if (!xattr)
798 goto out;
799
800 spin_lock(&ci->i_ceph_lock);
801retry:
802 issued = __ceph_caps_issued(ci, NULL);
803 dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued));
804 if (!(issued & CEPH_CAP_XATTR_EXCL))
805 goto do_sync;
806 __build_xattrs(inode);
807
808 required_blob_size = __get_required_blob_size(ci, name_len, val_len);
809
810 if (!ci->i_xattrs.prealloc_blob ||
811 required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
812 struct ceph_buffer *blob;
813
814 spin_unlock(&ci->i_ceph_lock);
815 dout(" preaallocating new blob size=%d\n", required_blob_size);
816 blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
817 if (!blob)
818 goto out;
819 spin_lock(&ci->i_ceph_lock);
820 if (ci->i_xattrs.prealloc_blob)
821 ceph_buffer_put(ci->i_xattrs.prealloc_blob);
822 ci->i_xattrs.prealloc_blob = blob;
823 goto retry;
824 }
825
826 err = __set_xattr(ci, newname, name_len, newval,
827 val_len, 1, 1, 1, &xattr);
828
829 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
830 ci->i_xattrs.dirty = true;
831 inode->i_ctime = CURRENT_TIME;
832
833 spin_unlock(&ci->i_ceph_lock);
834 if (dirty)
835 __mark_inode_dirty(inode, dirty);
836 return err;
837
838do_sync:
839 spin_unlock(&ci->i_ceph_lock);
840 err = ceph_sync_setxattr(dentry, name, value, size, flags);
841out:
842 kfree(newname);
843 kfree(newval);
844 kfree(xattr);
845 return err;
846}
847
848static int ceph_send_removexattr(struct dentry *dentry, const char *name)
849{
850 struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
851 struct ceph_mds_client *mdsc = fsc->mdsc;
852 struct inode *inode = dentry->d_inode;
853 struct inode *parent_inode;
854 struct ceph_mds_request *req;
855 int err;
856
857 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RMXATTR,
858 USE_AUTH_MDS);
859 if (IS_ERR(req))
860 return PTR_ERR(req);
861 req->r_inode = inode;
862 ihold(inode);
863 req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
864 req->r_num_caps = 1;
865 req->r_path2 = kstrdup(name, GFP_NOFS);
866
867 parent_inode = ceph_get_dentry_parent_inode(dentry);
868 err = ceph_mdsc_do_request(mdsc, parent_inode, req);
869 iput(parent_inode);
870 ceph_mdsc_put_request(req);
871 return err;
872}
873
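/*
 * Like ceph_setxattr(), this takes the local path only while holding
 * the XATTR_EXCL cap, and makes sure a prealloc_blob large enough for
 * the remaining xattrs exists before dirtying the caps; otherwise the
 * removal is sent synchronously to the MDS.
 */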
874int ceph_removexattr(struct dentry *dentry, const char *name)
875{
876 struct inode *inode = dentry->d_inode;
877 struct ceph_vxattr *vxattr;
878 struct ceph_inode_info *ci = ceph_inode(inode);
879 int issued;
880 int err;
881 int required_blob_size;
882 int dirty;
883
884 if (ceph_snap(inode) != CEPH_NOSNAP)
885 return -EROFS;
886
887 if (!ceph_is_valid_xattr(name))
888 return -EOPNOTSUPP;
889
890 vxattr = ceph_match_vxattr(inode, name);
891 if (vxattr && vxattr->readonly)
892 return -EOPNOTSUPP;
893
894 err = -ENOMEM;
895 spin_lock(&ci->i_ceph_lock);
896retry:
897 issued = __ceph_caps_issued(ci, NULL);
898 dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued));
899
900 if (!(issued & CEPH_CAP_XATTR_EXCL))
901 goto do_sync;
902 __build_xattrs(inode);
903
904 required_blob_size = __get_required_blob_size(ci, 0, 0);
905
906 if (!ci->i_xattrs.prealloc_blob ||
907 required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
908 struct ceph_buffer *blob;
909
910 spin_unlock(&ci->i_ceph_lock);
911 dout(" preaallocating new blob size=%d\n", required_blob_size);
912 blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
913 if (!blob)
914 goto out;
915 spin_lock(&ci->i_ceph_lock);
916 if (ci->i_xattrs.prealloc_blob)
917 ceph_buffer_put(ci->i_xattrs.prealloc_blob);
918 ci->i_xattrs.prealloc_blob = blob;
919 goto retry;
920 }
921
922 err = __remove_xattr_by_name(ceph_inode(inode), name);
923
924 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
925 ci->i_xattrs.dirty = true;
926 inode->i_ctime = CURRENT_TIME;
927 spin_unlock(&ci->i_ceph_lock);
928 if (dirty)
929 __mark_inode_dirty(inode, dirty);
930 return err;
931do_sync:
932 spin_unlock(&ci->i_ceph_lock);
933 err = ceph_send_removexattr(dentry, name);
934out:
935 return err;
936}
937