// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/ceph/pagelist.h>

#include "super.h"
#include "mds_client.h"

#include <linux/ceph/decode.h>

#include <linux/xattr.h>
#include <linux/security.h>
#include <linux/posix_acl_xattr.h>
#include <linux/slab.h>

#define XATTR_CEPH_PREFIX "ceph."
#define XATTR_CEPH_PREFIX_LEN (sizeof (XATTR_CEPH_PREFIX) - 1)

static int __remove_xattr(struct ceph_inode_info *ci,
			  struct ceph_inode_xattr *xattr);

static bool ceph_is_valid_xattr(const char *name)
{
	return !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) ||
	       !strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN) ||
	       !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
	       !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
}

/*
 * These define virtual xattrs exposing the recursive directory
 * statistics and layout metadata.
 */
struct ceph_vxattr {
	char *name;
	size_t name_size;	/* strlen(name) + 1 (for '\0') */
	ssize_t (*getxattr_cb)(struct ceph_inode_info *ci, char *val,
			       size_t size);
	bool (*exists_cb)(struct ceph_inode_info *ci);
	unsigned int flags;
};

#define VXATTR_FLAG_READONLY	(1<<0)
#define VXATTR_FLAG_HIDDEN	(1<<1)
#define VXATTR_FLAG_RSTAT	(1<<2)
#define VXATTR_FLAG_DIRSTAT	(1<<3)

/* layouts */

static bool ceph_vxattrcb_layout_exists(struct ceph_inode_info *ci)
{
	struct ceph_file_layout *fl = &ci->i_layout;
	return (fl->stripe_unit > 0 || fl->stripe_count > 0 ||
		fl->object_size > 0 || fl->pool_id >= 0 ||
		rcu_dereference_raw(fl->pool_ns) != NULL);
}

static ssize_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
				    size_t size)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	struct ceph_string *pool_ns;
	s64 pool = ci->i_layout.pool_id;
	const char *pool_name;
	const char *ns_field = " pool_namespace=";
	char buf[128];
	size_t len, total_len = 0;
	ssize_t ret;

	pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);

	dout("ceph_vxattrcb_layout %p\n", &ci->netfs.inode);
	down_read(&osdc->lock);
	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
	if (pool_name) {
		len = snprintf(buf, sizeof(buf),
			"stripe_unit=%u stripe_count=%u object_size=%u pool=",
			ci->i_layout.stripe_unit, ci->i_layout.stripe_count,
			ci->i_layout.object_size);
		total_len = len + strlen(pool_name);
	} else {
		len = snprintf(buf, sizeof(buf),
			"stripe_unit=%u stripe_count=%u object_size=%u pool=%lld",
			ci->i_layout.stripe_unit, ci->i_layout.stripe_count,
			ci->i_layout.object_size, pool);
		total_len = len;
	}

	if (pool_ns)
		total_len += strlen(ns_field) + pool_ns->len;

	ret = total_len;
	if (size >= total_len) {
		memcpy(val, buf, len);
		ret = len;
		if (pool_name) {
			len = strlen(pool_name);
			memcpy(val + ret, pool_name, len);
			ret += len;
		}
		if (pool_ns) {
			len = strlen(ns_field);
			memcpy(val + ret, ns_field, len);
			ret += len;
			memcpy(val + ret, pool_ns->str, pool_ns->len);
			ret += pool_ns->len;
		}
	}
	up_read(&osdc->lock);
	ceph_put_string(pool_ns);
	return ret;
}

/*
 * The convention with strings in xattrs is that they should not be NULL
 * terminated, since we're returning the length with them. snprintf always
 * NULL terminates however, so call it on a temporary buffer and then memcpy
 * the result into place.
 */
static __printf(3, 4)
int ceph_fmt_xattr(char *val, size_t size, const char *fmt, ...)
{
	int ret;
	va_list args;
	char buf[96]; /* NB: reevaluate size if new vxattrs are added */

	va_start(args, fmt);
	ret = vsnprintf(buf, size ? sizeof(buf) : 0, fmt, args);
	va_end(args);

	/* Sanity check */
	if (size && ret + 1 > sizeof(buf)) {
		WARN_ONCE(true, "Returned length too big (%d)", ret);
		return -E2BIG;
	}

	if (ret <= size)
		memcpy(val, buf, ret);
	return ret;
}

static ssize_t ceph_vxattrcb_layout_stripe_unit(struct ceph_inode_info *ci,
						char *val, size_t size)
{
	return ceph_fmt_xattr(val, size, "%u", ci->i_layout.stripe_unit);
}

static ssize_t ceph_vxattrcb_layout_stripe_count(struct ceph_inode_info *ci,
						 char *val, size_t size)
{
	return ceph_fmt_xattr(val, size, "%u", ci->i_layout.stripe_count);
}

static ssize_t ceph_vxattrcb_layout_object_size(struct ceph_inode_info *ci,
						char *val, size_t size)
{
	return ceph_fmt_xattr(val, size, "%u", ci->i_layout.object_size);
}

static ssize_t ceph_vxattrcb_layout_pool(struct ceph_inode_info *ci,
					 char *val, size_t size)
{
	ssize_t ret;
	struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	s64 pool = ci->i_layout.pool_id;
	const char *pool_name;

	down_read(&osdc->lock);
	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
	if (pool_name) {
		ret = strlen(pool_name);
		if (ret <= size)
			memcpy(val, pool_name, ret);
	} else {
		ret = ceph_fmt_xattr(val, size, "%lld", pool);
	}
	up_read(&osdc->lock);
	return ret;
}

static ssize_t ceph_vxattrcb_layout_pool_namespace(struct ceph_inode_info *ci,
						   char *val, size_t size)
{
	ssize_t ret = 0;
	struct ceph_string *ns = ceph_try_get_string(ci->i_layout.pool_ns);

	if (ns) {
		ret = ns->len;
		if (ret <= size)
			memcpy(val, ns->str, ret);
		ceph_put_string(ns);
	}
	return ret;
}

/* directories */

static ssize_t ceph_vxattrcb_dir_entries(struct ceph_inode_info *ci, char *val,
					 size_t size)
{
	return ceph_fmt_xattr(val, size, "%lld", ci->i_files + ci->i_subdirs);
}

static ssize_t ceph_vxattrcb_dir_files(struct ceph_inode_info *ci, char *val,
				       size_t size)
{
	return ceph_fmt_xattr(val, size, "%lld", ci->i_files);
}

static ssize_t ceph_vxattrcb_dir_subdirs(struct ceph_inode_info *ci, char *val,
					 size_t size)
{
	return ceph_fmt_xattr(val, size, "%lld", ci->i_subdirs);
}

static ssize_t ceph_vxattrcb_dir_rentries(struct ceph_inode_info *ci, char *val,
					  size_t size)
{
	return ceph_fmt_xattr(val, size, "%lld",
			      ci->i_rfiles + ci->i_rsubdirs);
}

static ssize_t ceph_vxattrcb_dir_rfiles(struct ceph_inode_info *ci, char *val,
					size_t size)
{
	return ceph_fmt_xattr(val, size, "%lld", ci->i_rfiles);
}

static ssize_t ceph_vxattrcb_dir_rsubdirs(struct ceph_inode_info *ci, char *val,
					  size_t size)
{
	return ceph_fmt_xattr(val, size, "%lld", ci->i_rsubdirs);
}

static ssize_t ceph_vxattrcb_dir_rsnaps(struct ceph_inode_info *ci, char *val,
					size_t size)
{
	return ceph_fmt_xattr(val, size, "%lld", ci->i_rsnaps);
}

static ssize_t ceph_vxattrcb_dir_rbytes(struct ceph_inode_info *ci, char *val,
					size_t size)
{
	return ceph_fmt_xattr(val, size, "%lld", ci->i_rbytes);
}

static ssize_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val,
					size_t size)
{
	return ceph_fmt_xattr(val, size, "%lld.%09ld", ci->i_rctime.tv_sec,
			      ci->i_rctime.tv_nsec);
}

/* dir pin */
static bool ceph_vxattrcb_dir_pin_exists(struct ceph_inode_info *ci)
{
	return ci->i_dir_pin != -ENODATA;
}

static ssize_t ceph_vxattrcb_dir_pin(struct ceph_inode_info *ci, char *val,
				     size_t size)
{
	return ceph_fmt_xattr(val, size, "%d", (int)ci->i_dir_pin);
}

/* quotas */
static bool ceph_vxattrcb_quota_exists(struct ceph_inode_info *ci)
{
	bool ret = false;
	spin_lock(&ci->i_ceph_lock);
	if ((ci->i_max_files || ci->i_max_bytes) &&
	    ci->i_vino.snap == CEPH_NOSNAP &&
	    ci->i_snap_realm &&
	    ci->i_snap_realm->ino == ci->i_vino.ino)
		ret = true;
	spin_unlock(&ci->i_ceph_lock);
	return ret;
}

static ssize_t ceph_vxattrcb_quota(struct ceph_inode_info *ci, char *val,
				   size_t size)
{
	return ceph_fmt_xattr(val, size, "max_bytes=%llu max_files=%llu",
			      ci->i_max_bytes, ci->i_max_files);
}

static ssize_t ceph_vxattrcb_quota_max_bytes(struct ceph_inode_info *ci,
					     char *val, size_t size)
{
	return ceph_fmt_xattr(val, size, "%llu", ci->i_max_bytes);
}

static ssize_t ceph_vxattrcb_quota_max_files(struct ceph_inode_info *ci,
					     char *val, size_t size)
{
	return ceph_fmt_xattr(val, size, "%llu", ci->i_max_files);
}

/* snapshots */
static bool ceph_vxattrcb_snap_btime_exists(struct ceph_inode_info *ci)
{
	return (ci->i_snap_btime.tv_sec != 0 || ci->i_snap_btime.tv_nsec != 0);
}

static ssize_t ceph_vxattrcb_snap_btime(struct ceph_inode_info *ci, char *val,
					size_t size)
{
	return ceph_fmt_xattr(val, size, "%lld.%09ld", ci->i_snap_btime.tv_sec,
			      ci->i_snap_btime.tv_nsec);
}

static ssize_t ceph_vxattrcb_cluster_fsid(struct ceph_inode_info *ci,
					  char *val, size_t size)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);

	return ceph_fmt_xattr(val, size, "%pU", &fsc->client->fsid);
}

static ssize_t ceph_vxattrcb_client_id(struct ceph_inode_info *ci,
				       char *val, size_t size)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);

	return ceph_fmt_xattr(val, size, "client%lld",
			      ceph_client_gid(fsc->client));
}

static ssize_t ceph_vxattrcb_caps(struct ceph_inode_info *ci, char *val,
				  size_t size)
{
	int issued;

	spin_lock(&ci->i_ceph_lock);
	issued = __ceph_caps_issued(ci, NULL);
	spin_unlock(&ci->i_ceph_lock);

	return ceph_fmt_xattr(val, size, "%s/0x%x",
			      ceph_cap_string(issued), issued);
}

static ssize_t ceph_vxattrcb_auth_mds(struct ceph_inode_info *ci,
				      char *val, size_t size)
{
	int ret;

	spin_lock(&ci->i_ceph_lock);
	ret = ceph_fmt_xattr(val, size, "%d",
			     ci->i_auth_cap ? ci->i_auth_cap->session->s_mds : -1);
	spin_unlock(&ci->i_ceph_lock);
	return ret;
}

#define CEPH_XATTR_NAME(_type, _name)	XATTR_CEPH_PREFIX #_type "." #_name
#define CEPH_XATTR_NAME2(_type, _name, _name2)	\
	XATTR_CEPH_PREFIX #_type "." #_name "." #_name2

#define XATTR_NAME_CEPH(_type, _name, _flags)				\
	{								\
		.name = CEPH_XATTR_NAME(_type, _name),			\
		.name_size = sizeof (CEPH_XATTR_NAME(_type, _name)),	\
		.getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name,	\
		.exists_cb = NULL,					\
		.flags = (VXATTR_FLAG_READONLY | _flags),		\
	}
#define XATTR_RSTAT_FIELD(_type, _name)					\
	XATTR_NAME_CEPH(_type, _name, VXATTR_FLAG_RSTAT)
#define XATTR_RSTAT_FIELD_UPDATABLE(_type, _name)			\
	{								\
		.name = CEPH_XATTR_NAME(_type, _name),			\
		.name_size = sizeof (CEPH_XATTR_NAME(_type, _name)),	\
		.getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name,	\
		.exists_cb = NULL,					\
		.flags = VXATTR_FLAG_RSTAT,				\
	}
#define XATTR_LAYOUT_FIELD(_type, _name, _field)			\
	{								\
		.name = CEPH_XATTR_NAME2(_type, _name, _field),		\
		.name_size = sizeof (CEPH_XATTR_NAME2(_type, _name, _field)), \
		.getxattr_cb = ceph_vxattrcb_ ## _name ## _ ## _field,	\
		.exists_cb = ceph_vxattrcb_layout_exists,		\
		.flags = VXATTR_FLAG_HIDDEN,				\
	}
#define XATTR_QUOTA_FIELD(_type, _name)					\
	{								\
		.name = CEPH_XATTR_NAME(_type, _name),			\
		.name_size = sizeof(CEPH_XATTR_NAME(_type, _name)),	\
		.getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name,	\
		.exists_cb = ceph_vxattrcb_quota_exists,		\
		.flags = VXATTR_FLAG_HIDDEN,				\
	}

static struct ceph_vxattr ceph_dir_vxattrs[] = {
	{
		.name = "ceph.dir.layout",
		.name_size = sizeof("ceph.dir.layout"),
		.getxattr_cb = ceph_vxattrcb_layout,
		.exists_cb = ceph_vxattrcb_layout_exists,
		.flags = VXATTR_FLAG_HIDDEN,
	},
	XATTR_LAYOUT_FIELD(dir, layout, stripe_unit),
	XATTR_LAYOUT_FIELD(dir, layout, stripe_count),
	XATTR_LAYOUT_FIELD(dir, layout, object_size),
	XATTR_LAYOUT_FIELD(dir, layout, pool),
	XATTR_LAYOUT_FIELD(dir, layout, pool_namespace),
	XATTR_NAME_CEPH(dir, entries, VXATTR_FLAG_DIRSTAT),
	XATTR_NAME_CEPH(dir, files, VXATTR_FLAG_DIRSTAT),
	XATTR_NAME_CEPH(dir, subdirs, VXATTR_FLAG_DIRSTAT),
	XATTR_RSTAT_FIELD(dir, rentries),
	XATTR_RSTAT_FIELD(dir, rfiles),
	XATTR_RSTAT_FIELD(dir, rsubdirs),
	XATTR_RSTAT_FIELD(dir, rsnaps),
	XATTR_RSTAT_FIELD(dir, rbytes),
	XATTR_RSTAT_FIELD_UPDATABLE(dir, rctime),
	{
		.name = "ceph.dir.pin",
		.name_size = sizeof("ceph.dir.pin"),
		.getxattr_cb = ceph_vxattrcb_dir_pin,
		.exists_cb = ceph_vxattrcb_dir_pin_exists,
		.flags = VXATTR_FLAG_HIDDEN,
	},
	{
		.name = "ceph.quota",
		.name_size = sizeof("ceph.quota"),
		.getxattr_cb = ceph_vxattrcb_quota,
		.exists_cb = ceph_vxattrcb_quota_exists,
		.flags = VXATTR_FLAG_HIDDEN,
	},
	XATTR_QUOTA_FIELD(quota, max_bytes),
	XATTR_QUOTA_FIELD(quota, max_files),
	{
		.name = "ceph.snap.btime",
		.name_size = sizeof("ceph.snap.btime"),
		.getxattr_cb = ceph_vxattrcb_snap_btime,
		.exists_cb = ceph_vxattrcb_snap_btime_exists,
		.flags = VXATTR_FLAG_READONLY,
	},
	{
		.name = "ceph.caps",
		.name_size = sizeof("ceph.caps"),
		.getxattr_cb = ceph_vxattrcb_caps,
		.exists_cb = NULL,
		.flags = VXATTR_FLAG_HIDDEN,
	},
	{ .name = NULL, 0 }	/* Required table terminator */
};

/* files */

static struct ceph_vxattr ceph_file_vxattrs[] = {
	{
		.name = "ceph.file.layout",
		.name_size = sizeof("ceph.file.layout"),
		.getxattr_cb = ceph_vxattrcb_layout,
		.exists_cb = ceph_vxattrcb_layout_exists,
		.flags = VXATTR_FLAG_HIDDEN,
	},
	XATTR_LAYOUT_FIELD(file, layout, stripe_unit),
	XATTR_LAYOUT_FIELD(file, layout, stripe_count),
	XATTR_LAYOUT_FIELD(file, layout, object_size),
	XATTR_LAYOUT_FIELD(file, layout, pool),
	XATTR_LAYOUT_FIELD(file, layout, pool_namespace),
	{
		.name = "ceph.snap.btime",
		.name_size = sizeof("ceph.snap.btime"),
		.getxattr_cb = ceph_vxattrcb_snap_btime,
		.exists_cb = ceph_vxattrcb_snap_btime_exists,
		.flags = VXATTR_FLAG_READONLY,
	},
	{
		.name = "ceph.caps",
		.name_size = sizeof("ceph.caps"),
		.getxattr_cb = ceph_vxattrcb_caps,
		.exists_cb = NULL,
		.flags = VXATTR_FLAG_HIDDEN,
	},
	{ .name = NULL, 0 }	/* Required table terminator */
};

static struct ceph_vxattr ceph_common_vxattrs[] = {
	{
		.name = "ceph.cluster_fsid",
		.name_size = sizeof("ceph.cluster_fsid"),
		.getxattr_cb = ceph_vxattrcb_cluster_fsid,
		.exists_cb = NULL,
		.flags = VXATTR_FLAG_READONLY,
	},
	{
		.name = "ceph.client_id",
		.name_size = sizeof("ceph.client_id"),
		.getxattr_cb = ceph_vxattrcb_client_id,
		.exists_cb = NULL,
		.flags = VXATTR_FLAG_READONLY,
	},
	{
		.name = "ceph.auth_mds",
		.name_size = sizeof("ceph.auth_mds"),
		.getxattr_cb = ceph_vxattrcb_auth_mds,
		.exists_cb = NULL,
		.flags = VXATTR_FLAG_READONLY,
	},
	{ .name = NULL, 0 }	/* Required table terminator */
};

static struct ceph_vxattr *ceph_inode_vxattrs(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode))
		return ceph_dir_vxattrs;
	else if (S_ISREG(inode->i_mode))
		return ceph_file_vxattrs;
	return NULL;
}

static struct ceph_vxattr *ceph_match_vxattr(struct inode *inode,
					     const char *name)
{
	struct ceph_vxattr *vxattr = ceph_inode_vxattrs(inode);

	if (vxattr) {
		while (vxattr->name) {
			if (!strcmp(vxattr->name, name))
				return vxattr;
			vxattr++;
		}
	}

	vxattr = ceph_common_vxattrs;
	while (vxattr->name) {
		if (!strcmp(vxattr->name, name))
			return vxattr;
		vxattr++;
	}

	return NULL;
}

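/*
 * Insert or update an xattr in the inode's rb-tree, keyed by name and
 * then by name length.  For a local update (update_xattr != 0) this
 * takes ownership of @name, @val and @*newxattr, freeing whichever of
 * them are not needed or when an error occurs.
 */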
static int __set_xattr(struct ceph_inode_info *ci,
		       const char *name, int name_len,
		       const char *val, int val_len,
		       int flags, int update_xattr,
		       struct ceph_inode_xattr **newxattr)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int c;
	int new = 0;

	p = &ci->i_xattrs.index.rb_node;
	while (*p) {
		parent = *p;
		xattr = rb_entry(parent, struct ceph_inode_xattr, node);
		c = strncmp(name, xattr->name, min(name_len, xattr->name_len));
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else {
			if (name_len == xattr->name_len)
				break;
			else if (name_len < xattr->name_len)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
		xattr = NULL;
	}

	if (update_xattr) {
		int err = 0;

		if (xattr && (flags & XATTR_CREATE))
			err = -EEXIST;
		else if (!xattr && (flags & XATTR_REPLACE))
			err = -ENODATA;
		if (err) {
			kfree(name);
			kfree(val);
			kfree(*newxattr);
			return err;
		}
		if (update_xattr < 0) {
			if (xattr)
				__remove_xattr(ci, xattr);
			kfree(name);
			kfree(*newxattr);
			return 0;
		}
	}

	if (!xattr) {
		new = 1;
		xattr = *newxattr;
		xattr->name = name;
		xattr->name_len = name_len;
		xattr->should_free_name = update_xattr;

		ci->i_xattrs.count++;
		dout("__set_xattr count=%d\n", ci->i_xattrs.count);
	} else {
		kfree(*newxattr);
		*newxattr = NULL;
		if (xattr->should_free_val)
			kfree(xattr->val);

		if (update_xattr) {
			kfree(name);
			name = xattr->name;
		}
		ci->i_xattrs.names_size -= xattr->name_len;
		ci->i_xattrs.vals_size -= xattr->val_len;
	}
	ci->i_xattrs.names_size += name_len;
	ci->i_xattrs.vals_size += val_len;
	if (val)
		xattr->val = val;
	else
		xattr->val = "";

	xattr->val_len = val_len;
	xattr->dirty = update_xattr;
	xattr->should_free_val = (val && update_xattr);

	if (new) {
		rb_link_node(&xattr->node, parent, p);
		rb_insert_color(&xattr->node, &ci->i_xattrs.index);
		dout("__set_xattr_val p=%p\n", p);
	}

	dout("__set_xattr_val added %llx.%llx xattr %p %.*s=%.*s\n",
	     ceph_vinop(&ci->netfs.inode), xattr, name_len, name, val_len, val);

	return 0;
}

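/*
 * Look up an xattr by name in the inode's rb-tree, using the same
 * ordering as __set_xattr; returns NULL if it is not present.
 */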
static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
					    const char *name)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int name_len = strlen(name);
	int c;

	p = &ci->i_xattrs.index.rb_node;
	while (*p) {
		parent = *p;
		xattr = rb_entry(parent, struct ceph_inode_xattr, node);
		c = strncmp(name, xattr->name, xattr->name_len);
		if (c == 0 && name_len > xattr->name_len)
			c = 1;
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else {
			dout("__get_xattr %s: found %.*s\n", name,
			     xattr->val_len, xattr->val);
			return xattr;
		}
	}

	dout("__get_xattr %s: not found\n", name);

	return NULL;
}

static void __free_xattr(struct ceph_inode_xattr *xattr)
{
	BUG_ON(!xattr);

	if (xattr->should_free_name)
		kfree(xattr->name);
	if (xattr->should_free_val)
		kfree(xattr->val);

	kfree(xattr);
}

static int __remove_xattr(struct ceph_inode_info *ci,
			  struct ceph_inode_xattr *xattr)
{
	if (!xattr)
		return -ENODATA;

	rb_erase(&xattr->node, &ci->i_xattrs.index);

	if (xattr->should_free_name)
		kfree(xattr->name);
	if (xattr->should_free_val)
		kfree(xattr->val);

	ci->i_xattrs.names_size -= xattr->name_len;
	ci->i_xattrs.vals_size -= xattr->val_len;
	ci->i_xattrs.count--;
	kfree(xattr);

	return 0;
}

static char *__copy_xattr_names(struct ceph_inode_info *ci,
				char *dest)
{
	struct rb_node *p;
	struct ceph_inode_xattr *xattr = NULL;

	p = rb_first(&ci->i_xattrs.index);
	dout("__copy_xattr_names count=%d\n", ci->i_xattrs.count);

	while (p) {
		xattr = rb_entry(p, struct ceph_inode_xattr, node);
		memcpy(dest, xattr->name, xattr->name_len);
		dest[xattr->name_len] = '\0';

		dout("dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
		     xattr->name_len, ci->i_xattrs.names_size);

		dest += xattr->name_len + 1;
		p = rb_next(p);
	}

	return dest;
}

void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
{
	struct rb_node *p, *tmp;
	struct ceph_inode_xattr *xattr = NULL;

	p = rb_first(&ci->i_xattrs.index);

	dout("__ceph_destroy_xattrs p=%p\n", p);

	while (p) {
		xattr = rb_entry(p, struct ceph_inode_xattr, node);
		tmp = p;
		p = rb_next(tmp);
		dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p,
		     xattr->name_len, xattr->name);
		rb_erase(tmp, &ci->i_xattrs.index);

		__free_xattr(xattr);
	}

	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.index_version = 0;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.index = RB_ROOT;
}

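/*
 * Decode the raw xattr blob received from the MDS into the in-memory
 * rb-tree.  i_ceph_lock may be dropped while allocating; if the xattr
 * version changes in the meantime, decoding is restarted.
 */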
static int __build_xattrs(struct inode *inode)
	__releases(ci->i_ceph_lock)
	__acquires(ci->i_ceph_lock)
{
	u32 namelen;
	u32 numattr = 0;
	void *p, *end;
	u32 len;
	const char *name, *val;
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 xattr_version;
	struct ceph_inode_xattr **xattrs = NULL;
	int err = 0;
	int i;

	dout("__build_xattrs() len=%d\n",
	     ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);

	if (ci->i_xattrs.index_version >= ci->i_xattrs.version)
		return 0; /* already built */

	__ceph_destroy_xattrs(ci);

start:
	/* updated internal xattr rb tree */
	if (ci->i_xattrs.blob && ci->i_xattrs.blob->vec.iov_len > 4) {
		p = ci->i_xattrs.blob->vec.iov_base;
		end = p + ci->i_xattrs.blob->vec.iov_len;
		ceph_decode_32_safe(&p, end, numattr, bad);
		xattr_version = ci->i_xattrs.version;
		spin_unlock(&ci->i_ceph_lock);

		xattrs = kcalloc(numattr, sizeof(struct ceph_inode_xattr *),
				 GFP_NOFS);
		err = -ENOMEM;
		if (!xattrs)
			goto bad_lock;

		for (i = 0; i < numattr; i++) {
			xattrs[i] = kmalloc(sizeof(struct ceph_inode_xattr),
					    GFP_NOFS);
			if (!xattrs[i])
				goto bad_lock;
		}

		spin_lock(&ci->i_ceph_lock);
		if (ci->i_xattrs.version != xattr_version) {
			/* lost a race, retry */
			for (i = 0; i < numattr; i++)
				kfree(xattrs[i]);
			kfree(xattrs);
			xattrs = NULL;
			goto start;
		}
		err = -EIO;
		while (numattr--) {
			ceph_decode_32_safe(&p, end, len, bad);
			namelen = len;
			name = p;
			p += len;
			ceph_decode_32_safe(&p, end, len, bad);
			val = p;
			p += len;

			err = __set_xattr(ci, name, namelen, val, len,
					  0, 0, &xattrs[numattr]);

			if (err < 0)
				goto bad;
		}
		kfree(xattrs);
	}
	ci->i_xattrs.index_version = ci->i_xattrs.version;
	ci->i_xattrs.dirty = false;

	return err;
bad_lock:
	spin_lock(&ci->i_ceph_lock);
bad:
	if (xattrs) {
		for (i = 0; i < numattr; i++)
			kfree(xattrs[i]);
		kfree(xattrs);
	}
	ci->i_xattrs.names_size = 0;
	return err;
}

static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
				    int val_size)
{
	/*
	 * 4 bytes for the length, and additional 4 bytes per each xattr name,
	 * 4 bytes per each value
	 */
	int size = 4 + ci->i_xattrs.count*(4 + 4) +
		   ci->i_xattrs.names_size +
		   ci->i_xattrs.vals_size;
	dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n",
	     ci->i_xattrs.count, ci->i_xattrs.names_size,
	     ci->i_xattrs.vals_size);

	if (name_size)
		size += 4 + 4 + name_size + val_size;

	return size;
}

/*
 * If there are dirty xattrs, reencode xattrs into the prealloc_blob
 * and swap into place. It returns the old i_xattrs.blob (or NULL) so
 * that it can be freed by the caller as the i_ceph_lock is likely to be
 * held.
 */
struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci)
{
	struct rb_node *p;
	struct ceph_inode_xattr *xattr = NULL;
	struct ceph_buffer *old_blob = NULL;
	void *dest;

	dout("__build_xattrs_blob %p\n", &ci->netfs.inode);
	if (ci->i_xattrs.dirty) {
		int need = __get_required_blob_size(ci, 0, 0);

		BUG_ON(need > ci->i_xattrs.prealloc_blob->alloc_len);

		p = rb_first(&ci->i_xattrs.index);
		dest = ci->i_xattrs.prealloc_blob->vec.iov_base;

		ceph_encode_32(&dest, ci->i_xattrs.count);
		while (p) {
			xattr = rb_entry(p, struct ceph_inode_xattr, node);

			ceph_encode_32(&dest, xattr->name_len);
			memcpy(dest, xattr->name, xattr->name_len);
			dest += xattr->name_len;
			ceph_encode_32(&dest, xattr->val_len);
			memcpy(dest, xattr->val, xattr->val_len);
			dest += xattr->val_len;

			p = rb_next(p);
		}

		/* adjust buffer len; it may be larger than we need */
		ci->i_xattrs.prealloc_blob->vec.iov_len =
			dest - ci->i_xattrs.prealloc_blob->vec.iov_base;

		if (ci->i_xattrs.blob)
			old_blob = ci->i_xattrs.blob;
		ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
		ci->i_xattrs.prealloc_blob = NULL;
		ci->i_xattrs.dirty = false;
		ci->i_xattrs.version++;
	}

	return old_blob;
}

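/*
 * Return the attribute mask requested by the MDS request currently being
 * processed (tracked via current->journal_info), if it targets this inode.
 */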
static inline int __get_request_mask(struct inode *in) {
	struct ceph_mds_request *req = current->journal_info;
	int mask = 0;
	if (req && req->r_target_inode == in) {
		if (req->r_op == CEPH_MDS_OP_LOOKUP ||
		    req->r_op == CEPH_MDS_OP_LOOKUPINO ||
		    req->r_op == CEPH_MDS_OP_LOOKUPPARENT ||
		    req->r_op == CEPH_MDS_OP_GETATTR) {
			mask = le32_to_cpu(req->r_args.getattr.mask);
		} else if (req->r_op == CEPH_MDS_OP_OPEN ||
			   req->r_op == CEPH_MDS_OP_CREATE) {
			mask = le32_to_cpu(req->r_args.open.mask);
		}
	}
	return mask;
}

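/*
 * Get a single xattr value.  ceph.* virtual xattrs are generated from
 * in-core inode state (or fetched from the MDS for unknown names);
 * everything else comes from the cached xattr blob, which is refreshed
 * from the MDS if we do not hold CEPH_CAP_XATTR_SHARED.
 */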
ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value,
			size_t size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_xattr *xattr;
	struct ceph_vxattr *vxattr;
	int req_mask;
	ssize_t err;

	if (strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN))
		goto handle_non_vxattrs;

	/* let's see if a virtual xattr was requested */
	vxattr = ceph_match_vxattr(inode, name);
	if (vxattr) {
		int mask = 0;
		if (vxattr->flags & VXATTR_FLAG_RSTAT)
			mask |= CEPH_STAT_RSTAT;
		if (vxattr->flags & VXATTR_FLAG_DIRSTAT)
			mask |= CEPH_CAP_FILE_SHARED;
		err = ceph_do_getattr(inode, mask, true);
		if (err)
			return err;
		err = -ENODATA;
		if (!(vxattr->exists_cb && !vxattr->exists_cb(ci))) {
			err = vxattr->getxattr_cb(ci, value, size);
			if (size && size < err)
				err = -ERANGE;
		}
		return err;
	} else {
		err = ceph_do_getvxattr(inode, name, value, size);
		/* this would happen with a new client and old server combo */
		if (err == -EOPNOTSUPP)
			err = -ENODATA;
		return err;
	}
handle_non_vxattrs:
	req_mask = __get_request_mask(inode);

	spin_lock(&ci->i_ceph_lock);
	dout("getxattr %p name '%s' ver=%lld index_ver=%lld\n", inode, name,
	     ci->i_xattrs.version, ci->i_xattrs.index_version);

	if (ci->i_xattrs.version == 0 ||
	    !((req_mask & CEPH_CAP_XATTR_SHARED) ||
	      __ceph_caps_issued_mask_metric(ci, CEPH_CAP_XATTR_SHARED, 1))) {
		spin_unlock(&ci->i_ceph_lock);

		/* security module gets xattr while filling trace */
		if (current->journal_info) {
			pr_warn_ratelimited("sync getxattr %p "
					    "during filling trace\n", inode);
			return -EBUSY;
		}

		/* get xattrs from mds (if we don't already have them) */
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR, true);
		if (err)
			return err;
		spin_lock(&ci->i_ceph_lock);
	}

	err = __build_xattrs(inode);
	if (err < 0)
		goto out;

	err = -ENODATA;  /* == ENOATTR */
	xattr = __get_xattr(ci, name);
	if (!xattr)
		goto out;

	err = -ERANGE;
	if (size && size < xattr->val_len)
		goto out;

	err = xattr->val_len;
	if (size == 0)
		goto out;

	memcpy(value, xattr->val, xattr->val_len);

	if (current->journal_info &&
	    !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) &&
	    security_ismaclabel(name + XATTR_SECURITY_PREFIX_LEN))
		ci->i_ceph_flags |= CEPH_I_SEC_INITED;
out:
	spin_unlock(&ci->i_ceph_lock);
	return err;
}

ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
{
	struct inode *inode = d_inode(dentry);
	struct ceph_inode_info *ci = ceph_inode(inode);
	bool len_only = (size == 0);
	u32 namelen;
	int err;

	spin_lock(&ci->i_ceph_lock);
	dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
	     ci->i_xattrs.version, ci->i_xattrs.index_version);

	if (ci->i_xattrs.version == 0 ||
	    !__ceph_caps_issued_mask_metric(ci, CEPH_CAP_XATTR_SHARED, 1)) {
		spin_unlock(&ci->i_ceph_lock);
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR, true);
		if (err)
			return err;
		spin_lock(&ci->i_ceph_lock);
	}

	err = __build_xattrs(inode);
	if (err < 0)
		goto out;

	/* add 1 byte for each xattr due to the null termination */
	namelen = ci->i_xattrs.names_size + ci->i_xattrs.count;
	if (!len_only) {
		if (namelen > size) {
			err = -ERANGE;
			goto out;
		}
		names = __copy_xattr_names(ci, names);
		size -= namelen;
	}
	err = namelen;
out:
	spin_unlock(&ci->i_ceph_lock);
	return err;
}

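/*
 * Send a synchronous SETXATTR (or RMXATTR) request to the MDS, passing
 * the value, if any, via a pagelist.
 */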
static int ceph_sync_setxattr(struct inode *inode, const char *name,
			      const char *value, size_t size, int flags)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	struct ceph_pagelist *pagelist = NULL;
	int op = CEPH_MDS_OP_SETXATTR;
	int err;

	if (size > 0) {
		/* copy value into pagelist */
		pagelist = ceph_pagelist_alloc(GFP_NOFS);
		if (!pagelist)
			return -ENOMEM;

		err = ceph_pagelist_append(pagelist, value, size);
		if (err)
			goto out;
	} else if (!value) {
		if (flags & CEPH_XATTR_REPLACE)
			op = CEPH_MDS_OP_RMXATTR;
		else
			flags |= CEPH_XATTR_REMOVE;
	}

	dout("setxattr value size: %zu\n", size);

	/* do request */
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_path2 = kstrdup(name, GFP_NOFS);
	if (!req->r_path2) {
		ceph_mdsc_put_request(req);
		err = -ENOMEM;
		goto out;
	}

	if (op == CEPH_MDS_OP_SETXATTR) {
		req->r_args.setxattr.flags = cpu_to_le32(flags);
		req->r_args.setxattr.osdmap_epoch =
			cpu_to_le32(osdc->osdmap->epoch);
		req->r_pagelist = pagelist;
		pagelist = NULL;
	}

	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	req->r_inode_drop = CEPH_CAP_XATTR_SHARED;

	dout("xattr.ver (before): %lld\n", ci->i_xattrs.version);
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
	dout("xattr.ver (after): %lld\n", ci->i_xattrs.version);

out:
	if (pagelist)
		ceph_pagelist_release(pagelist);
	return err;
}

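/*
 * Set (or, with a NULL value, remove) an xattr.  If we hold the XATTR
 * excl cap and the updated blob fits under the MDS limit, apply the
 * change locally and mark the caps dirty; otherwise fall back to a
 * synchronous request to the MDS.
 */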
int __ceph_setxattr(struct inode *inode, const char *name,
		    const void *value, size_t size, int flags)
{
	struct ceph_vxattr *vxattr;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_cap_flush *prealloc_cf = NULL;
	struct ceph_buffer *old_blob = NULL;
	int issued;
	int err;
	int dirty = 0;
	int name_len = strlen(name);
	int val_len = size;
	char *newname = NULL;
	char *newval = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int required_blob_size;
	bool check_realm = false;
	bool lock_snap_rwsem = false;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	vxattr = ceph_match_vxattr(inode, name);
	if (vxattr) {
		if (vxattr->flags & VXATTR_FLAG_READONLY)
			return -EOPNOTSUPP;
		if (value && !strncmp(vxattr->name, "ceph.quota", 10))
			check_realm = true;
	}

	/* pass any unhandled ceph.* xattrs through to the MDS */
	if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN))
		goto do_sync_unlocked;

	/* preallocate memory for xattr name, value, index node */
	err = -ENOMEM;
	newname = kmemdup(name, name_len + 1, GFP_NOFS);
	if (!newname)
		goto out;

	if (val_len) {
		newval = kmemdup(value, val_len, GFP_NOFS);
		if (!newval)
			goto out;
	}

	xattr = kmalloc(sizeof(struct ceph_inode_xattr), GFP_NOFS);
	if (!xattr)
		goto out;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		goto out;

	spin_lock(&ci->i_ceph_lock);
retry:
	issued = __ceph_caps_issued(ci, NULL);
	required_blob_size = __get_required_blob_size(ci, name_len, val_len);
	if ((ci->i_xattrs.version == 0) || !(issued & CEPH_CAP_XATTR_EXCL) ||
	    (required_blob_size > mdsc->mdsmap->m_max_xattr_size)) {
		dout("%s do sync setxattr: version: %llu size: %d max: %llu\n",
		     __func__, ci->i_xattrs.version, required_blob_size,
		     mdsc->mdsmap->m_max_xattr_size);
		goto do_sync;
	}

	if (!lock_snap_rwsem && !ci->i_head_snapc) {
		lock_snap_rwsem = true;
		if (!down_read_trylock(&mdsc->snap_rwsem)) {
			spin_unlock(&ci->i_ceph_lock);
			down_read(&mdsc->snap_rwsem);
			spin_lock(&ci->i_ceph_lock);
			goto retry;
		}
	}

	dout("setxattr %p name '%s' issued %s\n", inode, name,
	     ceph_cap_string(issued));
	__build_xattrs(inode);

	if (!ci->i_xattrs.prealloc_blob ||
	    required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
		struct ceph_buffer *blob;

		spin_unlock(&ci->i_ceph_lock);
		ceph_buffer_put(old_blob); /* Shouldn't be required */
		dout(" pre-allocating new blob size=%d\n", required_blob_size);
		blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
		if (!blob)
			goto do_sync_unlocked;
		spin_lock(&ci->i_ceph_lock);
		/* prealloc_blob can't be released while holding i_ceph_lock */
		if (ci->i_xattrs.prealloc_blob)
			old_blob = ci->i_xattrs.prealloc_blob;
		ci->i_xattrs.prealloc_blob = blob;
		goto retry;
	}

	err = __set_xattr(ci, newname, name_len, newval, val_len,
			  flags, value ? 1 : -1, &xattr);

	if (!err) {
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL,
					       &prealloc_cf);
		ci->i_xattrs.dirty = true;
		inode->i_ctime = current_time(inode);
	}

	spin_unlock(&ci->i_ceph_lock);
	ceph_buffer_put(old_blob);
	if (lock_snap_rwsem)
		up_read(&mdsc->snap_rwsem);
	if (dirty)
		__mark_inode_dirty(inode, dirty);
	ceph_free_cap_flush(prealloc_cf);
	return err;

do_sync:
	spin_unlock(&ci->i_ceph_lock);
do_sync_unlocked:
	if (lock_snap_rwsem)
		up_read(&mdsc->snap_rwsem);

	/* security module set xattr while filling trace */
	if (current->journal_info) {
		pr_warn_ratelimited("sync setxattr %p "
				    "during filling trace\n", inode);
		err = -EBUSY;
	} else {
		err = ceph_sync_setxattr(inode, name, value, size, flags);
		if (err >= 0 && check_realm) {
			/* check if snaprealm was created for quota inode */
			spin_lock(&ci->i_ceph_lock);
			if ((ci->i_max_files || ci->i_max_bytes) &&
			    !(ci->i_snap_realm &&
			      ci->i_snap_realm->ino == ci->i_vino.ino))
				err = -EOPNOTSUPP;
			spin_unlock(&ci->i_ceph_lock);
		}
	}
out:
	ceph_free_cap_flush(prealloc_cf);
	kfree(newname);
	kfree(newval);
	kfree(xattr);
	return err;
}

static int ceph_get_xattr_handler(const struct xattr_handler *handler,
				  struct dentry *dentry, struct inode *inode,
				  const char *name, void *value, size_t size)
{
	if (!ceph_is_valid_xattr(name))
		return -EOPNOTSUPP;
	return __ceph_getxattr(inode, name, value, size);
}

static int ceph_set_xattr_handler(const struct xattr_handler *handler,
				  struct user_namespace *mnt_userns,
				  struct dentry *unused, struct inode *inode,
				  const char *name, const void *value,
				  size_t size, int flags)
{
	if (!ceph_is_valid_xattr(name))
		return -EOPNOTSUPP;
	return __ceph_setxattr(inode, name, value, size, flags);
}

static const struct xattr_handler ceph_other_xattr_handler = {
	.prefix = "",  /* match any name => handlers called with full name */
	.get = ceph_get_xattr_handler,
	.set = ceph_set_xattr_handler,
};

#ifdef CONFIG_SECURITY
bool ceph_security_xattr_wanted(struct inode *in)
{
	return in->i_security != NULL;
}

bool ceph_security_xattr_deadlock(struct inode *in)
{
	struct ceph_inode_info *ci;
	bool ret;
	if (!in->i_security)
		return false;
	ci = ceph_inode(in);
	spin_lock(&ci->i_ceph_lock);
	ret = !(ci->i_ceph_flags & CEPH_I_SEC_INITED) &&
	      !(ci->i_xattrs.version > 0 &&
		__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 0));
	spin_unlock(&ci->i_ceph_lock);
	return ret;
}

#ifdef CONFIG_CEPH_FS_SECURITY_LABEL
int ceph_security_init_secctx(struct dentry *dentry, umode_t mode,
			      struct ceph_acl_sec_ctx *as_ctx)
{
	struct ceph_pagelist *pagelist = as_ctx->pagelist;
	const char *name;
	size_t name_len;
	int err;

	err = security_dentry_init_security(dentry, mode, &dentry->d_name,
					    &name, &as_ctx->sec_ctx,
					    &as_ctx->sec_ctxlen);
	if (err < 0) {
		WARN_ON_ONCE(err != -EOPNOTSUPP);
		err = 0; /* do nothing */
		goto out;
	}

	err = -ENOMEM;
	if (!pagelist) {
		pagelist = ceph_pagelist_alloc(GFP_KERNEL);
		if (!pagelist)
			goto out;
		err = ceph_pagelist_reserve(pagelist, PAGE_SIZE);
		if (err)
			goto out;
		ceph_pagelist_encode_32(pagelist, 1);
	}

	/*
	 * FIXME: Make security_dentry_init_security() generic. Currently
	 * It only supports single security module and only selinux has
	 * dentry_init_security hook.
	 */
	name_len = strlen(name);
	err = ceph_pagelist_reserve(pagelist,
				    4 * 2 + name_len + as_ctx->sec_ctxlen);
	if (err)
		goto out;

	if (as_ctx->pagelist) {
		/* update count of KV pairs */
		BUG_ON(pagelist->length <= sizeof(__le32));
		if (list_is_singular(&pagelist->head)) {
			le32_add_cpu((__le32*)pagelist->mapped_tail, 1);
		} else {
			struct page *page = list_first_entry(&pagelist->head,
							     struct page, lru);
			void *addr = kmap_atomic(page);
			le32_add_cpu((__le32*)addr, 1);
			kunmap_atomic(addr);
		}
	} else {
		as_ctx->pagelist = pagelist;
	}

	ceph_pagelist_encode_32(pagelist, name_len);
	ceph_pagelist_append(pagelist, name, name_len);

	ceph_pagelist_encode_32(pagelist, as_ctx->sec_ctxlen);
	ceph_pagelist_append(pagelist, as_ctx->sec_ctx, as_ctx->sec_ctxlen);

	err = 0;
out:
	if (pagelist && !as_ctx->pagelist)
		ceph_pagelist_release(pagelist);
	return err;
}
#endif /* CONFIG_CEPH_FS_SECURITY_LABEL */
#endif /* CONFIG_SECURITY */

void ceph_release_acl_sec_ctx(struct ceph_acl_sec_ctx *as_ctx)
{
#ifdef CONFIG_CEPH_FS_POSIX_ACL
	posix_acl_release(as_ctx->acl);
	posix_acl_release(as_ctx->default_acl);
#endif
#ifdef CONFIG_CEPH_FS_SECURITY_LABEL
	security_release_secctx(as_ctx->sec_ctx, as_ctx->sec_ctxlen);
#endif
	if (as_ctx->pagelist)
		ceph_pagelist_release(as_ctx->pagelist);
}

/*
 * List of handlers for synthetic system.* attributes. Other
 * attributes are handled directly.
 */
const struct xattr_handler *ceph_xattr_handlers[] = {
#ifdef CONFIG_CEPH_FS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
	&ceph_other_xattr_handler,
	NULL,
};
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/ceph/ceph_debug.h>
3#include <linux/ceph/pagelist.h>
4
5#include "super.h"
6#include "mds_client.h"
7
8#include <linux/ceph/decode.h>
9
10#include <linux/xattr.h>
11#include <linux/posix_acl_xattr.h>
12#include <linux/slab.h>
13
14#define XATTR_CEPH_PREFIX "ceph."
15#define XATTR_CEPH_PREFIX_LEN (sizeof (XATTR_CEPH_PREFIX) - 1)
16
17static int __remove_xattr(struct ceph_inode_info *ci,
18 struct ceph_inode_xattr *xattr);
19
20static const struct xattr_handler ceph_other_xattr_handler;
21
22/*
23 * List of handlers for synthetic system.* attributes. Other
24 * attributes are handled directly.
25 */
26const struct xattr_handler *ceph_xattr_handlers[] = {
27#ifdef CONFIG_CEPH_FS_POSIX_ACL
28 &posix_acl_access_xattr_handler,
29 &posix_acl_default_xattr_handler,
30#endif
31 &ceph_other_xattr_handler,
32 NULL,
33};
34
35static bool ceph_is_valid_xattr(const char *name)
36{
37 return !strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN) ||
38 !strncmp(name, XATTR_SECURITY_PREFIX,
39 XATTR_SECURITY_PREFIX_LEN) ||
40 !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
41 !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
42}
43
44/*
45 * These define virtual xattrs exposing the recursive directory
46 * statistics and layout metadata.
47 */
48struct ceph_vxattr {
49 char *name;
50 size_t name_size; /* strlen(name) + 1 (for '\0') */
51 size_t (*getxattr_cb)(struct ceph_inode_info *ci, char *val,
52 size_t size);
53 bool readonly, hidden;
54 bool (*exists_cb)(struct ceph_inode_info *ci);
55};
56
57/* layouts */
58
59static bool ceph_vxattrcb_layout_exists(struct ceph_inode_info *ci)
60{
61 struct ceph_file_layout *fl = &ci->i_layout;
62 return (fl->stripe_unit > 0 || fl->stripe_count > 0 ||
63 fl->object_size > 0 || fl->pool_id >= 0 ||
64 rcu_dereference_raw(fl->pool_ns) != NULL);
65}
66
67static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
68 size_t size)
69{
70 struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
71 struct ceph_osd_client *osdc = &fsc->client->osdc;
72 struct ceph_string *pool_ns;
73 s64 pool = ci->i_layout.pool_id;
74 const char *pool_name;
75 const char *ns_field = " pool_namespace=";
76 char buf[128];
77 size_t len, total_len = 0;
78 int ret;
79
80 pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
81
82 dout("ceph_vxattrcb_layout %p\n", &ci->vfs_inode);
83 down_read(&osdc->lock);
84 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
85 if (pool_name) {
86 len = snprintf(buf, sizeof(buf),
87 "stripe_unit=%u stripe_count=%u object_size=%u pool=",
88 ci->i_layout.stripe_unit, ci->i_layout.stripe_count,
89 ci->i_layout.object_size);
90 total_len = len + strlen(pool_name);
91 } else {
92 len = snprintf(buf, sizeof(buf),
93 "stripe_unit=%u stripe_count=%u object_size=%u pool=%lld",
94 ci->i_layout.stripe_unit, ci->i_layout.stripe_count,
95 ci->i_layout.object_size, (unsigned long long)pool);
96 total_len = len;
97 }
98
99 if (pool_ns)
100 total_len += strlen(ns_field) + pool_ns->len;
101
102 if (!size) {
103 ret = total_len;
104 } else if (total_len > size) {
105 ret = -ERANGE;
106 } else {
107 memcpy(val, buf, len);
108 ret = len;
109 if (pool_name) {
110 len = strlen(pool_name);
111 memcpy(val + ret, pool_name, len);
112 ret += len;
113 }
114 if (pool_ns) {
115 len = strlen(ns_field);
116 memcpy(val + ret, ns_field, len);
117 ret += len;
118 memcpy(val + ret, pool_ns->str, pool_ns->len);
119 ret += pool_ns->len;
120 }
121 }
122 up_read(&osdc->lock);
123 ceph_put_string(pool_ns);
124 return ret;
125}
126
127static size_t ceph_vxattrcb_layout_stripe_unit(struct ceph_inode_info *ci,
128 char *val, size_t size)
129{
130 return snprintf(val, size, "%u", ci->i_layout.stripe_unit);
131}
132
133static size_t ceph_vxattrcb_layout_stripe_count(struct ceph_inode_info *ci,
134 char *val, size_t size)
135{
136 return snprintf(val, size, "%u", ci->i_layout.stripe_count);
137}
138
139static size_t ceph_vxattrcb_layout_object_size(struct ceph_inode_info *ci,
140 char *val, size_t size)
141{
142 return snprintf(val, size, "%u", ci->i_layout.object_size);
143}
144
145static size_t ceph_vxattrcb_layout_pool(struct ceph_inode_info *ci,
146 char *val, size_t size)
147{
148 int ret;
149 struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
150 struct ceph_osd_client *osdc = &fsc->client->osdc;
151 s64 pool = ci->i_layout.pool_id;
152 const char *pool_name;
153
154 down_read(&osdc->lock);
155 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
156 if (pool_name)
157 ret = snprintf(val, size, "%s", pool_name);
158 else
159 ret = snprintf(val, size, "%lld", (unsigned long long)pool);
160 up_read(&osdc->lock);
161 return ret;
162}
163
164static size_t ceph_vxattrcb_layout_pool_namespace(struct ceph_inode_info *ci,
165 char *val, size_t size)
166{
167 int ret = 0;
168 struct ceph_string *ns = ceph_try_get_string(ci->i_layout.pool_ns);
169 if (ns) {
170 ret = snprintf(val, size, "%.*s", (int)ns->len, ns->str);
171 ceph_put_string(ns);
172 }
173 return ret;
174}
175
176/* directories */
177
178static size_t ceph_vxattrcb_dir_entries(struct ceph_inode_info *ci, char *val,
179 size_t size)
180{
181 return snprintf(val, size, "%lld", ci->i_files + ci->i_subdirs);
182}
183
184static size_t ceph_vxattrcb_dir_files(struct ceph_inode_info *ci, char *val,
185 size_t size)
186{
187 return snprintf(val, size, "%lld", ci->i_files);
188}
189
190static size_t ceph_vxattrcb_dir_subdirs(struct ceph_inode_info *ci, char *val,
191 size_t size)
192{
193 return snprintf(val, size, "%lld", ci->i_subdirs);
194}
195
196static size_t ceph_vxattrcb_dir_rentries(struct ceph_inode_info *ci, char *val,
197 size_t size)
198{
199 return snprintf(val, size, "%lld", ci->i_rfiles + ci->i_rsubdirs);
200}
201
202static size_t ceph_vxattrcb_dir_rfiles(struct ceph_inode_info *ci, char *val,
203 size_t size)
204{
205 return snprintf(val, size, "%lld", ci->i_rfiles);
206}
207
208static size_t ceph_vxattrcb_dir_rsubdirs(struct ceph_inode_info *ci, char *val,
209 size_t size)
210{
211 return snprintf(val, size, "%lld", ci->i_rsubdirs);
212}
213
214static size_t ceph_vxattrcb_dir_rbytes(struct ceph_inode_info *ci, char *val,
215 size_t size)
216{
217 return snprintf(val, size, "%lld", ci->i_rbytes);
218}
219
220static size_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val,
221 size_t size)
222{
223 return snprintf(val, size, "%ld.09%ld", (long)ci->i_rctime.tv_sec,
224 (long)ci->i_rctime.tv_nsec);
225}
226
227/* quotas */
228
229static bool ceph_vxattrcb_quota_exists(struct ceph_inode_info *ci)
230{
231 bool ret = false;
232 spin_lock(&ci->i_ceph_lock);
233 if ((ci->i_max_files || ci->i_max_bytes) &&
234 ci->i_vino.snap == CEPH_NOSNAP &&
235 ci->i_snap_realm &&
236 ci->i_snap_realm->ino == ci->i_vino.ino)
237 ret = true;
238 spin_unlock(&ci->i_ceph_lock);
239 return ret;
240}
241
242static size_t ceph_vxattrcb_quota(struct ceph_inode_info *ci, char *val,
243 size_t size)
244{
245 return snprintf(val, size, "max_bytes=%llu max_files=%llu",
246 ci->i_max_bytes, ci->i_max_files);
247}
248
249static size_t ceph_vxattrcb_quota_max_bytes(struct ceph_inode_info *ci,
250 char *val, size_t size)
251{
252 return snprintf(val, size, "%llu", ci->i_max_bytes);
253}
254
255static size_t ceph_vxattrcb_quota_max_files(struct ceph_inode_info *ci,
256 char *val, size_t size)
257{
258 return snprintf(val, size, "%llu", ci->i_max_files);
259}
260
261#define CEPH_XATTR_NAME(_type, _name) XATTR_CEPH_PREFIX #_type "." #_name
262#define CEPH_XATTR_NAME2(_type, _name, _name2) \
263 XATTR_CEPH_PREFIX #_type "." #_name "." #_name2
264
265#define XATTR_NAME_CEPH(_type, _name) \
266 { \
267 .name = CEPH_XATTR_NAME(_type, _name), \
268 .name_size = sizeof (CEPH_XATTR_NAME(_type, _name)), \
269 .getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name, \
270 .readonly = true, \
271 .hidden = false, \
272 .exists_cb = NULL, \
273 }
274#define XATTR_LAYOUT_FIELD(_type, _name, _field) \
275 { \
276 .name = CEPH_XATTR_NAME2(_type, _name, _field), \
277 .name_size = sizeof (CEPH_XATTR_NAME2(_type, _name, _field)), \
278 .getxattr_cb = ceph_vxattrcb_ ## _name ## _ ## _field, \
279 .readonly = false, \
280 .hidden = true, \
281 .exists_cb = ceph_vxattrcb_layout_exists, \
282 }
283#define XATTR_QUOTA_FIELD(_type, _name) \
284 { \
285 .name = CEPH_XATTR_NAME(_type, _name), \
286 .name_size = sizeof(CEPH_XATTR_NAME(_type, _name)), \
287 .getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name, \
288 .readonly = false, \
289 .hidden = true, \
290 .exists_cb = ceph_vxattrcb_quota_exists, \
291 }
292
293static struct ceph_vxattr ceph_dir_vxattrs[] = {
294 {
295 .name = "ceph.dir.layout",
296 .name_size = sizeof("ceph.dir.layout"),
297 .getxattr_cb = ceph_vxattrcb_layout,
298 .readonly = false,
299 .hidden = true,
300 .exists_cb = ceph_vxattrcb_layout_exists,
301 },
302 XATTR_LAYOUT_FIELD(dir, layout, stripe_unit),
303 XATTR_LAYOUT_FIELD(dir, layout, stripe_count),
304 XATTR_LAYOUT_FIELD(dir, layout, object_size),
305 XATTR_LAYOUT_FIELD(dir, layout, pool),
306 XATTR_LAYOUT_FIELD(dir, layout, pool_namespace),
307 XATTR_NAME_CEPH(dir, entries),
308 XATTR_NAME_CEPH(dir, files),
309 XATTR_NAME_CEPH(dir, subdirs),
310 XATTR_NAME_CEPH(dir, rentries),
311 XATTR_NAME_CEPH(dir, rfiles),
312 XATTR_NAME_CEPH(dir, rsubdirs),
313 XATTR_NAME_CEPH(dir, rbytes),
314 XATTR_NAME_CEPH(dir, rctime),
315 {
316 .name = "ceph.quota",
317 .name_size = sizeof("ceph.quota"),
318 .getxattr_cb = ceph_vxattrcb_quota,
319 .readonly = false,
320 .hidden = true,
321 .exists_cb = ceph_vxattrcb_quota_exists,
322 },
323 XATTR_QUOTA_FIELD(quota, max_bytes),
324 XATTR_QUOTA_FIELD(quota, max_files),
325 { .name = NULL, 0 } /* Required table terminator */
326};
327static size_t ceph_dir_vxattrs_name_size; /* total size of all names */
328
329/* files */
330
331static struct ceph_vxattr ceph_file_vxattrs[] = {
332 {
333 .name = "ceph.file.layout",
334 .name_size = sizeof("ceph.file.layout"),
335 .getxattr_cb = ceph_vxattrcb_layout,
336 .readonly = false,
337 .hidden = true,
338 .exists_cb = ceph_vxattrcb_layout_exists,
339 },
340 XATTR_LAYOUT_FIELD(file, layout, stripe_unit),
341 XATTR_LAYOUT_FIELD(file, layout, stripe_count),
342 XATTR_LAYOUT_FIELD(file, layout, object_size),
343 XATTR_LAYOUT_FIELD(file, layout, pool),
344 XATTR_LAYOUT_FIELD(file, layout, pool_namespace),
345 { .name = NULL, 0 } /* Required table terminator */
346};
347static size_t ceph_file_vxattrs_name_size; /* total size of all names */
348
349static struct ceph_vxattr *ceph_inode_vxattrs(struct inode *inode)
350{
351 if (S_ISDIR(inode->i_mode))
352 return ceph_dir_vxattrs;
353 else if (S_ISREG(inode->i_mode))
354 return ceph_file_vxattrs;
355 return NULL;
356}
357
358static size_t ceph_vxattrs_name_size(struct ceph_vxattr *vxattrs)
359{
360 if (vxattrs == ceph_dir_vxattrs)
361 return ceph_dir_vxattrs_name_size;
362 if (vxattrs == ceph_file_vxattrs)
363 return ceph_file_vxattrs_name_size;
364 BUG_ON(vxattrs);
365 return 0;
366}
367
368/*
369 * Compute the aggregate size (including terminating '\0') of all
370 * virtual extended attribute names in the given vxattr table.
371 */
372static size_t __init vxattrs_name_size(struct ceph_vxattr *vxattrs)
373{
374 struct ceph_vxattr *vxattr;
375 size_t size = 0;
376
377 for (vxattr = vxattrs; vxattr->name; vxattr++)
378 if (!vxattr->hidden)
379 size += vxattr->name_size;
380
381 return size;
382}
383
384/* Routines called at initialization and exit time */
385
386void __init ceph_xattr_init(void)
387{
388 ceph_dir_vxattrs_name_size = vxattrs_name_size(ceph_dir_vxattrs);
389 ceph_file_vxattrs_name_size = vxattrs_name_size(ceph_file_vxattrs);
390}
391
392void ceph_xattr_exit(void)
393{
394 ceph_dir_vxattrs_name_size = 0;
395 ceph_file_vxattrs_name_size = 0;
396}
397
398static struct ceph_vxattr *ceph_match_vxattr(struct inode *inode,
399 const char *name)
400{
401 struct ceph_vxattr *vxattr = ceph_inode_vxattrs(inode);
402
403 if (vxattr) {
404 while (vxattr->name) {
405 if (!strcmp(vxattr->name, name))
406 return vxattr;
407 vxattr++;
408 }
409 }
410
411 return NULL;
412}
413
414static int __set_xattr(struct ceph_inode_info *ci,
415 const char *name, int name_len,
416 const char *val, int val_len,
417 int flags, int update_xattr,
418 struct ceph_inode_xattr **newxattr)
419{
420 struct rb_node **p;
421 struct rb_node *parent = NULL;
422 struct ceph_inode_xattr *xattr = NULL;
423 int c;
424 int new = 0;
425
426 p = &ci->i_xattrs.index.rb_node;
427 while (*p) {
428 parent = *p;
429 xattr = rb_entry(parent, struct ceph_inode_xattr, node);
430 c = strncmp(name, xattr->name, min(name_len, xattr->name_len));
431 if (c < 0)
432 p = &(*p)->rb_left;
433 else if (c > 0)
434 p = &(*p)->rb_right;
435 else {
436 if (name_len == xattr->name_len)
437 break;
438 else if (name_len < xattr->name_len)
439 p = &(*p)->rb_left;
440 else
441 p = &(*p)->rb_right;
442 }
443 xattr = NULL;
444 }
445
446 if (update_xattr) {
447 int err = 0;
448
449 if (xattr && (flags & XATTR_CREATE))
450 err = -EEXIST;
451 else if (!xattr && (flags & XATTR_REPLACE))
452 err = -ENODATA;
453 if (err) {
454 kfree(name);
455 kfree(val);
456 kfree(*newxattr);
457 return err;
458 }
459 if (update_xattr < 0) {
460 if (xattr)
461 __remove_xattr(ci, xattr);
462 kfree(name);
463 kfree(*newxattr);
464 return 0;
465 }
466 }
467
468 if (!xattr) {
469 new = 1;
470 xattr = *newxattr;
471 xattr->name = name;
472 xattr->name_len = name_len;
473 xattr->should_free_name = update_xattr;
474
475 ci->i_xattrs.count++;
476 dout("__set_xattr count=%d\n", ci->i_xattrs.count);
477 } else {
478 kfree(*newxattr);
479 *newxattr = NULL;
480 if (xattr->should_free_val)
481 kfree((void *)xattr->val);
482
483 if (update_xattr) {
484 kfree((void *)name);
485 name = xattr->name;
486 }
487 ci->i_xattrs.names_size -= xattr->name_len;
488 ci->i_xattrs.vals_size -= xattr->val_len;
489 }
490 ci->i_xattrs.names_size += name_len;
491 ci->i_xattrs.vals_size += val_len;
492 if (val)
493 xattr->val = val;
494 else
495 xattr->val = "";
496
497 xattr->val_len = val_len;
498 xattr->dirty = update_xattr;
499 xattr->should_free_val = (val && update_xattr);
500
501 if (new) {
502 rb_link_node(&xattr->node, parent, p);
503 rb_insert_color(&xattr->node, &ci->i_xattrs.index);
504 dout("__set_xattr_val p=%p\n", p);
505 }
506
507 dout("__set_xattr_val added %llx.%llx xattr %p %s=%.*s\n",
508 ceph_vinop(&ci->vfs_inode), xattr, name, val_len, val);
509
510 return 0;
511}
512
513static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
514 const char *name)
515{
516 struct rb_node **p;
517 struct rb_node *parent = NULL;
518 struct ceph_inode_xattr *xattr = NULL;
519 int name_len = strlen(name);
520 int c;
521
522 p = &ci->i_xattrs.index.rb_node;
523 while (*p) {
524 parent = *p;
525 xattr = rb_entry(parent, struct ceph_inode_xattr, node);
526 c = strncmp(name, xattr->name, xattr->name_len);
527 if (c == 0 && name_len > xattr->name_len)
528 c = 1;
529 if (c < 0)
530 p = &(*p)->rb_left;
531 else if (c > 0)
532 p = &(*p)->rb_right;
533 else {
534 dout("__get_xattr %s: found %.*s\n", name,
535 xattr->val_len, xattr->val);
536 return xattr;
537 }
538 }
539
540 dout("__get_xattr %s: not found\n", name);
541
542 return NULL;
543}
544
545static void __free_xattr(struct ceph_inode_xattr *xattr)
546{
547 BUG_ON(!xattr);
548
549 if (xattr->should_free_name)
550 kfree((void *)xattr->name);
551 if (xattr->should_free_val)
552 kfree((void *)xattr->val);
553
554 kfree(xattr);
555}
556
557static int __remove_xattr(struct ceph_inode_info *ci,
558 struct ceph_inode_xattr *xattr)
559{
560 if (!xattr)
561 return -ENODATA;
562
563 rb_erase(&xattr->node, &ci->i_xattrs.index);
564
565 if (xattr->should_free_name)
566 kfree((void *)xattr->name);
567 if (xattr->should_free_val)
568 kfree((void *)xattr->val);
569
570 ci->i_xattrs.names_size -= xattr->name_len;
571 ci->i_xattrs.vals_size -= xattr->val_len;
572 ci->i_xattrs.count--;
573 kfree(xattr);
574
575 return 0;
576}
577
578static char *__copy_xattr_names(struct ceph_inode_info *ci,
579 char *dest)
580{
581 struct rb_node *p;
582 struct ceph_inode_xattr *xattr = NULL;
583
584 p = rb_first(&ci->i_xattrs.index);
585 dout("__copy_xattr_names count=%d\n", ci->i_xattrs.count);
586
587 while (p) {
588 xattr = rb_entry(p, struct ceph_inode_xattr, node);
589 memcpy(dest, xattr->name, xattr->name_len);
590 dest[xattr->name_len] = '\0';
591
592 dout("dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
593 xattr->name_len, ci->i_xattrs.names_size);
594
595 dest += xattr->name_len + 1;
596 p = rb_next(p);
597 }
598
599 return dest;
600}
601
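/* Free every node in the xattr index and reset the cached accounting. */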
602void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
603{
604 struct rb_node *p, *tmp;
605 struct ceph_inode_xattr *xattr = NULL;
606
607 p = rb_first(&ci->i_xattrs.index);
608
609 dout("__ceph_destroy_xattrs p=%p\n", p);
610
611 while (p) {
612 xattr = rb_entry(p, struct ceph_inode_xattr, node);
613 tmp = p;
614 p = rb_next(tmp);
615 dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p,
616 xattr->name_len, xattr->name);
617 rb_erase(tmp, &ci->i_xattrs.index);
618
619 __free_xattr(xattr);
620 }
621
622 ci->i_xattrs.names_size = 0;
623 ci->i_xattrs.vals_size = 0;
624 ci->i_xattrs.index_version = 0;
625 ci->i_xattrs.count = 0;
626 ci->i_xattrs.index = RB_ROOT;
627}
628
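/*
 * Decode the raw xattr blob received from the MDS into the rb-tree
 * index.  i_ceph_lock is dropped around the allocations (hence the
 * sparse annotations below); if the blob version changes meanwhile,
 * the freshly allocated nodes are discarded and decoding restarts.
 */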
629static int __build_xattrs(struct inode *inode)
630 __releases(ci->i_ceph_lock)
631 __acquires(ci->i_ceph_lock)
632{
633 u32 namelen;
634 u32 numattr = 0;
635 void *p, *end;
636 u32 len;
637 const char *name, *val;
638 struct ceph_inode_info *ci = ceph_inode(inode);
639 int xattr_version;
640 struct ceph_inode_xattr **xattrs = NULL;
641 int err = 0;
642 int i;
643
644 dout("__build_xattrs() len=%d\n",
645 ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);
646
647 if (ci->i_xattrs.index_version >= ci->i_xattrs.version)
648 return 0; /* already built */
649
650 __ceph_destroy_xattrs(ci);
651
652start:
653 /* update the internal xattr rb tree */
654 if (ci->i_xattrs.blob && ci->i_xattrs.blob->vec.iov_len > 4) {
655 p = ci->i_xattrs.blob->vec.iov_base;
656 end = p + ci->i_xattrs.blob->vec.iov_len;
657 ceph_decode_32_safe(&p, end, numattr, bad);
658 xattr_version = ci->i_xattrs.version;
659 spin_unlock(&ci->i_ceph_lock);
660
661 xattrs = kcalloc(numattr, sizeof(struct ceph_inode_xattr *),
662 GFP_NOFS);
663 err = -ENOMEM;
664 if (!xattrs)
665 goto bad_lock;
666
667 for (i = 0; i < numattr; i++) {
668 xattrs[i] = kmalloc(sizeof(struct ceph_inode_xattr),
669 GFP_NOFS);
670 if (!xattrs[i])
671 goto bad_lock;
672 }
673
674 spin_lock(&ci->i_ceph_lock);
675 if (ci->i_xattrs.version != xattr_version) {
676 /* lost a race, retry */
677 for (i = 0; i < numattr; i++)
678 kfree(xattrs[i]);
679 kfree(xattrs);
680 xattrs = NULL;
681 goto start;
682 }
683 err = -EIO;
684 while (numattr--) {
685 ceph_decode_32_safe(&p, end, len, bad);
686 namelen = len;
687 name = p;
688 p += len;
689 ceph_decode_32_safe(&p, end, len, bad);
690 val = p;
691 p += len;
692
693 err = __set_xattr(ci, name, namelen, val, len,
694 0, 0, &xattrs[numattr]);
695
696 if (err < 0)
697 goto bad;
698 }
699 kfree(xattrs);
700 }
701 ci->i_xattrs.index_version = ci->i_xattrs.version;
702 ci->i_xattrs.dirty = false;
703
704 return err;
705bad_lock:
706 spin_lock(&ci->i_ceph_lock);
707bad:
708 if (xattrs) {
709 for (i = 0; i < numattr; i++)
710 kfree(xattrs[i]);
711 kfree(xattrs);
712 }
713 ci->i_xattrs.names_size = 0;
714 return err;
715}
716
717static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
718 int val_size)
719{
720 /*
721 * 4 bytes for the xattr count, plus a 4-byte name length and a 4-byte
722 * value length per xattr (name/value bytes are counted in names_size/vals_size)
723 */
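/*
 * Illustrative example (not from the code): a single attr
 * "user.foo" = "bar" needs 4 + (4 + 4) + 8 + 3 = 23 bytes.
 */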
724 int size = 4 + ci->i_xattrs.count*(4 + 4) +
725 ci->i_xattrs.names_size +
726 ci->i_xattrs.vals_size;
727 dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n",
728 ci->i_xattrs.count, ci->i_xattrs.names_size,
729 ci->i_xattrs.vals_size);
730
731 if (name_size)
732 size += 4 + 4 + name_size + val_size;
733
734 return size;
735}
736
737/*
738 * If there are dirty xattrs, reencode xattrs into the prealloc_blob
739 * and swap into place.
740 */
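/*
 * Encoded blob layout (the same format __build_xattrs() decodes):
 *
 *   __le32 count
 *   then, count times:
 *     __le32 name_len, name bytes (not NUL-terminated)
 *     __le32 val_len,  value bytes
 */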
741void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
742{
743 struct rb_node *p;
744 struct ceph_inode_xattr *xattr = NULL;
745 void *dest;
746
747 dout("__build_xattrs_blob %p\n", &ci->netfs.inode);
748 if (ci->i_xattrs.dirty) {
749 int need = __get_required_blob_size(ci, 0, 0);
750
751 BUG_ON(need > ci->i_xattrs.prealloc_blob->alloc_len);
752
753 p = rb_first(&ci->i_xattrs.index);
754 dest = ci->i_xattrs.prealloc_blob->vec.iov_base;
755
756 ceph_encode_32(&dest, ci->i_xattrs.count);
757 while (p) {
758 xattr = rb_entry(p, struct ceph_inode_xattr, node);
759
760 ceph_encode_32(&dest, xattr->name_len);
761 memcpy(dest, xattr->name, xattr->name_len);
762 dest += xattr->name_len;
763 ceph_encode_32(&dest, xattr->val_len);
764 memcpy(dest, xattr->val, xattr->val_len);
765 dest += xattr->val_len;
766
767 p = rb_next(p);
768 }
769
770 /* adjust buffer len; it may be larger than we need */
771 ci->i_xattrs.prealloc_blob->vec.iov_len =
772 dest - ci->i_xattrs.prealloc_blob->vec.iov_base;
773
774 if (ci->i_xattrs.blob)
775 ceph_buffer_put(ci->i_xattrs.blob);
776 ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
777 ci->i_xattrs.prealloc_blob = NULL;
778 ci->i_xattrs.dirty = false;
779 ci->i_xattrs.version++;
780 }
781}
782
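/*
 * If we are being called while the reply to one of our own MDS requests
 * is being processed (current->journal_info points at it), return the
 * attribute mask that request asked for, so the caller can trust xattr
 * data that arrived with the reply trace instead of issuing another
 * getattr.
 */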
783static inline int __get_request_mask(struct inode *in) {
784 struct ceph_mds_request *req = current->journal_info;
785 int mask = 0;
786 if (req && req->r_target_inode == in) {
787 if (req->r_op == CEPH_MDS_OP_LOOKUP ||
788 req->r_op == CEPH_MDS_OP_LOOKUPINO ||
789 req->r_op == CEPH_MDS_OP_LOOKUPPARENT ||
790 req->r_op == CEPH_MDS_OP_GETATTR) {
791 mask = le32_to_cpu(req->r_args.getattr.mask);
792 } else if (req->r_op == CEPH_MDS_OP_OPEN ||
793 req->r_op == CEPH_MDS_OP_CREATE) {
794 mask = le32_to_cpu(req->r_args.open.mask);
795 }
796 }
797 return mask;
798}
799
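/*
 * Illustrative userspace usage of the size-probe contract handled below:
 *   len = getxattr(path, "user.foo", NULL, 0);   probe: returns val_len
 *   getxattr(path, "user.foo", buf, len);        second call copies the value
 * size == 0 returns the length only; a too-small buffer returns -ERANGE.
 */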
800ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value,
801 size_t size)
802{
803 struct ceph_inode_info *ci = ceph_inode(inode);
804 struct ceph_inode_xattr *xattr;
805 struct ceph_vxattr *vxattr = NULL;
806 int req_mask;
807 int err;
808
809 /* let's see if a virtual xattr was requested */
810 vxattr = ceph_match_vxattr(inode, name);
811 if (vxattr) {
812 err = ceph_do_getattr(inode, 0, true);
813 if (err)
814 return err;
815 err = -ENODATA;
816 if (!(vxattr->exists_cb && !vxattr->exists_cb(ci)))
817 err = vxattr->getxattr_cb(ci, value, size);
818 return err;
819 }
820
821 req_mask = __get_request_mask(inode);
822
823 spin_lock(&ci->i_ceph_lock);
824 dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
825 ci->i_xattrs.version, ci->i_xattrs.index_version);
826
827 if (ci->i_xattrs.version == 0 ||
828 !((req_mask & CEPH_CAP_XATTR_SHARED) ||
829 __ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1))) {
830 spin_unlock(&ci->i_ceph_lock);
831
832 /* security module gets xattr while filling trace */
833 if (current->journal_info) {
834 pr_warn_ratelimited("sync getxattr %p "
835 "during filling trace\n", inode);
836 return -EBUSY;
837 }
838
839 /* get xattrs from mds (if we don't already have them) */
840 err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR, true);
841 if (err)
842 return err;
843 spin_lock(&ci->i_ceph_lock);
844 }
845
846 err = __build_xattrs(inode);
847 if (err < 0)
848 goto out;
849
850 err = -ENODATA; /* == ENOATTR */
851 xattr = __get_xattr(ci, name);
852 if (!xattr)
853 goto out;
854
855 err = -ERANGE;
856 if (size && size < xattr->val_len)
857 goto out;
858
859 err = xattr->val_len;
860 if (size == 0)
861 goto out;
862
863 memcpy(value, xattr->val, xattr->val_len);
864
865 if (current->journal_info &&
866 !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN))
867 ci->i_ceph_flags |= CEPH_I_SEC_INITED;
868out:
869 spin_unlock(&ci->i_ceph_lock);
870 return err;
871}
872
873ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
874{
875 struct inode *inode = d_inode(dentry);
876 struct ceph_inode_info *ci = ceph_inode(inode);
877 struct ceph_vxattr *vxattrs = ceph_inode_vxattrs(inode);
878 u32 vir_namelen = 0;
879 u32 namelen;
880 int err;
881 u32 len;
882 int i;
883
884 spin_lock(&ci->i_ceph_lock);
885 dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
886 ci->i_xattrs.version, ci->i_xattrs.index_version);
887
888 if (ci->i_xattrs.version == 0 ||
889 !__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1)) {
890 spin_unlock(&ci->i_ceph_lock);
891 err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR, true);
892 if (err)
893 return err;
894 spin_lock(&ci->i_ceph_lock);
895 }
896
897 err = __build_xattrs(inode);
898 if (err < 0)
899 goto out;
900 /*
901 * Start with the virtual dir xattr names, if any, including the
902 * terminating '\0' character for each.
903 */
904 vir_namelen = ceph_vxattrs_name_size(vxattrs);
905
906 /* add 1 byte per name for its trailing '\0' */
907 namelen = ci->i_xattrs.names_size + ci->i_xattrs.count;
908 err = -ERANGE;
909 if (size && vir_namelen + namelen > size)
910 goto out;
911
912 err = namelen + vir_namelen;
913 if (size == 0)
914 goto out;
915
916 names = __copy_xattr_names(ci, names);
917
918 /* virtual xattr names, too */
919 err = namelen;
920 if (vxattrs) {
921 for (i = 0; vxattrs[i].name; i++) {
922 if (!(vxattrs[i].flags & VXATTR_FLAG_HIDDEN) &&
923 !(vxattrs[i].exists_cb &&
924 !vxattrs[i].exists_cb(ci))) {
925 len = sprintf(names, "%s", vxattrs[i].name);
926 names += len + 1;
927 err += len + 1;
928 }
929 }
930 }
931
932out:
933 spin_unlock(&ci->i_ceph_lock);
934 return err;
935}
936
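/*
 * Send a single setxattr straight to the MDS.  A non-empty value is
 * carried in a pagelist attached to the request; a NULL value either
 * becomes CEPH_MDS_OP_RMXATTR (when CEPH_XATTR_REPLACE was requested)
 * or sets CEPH_XATTR_REMOVE so the MDS drops the attr if it exists.
 */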
937static int ceph_sync_setxattr(struct inode *inode, const char *name,
938 const char *value, size_t size, int flags)
939{
940 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
941 struct ceph_inode_info *ci = ceph_inode(inode);
942 struct ceph_mds_request *req;
943 struct ceph_mds_client *mdsc = fsc->mdsc;
944 struct ceph_pagelist *pagelist = NULL;
945 int op = CEPH_MDS_OP_SETXATTR;
946 int err;
947
948 if (size > 0) {
949 /* copy value into pagelist */
950 pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
951 if (!pagelist)
952 return -ENOMEM;
953
954 ceph_pagelist_init(pagelist);
955 err = ceph_pagelist_append(pagelist, value, size);
956 if (err)
957 goto out;
958 } else if (!value) {
959 if (flags & CEPH_XATTR_REPLACE)
960 op = CEPH_MDS_OP_RMXATTR;
961 else
962 flags |= CEPH_XATTR_REMOVE;
963 }
964
965 dout("setxattr value=%.*s\n", (int)size, value);
966
967 /* do request */
968 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
969 if (IS_ERR(req)) {
970 err = PTR_ERR(req);
971 goto out;
972 }
973
974 req->r_path2 = kstrdup(name, GFP_NOFS);
975 if (!req->r_path2) {
976 ceph_mdsc_put_request(req);
977 err = -ENOMEM;
978 goto out;
979 }
980
981 if (op == CEPH_MDS_OP_SETXATTR) {
982 req->r_args.setxattr.flags = cpu_to_le32(flags);
983 req->r_pagelist = pagelist;
984 pagelist = NULL;
985 }
986
987 req->r_inode = inode;
988 ihold(inode);
989 req->r_num_caps = 1;
990 req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
991
992 dout("xattr.ver (before): %lld\n", ci->i_xattrs.version);
993 err = ceph_mdsc_do_request(mdsc, NULL, req);
994 ceph_mdsc_put_request(req);
995 dout("xattr.ver (after): %lld\n", ci->i_xattrs.version);
996
997out:
998 if (pagelist)
999 ceph_pagelist_release(pagelist);
1000 return err;
1001}
1002
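/*
 * Fast path: if we hold CEPH_CAP_XATTR_EXCL, update the in-memory xattr
 * index and just mark the cap dirty.  Otherwise (no cap, ceph.* xattrs,
 * or blob preallocation failure) fall back to a synchronous MDS request
 * via ceph_sync_setxattr().
 */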
1003int __ceph_setxattr(struct inode *inode, const char *name,
1004 const void *value, size_t size, int flags)
1005{
1006 struct ceph_vxattr *vxattr;
1007 struct ceph_inode_info *ci = ceph_inode(inode);
1008 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
1009 struct ceph_cap_flush *prealloc_cf = NULL;
1010 int issued;
1011 int err;
1012 int dirty = 0;
1013 int name_len = strlen(name);
1014 int val_len = size;
1015 char *newname = NULL;
1016 char *newval = NULL;
1017 struct ceph_inode_xattr *xattr = NULL;
1018 int required_blob_size;
1019 bool check_realm = false;
1020 bool lock_snap_rwsem = false;
1021
1022 if (ceph_snap(inode) != CEPH_NOSNAP)
1023 return -EROFS;
1024
1025 vxattr = ceph_match_vxattr(inode, name);
1026 if (vxattr) {
1027 if (vxattr->flags & VXATTR_FLAG_READONLY)
1028 return -EOPNOTSUPP;
1029 if (value && !strncmp(vxattr->name, "ceph.quota", 10))
1030 check_realm = true;
1031 }
1032
1033 /* pass any unhandled ceph.* xattrs through to the MDS */
1034 if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN))
1035 goto do_sync_unlocked;
1036
1037 /* preallocate memory for xattr name, value, index node */
1038 err = -ENOMEM;
1039 newname = kmemdup(name, name_len + 1, GFP_NOFS);
1040 if (!newname)
1041 goto out;
1042
1043 if (val_len) {
1044 newval = kmemdup(value, val_len, GFP_NOFS);
1045 if (!newval)
1046 goto out;
1047 }
1048
1049 xattr = kmalloc(sizeof(struct ceph_inode_xattr), GFP_NOFS);
1050 if (!xattr)
1051 goto out;
1052
1053 prealloc_cf = ceph_alloc_cap_flush();
1054 if (!prealloc_cf)
1055 goto out;
1056
1057 spin_lock(&ci->i_ceph_lock);
1058retry:
1059 issued = __ceph_caps_issued(ci, NULL);
1060 if (ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL))
1061 goto do_sync;
1062
1063 if (!lock_snap_rwsem && !ci->i_head_snapc) {
1064 lock_snap_rwsem = true;
1065 if (!down_read_trylock(&mdsc->snap_rwsem)) {
1066 spin_unlock(&ci->i_ceph_lock);
1067 down_read(&mdsc->snap_rwsem);
1068 spin_lock(&ci->i_ceph_lock);
1069 goto retry;
1070 }
1071 }
1072
1073 dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued));
1074 __build_xattrs(inode);
1075
1076 required_blob_size = __get_required_blob_size(ci, name_len, val_len);
1077
1078 if (!ci->i_xattrs.prealloc_blob ||
1079 required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
1080 struct ceph_buffer *blob;
1081
1082 spin_unlock(&ci->i_ceph_lock);
1083 dout(" preaallocating new blob size=%d\n", required_blob_size);
1084 blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
1085 if (!blob)
1086 goto do_sync_unlocked;
1087 spin_lock(&ci->i_ceph_lock);
1088 if (ci->i_xattrs.prealloc_blob)
1089 ceph_buffer_put(ci->i_xattrs.prealloc_blob);
1090 ci->i_xattrs.prealloc_blob = blob;
1091 goto retry;
1092 }
1093
1094 err = __set_xattr(ci, newname, name_len, newval, val_len,
1095 flags, value ? 1 : -1, &xattr);
1096
1097 if (!err) {
1098 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL,
1099 &prealloc_cf);
1100 ci->i_xattrs.dirty = true;
1101 inode->i_ctime = current_time(inode);
1102 }
1103
1104 spin_unlock(&ci->i_ceph_lock);
1105 if (lock_snap_rwsem)
1106 up_read(&mdsc->snap_rwsem);
1107 if (dirty)
1108 __mark_inode_dirty(inode, dirty);
1109 ceph_free_cap_flush(prealloc_cf);
1110 return err;
1111
1112do_sync:
1113 spin_unlock(&ci->i_ceph_lock);
1114do_sync_unlocked:
1115 if (lock_snap_rwsem)
1116 up_read(&mdsc->snap_rwsem);
1117
1118 /* security module sets xattr while filling trace */
1119 if (current->journal_info) {
1120 pr_warn_ratelimited("sync setxattr %p "
1121 "during filling trace\n", inode);
1122 err = -EBUSY;
1123 } else {
1124 err = ceph_sync_setxattr(inode, name, value, size, flags);
1125 if (err >= 0 && check_realm) {
1126 /* check if snaprealm was created for quota inode */
1127 spin_lock(&ci->i_ceph_lock);
1128 if ((ci->i_max_files || ci->i_max_bytes) &&
1129 !(ci->i_snap_realm &&
1130 ci->i_snap_realm->ino == ci->i_vino.ino))
1131 err = -EOPNOTSUPP;
1132 spin_unlock(&ci->i_ceph_lock);
1133 }
1134 }
1135out:
1136 ceph_free_cap_flush(prealloc_cf);
1137 kfree(newname);
1138 kfree(newval);
1139 kfree(xattr);
1140 return err;
1141}
1142
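/*
 * Generic handlers for the catch-all "" prefix below; only namespaces
 * accepted by ceph_is_valid_xattr() are passed through.
 */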
1143static int ceph_get_xattr_handler(const struct xattr_handler *handler,
1144 struct dentry *dentry, struct inode *inode,
1145 const char *name, void *value, size_t size)
1146{
1147 if (!ceph_is_valid_xattr(name))
1148 return -EOPNOTSUPP;
1149 return __ceph_getxattr(inode, name, value, size);
1150}
1151
1152static int ceph_set_xattr_handler(const struct xattr_handler *handler,
1153 struct dentry *unused, struct inode *inode,
1154 const char *name, const void *value,
1155 size_t size, int flags)
1156{
1157 if (!ceph_is_valid_xattr(name))
1158 return -EOPNOTSUPP;
1159 return __ceph_setxattr(inode, name, value, size, flags);
1160}
1161
1162static const struct xattr_handler ceph_other_xattr_handler = {
1163 .prefix = "", /* match any name => handlers called with full name */
1164 .get = ceph_get_xattr_handler,
1165 .set = ceph_set_xattr_handler,
1166};
1167
1168#ifdef CONFIG_SECURITY
1169bool ceph_security_xattr_wanted(struct inode *in)
1170{
1171 return in->i_security != NULL;
1172}
1173
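/*
 * Return true if fetching a security xattr for this inode right now could
 * deadlock: security is enabled, the security xattr has not been cached
 * yet (CEPH_I_SEC_INITED unset), and we do not hold cached xattrs covered
 * by CEPH_CAP_XATTR_SHARED, so getxattr would have to go back to the MDS.
 */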
1174bool ceph_security_xattr_deadlock(struct inode *in)
1175{
1176 struct ceph_inode_info *ci;
1177 bool ret;
1178 if (!in->i_security)
1179 return false;
1180 ci = ceph_inode(in);
1181 spin_lock(&ci->i_ceph_lock);
1182 ret = !(ci->i_ceph_flags & CEPH_I_SEC_INITED) &&
1183 !(ci->i_xattrs.version > 0 &&
1184 __ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 0));
1185 spin_unlock(&ci->i_ceph_lock);
1186 return ret;
1187}
1188#endif