// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2008, Christoph Hellwig
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_attr.h"
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_acl.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_trans.h"

#include <linux/posix_acl_xattr.h>

/*
 * Locking scheme:
 *  - all ACL updates are protected by inode->i_mutex, which is taken before
 *    calling into this file.
 */

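/*
 * Convert an ACL from the on-disk, big-endian XFS format into the generic
 * in-core posix_acl representation, validating the attribute length and
 * entry count before touching the payload.
 */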
STATIC struct posix_acl *
xfs_acl_from_disk(
	struct xfs_mount	*mp,
	const struct xfs_acl	*aclp,
	int			len,
	int			max_entries)
{
	struct posix_acl_entry	*acl_e;
	struct posix_acl	*acl;
	const struct xfs_acl_entry *ace;
	unsigned int		count, i;

	if (len < sizeof(*aclp)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, aclp,
				len);
		return ERR_PTR(-EFSCORRUPTED);
	}

	count = be32_to_cpu(aclp->acl_cnt);
	if (count > max_entries || XFS_ACL_SIZE(count) != len) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, aclp,
				len);
		return ERR_PTR(-EFSCORRUPTED);
	}

	acl = posix_acl_alloc(count, GFP_KERNEL);
	if (!acl)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < count; i++) {
		acl_e = &acl->a_entries[i];
		ace = &aclp->acl_entry[i];

		/*
		 * The tag is 32 bits on disk and 16 bits in core.
		 *
		 * Because every access to it goes through the core
		 * format first this is not a problem.
		 */
		acl_e->e_tag = be32_to_cpu(ace->ae_tag);
		acl_e->e_perm = be16_to_cpu(ace->ae_perm);

		switch (acl_e->e_tag) {
		case ACL_USER:
			acl_e->e_uid = make_kuid(&init_user_ns,
						 be32_to_cpu(ace->ae_id));
			break;
		case ACL_GROUP:
			acl_e->e_gid = make_kgid(&init_user_ns,
						 be32_to_cpu(ace->ae_id));
			break;
		case ACL_USER_OBJ:
		case ACL_GROUP_OBJ:
		case ACL_MASK:
		case ACL_OTHER:
			break;
		default:
			goto fail;
		}
	}
	return acl;

fail:
	posix_acl_release(acl);
	return ERR_PTR(-EINVAL);
}

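/*
 * Convert an in-core posix_acl into the big-endian on-disk XFS format.
 * The caller must have sized @aclp to hold acl->a_count entries.
 */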
STATIC void
xfs_acl_to_disk(struct xfs_acl *aclp, const struct posix_acl *acl)
{
	const struct posix_acl_entry *acl_e;
	struct xfs_acl_entry *ace;
	int i;

	aclp->acl_cnt = cpu_to_be32(acl->a_count);
	for (i = 0; i < acl->a_count; i++) {
		ace = &aclp->acl_entry[i];
		acl_e = &acl->a_entries[i];

		ace->ae_tag = cpu_to_be32(acl_e->e_tag);
		switch (acl_e->e_tag) {
		case ACL_USER:
			ace->ae_id = cpu_to_be32(
					from_kuid(&init_user_ns, acl_e->e_uid));
			break;
		case ACL_GROUP:
			ace->ae_id = cpu_to_be32(
					from_kgid(&init_user_ns, acl_e->e_gid));
			break;
		default:
			ace->ae_id = cpu_to_be32(ACL_UNDEFINED_ID);
			break;
		}

		ace->ae_perm = cpu_to_be16(acl_e->e_perm);
	}
}

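/*
 * Read the SGI_ACL_FILE or SGI_ACL_DEFAULT attribute from the root attr
 * namespace and convert it to the in-core representation.  Returns NULL if
 * no ACL of the requested type exists, so the VFS can cache the negative
 * result, or an ERR_PTR() for any other failure.
 */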
struct posix_acl *
xfs_get_acl(struct inode *inode, int type)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct posix_acl	*acl = NULL;
	struct xfs_da_args	args = {
		.dp		= ip,
		.attr_filter	= XFS_ATTR_ROOT,
		.valuelen	= XFS_ACL_MAX_SIZE(mp),
	};
	int			error;

	trace_xfs_get_acl(ip);

	switch (type) {
	case ACL_TYPE_ACCESS:
		args.name = SGI_ACL_FILE;
		break;
	case ACL_TYPE_DEFAULT:
		args.name = SGI_ACL_DEFAULT;
		break;
	default:
		BUG();
	}
	args.namelen = strlen(args.name);

	/*
	 * If the attribute doesn't exist make sure we have a negative cache
	 * entry, for any other error assume it is transient.
	 */
	error = xfs_attr_get(&args);
	if (!error) {
		acl = xfs_acl_from_disk(mp, args.value, args.valuelen,
					XFS_ACL_MAX_ENTRIES(mp));
	} else if (error != -ENOATTR) {
		acl = ERR_PTR(error);
	}

	kmem_free(args.value);
	return acl;
}

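/*
 * Write the given ACL to the matching root-namespace attribute, or remove
 * the attribute when @acl is NULL, and update the VFS ACL cache on success.
 */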
int
__xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_da_args	args = {
		.dp		= ip,
		.attr_filter	= XFS_ATTR_ROOT,
	};
	int			error;

	switch (type) {
	case ACL_TYPE_ACCESS:
		args.name = SGI_ACL_FILE;
		break;
	case ACL_TYPE_DEFAULT:
		if (!S_ISDIR(inode->i_mode))
			return acl ? -EACCES : 0;
		args.name = SGI_ACL_DEFAULT;
		break;
	default:
		return -EINVAL;
	}
	args.namelen = strlen(args.name);

	if (acl) {
		args.valuelen = XFS_ACL_SIZE(acl->a_count);
		args.value = kvzalloc(args.valuelen, GFP_KERNEL);
		if (!args.value)
			return -ENOMEM;
		xfs_acl_to_disk(args.value, acl);
	}

	error = xfs_attr_set(&args);
	kmem_free(args.value);

	/*
	 * If the attribute didn't exist to start with that's fine.
	 */
	if (!acl && error == -ENOATTR)
		error = 0;
	if (!error)
		set_cached_acl(inode, type, acl);
	return error;
}

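/*
 * Propagate a mode change resulting from an ACL update into the inode core.
 * This runs in its own transaction because the attribute update done by
 * __xfs_set_acl() has already been committed separately by this point.
 */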
static int
xfs_acl_set_mode(
	struct inode		*inode,
	umode_t			mode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	inode->i_mode = mode;
	inode->i_ctime = current_time(inode);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}

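/*
 * Top-level ->set_acl handler: reject ACLs with more entries than fit in
 * the on-disk attribute, let posix_acl_update_mode() recompute the file
 * mode for access ACLs, then write the attribute before applying the mode.
 */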
int
xfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
	    struct posix_acl *acl, int type)
{
	umode_t mode;
	bool set_mode = false;
	int error = 0;

	if (!acl)
		goto set_acl;

	error = -E2BIG;
	if (acl->a_count > XFS_ACL_MAX_ENTRIES(XFS_M(inode->i_sb)))
		return error;

	if (type == ACL_TYPE_ACCESS) {
		error = posix_acl_update_mode(mnt_userns, inode, &mode, &acl);
		if (error)
			return error;
		set_mode = true;
	}

 set_acl:
	/*
	 * We set the mode after successfully updating the ACL xattr because the
	 * xattr update can fail at ENOSPC and we don't want to change the mode
	 * if the ACL update hasn't been applied.
	 */
	error = __xfs_set_acl(inode, acl, type);
	if (!error && set_mode && mode != inode->i_mode)
		error = xfs_acl_set_mode(inode, mode);

	return error;
}

/*
 * Invalidate any cached ACLs if the user has bypassed the ACL interface.
 * We don't validate the content whatsoever so it is caller responsibility to
 * provide data in valid format and ensure i_mode is consistent.
 */
void
xfs_forget_acl(
	struct inode		*inode,
	const char		*name)
{
	if (!strcmp(name, SGI_ACL_FILE))
		forget_cached_acl(inode, ACL_TYPE_ACCESS);
	else if (!strcmp(name, SGI_ACL_DEFAULT))
		forget_cached_acl(inode, ACL_TYPE_DEFAULT);
}