// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2006-2007 Silicon Graphics, Inc.
 * Copyright (c) 2014 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"
#include "xfs_mru_cache.h"
#include "xfs_trace.h"
#include "xfs_ag_resv.h"
#include "xfs_trans.h"
#include "xfs_filestream.h"

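/*
 * A filestream association: each MRU cache entry maps a parent directory
 * (the cache key is its inode number) to the AG that its files are currently
 * being allocated out of.
 */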
struct xfs_fstrm_item {
	struct xfs_mru_cache_elem	mru;
	xfs_agnumber_t			ag; /* AG in use for this directory */
};

enum xfs_fstrm_alloc {
	XFS_PICK_USERDATA = 1,
	XFS_PICK_LOWSPACE = 2,
};

/*
 * Allocation group filestream associations are tracked with per-ag atomic
 * counters. These counters allow xfs_filestream_pick_ag() to tell whether a
 * particular AG already has active filestreams associated with it. The mount
 * point's m_peraglock is used to protect these counters from per-ag array
 * re-allocation during a growfs operation. When xfs_growfs_data_private() is
 * about to reallocate the array, it calls xfs_filestream_flush() with the
 * m_peraglock held in write mode.
 *
 * Since xfs_mru_cache_flush() guarantees that all the free functions for all
 * the cache elements have finished executing before it returns, it's safe for
 * the free functions to use the atomic counters without m_peraglock protection.
 * This allows the implementation of xfs_fstrm_free_func() to be agnostic about
 * whether it was called with the m_peraglock held in read mode, write mode or
 * not held at all. The race condition this addresses is the following:
 *
 *  - The work queue scheduler fires and pulls a filestream directory cache
 *    element off the LRU end of the cache for deletion, then gets pre-empted.
 *  - A growfs operation grabs the m_peraglock in write mode, flushes all the
 *    remaining items from the cache and reallocates the mount point's per-ag
 *    array, resetting all the counters to zero.
 *  - The work queue thread resumes and calls the free function for the element
 *    it started cleaning up earlier. In the process it decrements the
 *    filestreams counter for an AG that now has no references.
 *
 * With a shrinkfs feature, the above scenario could panic the system.
 *
 * All other uses of the following functions should be protected by either the
 * m_peraglock held in read mode, or the cache's internal locking exposed by the
 * interval between a call to xfs_mru_cache_lookup() and a call to
 * xfs_mru_cache_done(). In addition, the m_peraglock must be held in read mode
 * when new elements are added to the cache.
 *
 * Combined, these locking rules ensure that no associations will ever exist in
 * the cache that reference per-ag array elements that have since been
 * reallocated.
 */
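/*
 * Read the current filestream reference count for an AG without taking a new
 * reference.
 */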
int
xfs_filestream_peek_ag(
	xfs_mount_t		*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_perag	*pag;
	int			ret;

	pag = xfs_perag_get(mp, agno);
	ret = atomic_read(&pag->pagf_fstrms);
	xfs_perag_put(pag);
	return ret;
}

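/*
 * Take a filestream reference on an AG and return the updated count,
 * including the reference just taken.
 */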
static int
xfs_filestream_get_ag(
	xfs_mount_t		*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_perag	*pag;
	int			ret;

	pag = xfs_perag_get(mp, agno);
	ret = atomic_inc_return(&pag->pagf_fstrms);
	xfs_perag_put(pag);
	return ret;
}

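/*
 * Drop a filestream reference on an AG.
 */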
static void
xfs_filestream_put_ag(
	xfs_mount_t		*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_perag	*pag;

	pag = xfs_perag_get(mp, agno);
	atomic_dec(&pag->pagf_fstrms);
	xfs_perag_put(pag);
}

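/*
 * MRU cache free callback: called when an association expires or is removed
 * from the cache. Drop the AG reference the association held and free the
 * item itself.
 */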
static void
xfs_fstrm_free_func(
	void			*data,
	struct xfs_mru_cache_elem *mru)
{
	struct xfs_mount	*mp = data;
	struct xfs_fstrm_item	*item =
		container_of(mru, struct xfs_fstrm_item, mru);

	xfs_filestream_put_ag(mp, item->ag);
	trace_xfs_filestream_free(mp, mru->key, item->ag);

	kmem_free(item);
}

/*
 * Scan the AGs starting at startag looking for an AG that isn't in use and has
 * at least minlen blocks free.
 */
static int
xfs_filestream_pick_ag(
	struct xfs_inode	*ip,
	xfs_agnumber_t		startag,
	xfs_agnumber_t		*agp,
	int			flags,
	xfs_extlen_t		minlen)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_fstrm_item	*item;
	struct xfs_perag	*pag;
	xfs_extlen_t		longest, free = 0, minfree, maxfree = 0;
	xfs_agnumber_t		ag, max_ag = NULLAGNUMBER;
	int			err, trylock, nscan;

	ASSERT(S_ISDIR(VFS_I(ip)->i_mode));

	/* 2% of an AG's blocks must be free for it to be chosen. */
	minfree = mp->m_sb.sb_agblocks / 50;

	ag = startag;
	*agp = NULLAGNUMBER;

	/* For the first pass, don't sleep trying to init the per-AG. */
	trylock = XFS_ALLOC_FLAG_TRYLOCK;

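	/*
	 * Scan in up to three passes. Every pass skips AGs already in use by
	 * another filestream. The first pass does not block on AGF
	 * initialisation, the second allows blocking, and the third
	 * (XFS_PICK_LOWSPACE) also allows user data in AGs preferred for
	 * metadata. If no AG qualifies, fall back to the AG with the most
	 * free blocks seen, or to AG 0.
	 */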
	for (nscan = 0; 1; nscan++) {
		trace_xfs_filestream_scan(mp, ip->i_ino, ag);

		pag = xfs_perag_get(mp, ag);

		if (!pag->pagf_init) {
			err = xfs_alloc_pagf_init(mp, NULL, ag, trylock);
			if (err) {
				if (err != -EAGAIN) {
					xfs_perag_put(pag);
					return err;
				}
				/* Couldn't lock the AGF, skip this AG. */
				goto next_ag;
			}
		}

		/* Keep track of the AG with the most free blocks. */
		if (pag->pagf_freeblks > maxfree) {
			maxfree = pag->pagf_freeblks;
			max_ag = ag;
		}

		/*
		 * The AG reference count does two things: it enforces mutual
		 * exclusion when examining the suitability of an AG in this
		 * loop, and it guards against two filestreams being established
		 * in the same AG as each other.
		 */
		if (xfs_filestream_get_ag(mp, ag) > 1) {
			xfs_filestream_put_ag(mp, ag);
			goto next_ag;
		}

		longest = xfs_alloc_longest_free_extent(pag,
				xfs_alloc_min_freelist(mp, pag),
				xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
		if (((minlen && longest >= minlen) ||
		     (!minlen && pag->pagf_freeblks >= minfree)) &&
		    (!pag->pagf_metadata || !(flags & XFS_PICK_USERDATA) ||
		     (flags & XFS_PICK_LOWSPACE))) {

			/* Break out, retaining the reference on the AG. */
			free = pag->pagf_freeblks;
			xfs_perag_put(pag);
			*agp = ag;
			break;
		}

		/* Drop the reference on this AG, it's not usable. */
		xfs_filestream_put_ag(mp, ag);
next_ag:
		xfs_perag_put(pag);
		/* Move to the next AG, wrapping to AG 0 if necessary. */
		if (++ag >= mp->m_sb.sb_agcount)
			ag = 0;

		/* If a full pass of the AGs hasn't been done yet, continue. */
		if (ag != startag)
			continue;

		/* Allow sleeping in xfs_alloc_pagf_init() on the 2nd pass. */
		if (trylock != 0) {
			trylock = 0;
			continue;
		}

		/* Finally, if lowspace wasn't set, set it for the 3rd pass. */
		if (!(flags & XFS_PICK_LOWSPACE)) {
			flags |= XFS_PICK_LOWSPACE;
			continue;
		}

		/*
		 * Take the AG with the most free space, regardless of whether
		 * it's already in use by another filestream.
		 */
		if (max_ag != NULLAGNUMBER) {
			xfs_filestream_get_ag(mp, max_ag);
			free = maxfree;
			*agp = max_ag;
			break;
		}

		/* take AG 0 if none matched */
		trace_xfs_filestream_pick(ip, *agp, free, nscan);
		*agp = 0;
		return 0;
	}

	trace_xfs_filestream_pick(ip, *agp, free, nscan);

	if (*agp == NULLAGNUMBER)
		return 0;

	err = -ENOMEM;
	item = kmem_alloc(sizeof(*item), KM_MAYFAIL);
	if (!item)
		goto out_put_ag;

	item->ag = *agp;

	err = xfs_mru_cache_insert(mp->m_filestream, ip->i_ino, &item->mru);
	if (err) {
		if (err == -EEXIST)
			err = 0;
		goto out_free_item;
	}

	return 0;

out_free_item:
	kmem_free(item);
out_put_ag:
	xfs_filestream_put_ag(mp, *agp);
	return err;
}

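/*
 * Find the parent directory of a file via the dentry cache. Returns the
 * parent inode with a reference held, or NULL if no alias or parent could be
 * found; the caller is responsible for releasing it with xfs_irele().
 */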
static struct xfs_inode *
xfs_filestream_get_parent(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip), *dir = NULL;
	struct dentry		*dentry, *parent;

	dentry = d_find_alias(inode);
	if (!dentry)
		goto out;

	parent = dget_parent(dentry);
	if (!parent)
		goto out_dput;

	dir = igrab(d_inode(parent));
	dput(parent);

out_dput:
	dput(dentry);
out:
	return dir ? XFS_I(dir) : NULL;
}

/*
 * Find the right allocation group for a file, either by finding an
 * existing file stream or creating a new one.
 *
 * Returns NULLAGNUMBER in case of an error.
 */
xfs_agnumber_t
xfs_filestream_lookup_ag(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_inode	*pip = NULL;
	xfs_agnumber_t		startag, ag = NULLAGNUMBER;
	struct xfs_mru_cache_elem *mru;

	ASSERT(S_ISREG(VFS_I(ip)->i_mode));

	pip = xfs_filestream_get_parent(ip);
	if (!pip)
		return NULLAGNUMBER;

	mru = xfs_mru_cache_lookup(mp->m_filestream, pip->i_ino);
	if (mru) {
		ag = container_of(mru, struct xfs_fstrm_item, mru)->ag;
		xfs_mru_cache_done(mp->m_filestream);

		trace_xfs_filestream_lookup(mp, ip->i_ino, ag);
		goto out;
	}

	/*
	 * Set the starting AG using the rotor for inode32, otherwise
	 * use the directory inode's AG.
	 */
	if (mp->m_flags & XFS_MOUNT_32BITINODES) {
		xfs_agnumber_t	 rotorstep = xfs_rotorstep;
		startag = (mp->m_agfrotor / rotorstep) % mp->m_sb.sb_agcount;
		mp->m_agfrotor = (mp->m_agfrotor + 1) %
				 (mp->m_sb.sb_agcount * rotorstep);
	} else
		startag = XFS_INO_TO_AGNO(mp, pip->i_ino);

	if (xfs_filestream_pick_ag(pip, startag, &ag, 0, 0))
		ag = NULLAGNUMBER;
out:
	xfs_irele(pip);
	return ag;
}

/*
 * Pick a new allocation group for the current file and its file stream.
 *
 * This is called when the allocator can't find a suitable extent in the
 * current AG, and we have to move the stream into a new AG with more space.
 */
int
xfs_filestream_new_ag(
	struct xfs_bmalloca	*ap,
	xfs_agnumber_t		*agp)
{
	struct xfs_inode	*ip = ap->ip, *pip;
	struct xfs_mount	*mp = ip->i_mount;
	xfs_extlen_t		minlen = ap->length;
	xfs_agnumber_t		startag = 0;
	int			flags = 0;
	int			err = 0;
	struct xfs_mru_cache_elem *mru;

	*agp = NULLAGNUMBER;

	pip = xfs_filestream_get_parent(ip);
	if (!pip)
		goto exit;

	mru = xfs_mru_cache_remove(mp->m_filestream, pip->i_ino);
	if (mru) {
		struct xfs_fstrm_item *item =
			container_of(mru, struct xfs_fstrm_item, mru);
		startag = (item->ag + 1) % mp->m_sb.sb_agcount;
	}

	if (ap->datatype & XFS_ALLOC_USERDATA)
		flags |= XFS_PICK_USERDATA;
	if (ap->tp->t_flags & XFS_TRANS_LOWMODE)
		flags |= XFS_PICK_LOWSPACE;

	err = xfs_filestream_pick_ag(pip, startag, agp, flags, minlen);

	/*
	 * Only free the item here, after the new AG has been picked: its old
	 * AG was needed above to decide where to start the scan.
	 */
	if (mru)
		xfs_fstrm_free_func(mp, mru);

	xfs_irele(pip);
exit:
	if (*agp == NULLAGNUMBER)
		*agp = 0;
	return err;
}

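/*
 * Remove any filestream association for this inode from the MRU cache.
 */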
void
xfs_filestream_deassociate(
	struct xfs_inode	*ip)
{
	xfs_mru_cache_delete(ip->i_mount->m_filestream, ip->i_ino);
}

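/*
 * Set up the MRU cache used to track directory-to-AG filestream associations
 * for this mount.
 */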
int
xfs_filestream_mount(
	xfs_mount_t	*mp)
{
	/*
	 * The filestream timer tunable is currently fixed within the range of
	 * one second to four minutes, with five seconds being the default. The
	 * group count is somewhat arbitrary, but it'd be nice to adhere to the
	 * timer tunable to within about 10 percent. This requires at least 10
	 * groups.
	 */
	return xfs_mru_cache_create(&mp->m_filestream, mp,
			xfs_fstrm_centisecs * 10, 10, xfs_fstrm_free_func);
}

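/*
 * Tear down the filestream MRU cache at unmount time.
 */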
void
xfs_filestream_unmount(
	xfs_mount_t	*mp)
{
	xfs_mru_cache_destroy(mp->m_filestream);
}