fs/quota/quota_tree.c (v6.9.4)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 *	vfsv0 quota IO operations on file
  4 */
  5
  6#include <linux/errno.h>
  7#include <linux/fs.h>
  8#include <linux/mount.h>
  9#include <linux/dqblk_v2.h>
 10#include <linux/kernel.h>
 11#include <linux/init.h>
 12#include <linux/module.h>
 13#include <linux/slab.h>
 14#include <linux/quotaops.h>
 15
 16#include <asm/byteorder.h>
 17
 18#include "quota_tree.h"
 19
 20MODULE_AUTHOR("Jan Kara");
 21MODULE_DESCRIPTION("Quota trie support");
 22MODULE_LICENSE("GPL");
 23
 24/*
 25 * Maximum quota tree depth we support. Only to limit recursion when working
 26 * with the tree.
 27 */
 28#define MAX_QTREE_DEPTH 6
 29
 30#define __QUOTA_QT_PARANOIA
 31
 32static int __get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
 33{
 34	unsigned int epb = info->dqi_usable_bs >> 2;
 35
 36	depth = info->dqi_qtree_depth - depth - 1;
 37	while (depth--)
 38		id /= epb;
 39	return id % epb;
 40}
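/*
 * Explanatory aside (not in the upstream file): a worked example of the
 * index computation above, assuming the usual vfsv0 parameters of a
 * 1024-byte usable block (epb = 1024 >> 2 = 256 references per tree block)
 * and a tree depth of 4. For id = 70000:
 *
 *   depth 0: 70000 / 256^3 = 0              -> slot 0 in the root block
 *   depth 1: 70000 / 256^2 = 1,   1 % 256   =  1
 *   depth 2: 70000 / 256   = 273, 273 % 256 = 17
 *   depth 3: 70000 % 256   = 112            -> slot holding the data-block ref
 *
 * Each tree level thus consumes one base-epb "digit" of the quota id.
 */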
 41
 42static int get_index(struct qtree_mem_dqinfo *info, struct kqid qid, int depth)
 43{
 44	qid_t id = from_kqid(&init_user_ns, qid);
 45
 46	return __get_index(info, id, depth);
 47}
 48
  49/* Number of entries in one block */
 50static int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info)
 51{
 52	return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader))
 53	       / info->dqi_entry_size;
 54}
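/*
 * Explanatory aside (not in the upstream file): with illustrative vfsv0
 * numbers this works out to
 *   (1024 - sizeof(struct qt_disk_dqdbheader)) / 72 = (1024 - 16) / 72 = 14
 * dquot entries per data block for a 72-byte rev-1 on-disk entry (a 48-byte
 * rev-0 entry would give 21). The actual values depend on dqi_usable_bs and
 * dqi_entry_size as set up by the quota format driver.
 */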
 55
 56static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
 57{
 58	struct super_block *sb = info->dqi_sb;
 59
 60	memset(buf, 0, info->dqi_usable_bs);
 61	return sb->s_op->quota_read(sb, info->dqi_type, buf,
 62	       info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
 63}
 64
 65static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
 66{
 67	struct super_block *sb = info->dqi_sb;
 68	ssize_t ret;
 69
 70	ret = sb->s_op->quota_write(sb, info->dqi_type, buf,
 71	       info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
 72	if (ret != info->dqi_usable_bs) {
 73		quota_error(sb, "dquota write failed");
 74		if (ret >= 0)
 75			ret = -EIO;
 76	}
 77	return ret;
 78}
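/*
 * Explanatory aside (not in the upstream file): quota file blocks are
 * addressed by number, so block blk starts at byte offset
 * (loff_t)blk << dqi_blocksize_bits, i.e. blk * 1024 for 1 KiB blocks.
 * The explicit loff_t cast here (absent in the v5.4 listing further below)
 * keeps the shift out of 32-bit arithmetic, which matters once the quota
 * file grows past 4 GiB worth of blocks.
 */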
 79
 80static inline int do_check_range(struct super_block *sb, const char *val_name,
 81				 uint val, uint min_val, uint max_val)
 82{
 83	if (val < min_val || val > max_val) {
 84		quota_error(sb, "Getting %s %u out of range %u-%u",
 85			    val_name, val, min_val, max_val);
 86		return -EUCLEAN;
 87	}
 88
 89	return 0;
 90}
 91
 92static int check_dquot_block_header(struct qtree_mem_dqinfo *info,
 93				    struct qt_disk_dqdbheader *dh)
 94{
 95	int err = 0;
 96
 97	err = do_check_range(info->dqi_sb, "dqdh_next_free",
 98			     le32_to_cpu(dh->dqdh_next_free), 0,
 99			     info->dqi_blocks - 1);
100	if (err)
101		return err;
102	err = do_check_range(info->dqi_sb, "dqdh_prev_free",
103			     le32_to_cpu(dh->dqdh_prev_free), 0,
104			     info->dqi_blocks - 1);
105	if (err)
106		return err;
107	err = do_check_range(info->dqi_sb, "dqdh_entries",
108			     le16_to_cpu(dh->dqdh_entries), 0,
109			     qtree_dqstr_in_blk(info));
110
111	return err;
112}
113
114/* Remove empty block from list and return it */
115static int get_free_dqblk(struct qtree_mem_dqinfo *info)
116{
117	char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
118	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
119	int ret, blk;
120
121	if (!buf)
122		return -ENOMEM;
123	if (info->dqi_free_blk) {
124		blk = info->dqi_free_blk;
125		ret = read_blk(info, blk, buf);
126		if (ret < 0)
127			goto out_buf;
128		ret = check_dquot_block_header(info, dh);
129		if (ret)
130			goto out_buf;
131		info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free);
132	}
133	else {
134		memset(buf, 0, info->dqi_usable_bs);
135		/* Assure block allocation... */
136		ret = write_blk(info, info->dqi_blocks, buf);
137		if (ret < 0)
138			goto out_buf;
139		blk = info->dqi_blocks++;
140	}
141	mark_info_dirty(info->dqi_sb, info->dqi_type);
142	ret = blk;
143out_buf:
144	kfree(buf);
145	return ret;
146}
147
148/* Insert empty block to the list */
149static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk)
150{
151	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
152	int err;
153
154	dh->dqdh_next_free = cpu_to_le32(info->dqi_free_blk);
155	dh->dqdh_prev_free = cpu_to_le32(0);
156	dh->dqdh_entries = cpu_to_le16(0);
157	err = write_blk(info, blk, buf);
158	if (err < 0)
159		return err;
160	info->dqi_free_blk = blk;
161	mark_info_dirty(info->dqi_sb, info->dqi_type);
162	return 0;
163}
164
165/* Remove given block from the list of blocks with free entries */
166static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
167			       uint blk)
168{
169	char *tmpbuf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
170	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
171	uint nextblk = le32_to_cpu(dh->dqdh_next_free);
172	uint prevblk = le32_to_cpu(dh->dqdh_prev_free);
173	int err;
174
175	if (!tmpbuf)
176		return -ENOMEM;
177	if (nextblk) {
178		err = read_blk(info, nextblk, tmpbuf);
179		if (err < 0)
180			goto out_buf;
181		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
182							dh->dqdh_prev_free;
183		err = write_blk(info, nextblk, tmpbuf);
184		if (err < 0)
185			goto out_buf;
186	}
187	if (prevblk) {
188		err = read_blk(info, prevblk, tmpbuf);
189		if (err < 0)
190			goto out_buf;
191		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_next_free =
192							dh->dqdh_next_free;
193		err = write_blk(info, prevblk, tmpbuf);
194		if (err < 0)
195			goto out_buf;
196	} else {
197		info->dqi_free_entry = nextblk;
198		mark_info_dirty(info->dqi_sb, info->dqi_type);
199	}
200	kfree(tmpbuf);
201	dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
 202	/* No matter whether write succeeds, block is out of list */
203	if (write_blk(info, blk, buf) < 0)
204		quota_error(info->dqi_sb, "Can't write block (%u) "
205			    "with free entries", blk);
206	return 0;
207out_buf:
208	kfree(tmpbuf);
209	return err;
210}
211
212/* Insert given block to the beginning of list with free entries */
213static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
214			       uint blk)
215{
216	char *tmpbuf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
217	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
218	int err;
219
220	if (!tmpbuf)
221		return -ENOMEM;
222	dh->dqdh_next_free = cpu_to_le32(info->dqi_free_entry);
223	dh->dqdh_prev_free = cpu_to_le32(0);
224	err = write_blk(info, blk, buf);
225	if (err < 0)
226		goto out_buf;
227	if (info->dqi_free_entry) {
228		err = read_blk(info, info->dqi_free_entry, tmpbuf);
229		if (err < 0)
230			goto out_buf;
231		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
232							cpu_to_le32(blk);
233		err = write_blk(info, info->dqi_free_entry, tmpbuf);
234		if (err < 0)
235			goto out_buf;
236	}
237	kfree(tmpbuf);
238	info->dqi_free_entry = blk;
239	mark_info_dirty(info->dqi_sb, info->dqi_type);
240	return 0;
241out_buf:
242	kfree(tmpbuf);
243	return err;
244}
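/*
 * Explanatory aside (not in the upstream file): two on-disk free lists are
 * maintained through the qt_disk_dqdbheader fields used above:
 *
 *   - info->dqi_free_blk heads a list of completely unused blocks, linked
 *     only through dqdh_next_free (get_free_dqblk() pops the head,
 *     put_free_dqblk() pushes a new head);
 *   - info->dqi_free_entry heads a doubly linked list (dqdh_next_free /
 *     dqdh_prev_free) of data blocks that still have unused dquot slots,
 *     maintained by insert_free_dqentry() and remove_free_dqentry().
 *
 * Both list heads live in struct qtree_mem_dqinfo and are scheduled for
 * write-out via mark_info_dirty().
 */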
245
246/* Is the entry in the block free? */
247int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk)
248{
249	int i;
250
251	for (i = 0; i < info->dqi_entry_size; i++)
252		if (disk[i])
253			return 0;
254	return 1;
255}
256EXPORT_SYMBOL(qtree_entry_unused);
257
258/* Find space for dquot */
259static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
260			      struct dquot *dquot, int *err)
261{
262	uint blk, i;
263	struct qt_disk_dqdbheader *dh;
264	char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
265	char *ddquot;
266
267	*err = 0;
268	if (!buf) {
269		*err = -ENOMEM;
270		return 0;
271	}
272	dh = (struct qt_disk_dqdbheader *)buf;
273	if (info->dqi_free_entry) {
274		blk = info->dqi_free_entry;
275		*err = read_blk(info, blk, buf);
276		if (*err < 0)
277			goto out_buf;
278		*err = check_dquot_block_header(info, dh);
279		if (*err)
280			goto out_buf;
281	} else {
282		blk = get_free_dqblk(info);
283		if ((int)blk < 0) {
284			*err = blk;
285			kfree(buf);
286			return 0;
287		}
288		memset(buf, 0, info->dqi_usable_bs);
289		/* This is enough as the block is already zeroed and the entry
290		 * list is empty... */
291		info->dqi_free_entry = blk;
292		mark_info_dirty(dquot->dq_sb, dquot->dq_id.type);
293	}
294	/* Block will be full? */
295	if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) {
296		*err = remove_free_dqentry(info, buf, blk);
297		if (*err < 0) {
298			quota_error(dquot->dq_sb, "Can't remove block (%u) "
299				    "from entry free list", blk);
300			goto out_buf;
301		}
302	}
303	le16_add_cpu(&dh->dqdh_entries, 1);
304	/* Find free structure in block */
305	ddquot = buf + sizeof(struct qt_disk_dqdbheader);
306	for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
307		if (qtree_entry_unused(info, ddquot))
308			break;
309		ddquot += info->dqi_entry_size;
310	}
311#ifdef __QUOTA_QT_PARANOIA
312	if (i == qtree_dqstr_in_blk(info)) {
313		quota_error(dquot->dq_sb, "Data block full but it shouldn't");
314		*err = -EIO;
315		goto out_buf;
316	}
317#endif
318	*err = write_blk(info, blk, buf);
319	if (*err < 0) {
320		quota_error(dquot->dq_sb, "Can't write quota data block %u",
321			    blk);
322		goto out_buf;
323	}
324	dquot->dq_off = ((loff_t)blk << info->dqi_blocksize_bits) +
325			sizeof(struct qt_disk_dqdbheader) +
326			i * info->dqi_entry_size;
327	kfree(buf);
328	return blk;
329out_buf:
330	kfree(buf);
331	return 0;
332}
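/*
 * Explanatory aside (not in the upstream file): the dq_off computed above is
 * a plain byte offset into the quota file. A hypothetical helper spelling
 * out the same computation (names are illustrative, not kernel API):
 */
static inline loff_t example_entry_offset(const struct qtree_mem_dqinfo *info,
					  uint blk, uint slot)
{
	return ((loff_t)blk << info->dqi_blocksize_bits) +
	       sizeof(struct qt_disk_dqdbheader) +
	       (loff_t)slot * info->dqi_entry_size;
}
/*
 * With 1 KiB blocks, a 16-byte block header and 72-byte entries, slot 3 of
 * block 5 would land at 5 * 1024 + 16 + 3 * 72 = 5352.
 */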
333
334/* Insert reference to structure into the trie */
335static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
336			  uint *blks, int depth)
337{
338	char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
339	int ret = 0, newson = 0, newact = 0;
340	__le32 *ref;
341	uint newblk;
342	int i;
343
344	if (!buf)
345		return -ENOMEM;
346	if (!blks[depth]) {
347		ret = get_free_dqblk(info);
348		if (ret < 0)
349			goto out_buf;
350		for (i = 0; i < depth; i++)
351			if (ret == blks[i]) {
352				quota_error(dquot->dq_sb,
353					"Free block already used in tree: block %u",
354					ret);
355				ret = -EIO;
356				goto out_buf;
357			}
358		blks[depth] = ret;
359		memset(buf, 0, info->dqi_usable_bs);
360		newact = 1;
361	} else {
362		ret = read_blk(info, blks[depth], buf);
363		if (ret < 0) {
364			quota_error(dquot->dq_sb, "Can't read tree quota "
365				    "block %u", blks[depth]);
366			goto out_buf;
367		}
368	}
369	ref = (__le32 *)buf;
370	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
371	ret = do_check_range(dquot->dq_sb, "block", newblk, 0,
372			     info->dqi_blocks - 1);
373	if (ret)
374		goto out_buf;
375	if (!newblk) {
376		newson = 1;
377	} else {
378		for (i = 0; i <= depth; i++)
379			if (newblk == blks[i]) {
380				quota_error(dquot->dq_sb,
381					"Cycle in quota tree detected: block %u index %u",
382					blks[depth],
383					get_index(info, dquot->dq_id, depth));
384				ret = -EIO;
385				goto out_buf;
386			}
387	}
388	blks[depth + 1] = newblk;
389	if (depth == info->dqi_qtree_depth - 1) {
390#ifdef __QUOTA_QT_PARANOIA
391		if (newblk) {
392			quota_error(dquot->dq_sb, "Inserting already present "
393				    "quota entry (block %u)",
394				    le32_to_cpu(ref[get_index(info,
395						dquot->dq_id, depth)]));
396			ret = -EIO;
397			goto out_buf;
398		}
399#endif
400		blks[depth + 1] = find_free_dqentry(info, dquot, &ret);
401	} else {
402		ret = do_insert_tree(info, dquot, blks, depth + 1);
403	}
404	if (newson && ret >= 0) {
405		ref[get_index(info, dquot->dq_id, depth)] =
406						cpu_to_le32(blks[depth + 1]);
407		ret = write_blk(info, blks[depth], buf);
408	} else if (newact && ret < 0) {
409		put_free_dqblk(info, buf, blks[depth]);
410	}
411out_buf:
412	kfree(buf);
413	return ret;
414}
415
416/* Wrapper for inserting quota structure into tree */
417static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
418				 struct dquot *dquot)
419{
420	uint blks[MAX_QTREE_DEPTH] = { QT_TREEOFF };
421
422#ifdef __QUOTA_QT_PARANOIA
423	if (info->dqi_blocks <= QT_TREEOFF) {
424		quota_error(dquot->dq_sb, "Quota tree root isn't allocated!");
425		return -EIO;
426	}
427#endif
428	if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) {
429		quota_error(dquot->dq_sb, "Quota tree depth too big!");
430		return -EIO;
431	}
432	return do_insert_tree(info, dquot, blks, 0);
433}
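/*
 * Explanatory aside (not in the upstream file): compared with the v5.4
 * variant at the bottom of this page, do_insert_tree() here threads a blks[]
 * array holding the whole root-to-leaf path. That enables extra sanity
 * checks on untrusted quota-file contents: a freshly allocated block must
 * not already appear on the path, references read from disk are range
 * checked, and a child block pointing back at an ancestor is rejected as a
 * cycle. MAX_QTREE_DEPTH (6) bounds both the array and the recursion, and
 * dq_insert_tree() refuses deeper trees up front.
 */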
434
435/*
436 * We don't have to be afraid of deadlocks as we never have quotas on quota
437 * files...
438 */
439int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
440{
441	int type = dquot->dq_id.type;
442	struct super_block *sb = dquot->dq_sb;
443	ssize_t ret;
444	char *ddquot = kmalloc(info->dqi_entry_size, GFP_KERNEL);
445
446	if (!ddquot)
447		return -ENOMEM;
448
449	/* dq_off is guarded by dqio_sem */
450	if (!dquot->dq_off) {
451		ret = dq_insert_tree(info, dquot);
452		if (ret < 0) {
453			quota_error(sb, "Error %zd occurred while creating "
454				    "quota", ret);
455			kfree(ddquot);
456			return ret;
457		}
458	}
459	spin_lock(&dquot->dq_dqb_lock);
460	info->dqi_ops->mem2disk_dqblk(ddquot, dquot);
461	spin_unlock(&dquot->dq_dqb_lock);
462	ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
463				    dquot->dq_off);
464	if (ret != info->dqi_entry_size) {
465		quota_error(sb, "dquota write failed");
466		if (ret >= 0)
467			ret = -ENOSPC;
468	} else {
469		ret = 0;
470	}
471	dqstats_inc(DQST_WRITES);
472	kfree(ddquot);
473
474	return ret;
475}
476EXPORT_SYMBOL(qtree_write_dquot);
477
478/* Free dquot entry in data block */
479static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
480			uint blk)
481{
482	struct qt_disk_dqdbheader *dh;
483	char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
484	int ret = 0;
485
486	if (!buf)
487		return -ENOMEM;
488	if (dquot->dq_off >> info->dqi_blocksize_bits != blk) {
489		quota_error(dquot->dq_sb, "Quota structure has offset to "
490			"other block (%u) than it should (%u)", blk,
491			(uint)(dquot->dq_off >> info->dqi_blocksize_bits));
492		ret = -EIO;
493		goto out_buf;
494	}
495	ret = read_blk(info, blk, buf);
496	if (ret < 0) {
497		quota_error(dquot->dq_sb, "Can't read quota data block %u",
498			    blk);
499		goto out_buf;
500	}
501	dh = (struct qt_disk_dqdbheader *)buf;
502	ret = check_dquot_block_header(info, dh);
503	if (ret)
504		goto out_buf;
505	le16_add_cpu(&dh->dqdh_entries, -1);
506	if (!le16_to_cpu(dh->dqdh_entries)) {	/* Block got free? */
507		ret = remove_free_dqentry(info, buf, blk);
508		if (ret >= 0)
509			ret = put_free_dqblk(info, buf, blk);
510		if (ret < 0) {
511			quota_error(dquot->dq_sb, "Can't move quota data block "
512				    "(%u) to free list", blk);
513			goto out_buf;
514		}
515	} else {
516		memset(buf +
517		       (dquot->dq_off & ((1 << info->dqi_blocksize_bits) - 1)),
518		       0, info->dqi_entry_size);
519		if (le16_to_cpu(dh->dqdh_entries) ==
520		    qtree_dqstr_in_blk(info) - 1) {
521			/* Insert will write block itself */
522			ret = insert_free_dqentry(info, buf, blk);
523			if (ret < 0) {
524				quota_error(dquot->dq_sb, "Can't insert quota "
525				    "data block (%u) to free entry list", blk);
526				goto out_buf;
527			}
528		} else {
529			ret = write_blk(info, blk, buf);
530			if (ret < 0) {
531				quota_error(dquot->dq_sb, "Can't write quota "
532					    "data block %u", blk);
533				goto out_buf;
534			}
535		}
536	}
537	dquot->dq_off = 0;	/* Quota is now unattached */
538out_buf:
539	kfree(buf);
540	return ret;
541}
542
543/* Remove reference to dquot from tree */
544static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
545		       uint *blks, int depth)
546{
547	char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
548	int ret = 0;
549	uint newblk;
550	__le32 *ref = (__le32 *)buf;
551	int i;
552
553	if (!buf)
554		return -ENOMEM;
555	ret = read_blk(info, blks[depth], buf);
556	if (ret < 0) {
557		quota_error(dquot->dq_sb, "Can't read quota data block %u",
558			    blks[depth]);
559		goto out_buf;
560	}
561	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
562	ret = do_check_range(dquot->dq_sb, "block", newblk, QT_TREEOFF,
563			     info->dqi_blocks - 1);
564	if (ret)
565		goto out_buf;
566
567	for (i = 0; i <= depth; i++)
568		if (newblk == blks[i]) {
569			quota_error(dquot->dq_sb,
570				"Cycle in quota tree detected: block %u index %u",
571				blks[depth],
572				get_index(info, dquot->dq_id, depth));
573			ret = -EIO;
574			goto out_buf;
575		}
576	if (depth == info->dqi_qtree_depth - 1) {
577		ret = free_dqentry(info, dquot, newblk);
578		blks[depth + 1] = 0;
579	} else {
580		blks[depth + 1] = newblk;
581		ret = remove_tree(info, dquot, blks, depth + 1);
582	}
583	if (ret >= 0 && !blks[depth + 1]) {
584		ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0);
585		/* Block got empty? */
586		for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++)
587			;
588		/* Don't put the root block into the free block list */
589		if (i == (info->dqi_usable_bs >> 2)
590		    && blks[depth] != QT_TREEOFF) {
591			put_free_dqblk(info, buf, blks[depth]);
592			blks[depth] = 0;
593		} else {
594			ret = write_blk(info, blks[depth], buf);
595			if (ret < 0)
596				quota_error(dquot->dq_sb,
597					    "Can't write quota tree block %u",
598					    blks[depth]);
599		}
600	}
601out_buf:
602	kfree(buf);
603	return ret;
604}
605
606/* Delete dquot from tree */
607int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
608{
609	uint blks[MAX_QTREE_DEPTH] = { QT_TREEOFF };
610
611	if (!dquot->dq_off)	/* Even not allocated? */
612		return 0;
613	if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) {
614		quota_error(dquot->dq_sb, "Quota tree depth too big!");
615		return -EIO;
616	}
617	return remove_tree(info, dquot, blks, 0);
618}
619EXPORT_SYMBOL(qtree_delete_dquot);
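/*
 * Explanatory aside (not in the upstream file): deletion walks the same
 * root-to-leaf path as insertion. free_dqentry() clears the entry and either
 * returns the data block to the free-block list (its last entry is gone) or
 * puts it on the free-entry list (it was previously full); remove_tree()
 * then clears the reference on each level and, if a tree block has become
 * all zeroes, releases it as well, except for the root block at QT_TREEOFF,
 * which is never freed.
 */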
620
621/* Find entry in block */
622static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
623				 struct dquot *dquot, uint blk)
624{
625	char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
626	loff_t ret = 0;
627	int i;
628	char *ddquot;
629
630	if (!buf)
631		return -ENOMEM;
632	ret = read_blk(info, blk, buf);
633	if (ret < 0) {
634		quota_error(dquot->dq_sb, "Can't read quota tree "
635			    "block %u", blk);
636		goto out_buf;
637	}
638	ddquot = buf + sizeof(struct qt_disk_dqdbheader);
639	for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
640		if (info->dqi_ops->is_id(ddquot, dquot))
641			break;
642		ddquot += info->dqi_entry_size;
643	}
644	if (i == qtree_dqstr_in_blk(info)) {
645		quota_error(dquot->dq_sb,
646			    "Quota for id %u referenced but not present",
647			    from_kqid(&init_user_ns, dquot->dq_id));
648		ret = -EIO;
649		goto out_buf;
650	} else {
651		ret = ((loff_t)blk << info->dqi_blocksize_bits) + sizeof(struct
652		  qt_disk_dqdbheader) + i * info->dqi_entry_size;
653	}
654out_buf:
655	kfree(buf);
656	return ret;
657}
658
659/* Find entry for given id in the tree */
660static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
661				struct dquot *dquot, uint *blks, int depth)
662{
663	char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
664	loff_t ret = 0;
665	__le32 *ref = (__le32 *)buf;
666	uint blk;
667	int i;
668
669	if (!buf)
670		return -ENOMEM;
671	ret = read_blk(info, blks[depth], buf);
672	if (ret < 0) {
673		quota_error(dquot->dq_sb, "Can't read quota tree block %u",
674			    blks[depth]);
675		goto out_buf;
676	}
677	ret = 0;
678	blk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
679	if (!blk)	/* No reference? */
680		goto out_buf;
681	ret = do_check_range(dquot->dq_sb, "block", blk, QT_TREEOFF,
682			     info->dqi_blocks - 1);
683	if (ret)
684		goto out_buf;
685
686	/* Check for cycles in the tree */
687	for (i = 0; i <= depth; i++)
688		if (blk == blks[i]) {
689			quota_error(dquot->dq_sb,
690				"Cycle in quota tree detected: block %u index %u",
691				blks[depth],
692				get_index(info, dquot->dq_id, depth));
693			ret = -EIO;
694			goto out_buf;
695		}
696	blks[depth + 1] = blk;
697	if (depth < info->dqi_qtree_depth - 1)
698		ret = find_tree_dqentry(info, dquot, blks, depth + 1);
699	else
700		ret = find_block_dqentry(info, dquot, blk);
701out_buf:
702	kfree(buf);
703	return ret;
704}
705
706/* Find entry for given id in the tree - wrapper function */
707static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info,
708				  struct dquot *dquot)
709{
710	uint blks[MAX_QTREE_DEPTH] = { QT_TREEOFF };
711
712	if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) {
713		quota_error(dquot->dq_sb, "Quota tree depth too big!");
714		return -EIO;
715	}
716	return find_tree_dqentry(info, dquot, blks, 0);
717}
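/*
 * Explanatory aside (not in the upstream file): lookup mirrors insertion.
 * find_tree_dqentry() follows one reference per level (again recording the
 * path in blks[] to detect cycles), and find_block_dqentry() scans the final
 * data block for the entry whose id matches, returning its byte offset in
 * the quota file, 0 when the id has no entry, or a negative errno.
 */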
718
719int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
720{
721	int type = dquot->dq_id.type;
722	struct super_block *sb = dquot->dq_sb;
723	loff_t offset;
724	char *ddquot;
725	int ret = 0;
726
727#ifdef __QUOTA_QT_PARANOIA
728	/* Invalidated quota? */
729	if (!sb_dqopt(dquot->dq_sb)->files[type]) {
730		quota_error(sb, "Quota invalidated while reading!");
731		return -EIO;
732	}
733#endif
734	/* Do we know offset of the dquot entry in the quota file? */
735	if (!dquot->dq_off) {
736		offset = find_dqentry(info, dquot);
737		if (offset <= 0) {	/* Entry not present? */
738			if (offset < 0)
739				quota_error(sb,"Can't read quota structure "
740					    "for id %u",
741					    from_kqid(&init_user_ns,
742						      dquot->dq_id));
743			dquot->dq_off = 0;
744			set_bit(DQ_FAKE_B, &dquot->dq_flags);
745			memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
746			ret = offset;
747			goto out;
748		}
749		dquot->dq_off = offset;
750	}
751	ddquot = kmalloc(info->dqi_entry_size, GFP_KERNEL);
752	if (!ddquot)
753		return -ENOMEM;
754	ret = sb->s_op->quota_read(sb, type, ddquot, info->dqi_entry_size,
755				   dquot->dq_off);
756	if (ret != info->dqi_entry_size) {
757		if (ret >= 0)
758			ret = -EIO;
759		quota_error(sb, "Error while reading quota structure for id %u",
760			    from_kqid(&init_user_ns, dquot->dq_id));
761		set_bit(DQ_FAKE_B, &dquot->dq_flags);
762		memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
763		kfree(ddquot);
764		goto out;
765	}
766	spin_lock(&dquot->dq_dqb_lock);
767	info->dqi_ops->disk2mem_dqblk(dquot, ddquot);
768	if (!dquot->dq_dqb.dqb_bhardlimit &&
769	    !dquot->dq_dqb.dqb_bsoftlimit &&
770	    !dquot->dq_dqb.dqb_ihardlimit &&
771	    !dquot->dq_dqb.dqb_isoftlimit)
772		set_bit(DQ_FAKE_B, &dquot->dq_flags);
773	spin_unlock(&dquot->dq_dqb_lock);
774	kfree(ddquot);
775out:
776	dqstats_inc(DQST_READS);
777	return ret;
778}
779EXPORT_SYMBOL(qtree_read_dquot);
780
781/* Check whether dquot should not be deleted. We know we are
782 * the only one operating on dquot (thanks to dq_lock) */
783int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
784{
785	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) &&
786	    !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace))
787		return qtree_delete_dquot(info, dquot);
788	return 0;
789}
790EXPORT_SYMBOL(qtree_release_dquot);
791
792static int find_next_id(struct qtree_mem_dqinfo *info, qid_t *id,
793			unsigned int blk, int depth)
794{
795	char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
796	__le32 *ref = (__le32 *)buf;
797	ssize_t ret;
798	unsigned int epb = info->dqi_usable_bs >> 2;
799	unsigned int level_inc = 1;
800	int i;
801
802	if (!buf)
803		return -ENOMEM;
804
805	for (i = depth; i < info->dqi_qtree_depth - 1; i++)
806		level_inc *= epb;
807
808	ret = read_blk(info, blk, buf);
809	if (ret < 0) {
810		quota_error(info->dqi_sb,
811			    "Can't read quota tree block %u", blk);
812		goto out_buf;
813	}
814	for (i = __get_index(info, *id, depth); i < epb; i++) {
815		uint blk_no = le32_to_cpu(ref[i]);
816
817		if (blk_no == 0) {
818			*id += level_inc;
819			continue;
820		}
821		ret = do_check_range(info->dqi_sb, "block", blk_no, 0,
822				     info->dqi_blocks - 1);
823		if (ret)
824			goto out_buf;
825		if (depth == info->dqi_qtree_depth - 1) {
826			ret = 0;
827			goto out_buf;
828		}
829		ret = find_next_id(info, id, blk_no, depth + 1);
830		if (ret != -ENOENT)
831			break;
832	}
833	if (i == epb) {
834		ret = -ENOENT;
835		goto out_buf;
836	}
837out_buf:
838	kfree(buf);
839	return ret;
840}
841
842int qtree_get_next_id(struct qtree_mem_dqinfo *info, struct kqid *qid)
843{
844	qid_t id = from_kqid(&init_user_ns, *qid);
845	int ret;
846
847	ret = find_next_id(info, &id, QT_TREEOFF, 0);
848	if (ret < 0)
849		return ret;
850	*qid = make_kqid(&init_user_ns, qid->type, id);
851	return 0;
852}
853EXPORT_SYMBOL(qtree_get_next_id);
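/*
 * Explanatory aside (not in the upstream file): qtree_get_next_id()
 * implements the "next allocated id" scan used by Q_GETNEXTQUOTA-style
 * interfaces. find_next_id() walks the tree left to right starting from *id;
 * level_inc is the number of ids covered by one reference at the current
 * depth, so a zero slot skips an entire empty subtree at once. With
 * epb = 256 and a depth-4 tree, a missing reference at depth 0 advances *id
 * by 256^3 = 16777216, at depth 2 by 256, and at the leaf level by 1.
 */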
fs/quota/quota_tree.c (v5.4)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 *	vfsv0 quota IO operations on file
  4 */
  5
  6#include <linux/errno.h>
  7#include <linux/fs.h>
  8#include <linux/mount.h>
  9#include <linux/dqblk_v2.h>
 10#include <linux/kernel.h>
 11#include <linux/init.h>
 12#include <linux/module.h>
 13#include <linux/slab.h>
 14#include <linux/quotaops.h>
 15
 16#include <asm/byteorder.h>
 17
 18#include "quota_tree.h"
 19
 20MODULE_AUTHOR("Jan Kara");
 21MODULE_DESCRIPTION("Quota trie support");
 22MODULE_LICENSE("GPL");
 23
 24#define __QUOTA_QT_PARANOIA
 25
 26static int __get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
 27{
 28	unsigned int epb = info->dqi_usable_bs >> 2;
 29
 30	depth = info->dqi_qtree_depth - depth - 1;
 31	while (depth--)
 32		id /= epb;
 33	return id % epb;
 34}
 35
 36static int get_index(struct qtree_mem_dqinfo *info, struct kqid qid, int depth)
 37{
 38	qid_t id = from_kqid(&init_user_ns, qid);
 39
 40	return __get_index(info, id, depth);
 41}
 42
  43/* Number of entries in one block */
 44static int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info)
 45{
 46	return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader))
 47	       / info->dqi_entry_size;
 48}
 49
 50static char *getdqbuf(size_t size)
 51{
 52	char *buf = kmalloc(size, GFP_NOFS);
 53	if (!buf)
 54		printk(KERN_WARNING
 55		       "VFS: Not enough memory for quota buffers.\n");
 56	return buf;
 57}
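/*
 * Explanatory aside (not in the upstream file): in this older version all
 * buffers come from getdqbuf(), which allocates with GFP_NOFS and prints a
 * warning on failure; the v6.9.4 code above calls kmalloc(..., GFP_KERNEL)
 * directly at each call site instead.
 */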
 58
 59static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
 60{
 61	struct super_block *sb = info->dqi_sb;
 62
 63	memset(buf, 0, info->dqi_usable_bs);
 64	return sb->s_op->quota_read(sb, info->dqi_type, buf,
 65	       info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
 66}
 67
 68static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
 69{
 70	struct super_block *sb = info->dqi_sb;
 71	ssize_t ret;
 72
 73	ret = sb->s_op->quota_write(sb, info->dqi_type, buf,
 74	       info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
 75	if (ret != info->dqi_usable_bs) {
 76		quota_error(sb, "dquota write failed");
 77		if (ret >= 0)
 78			ret = -EIO;
 79	}
 80	return ret;
 81}
 82
 83/* Remove empty block from list and return it */
 84static int get_free_dqblk(struct qtree_mem_dqinfo *info)
 85{
 86	char *buf = getdqbuf(info->dqi_usable_bs);
 87	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
 88	int ret, blk;
 89
 90	if (!buf)
 91		return -ENOMEM;
 92	if (info->dqi_free_blk) {
 93		blk = info->dqi_free_blk;
 94		ret = read_blk(info, blk, buf);
 95		if (ret < 0)
 96			goto out_buf;
 97		info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free);
 98	}
 99	else {
100		memset(buf, 0, info->dqi_usable_bs);
101		/* Assure block allocation... */
102		ret = write_blk(info, info->dqi_blocks, buf);
103		if (ret < 0)
104			goto out_buf;
105		blk = info->dqi_blocks++;
106	}
107	mark_info_dirty(info->dqi_sb, info->dqi_type);
108	ret = blk;
109out_buf:
110	kfree(buf);
111	return ret;
112}
113
114/* Insert empty block to the list */
115static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk)
116{
117	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
118	int err;
119
120	dh->dqdh_next_free = cpu_to_le32(info->dqi_free_blk);
121	dh->dqdh_prev_free = cpu_to_le32(0);
122	dh->dqdh_entries = cpu_to_le16(0);
123	err = write_blk(info, blk, buf);
124	if (err < 0)
125		return err;
126	info->dqi_free_blk = blk;
127	mark_info_dirty(info->dqi_sb, info->dqi_type);
128	return 0;
129}
130
131/* Remove given block from the list of blocks with free entries */
132static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
133			       uint blk)
134{
135	char *tmpbuf = getdqbuf(info->dqi_usable_bs);
136	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
137	uint nextblk = le32_to_cpu(dh->dqdh_next_free);
138	uint prevblk = le32_to_cpu(dh->dqdh_prev_free);
139	int err;
140
141	if (!tmpbuf)
142		return -ENOMEM;
143	if (nextblk) {
144		err = read_blk(info, nextblk, tmpbuf);
145		if (err < 0)
146			goto out_buf;
147		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
148							dh->dqdh_prev_free;
149		err = write_blk(info, nextblk, tmpbuf);
150		if (err < 0)
151			goto out_buf;
152	}
153	if (prevblk) {
154		err = read_blk(info, prevblk, tmpbuf);
155		if (err < 0)
156			goto out_buf;
157		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_next_free =
158							dh->dqdh_next_free;
159		err = write_blk(info, prevblk, tmpbuf);
160		if (err < 0)
161			goto out_buf;
162	} else {
163		info->dqi_free_entry = nextblk;
164		mark_info_dirty(info->dqi_sb, info->dqi_type);
165	}
166	kfree(tmpbuf);
167	dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
168	/* No matter whether write succeeds block is out of list */
169	if (write_blk(info, blk, buf) < 0)
170		quota_error(info->dqi_sb, "Can't write block (%u) "
171			    "with free entries", blk);
172	return 0;
173out_buf:
174	kfree(tmpbuf);
175	return err;
176}
177
178/* Insert given block to the beginning of list with free entries */
179static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
180			       uint blk)
181{
182	char *tmpbuf = getdqbuf(info->dqi_usable_bs);
183	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
184	int err;
185
186	if (!tmpbuf)
187		return -ENOMEM;
188	dh->dqdh_next_free = cpu_to_le32(info->dqi_free_entry);
189	dh->dqdh_prev_free = cpu_to_le32(0);
190	err = write_blk(info, blk, buf);
191	if (err < 0)
192		goto out_buf;
193	if (info->dqi_free_entry) {
194		err = read_blk(info, info->dqi_free_entry, tmpbuf);
195		if (err < 0)
196			goto out_buf;
197		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
198							cpu_to_le32(blk);
199		err = write_blk(info, info->dqi_free_entry, tmpbuf);
200		if (err < 0)
201			goto out_buf;
202	}
203	kfree(tmpbuf);
204	info->dqi_free_entry = blk;
205	mark_info_dirty(info->dqi_sb, info->dqi_type);
206	return 0;
207out_buf:
208	kfree(tmpbuf);
209	return err;
210}
211
212/* Is the entry in the block free? */
213int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk)
214{
215	int i;
216
217	for (i = 0; i < info->dqi_entry_size; i++)
218		if (disk[i])
219			return 0;
220	return 1;
221}
222EXPORT_SYMBOL(qtree_entry_unused);
223
224/* Find space for dquot */
225static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
226			      struct dquot *dquot, int *err)
227{
228	uint blk, i;
229	struct qt_disk_dqdbheader *dh;
230	char *buf = getdqbuf(info->dqi_usable_bs);
231	char *ddquot;
232
233	*err = 0;
234	if (!buf) {
235		*err = -ENOMEM;
236		return 0;
237	}
238	dh = (struct qt_disk_dqdbheader *)buf;
239	if (info->dqi_free_entry) {
240		blk = info->dqi_free_entry;
241		*err = read_blk(info, blk, buf);
242		if (*err < 0)
243			goto out_buf;
244	} else {
245		blk = get_free_dqblk(info);
246		if ((int)blk < 0) {
247			*err = blk;
248			kfree(buf);
249			return 0;
250		}
251		memset(buf, 0, info->dqi_usable_bs);
252		/* This is enough as the block is already zeroed and the entry
253		 * list is empty... */
254		info->dqi_free_entry = blk;
255		mark_info_dirty(dquot->dq_sb, dquot->dq_id.type);
256	}
257	/* Block will be full? */
258	if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) {
259		*err = remove_free_dqentry(info, buf, blk);
260		if (*err < 0) {
261			quota_error(dquot->dq_sb, "Can't remove block (%u) "
262				    "from entry free list", blk);
263			goto out_buf;
264		}
265	}
266	le16_add_cpu(&dh->dqdh_entries, 1);
267	/* Find free structure in block */
268	ddquot = buf + sizeof(struct qt_disk_dqdbheader);
269	for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
270		if (qtree_entry_unused(info, ddquot))
271			break;
272		ddquot += info->dqi_entry_size;
273	}
274#ifdef __QUOTA_QT_PARANOIA
275	if (i == qtree_dqstr_in_blk(info)) {
276		quota_error(dquot->dq_sb, "Data block full but it shouldn't");
277		*err = -EIO;
278		goto out_buf;
279	}
280#endif
281	*err = write_blk(info, blk, buf);
282	if (*err < 0) {
283		quota_error(dquot->dq_sb, "Can't write quota data block %u",
284			    blk);
285		goto out_buf;
286	}
287	dquot->dq_off = (blk << info->dqi_blocksize_bits) +
288			sizeof(struct qt_disk_dqdbheader) +
289			i * info->dqi_entry_size;
290	kfree(buf);
291	return blk;
292out_buf:
293	kfree(buf);
294	return 0;
295}
296
297/* Insert reference to structure into the trie */
298static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
299			  uint *treeblk, int depth)
300{
301	char *buf = getdqbuf(info->dqi_usable_bs);
302	int ret = 0, newson = 0, newact = 0;
303	__le32 *ref;
304	uint newblk;
305
306	if (!buf)
307		return -ENOMEM;
308	if (!*treeblk) {
309		ret = get_free_dqblk(info);
310		if (ret < 0)
311			goto out_buf;
312		*treeblk = ret;
313		memset(buf, 0, info->dqi_usable_bs);
314		newact = 1;
315	} else {
316		ret = read_blk(info, *treeblk, buf);
317		if (ret < 0) {
318			quota_error(dquot->dq_sb, "Can't read tree quota "
319				    "block %u", *treeblk);
320			goto out_buf;
321		}
322	}
323	ref = (__le32 *)buf;
324	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
325	if (!newblk)
326		newson = 1;
327	if (depth == info->dqi_qtree_depth - 1) {
328#ifdef __QUOTA_QT_PARANOIA
329		if (newblk) {
330			quota_error(dquot->dq_sb, "Inserting already present "
331				    "quota entry (block %u)",
332				    le32_to_cpu(ref[get_index(info,
333						dquot->dq_id, depth)]));
334			ret = -EIO;
335			goto out_buf;
336		}
337#endif
338		newblk = find_free_dqentry(info, dquot, &ret);
339	} else {
340		ret = do_insert_tree(info, dquot, &newblk, depth+1);
341	}
342	if (newson && ret >= 0) {
343		ref[get_index(info, dquot->dq_id, depth)] =
344							cpu_to_le32(newblk);
345		ret = write_blk(info, *treeblk, buf);
346	} else if (newact && ret < 0) {
347		put_free_dqblk(info, buf, *treeblk);
348	}
349out_buf:
350	kfree(buf);
351	return ret;
352}
353
354/* Wrapper for inserting quota structure into tree */
355static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
356				 struct dquot *dquot)
357{
358	int tmp = QT_TREEOFF;
359
360#ifdef __QUOTA_QT_PARANOIA
361	if (info->dqi_blocks <= QT_TREEOFF) {
362		quota_error(dquot->dq_sb, "Quota tree root isn't allocated!");
363		return -EIO;
364	}
365#endif
366	return do_insert_tree(info, dquot, &tmp, 0);
367}
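/*
 * Explanatory aside (not in the upstream file): note the structural
 * difference from v6.9.4 above. This variant passes a single uint *treeblk
 * down the recursion instead of the blks[] path array, so it performs no
 * cycle detection, no range check on block references read from disk, and
 * no MAX_QTREE_DEPTH limit on recursion depth.
 */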
368
369/*
370 * We don't have to be afraid of deadlocks as we never have quotas on quota
371 * files...
372 */
373int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
374{
375	int type = dquot->dq_id.type;
376	struct super_block *sb = dquot->dq_sb;
377	ssize_t ret;
378	char *ddquot = getdqbuf(info->dqi_entry_size);
379
380	if (!ddquot)
381		return -ENOMEM;
382
383	/* dq_off is guarded by dqio_sem */
384	if (!dquot->dq_off) {
385		ret = dq_insert_tree(info, dquot);
386		if (ret < 0) {
387			quota_error(sb, "Error %zd occurred while creating "
388				    "quota", ret);
389			kfree(ddquot);
390			return ret;
391		}
392	}
393	spin_lock(&dquot->dq_dqb_lock);
394	info->dqi_ops->mem2disk_dqblk(ddquot, dquot);
395	spin_unlock(&dquot->dq_dqb_lock);
396	ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
397				    dquot->dq_off);
398	if (ret != info->dqi_entry_size) {
399		quota_error(sb, "dquota write failed");
400		if (ret >= 0)
401			ret = -ENOSPC;
402	} else {
403		ret = 0;
404	}
405	dqstats_inc(DQST_WRITES);
406	kfree(ddquot);
407
408	return ret;
409}
410EXPORT_SYMBOL(qtree_write_dquot);
411
412/* Free dquot entry in data block */
413static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
414			uint blk)
415{
416	struct qt_disk_dqdbheader *dh;
417	char *buf = getdqbuf(info->dqi_usable_bs);
418	int ret = 0;
419
420	if (!buf)
421		return -ENOMEM;
422	if (dquot->dq_off >> info->dqi_blocksize_bits != blk) {
423		quota_error(dquot->dq_sb, "Quota structure has offset to "
424			"other block (%u) than it should (%u)", blk,
425			(uint)(dquot->dq_off >> info->dqi_blocksize_bits));
426		goto out_buf;
427	}
428	ret = read_blk(info, blk, buf);
429	if (ret < 0) {
430		quota_error(dquot->dq_sb, "Can't read quota data block %u",
431			    blk);
432		goto out_buf;
433	}
434	dh = (struct qt_disk_dqdbheader *)buf;
435	le16_add_cpu(&dh->dqdh_entries, -1);
436	if (!le16_to_cpu(dh->dqdh_entries)) {	/* Block got free? */
437		ret = remove_free_dqentry(info, buf, blk);
438		if (ret >= 0)
439			ret = put_free_dqblk(info, buf, blk);
440		if (ret < 0) {
441			quota_error(dquot->dq_sb, "Can't move quota data block "
442				    "(%u) to free list", blk);
443			goto out_buf;
444		}
445	} else {
446		memset(buf +
447		       (dquot->dq_off & ((1 << info->dqi_blocksize_bits) - 1)),
448		       0, info->dqi_entry_size);
449		if (le16_to_cpu(dh->dqdh_entries) ==
450		    qtree_dqstr_in_blk(info) - 1) {
451			/* Insert will write block itself */
452			ret = insert_free_dqentry(info, buf, blk);
453			if (ret < 0) {
454				quota_error(dquot->dq_sb, "Can't insert quota "
455				    "data block (%u) to free entry list", blk);
456				goto out_buf;
457			}
458		} else {
459			ret = write_blk(info, blk, buf);
460			if (ret < 0) {
461				quota_error(dquot->dq_sb, "Can't write quota "
462					    "data block %u", blk);
463				goto out_buf;
464			}
465		}
466	}
467	dquot->dq_off = 0;	/* Quota is now unattached */
468out_buf:
469	kfree(buf);
470	return ret;
471}
472
473/* Remove reference to dquot from tree */
474static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
475		       uint *blk, int depth)
476{
477	char *buf = getdqbuf(info->dqi_usable_bs);
478	int ret = 0;
479	uint newblk;
480	__le32 *ref = (__le32 *)buf;
481
482	if (!buf)
483		return -ENOMEM;
484	ret = read_blk(info, *blk, buf);
485	if (ret < 0) {
486		quota_error(dquot->dq_sb, "Can't read quota data block %u",
487			    *blk);
488		goto out_buf;
489	}
490	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
491	if (depth == info->dqi_qtree_depth - 1) {
492		ret = free_dqentry(info, dquot, newblk);
493		newblk = 0;
494	} else {
495		ret = remove_tree(info, dquot, &newblk, depth+1);
496	}
497	if (ret >= 0 && !newblk) {
498		int i;
499		ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0);
500		/* Block got empty? */
501		for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++)
502			;
503		/* Don't put the root block into the free block list */
504		if (i == (info->dqi_usable_bs >> 2)
505		    && *blk != QT_TREEOFF) {
506			put_free_dqblk(info, buf, *blk);
507			*blk = 0;
508		} else {
509			ret = write_blk(info, *blk, buf);
510			if (ret < 0)
511				quota_error(dquot->dq_sb,
512					    "Can't write quota tree block %u",
513					    *blk);
514		}
515	}
516out_buf:
517	kfree(buf);
518	return ret;
519}
520
521/* Delete dquot from tree */
522int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
523{
524	uint tmp = QT_TREEOFF;
525
526	if (!dquot->dq_off)	/* Even not allocated? */
527		return 0;
528	return remove_tree(info, dquot, &tmp, 0);
529}
530EXPORT_SYMBOL(qtree_delete_dquot);
531
532/* Find entry in block */
533static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
534				 struct dquot *dquot, uint blk)
535{
536	char *buf = getdqbuf(info->dqi_usable_bs);
537	loff_t ret = 0;
538	int i;
539	char *ddquot;
540
541	if (!buf)
542		return -ENOMEM;
543	ret = read_blk(info, blk, buf);
544	if (ret < 0) {
545		quota_error(dquot->dq_sb, "Can't read quota tree "
546			    "block %u", blk);
547		goto out_buf;
548	}
549	ddquot = buf + sizeof(struct qt_disk_dqdbheader);
550	for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
551		if (info->dqi_ops->is_id(ddquot, dquot))
552			break;
553		ddquot += info->dqi_entry_size;
554	}
555	if (i == qtree_dqstr_in_blk(info)) {
556		quota_error(dquot->dq_sb,
557			    "Quota for id %u referenced but not present",
558			    from_kqid(&init_user_ns, dquot->dq_id));
559		ret = -EIO;
560		goto out_buf;
561	} else {
562		ret = (blk << info->dqi_blocksize_bits) + sizeof(struct
563		  qt_disk_dqdbheader) + i * info->dqi_entry_size;
564	}
565out_buf:
566	kfree(buf);
567	return ret;
568}
569
570/* Find entry for given id in the tree */
571static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
572				struct dquot *dquot, uint blk, int depth)
573{
574	char *buf = getdqbuf(info->dqi_usable_bs);
575	loff_t ret = 0;
576	__le32 *ref = (__le32 *)buf;
577
578	if (!buf)
579		return -ENOMEM;
580	ret = read_blk(info, blk, buf);
581	if (ret < 0) {
582		quota_error(dquot->dq_sb, "Can't read quota tree block %u",
583			    blk);
584		goto out_buf;
585	}
586	ret = 0;
587	blk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
588	if (!blk)	/* No reference? */
589		goto out_buf;
590	if (depth < info->dqi_qtree_depth - 1)
591		ret = find_tree_dqentry(info, dquot, blk, depth+1);
592	else
593		ret = find_block_dqentry(info, dquot, blk);
594out_buf:
595	kfree(buf);
596	return ret;
597}
598
599/* Find entry for given id in the tree - wrapper function */
600static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info,
601				  struct dquot *dquot)
602{
603	return find_tree_dqentry(info, dquot, QT_TREEOFF, 0);
604}
605
606int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
607{
608	int type = dquot->dq_id.type;
609	struct super_block *sb = dquot->dq_sb;
610	loff_t offset;
611	char *ddquot;
612	int ret = 0;
613
614#ifdef __QUOTA_QT_PARANOIA
615	/* Invalidated quota? */
616	if (!sb_dqopt(dquot->dq_sb)->files[type]) {
617		quota_error(sb, "Quota invalidated while reading!");
618		return -EIO;
619	}
620#endif
621	/* Do we know offset of the dquot entry in the quota file? */
622	if (!dquot->dq_off) {
623		offset = find_dqentry(info, dquot);
624		if (offset <= 0) {	/* Entry not present? */
625			if (offset < 0)
626				quota_error(sb,"Can't read quota structure "
627					    "for id %u",
628					    from_kqid(&init_user_ns,
629						      dquot->dq_id));
630			dquot->dq_off = 0;
631			set_bit(DQ_FAKE_B, &dquot->dq_flags);
632			memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
633			ret = offset;
634			goto out;
635		}
636		dquot->dq_off = offset;
637	}
638	ddquot = getdqbuf(info->dqi_entry_size);
639	if (!ddquot)
640		return -ENOMEM;
641	ret = sb->s_op->quota_read(sb, type, ddquot, info->dqi_entry_size,
642				   dquot->dq_off);
643	if (ret != info->dqi_entry_size) {
644		if (ret >= 0)
645			ret = -EIO;
646		quota_error(sb, "Error while reading quota structure for id %u",
647			    from_kqid(&init_user_ns, dquot->dq_id));
648		set_bit(DQ_FAKE_B, &dquot->dq_flags);
649		memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
650		kfree(ddquot);
651		goto out;
652	}
653	spin_lock(&dquot->dq_dqb_lock);
654	info->dqi_ops->disk2mem_dqblk(dquot, ddquot);
655	if (!dquot->dq_dqb.dqb_bhardlimit &&
656	    !dquot->dq_dqb.dqb_bsoftlimit &&
657	    !dquot->dq_dqb.dqb_ihardlimit &&
658	    !dquot->dq_dqb.dqb_isoftlimit)
659		set_bit(DQ_FAKE_B, &dquot->dq_flags);
660	spin_unlock(&dquot->dq_dqb_lock);
661	kfree(ddquot);
662out:
663	dqstats_inc(DQST_READS);
664	return ret;
665}
666EXPORT_SYMBOL(qtree_read_dquot);
667
668/* Check whether dquot should not be deleted. We know we are
669 * the only one operating on dquot (thanks to dq_lock) */
670int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
671{
672	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) &&
673	    !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace))
674		return qtree_delete_dquot(info, dquot);
675	return 0;
676}
677EXPORT_SYMBOL(qtree_release_dquot);
678
679static int find_next_id(struct qtree_mem_dqinfo *info, qid_t *id,
680			unsigned int blk, int depth)
681{
682	char *buf = getdqbuf(info->dqi_usable_bs);
683	__le32 *ref = (__le32 *)buf;
684	ssize_t ret;
685	unsigned int epb = info->dqi_usable_bs >> 2;
686	unsigned int level_inc = 1;
687	int i;
688
689	if (!buf)
690		return -ENOMEM;
691
692	for (i = depth; i < info->dqi_qtree_depth - 1; i++)
693		level_inc *= epb;
694
695	ret = read_blk(info, blk, buf);
696	if (ret < 0) {
697		quota_error(info->dqi_sb,
698			    "Can't read quota tree block %u", blk);
699		goto out_buf;
700	}
701	for (i = __get_index(info, *id, depth); i < epb; i++) {
702		if (ref[i] == cpu_to_le32(0)) {
703			*id += level_inc;
704			continue;
705		}
706		if (depth == info->dqi_qtree_depth - 1) {
707			ret = 0;
708			goto out_buf;
709		}
710		ret = find_next_id(info, id, le32_to_cpu(ref[i]), depth + 1);
711		if (ret != -ENOENT)
712			break;
713	}
714	if (i == epb) {
715		ret = -ENOENT;
716		goto out_buf;
717	}
718out_buf:
719	kfree(buf);
720	return ret;
721}
722
723int qtree_get_next_id(struct qtree_mem_dqinfo *info, struct kqid *qid)
724{
725	qid_t id = from_kqid(&init_user_ns, *qid);
726	int ret;
727
728	ret = find_next_id(info, &id, QT_TREEOFF, 0);
729	if (ret < 0)
730		return ret;
731	*qid = make_kqid(&init_user_ns, qid->type, id);
732	return 0;
733}
734EXPORT_SYMBOL(qtree_get_next_id);