fs/ext4/migrate.c (v6.2)
  1// SPDX-License-Identifier: LGPL-2.1
  2/*
  3 * Copyright IBM Corporation, 2007
  4 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
  5 *
  6 */
  7
  8#include <linux/slab.h>
  9#include "ext4_jbd2.h"
 10#include "ext4_extents.h"
 11
 12/*
  13 * Details of the contiguous blocks which can be
  14 * represented by a single extent
 15 */
 16struct migrate_struct {
 17	ext4_lblk_t first_block, last_block, curr_block;
 18	ext4_fsblk_t first_pblock, last_pblock;
 19};
 20
 21static int finish_range(handle_t *handle, struct inode *inode,
 22				struct migrate_struct *lb)
 23
 24{
 25	int retval = 0, needed;
 26	struct ext4_extent newext;
 27	struct ext4_ext_path *path;
 28	if (lb->first_pblock == 0)
 29		return 0;
 30
 31	/* Add the extent to temp inode*/
 32	newext.ee_block = cpu_to_le32(lb->first_block);
 33	newext.ee_len   = cpu_to_le16(lb->last_block - lb->first_block + 1);
 34	ext4_ext_store_pblock(&newext, lb->first_pblock);
 35	/* Locking only for convenience since we are operating on temp inode */
 36	down_write(&EXT4_I(inode)->i_data_sem);
 37	path = ext4_find_extent(inode, lb->first_block, NULL, 0);
 38	if (IS_ERR(path)) {
 39		retval = PTR_ERR(path);
 40		path = NULL;
 41		goto err_out;
 42	}
 43
 44	/*
  45	 * Calculate the credit needed to insert this extent.
  46	 * Since we are doing this in a loop we may accumulate extra
  47	 * credits, but below we try not to accumulate too many
  48	 * of them by restarting the journal.
 49	 */
 50	needed = ext4_ext_calc_credits_for_single_extent(inode,
 51		    lb->last_block - lb->first_block + 1, path);
 52
 53	retval = ext4_datasem_ensure_credits(handle, inode, needed, needed, 0);
 54	if (retval < 0)
 55		goto err_out;
 56	retval = ext4_ext_insert_extent(handle, inode, &path, &newext, 0);
 57err_out:
 58	up_write((&EXT4_I(inode)->i_data_sem));
 59	ext4_free_ext_path(path);
 60	lb->first_pblock = 0;
 61	return retval;
 62}
 63
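/*
 * A stand-alone sketch (not part of migrate.c) of the extent packing done
 * above: ext4_ext_store_pblock() splits the 48-bit physical block number
 * across ee_start_lo (low 32 bits) and ee_start_hi (high 16 bits).  The
 * toy_extent struct below only mirrors the on-disk struct ext4_extent field
 * layout; the kernel additionally stores these fields little-endian.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_extent {
	uint32_t ee_block;	/* first logical block covered by the extent */
	uint16_t ee_len;	/* number of blocks covered */
	uint16_t ee_start_hi;	/* high 16 bits of the physical block */
	uint32_t ee_start_lo;	/* low 32 bits of the physical block */
};

static void toy_store_pblock(struct toy_extent *ex, uint64_t pblk)
{
	ex->ee_start_lo = (uint32_t)(pblk & 0xffffffffULL);
	ex->ee_start_hi = (uint16_t)((pblk >> 32) & 0xffff);
}

int main(void)
{
	struct toy_extent ex = { .ee_block = 0, .ee_len = 8 };

	toy_store_pblock(&ex, 0x123456789abcULL);
	printf("ee_start_hi=%#x ee_start_lo=%#x\n", ex.ee_start_hi, ex.ee_start_lo);
	return 0;
}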
 64static int update_extent_range(handle_t *handle, struct inode *inode,
 65			       ext4_fsblk_t pblock, struct migrate_struct *lb)
 66{
 67	int retval;
 68	/*
 69	 * See if we can add on to the existing range (if it exists)
 70	 */
 71	if (lb->first_pblock &&
 72		(lb->last_pblock+1 == pblock) &&
 73		(lb->last_block+1 == lb->curr_block)) {
 74		lb->last_pblock = pblock;
 75		lb->last_block = lb->curr_block;
 76		lb->curr_block++;
 77		return 0;
 78	}
 79	/*
 80	 * Start a new range.
 81	 */
 82	retval = finish_range(handle, inode, lb);
 83	lb->first_pblock = lb->last_pblock = pblock;
 84	lb->first_block = lb->last_block = lb->curr_block;
 85	lb->curr_block++;
 86	return retval;
 87}
 88
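/*
 * A minimal user-space sketch (not part of migrate.c) of the run-merging
 * logic in update_extent_range()/finish_range(): walk the logical blocks in
 * order, extend the current contiguous run when both the logical and the
 * physical block follow on from the previous one, otherwise flush the run
 * and start a new one.  All names below are illustrative only.
 */
#include <stdio.h>

struct run {
	unsigned long first_lblk, last_lblk, curr_lblk;
	unsigned long long first_pblk, last_pblk;
};

static void flush_run(const struct run *r)	/* analogous to finish_range() */
{
	if (r->first_pblk)
		printf("extent: lblk %lu len %lu -> pblk %llu\n",
		       r->first_lblk, r->last_lblk - r->first_lblk + 1,
		       r->first_pblk);
}

static void add_block(struct run *r, unsigned long long pblk)
{
	if (r->first_pblk && r->last_pblk + 1 == pblk &&
	    r->last_lblk + 1 == r->curr_lblk) {
		r->last_pblk = pblk;
		r->last_lblk = r->curr_lblk++;
		return;
	}
	flush_run(r);
	r->first_pblk = r->last_pblk = pblk;
	r->first_lblk = r->last_lblk = r->curr_lblk++;
}

int main(void)
{
	/* physical blocks for logical blocks 0..4; 0 marks a hole */
	unsigned long long map[] = { 100, 101, 102, 0, 500 };
	struct run r = { 0 };

	for (unsigned int i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
		if (map[i])
			add_block(&r, map[i]);
		else
			r.curr_lblk++;	/* hole: only advance the file block */
	}
	flush_run(&r);		/* build the last extent */
	return 0;
}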
 89static int update_ind_extent_range(handle_t *handle, struct inode *inode,
 90				   ext4_fsblk_t pblock,
 91				   struct migrate_struct *lb)
 92{
 93	struct buffer_head *bh;
 94	__le32 *i_data;
 95	int i, retval = 0;
 96	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
 97
 98	bh = ext4_sb_bread(inode->i_sb, pblock, 0);
 99	if (IS_ERR(bh))
100		return PTR_ERR(bh);
101
102	i_data = (__le32 *)bh->b_data;
103	for (i = 0; i < max_entries; i++) {
104		if (i_data[i]) {
105			retval = update_extent_range(handle, inode,
106						le32_to_cpu(i_data[i]), lb);
107			if (retval)
108				break;
109		} else {
110			lb->curr_block++;
111		}
112	}
113	put_bh(bh);
114	return retval;
115
116}
117
118static int update_dind_extent_range(handle_t *handle, struct inode *inode,
119				    ext4_fsblk_t pblock,
120				    struct migrate_struct *lb)
121{
122	struct buffer_head *bh;
123	__le32 *i_data;
124	int i, retval = 0;
125	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
126
127	bh = ext4_sb_bread(inode->i_sb, pblock, 0);
128	if (IS_ERR(bh))
129		return PTR_ERR(bh);
130
131	i_data = (__le32 *)bh->b_data;
132	for (i = 0; i < max_entries; i++) {
133		if (i_data[i]) {
134			retval = update_ind_extent_range(handle, inode,
135						le32_to_cpu(i_data[i]), lb);
136			if (retval)
137				break;
138		} else {
139			/* Only update the file block number */
140			lb->curr_block += max_entries;
141		}
142	}
143	put_bh(bh);
144	return retval;
145
146}
147
148static int update_tind_extent_range(handle_t *handle, struct inode *inode,
149				    ext4_fsblk_t pblock,
150				    struct migrate_struct *lb)
151{
152	struct buffer_head *bh;
153	__le32 *i_data;
154	int i, retval = 0;
155	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
156
157	bh = ext4_sb_bread(inode->i_sb, pblock, 0);
158	if (IS_ERR(bh))
159		return PTR_ERR(bh);
160
161	i_data = (__le32 *)bh->b_data;
162	for (i = 0; i < max_entries; i++) {
163		if (i_data[i]) {
164			retval = update_dind_extent_range(handle, inode,
165						le32_to_cpu(i_data[i]), lb);
166			if (retval)
167				break;
168		} else {
169			/* Only update the file block number */
170			lb->curr_block += max_entries * max_entries;
171		}
172	}
173	put_bh(bh);
174	return retval;
175
176}
177
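/*
 * A small sketch (not part of migrate.c) of the bookkeeping in the three
 * helpers above: a missing indirect, double indirect or triple indirect
 * block makes lb->curr_block skip the whole logical range that block would
 * have mapped.  Assuming a 4 KiB block size, one block holds 1024 32-bit
 * block numbers, so the skips are 1024, 1024^2 and 1024^3 blocks.
 */
#include <stdio.h>

int main(void)
{
	unsigned long block_size = 4096;
	unsigned long max_entries = block_size >> 2;	/* 4 bytes per entry */

	printf("missing IND  block skips %lu logical blocks\n",
	       max_entries);
	printf("missing DIND block skips %lu logical blocks\n",
	       max_entries * max_entries);
	printf("missing TIND block skips %lu logical blocks\n",
	       max_entries * max_entries * max_entries);
	return 0;
}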
178static int free_dind_blocks(handle_t *handle,
179				struct inode *inode, __le32 i_data)
180{
181	int i;
182	__le32 *tmp_idata;
183	struct buffer_head *bh;
184	struct super_block *sb = inode->i_sb;
185	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
186	int err;
187
188	bh = ext4_sb_bread(sb, le32_to_cpu(i_data), 0);
189	if (IS_ERR(bh))
190		return PTR_ERR(bh);
191
192	tmp_idata = (__le32 *)bh->b_data;
193	for (i = 0; i < max_entries; i++) {
194		if (tmp_idata[i]) {
195			err = ext4_journal_ensure_credits(handle,
196				EXT4_RESERVE_TRANS_BLOCKS,
197				ext4_free_metadata_revoke_credits(sb, 1));
198			if (err < 0) {
199				put_bh(bh);
200				return err;
201			}
202			ext4_free_blocks(handle, inode, NULL,
203					 le32_to_cpu(tmp_idata[i]), 1,
204					 EXT4_FREE_BLOCKS_METADATA |
205					 EXT4_FREE_BLOCKS_FORGET);
206		}
207	}
208	put_bh(bh);
209	err = ext4_journal_ensure_credits(handle, EXT4_RESERVE_TRANS_BLOCKS,
210				ext4_free_metadata_revoke_credits(sb, 1));
211	if (err < 0)
212		return err;
213	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
214			 EXT4_FREE_BLOCKS_METADATA |
215			 EXT4_FREE_BLOCKS_FORGET);
216	return 0;
217}
218
219static int free_tind_blocks(handle_t *handle,
220				struct inode *inode, __le32 i_data)
221{
222	int i, retval = 0;
223	__le32 *tmp_idata;
224	struct buffer_head *bh;
225	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
226
227	bh = ext4_sb_bread(inode->i_sb, le32_to_cpu(i_data), 0);
228	if (IS_ERR(bh))
229		return PTR_ERR(bh);
230
231	tmp_idata = (__le32 *)bh->b_data;
232	for (i = 0; i < max_entries; i++) {
233		if (tmp_idata[i]) {
234			retval = free_dind_blocks(handle,
235					inode, tmp_idata[i]);
236			if (retval) {
237				put_bh(bh);
238				return retval;
239			}
240		}
241	}
242	put_bh(bh);
243	retval = ext4_journal_ensure_credits(handle, EXT4_RESERVE_TRANS_BLOCKS,
244			ext4_free_metadata_revoke_credits(inode->i_sb, 1));
245	if (retval < 0)
246		return retval;
247	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
248			 EXT4_FREE_BLOCKS_METADATA |
249			 EXT4_FREE_BLOCKS_FORGET);
250	return 0;
251}
252
253static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data)
254{
255	int retval;
256
257	/* ei->i_data[EXT4_IND_BLOCK] */
258	if (i_data[0]) {
259		retval = ext4_journal_ensure_credits(handle,
260			EXT4_RESERVE_TRANS_BLOCKS,
261			ext4_free_metadata_revoke_credits(inode->i_sb, 1));
262		if (retval < 0)
263			return retval;
264		ext4_free_blocks(handle, inode, NULL,
265				le32_to_cpu(i_data[0]), 1,
266				 EXT4_FREE_BLOCKS_METADATA |
267				 EXT4_FREE_BLOCKS_FORGET);
268	}
269
270	/* ei->i_data[EXT4_DIND_BLOCK] */
271	if (i_data[1]) {
272		retval = free_dind_blocks(handle, inode, i_data[1]);
273		if (retval)
274			return retval;
275	}
276
277	/* ei->i_data[EXT4_TIND_BLOCK] */
278	if (i_data[2]) {
279		retval = free_tind_blocks(handle, inode, i_data[2]);
280		if (retval)
281			return retval;
282	}
283	return 0;
284}
285
286static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
287						struct inode *tmp_inode)
288{
289	int retval, retval2 = 0;
290	__le32	i_data[3];
291	struct ext4_inode_info *ei = EXT4_I(inode);
292	struct ext4_inode_info *tmp_ei = EXT4_I(tmp_inode);
293
294	/*
295	 * One credit accounted for writing the
296	 * i_data field of the original inode
297	 */
298	retval = ext4_journal_ensure_credits(handle, 1, 0);
299	if (retval < 0)
300		goto err_out;
301
302	i_data[0] = ei->i_data[EXT4_IND_BLOCK];
303	i_data[1] = ei->i_data[EXT4_DIND_BLOCK];
304	i_data[2] = ei->i_data[EXT4_TIND_BLOCK];
305
306	down_write(&EXT4_I(inode)->i_data_sem);
307	/*
308	 * if EXT4_STATE_EXT_MIGRATE is cleared a block allocation
309	 * happened after we started the migrate. We need to
310	 * fail the migrate
311	 */
312	if (!ext4_test_inode_state(inode, EXT4_STATE_EXT_MIGRATE)) {
313		retval = -EAGAIN;
314		up_write(&EXT4_I(inode)->i_data_sem);
315		goto err_out;
316	} else
317		ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
318	/*
 319	 * We have the extent map built with the tmp inode.
320	 * Now copy the i_data across
321	 */
322	ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
323	memcpy(ei->i_data, tmp_ei->i_data, sizeof(ei->i_data));
324
325	/*
326	 * Update i_blocks with the new blocks that got
327	 * allocated while adding extents for extent index
328	 * blocks.
329	 *
330	 * While converting to extents we need not
331	 * update the original inode i_blocks for extent blocks
332	 * via quota APIs. The quota update happened via tmp_inode already.
333	 */
334	spin_lock(&inode->i_lock);
335	inode->i_blocks += tmp_inode->i_blocks;
336	spin_unlock(&inode->i_lock);
337	up_write(&EXT4_I(inode)->i_data_sem);
338
339	/*
340	 * We mark the inode dirty after, because we decrement the
341	 * i_blocks when freeing the indirect meta-data blocks
342	 */
343	retval = free_ind_block(handle, inode, i_data);
344	retval2 = ext4_mark_inode_dirty(handle, inode);
345	if (unlikely(retval2 && !retval))
346		retval = retval2;
347
348err_out:
349	return retval;
350}
351
352static int free_ext_idx(handle_t *handle, struct inode *inode,
353					struct ext4_extent_idx *ix)
354{
355	int i, retval = 0;
356	ext4_fsblk_t block;
357	struct buffer_head *bh;
358	struct ext4_extent_header *eh;
359
360	block = ext4_idx_pblock(ix);
361	bh = ext4_sb_bread(inode->i_sb, block, 0);
362	if (IS_ERR(bh))
363		return PTR_ERR(bh);
364
365	eh = (struct ext4_extent_header *)bh->b_data;
366	if (eh->eh_depth != 0) {
367		ix = EXT_FIRST_INDEX(eh);
368		for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
369			retval = free_ext_idx(handle, inode, ix);
370			if (retval) {
371				put_bh(bh);
372				return retval;
373			}
374		}
375	}
376	put_bh(bh);
377	retval = ext4_journal_ensure_credits(handle, EXT4_RESERVE_TRANS_BLOCKS,
378			ext4_free_metadata_revoke_credits(inode->i_sb, 1));
379	if (retval < 0)
380		return retval;
381	ext4_free_blocks(handle, inode, NULL, block, 1,
382			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
383	return 0;
384}
385
386/*
387 * Free the extent meta data blocks only
388 */
389static int free_ext_block(handle_t *handle, struct inode *inode)
390{
391	int i, retval = 0;
392	struct ext4_inode_info *ei = EXT4_I(inode);
393	struct ext4_extent_header *eh = (struct ext4_extent_header *)ei->i_data;
394	struct ext4_extent_idx *ix;
395	if (eh->eh_depth == 0)
396		/*
397		 * No extra blocks allocated for extent meta data
398		 */
399		return 0;
400	ix = EXT_FIRST_INDEX(eh);
401	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
402		retval = free_ext_idx(handle, inode, ix);
403		if (retval)
404			return retval;
405	}
406	return retval;
407}
408
409int ext4_ext_migrate(struct inode *inode)
410{
411	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
412	handle_t *handle;
413	int retval = 0, i;
414	__le32 *i_data;
415	struct ext4_inode_info *ei;
416	struct inode *tmp_inode = NULL;
417	struct migrate_struct lb;
418	unsigned long max_entries;
419	__u32 goal, tmp_csum_seed;
420	uid_t owner[2];
421
422	/*
423	 * If the filesystem does not support extents, or the inode
424	 * already is extent-based, error out.
425	 */
426	if (!ext4_has_feature_extents(inode->i_sb) ||
427	    ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) ||
428	    ext4_has_inline_data(inode))
429		return -EINVAL;
430
431	if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
432		/*
433		 * don't migrate fast symlink
434		 */
435		return retval;
436
437	percpu_down_write(&sbi->s_writepages_rwsem);
438
439	/*
440	 * Worst case we can touch the allocation bitmaps and a block
441	 * group descriptor block.  We do need to worry about
442	 * credits for modifying the quota inode.
443	 */
444	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE,
445		3 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));
446
447	if (IS_ERR(handle)) {
448		retval = PTR_ERR(handle);
449		goto out_unlock;
450	}
451	goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
452		EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
453	owner[0] = i_uid_read(inode);
454	owner[1] = i_gid_read(inode);
455	tmp_inode = ext4_new_inode(handle, d_inode(inode->i_sb->s_root),
456				   S_IFREG, NULL, goal, owner, 0);
457	if (IS_ERR(tmp_inode)) {
458		retval = PTR_ERR(tmp_inode);
459		ext4_journal_stop(handle);
460		goto out_unlock;
461	}
462	/*
463	 * Use the correct seed for checksum (i.e. the seed from 'inode').  This
464	 * is so that the metadata blocks will have the correct checksum after
465	 * the migration.
466	 */
467	ei = EXT4_I(inode);
468	tmp_csum_seed = EXT4_I(tmp_inode)->i_csum_seed;
469	EXT4_I(tmp_inode)->i_csum_seed = ei->i_csum_seed;
470	i_size_write(tmp_inode, i_size_read(inode));
471	/*
472	 * Set the i_nlink to zero so it will be deleted later
473	 * when we drop inode reference.
474	 */
475	clear_nlink(tmp_inode);
476
477	ext4_ext_tree_init(handle, tmp_inode);
478	ext4_journal_stop(handle);
479
480	/*
481	 * start with one credit accounted for
482	 * superblock modification.
483	 *
484	 * For the tmp_inode we already have committed the
485	 * transaction that created the inode. Later as and
 486	 * when we add extents we extend the journal
487	 */
488	/*
489	 * Even though we take i_rwsem we can still cause block
490	 * allocation via mmap write to holes. If we have allocated
491	 * new blocks we fail migrate.  New block allocation will
492	 * clear EXT4_STATE_EXT_MIGRATE flag.  The flag is updated
493	 * with i_data_sem held to prevent racing with block
494	 * allocation.
495	 */
496	down_read(&EXT4_I(inode)->i_data_sem);
497	ext4_set_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
498	up_read((&EXT4_I(inode)->i_data_sem));
499
500	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
501	if (IS_ERR(handle)) {
502		retval = PTR_ERR(handle);
503		goto out_tmp_inode;
504	}
505
506	i_data = ei->i_data;
507	memset(&lb, 0, sizeof(lb));
508
509	/* 32 bit block address 4 bytes */
510	max_entries = inode->i_sb->s_blocksize >> 2;
511	for (i = 0; i < EXT4_NDIR_BLOCKS; i++) {
512		if (i_data[i]) {
513			retval = update_extent_range(handle, tmp_inode,
514						le32_to_cpu(i_data[i]), &lb);
515			if (retval)
516				goto err_out;
517		} else
518			lb.curr_block++;
519	}
520	if (i_data[EXT4_IND_BLOCK]) {
521		retval = update_ind_extent_range(handle, tmp_inode,
522				le32_to_cpu(i_data[EXT4_IND_BLOCK]), &lb);
523		if (retval)
524			goto err_out;
525	} else
526		lb.curr_block += max_entries;
527	if (i_data[EXT4_DIND_BLOCK]) {
528		retval = update_dind_extent_range(handle, tmp_inode,
529				le32_to_cpu(i_data[EXT4_DIND_BLOCK]), &lb);
530		if (retval)
531			goto err_out;
532	} else
533		lb.curr_block += max_entries * max_entries;
534	if (i_data[EXT4_TIND_BLOCK]) {
535		retval = update_tind_extent_range(handle, tmp_inode,
536				le32_to_cpu(i_data[EXT4_TIND_BLOCK]), &lb);
537		if (retval)
538			goto err_out;
539	}
540	/*
541	 * Build the last extent
542	 */
543	retval = finish_range(handle, tmp_inode, &lb);
544err_out:
545	if (retval)
546		/*
 547		 * Failure case: delete the extent information in the
 548		 * tmp_inode
549		 */
550		free_ext_block(handle, tmp_inode);
551	else {
552		retval = ext4_ext_swap_inode_data(handle, inode, tmp_inode);
553		if (retval)
554			/*
555			 * if we fail to swap inode data free the extent
556			 * details of the tmp inode
557			 */
558			free_ext_block(handle, tmp_inode);
559	}
560
561	/* We mark the tmp_inode dirty via ext4_ext_tree_init. */
562	retval = ext4_journal_ensure_credits(handle, 1, 0);
563	if (retval < 0)
564		goto out_stop;
565	/*
566	 * Mark the tmp_inode as of size zero
567	 */
568	i_size_write(tmp_inode, 0);
569
570	/*
571	 * set the  i_blocks count to zero
572	 * so that the ext4_evict_inode() does the
573	 * right job
574	 *
575	 * We don't need to take the i_lock because
576	 * the inode is not visible to user space.
577	 */
578	tmp_inode->i_blocks = 0;
579	EXT4_I(tmp_inode)->i_csum_seed = tmp_csum_seed;
580
581	/* Reset the extent details */
582	ext4_ext_tree_init(handle, tmp_inode);
583out_stop:
584	ext4_journal_stop(handle);
585out_tmp_inode:
586	unlock_new_inode(tmp_inode);
587	iput(tmp_inode);
588out_unlock:
589	percpu_up_write(&sbi->s_writepages_rwsem);
590	return retval;
591}
592
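/*
 * ext4_ext_migrate() is reached from user space through the EXT4_IOC_MIGRATE
 * ioctl.  Below is a minimal sketch of a caller; since fs/ext4/ext4.h is not
 * a UAPI header, the ioctl number is defined locally here and is assumed to
 * match the in-kernel definition _IO('f', 9).
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#ifndef EXT4_IOC_MIGRATE
#define EXT4_IOC_MIGRATE	_IO('f', 9)	/* assumed to match fs/ext4/ext4.h */
#endif

int main(int argc, char **argv)
{
	int fd, ret;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file on ext4>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* ask ext4 to convert this inode from indirect blocks to extents */
	ret = ioctl(fd, EXT4_IOC_MIGRATE);
	if (ret)
		perror("EXT4_IOC_MIGRATE");
	close(fd);
	return ret ? 1 : 0;
}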
593/*
594 * Migrate a simple extent-based inode to use the i_blocks[] array
595 */
596int ext4_ind_migrate(struct inode *inode)
597{
598	struct ext4_extent_header	*eh;
599	struct ext4_sb_info		*sbi = EXT4_SB(inode->i_sb);
600	struct ext4_super_block		*es = sbi->s_es;
601	struct ext4_inode_info		*ei = EXT4_I(inode);
602	struct ext4_extent		*ex;
603	unsigned int			i, len;
604	ext4_lblk_t			start, end;
605	ext4_fsblk_t			blk;
606	handle_t			*handle;
607	int				ret, ret2 = 0;
608
609	if (!ext4_has_feature_extents(inode->i_sb) ||
610	    (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
611		return -EINVAL;
612
613	if (ext4_has_feature_bigalloc(inode->i_sb))
614		return -EOPNOTSUPP;
615
616	/*
617	 * In order to get correct extent info, force all delayed allocation
618	 * blocks to be allocated, otherwise delayed allocation blocks may not
619	 * be reflected and bypass the checks on extent header.
620	 */
621	if (test_opt(inode->i_sb, DELALLOC))
622		ext4_alloc_da_blocks(inode);
623
624	percpu_down_write(&sbi->s_writepages_rwsem);
625
626	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
627	if (IS_ERR(handle)) {
628		ret = PTR_ERR(handle);
629		goto out_unlock;
630	}
631
632	down_write(&EXT4_I(inode)->i_data_sem);
633	ret = ext4_ext_check_inode(inode);
634	if (ret)
635		goto errout;
636
637	eh = ext_inode_hdr(inode);
638	ex  = EXT_FIRST_EXTENT(eh);
639	if (ext4_blocks_count(es) > EXT4_MAX_BLOCK_FILE_PHYS ||
640	    eh->eh_depth != 0 || le16_to_cpu(eh->eh_entries) > 1) {
641		ret = -EOPNOTSUPP;
642		goto errout;
643	}
644	if (eh->eh_entries == 0)
645		blk = len = start = end = 0;
646	else {
647		len = le16_to_cpu(ex->ee_len);
648		blk = ext4_ext_pblock(ex);
649		start = le32_to_cpu(ex->ee_block);
650		end = start + len - 1;
651		if (end >= EXT4_NDIR_BLOCKS) {
652			ret = -EOPNOTSUPP;
653			goto errout;
654		}
655	}
656
657	ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
658	memset(ei->i_data, 0, sizeof(ei->i_data));
659	for (i = start; i <= end; i++)
660		ei->i_data[i] = cpu_to_le32(blk++);
661	ret2 = ext4_mark_inode_dirty(handle, inode);
662	if (unlikely(ret2 && !ret))
663		ret = ret2;
664errout:
665	ext4_journal_stop(handle);
666	up_write(&EXT4_I(inode)->i_data_sem);
667out_unlock:
668	percpu_up_write(&sbi->s_writepages_rwsem);
669	return ret;
670}
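/*
 * A quick sketch (not part of migrate.c) of the limit enforced above:
 * ext4_ind_migrate() only handles at most one extent whose last logical
 * block is below EXT4_NDIR_BLOCKS (12), i.e. everything must fit in the
 * direct slots of i_data[].  With 4 KiB blocks (assumed here) that means
 * at most 48 KiB of mapped data can be converted back to the indirect
 * format this way.
 */
#include <stdio.h>

int main(void)
{
	unsigned int ndir_blocks = 12;		/* EXT4_NDIR_BLOCKS */
	unsigned int block_size = 4096;		/* assumed block size */

	printf("max mapped size for ext4_ind_migrate(): %u bytes\n",
	       ndir_blocks * block_size);
	return 0;
}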
fs/ext4/migrate.c (v5.4)
  1// SPDX-License-Identifier: LGPL-2.1
  2/*
  3 * Copyright IBM Corporation, 2007
  4 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
  5 *
  6 */
  7
  8#include <linux/slab.h>
  9#include "ext4_jbd2.h"
 10#include "ext4_extents.h"
 11
 12/*
  13 * Details of the contiguous blocks which can be
  14 * represented by a single extent
 15 */
 16struct migrate_struct {
 17	ext4_lblk_t first_block, last_block, curr_block;
 18	ext4_fsblk_t first_pblock, last_pblock;
 19};
 20
 21static int finish_range(handle_t *handle, struct inode *inode,
 22				struct migrate_struct *lb)
 23
 24{
 25	int retval = 0, needed;
 26	struct ext4_extent newext;
 27	struct ext4_ext_path *path;
 28	if (lb->first_pblock == 0)
 29		return 0;
 30
 31	/* Add the extent to temp inode*/
 32	newext.ee_block = cpu_to_le32(lb->first_block);
 33	newext.ee_len   = cpu_to_le16(lb->last_block - lb->first_block + 1);
 34	ext4_ext_store_pblock(&newext, lb->first_pblock);
  35	/* Locking only for convenience since we are operating on temp inode */
 36	down_write(&EXT4_I(inode)->i_data_sem);
 37	path = ext4_find_extent(inode, lb->first_block, NULL, 0);
 38	if (IS_ERR(path)) {
 39		retval = PTR_ERR(path);
 40		path = NULL;
 41		goto err_out;
 42	}
 43
 44	/*
  45	 * Calculate the credit needed to insert this extent.
  46	 * Since we are doing this in a loop we may accumulate extra
  47	 * credits, but below we try not to accumulate too many
  48	 * of them by restarting the journal.
 49	 */
 50	needed = ext4_ext_calc_credits_for_single_extent(inode,
 51		    lb->last_block - lb->first_block + 1, path);
 52
 53	/*
  54	 * Make sure the credit we accumulated is not really high
 55	 */
 56	if (needed && ext4_handle_has_enough_credits(handle,
 57						EXT4_RESERVE_TRANS_BLOCKS)) {
 58		up_write((&EXT4_I(inode)->i_data_sem));
 59		retval = ext4_journal_restart(handle, needed);
 60		down_write((&EXT4_I(inode)->i_data_sem));
 61		if (retval)
 62			goto err_out;
 63	} else if (needed) {
 64		retval = ext4_journal_extend(handle, needed);
 65		if (retval) {
 66			/*
  67			 * If not able to extend the journal, restart the journal
 68			 */
 69			up_write((&EXT4_I(inode)->i_data_sem));
 70			retval = ext4_journal_restart(handle, needed);
 71			down_write((&EXT4_I(inode)->i_data_sem));
 72			if (retval)
 73				goto err_out;
 74		}
 75	}
 76	retval = ext4_ext_insert_extent(handle, inode, &path, &newext, 0);
 77err_out:
 78	up_write((&EXT4_I(inode)->i_data_sem));
 79	ext4_ext_drop_refs(path);
 80	kfree(path);
 81	lb->first_pblock = 0;
 82	return retval;
 83}
 84
 85static int update_extent_range(handle_t *handle, struct inode *inode,
 86			       ext4_fsblk_t pblock, struct migrate_struct *lb)
 87{
 88	int retval;
 89	/*
 90	 * See if we can add on to the existing range (if it exists)
 91	 */
 92	if (lb->first_pblock &&
 93		(lb->last_pblock+1 == pblock) &&
 94		(lb->last_block+1 == lb->curr_block)) {
 95		lb->last_pblock = pblock;
 96		lb->last_block = lb->curr_block;
 97		lb->curr_block++;
 98		return 0;
 99	}
100	/*
101	 * Start a new range.
102	 */
103	retval = finish_range(handle, inode, lb);
104	lb->first_pblock = lb->last_pblock = pblock;
105	lb->first_block = lb->last_block = lb->curr_block;
106	lb->curr_block++;
107	return retval;
108}
109
110static int update_ind_extent_range(handle_t *handle, struct inode *inode,
111				   ext4_fsblk_t pblock,
112				   struct migrate_struct *lb)
113{
114	struct buffer_head *bh;
115	__le32 *i_data;
116	int i, retval = 0;
117	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
118
119	bh = ext4_sb_bread(inode->i_sb, pblock, 0);
120	if (IS_ERR(bh))
121		return PTR_ERR(bh);
122
123	i_data = (__le32 *)bh->b_data;
124	for (i = 0; i < max_entries; i++) {
125		if (i_data[i]) {
126			retval = update_extent_range(handle, inode,
127						le32_to_cpu(i_data[i]), lb);
128			if (retval)
129				break;
130		} else {
131			lb->curr_block++;
132		}
133	}
134	put_bh(bh);
135	return retval;
136
137}
138
139static int update_dind_extent_range(handle_t *handle, struct inode *inode,
140				    ext4_fsblk_t pblock,
141				    struct migrate_struct *lb)
142{
143	struct buffer_head *bh;
144	__le32 *i_data;
145	int i, retval = 0;
146	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
147
148	bh = ext4_sb_bread(inode->i_sb, pblock, 0);
149	if (IS_ERR(bh))
150		return PTR_ERR(bh);
151
152	i_data = (__le32 *)bh->b_data;
153	for (i = 0; i < max_entries; i++) {
154		if (i_data[i]) {
155			retval = update_ind_extent_range(handle, inode,
156						le32_to_cpu(i_data[i]), lb);
157			if (retval)
158				break;
159		} else {
160			/* Only update the file block number */
161			lb->curr_block += max_entries;
162		}
163	}
164	put_bh(bh);
165	return retval;
166
167}
168
169static int update_tind_extent_range(handle_t *handle, struct inode *inode,
170				    ext4_fsblk_t pblock,
171				    struct migrate_struct *lb)
172{
173	struct buffer_head *bh;
174	__le32 *i_data;
175	int i, retval = 0;
176	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
177
178	bh = ext4_sb_bread(inode->i_sb, pblock, 0);
179	if (IS_ERR(bh))
180		return PTR_ERR(bh);
181
182	i_data = (__le32 *)bh->b_data;
183	for (i = 0; i < max_entries; i++) {
184		if (i_data[i]) {
185			retval = update_dind_extent_range(handle, inode,
186						le32_to_cpu(i_data[i]), lb);
187			if (retval)
188				break;
189		} else {
190			/* Only update the file block number */
191			lb->curr_block += max_entries * max_entries;
192		}
193	}
194	put_bh(bh);
195	return retval;
196
197}
198
199static int extend_credit_for_blkdel(handle_t *handle, struct inode *inode)
200{
201	int retval = 0, needed;
202
203	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
204		return 0;
205	/*
 206	 * We are freeing blocks. During this we touch
207	 * superblock, group descriptor and block bitmap.
208	 * So allocate a credit of 3. We may update
209	 * quota (user and group).
210	 */
211	needed = 3 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
212
213	if (ext4_journal_extend(handle, needed) != 0)
214		retval = ext4_journal_restart(handle, needed);
215
216	return retval;
217}
218
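/*
 * extend_credit_for_blkdel() above implements the older "extend the handle,
 * else restart the journal" pattern used before each block free; in the
 * v6.2 listing earlier on this page the same role is played by
 * ext4_journal_ensure_credits() together with
 * ext4_free_metadata_revoke_credits().
 */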
219static int free_dind_blocks(handle_t *handle,
220				struct inode *inode, __le32 i_data)
221{
222	int i;
223	__le32 *tmp_idata;
224	struct buffer_head *bh;
225	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
226
227	bh = ext4_sb_bread(inode->i_sb, le32_to_cpu(i_data), 0);
228	if (IS_ERR(bh))
229		return PTR_ERR(bh);
230
231	tmp_idata = (__le32 *)bh->b_data;
232	for (i = 0; i < max_entries; i++) {
233		if (tmp_idata[i]) {
234			extend_credit_for_blkdel(handle, inode);
235			ext4_free_blocks(handle, inode, NULL,
236					 le32_to_cpu(tmp_idata[i]), 1,
237					 EXT4_FREE_BLOCKS_METADATA |
238					 EXT4_FREE_BLOCKS_FORGET);
239		}
240	}
241	put_bh(bh);
242	extend_credit_for_blkdel(handle, inode);
243	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
244			 EXT4_FREE_BLOCKS_METADATA |
245			 EXT4_FREE_BLOCKS_FORGET);
246	return 0;
247}
248
249static int free_tind_blocks(handle_t *handle,
250				struct inode *inode, __le32 i_data)
251{
252	int i, retval = 0;
253	__le32 *tmp_idata;
254	struct buffer_head *bh;
255	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
256
257	bh = ext4_sb_bread(inode->i_sb, le32_to_cpu(i_data), 0);
258	if (IS_ERR(bh))
259		return PTR_ERR(bh);
260
261	tmp_idata = (__le32 *)bh->b_data;
262	for (i = 0; i < max_entries; i++) {
263		if (tmp_idata[i]) {
264			retval = free_dind_blocks(handle,
265					inode, tmp_idata[i]);
266			if (retval) {
267				put_bh(bh);
268				return retval;
269			}
270		}
271	}
272	put_bh(bh);
273	extend_credit_for_blkdel(handle, inode);
274	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
275			 EXT4_FREE_BLOCKS_METADATA |
276			 EXT4_FREE_BLOCKS_FORGET);
277	return 0;
278}
279
280static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data)
281{
282	int retval;
283
284	/* ei->i_data[EXT4_IND_BLOCK] */
285	if (i_data[0]) {
286		extend_credit_for_blkdel(handle, inode);
287		ext4_free_blocks(handle, inode, NULL,
288				le32_to_cpu(i_data[0]), 1,
289				 EXT4_FREE_BLOCKS_METADATA |
290				 EXT4_FREE_BLOCKS_FORGET);
291	}
292
293	/* ei->i_data[EXT4_DIND_BLOCK] */
294	if (i_data[1]) {
295		retval = free_dind_blocks(handle, inode, i_data[1]);
296		if (retval)
297			return retval;
298	}
299
300	/* ei->i_data[EXT4_TIND_BLOCK] */
301	if (i_data[2]) {
302		retval = free_tind_blocks(handle, inode, i_data[2]);
303		if (retval)
304			return retval;
305	}
306	return 0;
307}
308
309static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
310						struct inode *tmp_inode)
311{
312	int retval;
313	__le32	i_data[3];
314	struct ext4_inode_info *ei = EXT4_I(inode);
315	struct ext4_inode_info *tmp_ei = EXT4_I(tmp_inode);
316
317	/*
318	 * One credit accounted for writing the
319	 * i_data field of the original inode
320	 */
321	retval = ext4_journal_extend(handle, 1);
322	if (retval) {
323		retval = ext4_journal_restart(handle, 1);
324		if (retval)
325			goto err_out;
326	}
327
328	i_data[0] = ei->i_data[EXT4_IND_BLOCK];
329	i_data[1] = ei->i_data[EXT4_DIND_BLOCK];
330	i_data[2] = ei->i_data[EXT4_TIND_BLOCK];
331
332	down_write(&EXT4_I(inode)->i_data_sem);
333	/*
334	 * if EXT4_STATE_EXT_MIGRATE is cleared a block allocation
335	 * happened after we started the migrate. We need to
336	 * fail the migrate
337	 */
338	if (!ext4_test_inode_state(inode, EXT4_STATE_EXT_MIGRATE)) {
339		retval = -EAGAIN;
340		up_write(&EXT4_I(inode)->i_data_sem);
341		goto err_out;
342	} else
343		ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
344	/*
 345	 * We have the extent map built with the tmp inode.
346	 * Now copy the i_data across
347	 */
348	ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
349	memcpy(ei->i_data, tmp_ei->i_data, sizeof(ei->i_data));
350
351	/*
352	 * Update i_blocks with the new blocks that got
353	 * allocated while adding extents for extent index
354	 * blocks.
355	 *
356	 * While converting to extents we need not
357	 * update the original inode i_blocks for extent blocks
358	 * via quota APIs. The quota update happened via tmp_inode already.
359	 */
360	spin_lock(&inode->i_lock);
361	inode->i_blocks += tmp_inode->i_blocks;
362	spin_unlock(&inode->i_lock);
363	up_write(&EXT4_I(inode)->i_data_sem);
364
365	/*
366	 * We mark the inode dirty after, because we decrement the
367	 * i_blocks when freeing the indirect meta-data blocks
368	 */
369	retval = free_ind_block(handle, inode, i_data);
370	ext4_mark_inode_dirty(handle, inode);
371
372err_out:
373	return retval;
374}
375
376static int free_ext_idx(handle_t *handle, struct inode *inode,
377					struct ext4_extent_idx *ix)
378{
379	int i, retval = 0;
380	ext4_fsblk_t block;
381	struct buffer_head *bh;
382	struct ext4_extent_header *eh;
383
384	block = ext4_idx_pblock(ix);
385	bh = ext4_sb_bread(inode->i_sb, block, 0);
386	if (IS_ERR(bh))
387		return PTR_ERR(bh);
388
389	eh = (struct ext4_extent_header *)bh->b_data;
390	if (eh->eh_depth != 0) {
391		ix = EXT_FIRST_INDEX(eh);
392		for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
393			retval = free_ext_idx(handle, inode, ix);
394			if (retval)
395				break;
396		}
397	}
398	put_bh(bh);
399	extend_credit_for_blkdel(handle, inode);
400	ext4_free_blocks(handle, inode, NULL, block, 1,
401			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
402	return retval;
403}
404
405/*
406 * Free the extent meta data blocks only
407 */
408static int free_ext_block(handle_t *handle, struct inode *inode)
409{
410	int i, retval = 0;
411	struct ext4_inode_info *ei = EXT4_I(inode);
412	struct ext4_extent_header *eh = (struct ext4_extent_header *)ei->i_data;
413	struct ext4_extent_idx *ix;
414	if (eh->eh_depth == 0)
415		/*
416		 * No extra blocks allocated for extent meta data
417		 */
418		return 0;
419	ix = EXT_FIRST_INDEX(eh);
420	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
421		retval = free_ext_idx(handle, inode, ix);
422		if (retval)
423			return retval;
424	}
425	return retval;
426}
427
428int ext4_ext_migrate(struct inode *inode)
429{
430	handle_t *handle;
431	int retval = 0, i;
432	__le32 *i_data;
433	struct ext4_inode_info *ei;
434	struct inode *tmp_inode = NULL;
435	struct migrate_struct lb;
436	unsigned long max_entries;
437	__u32 goal;
438	uid_t owner[2];
439
440	/*
441	 * If the filesystem does not support extents, or the inode
442	 * already is extent-based, error out.
443	 */
444	if (!ext4_has_feature_extents(inode->i_sb) ||
445	    (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
446		return -EINVAL;
447
448	if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
449		/*
450		 * don't migrate fast symlink
451		 */
452		return retval;
453
454	/*
455	 * Worst case we can touch the allocation bitmaps, a bgd
 456	 * block, and a block to link in the orphan list.  We do need
 457	 * to worry about credits for modifying the quota inode.
458	 */
459	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE,
460		4 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));
461
462	if (IS_ERR(handle)) {
463		retval = PTR_ERR(handle);
464		return retval;
465	}
466	goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
467		EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
468	owner[0] = i_uid_read(inode);
469	owner[1] = i_gid_read(inode);
470	tmp_inode = ext4_new_inode(handle, d_inode(inode->i_sb->s_root),
471				   S_IFREG, NULL, goal, owner, 0);
472	if (IS_ERR(tmp_inode)) {
473		retval = PTR_ERR(tmp_inode);
474		ext4_journal_stop(handle);
475		return retval;
476	}
477	i_size_write(tmp_inode, i_size_read(inode));
478	/*
479	 * Set the i_nlink to zero so it will be deleted later
480	 * when we drop inode reference.
481	 */
482	clear_nlink(tmp_inode);
483
484	ext4_ext_tree_init(handle, tmp_inode);
485	ext4_orphan_add(handle, tmp_inode);
486	ext4_journal_stop(handle);
487
488	/*
489	 * start with one credit accounted for
490	 * superblock modification.
491	 *
492	 * For the tmp_inode we already have committed the
493	 * transaction that created the inode. Later as and
494	 * when we add extents we extent the journal
495	 */
496	/*
497	 * Even though we take i_mutex we can still cause block
498	 * allocation via mmap write to holes. If we have allocated
499	 * new blocks we fail migrate.  New block allocation will
500	 * clear EXT4_STATE_EXT_MIGRATE flag.  The flag is updated
501	 * with i_data_sem held to prevent racing with block
502	 * allocation.
503	 */
504	down_read(&EXT4_I(inode)->i_data_sem);
505	ext4_set_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
506	up_read((&EXT4_I(inode)->i_data_sem));
507
508	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
509	if (IS_ERR(handle)) {
510		/*
511		 * It is impossible to update on-disk structures without
 512		 * a handle, so just roll back in-core changes and leave other
513		 * work to orphan_list_cleanup()
514		 */
515		ext4_orphan_del(NULL, tmp_inode);
516		retval = PTR_ERR(handle);
517		goto out;
518	}
519
520	ei = EXT4_I(inode);
521	i_data = ei->i_data;
522	memset(&lb, 0, sizeof(lb));
523
524	/* 32 bit block address 4 bytes */
525	max_entries = inode->i_sb->s_blocksize >> 2;
526	for (i = 0; i < EXT4_NDIR_BLOCKS; i++) {
527		if (i_data[i]) {
528			retval = update_extent_range(handle, tmp_inode,
529						le32_to_cpu(i_data[i]), &lb);
530			if (retval)
531				goto err_out;
532		} else
533			lb.curr_block++;
534	}
535	if (i_data[EXT4_IND_BLOCK]) {
536		retval = update_ind_extent_range(handle, tmp_inode,
537				le32_to_cpu(i_data[EXT4_IND_BLOCK]), &lb);
538		if (retval)
539			goto err_out;
540	} else
541		lb.curr_block += max_entries;
542	if (i_data[EXT4_DIND_BLOCK]) {
543		retval = update_dind_extent_range(handle, tmp_inode,
544				le32_to_cpu(i_data[EXT4_DIND_BLOCK]), &lb);
545		if (retval)
546			goto err_out;
547	} else
548		lb.curr_block += max_entries * max_entries;
549	if (i_data[EXT4_TIND_BLOCK]) {
550		retval = update_tind_extent_range(handle, tmp_inode,
551				le32_to_cpu(i_data[EXT4_TIND_BLOCK]), &lb);
552		if (retval)
553			goto err_out;
554	}
555	/*
556	 * Build the last extent
557	 */
558	retval = finish_range(handle, tmp_inode, &lb);
559err_out:
560	if (retval)
561		/*
 562		 * Failure case: delete the extent information in the
 563		 * tmp_inode
564		 */
565		free_ext_block(handle, tmp_inode);
566	else {
567		retval = ext4_ext_swap_inode_data(handle, inode, tmp_inode);
568		if (retval)
569			/*
570			 * if we fail to swap inode data free the extent
571			 * details of the tmp inode
572			 */
573			free_ext_block(handle, tmp_inode);
574	}
575
576	/* We mark the tmp_inode dirty via ext4_ext_tree_init. */
577	if (ext4_journal_extend(handle, 1) != 0)
578		ext4_journal_restart(handle, 1);
579
580	/*
581	 * Mark the tmp_inode as of size zero
582	 */
583	i_size_write(tmp_inode, 0);
584
585	/*
586	 * set the  i_blocks count to zero
587	 * so that the ext4_evict_inode() does the
588	 * right job
589	 *
590	 * We don't need to take the i_lock because
591	 * the inode is not visible to user space.
592	 */
593	tmp_inode->i_blocks = 0;
594
595	/* Reset the extent details */
596	ext4_ext_tree_init(handle, tmp_inode);
597	ext4_journal_stop(handle);
598out:
599	unlock_new_inode(tmp_inode);
600	iput(tmp_inode);
601
602	return retval;
603}
604
605/*
606 * Migrate a simple extent-based inode to use the i_blocks[] array
607 */
608int ext4_ind_migrate(struct inode *inode)
609{
610	struct ext4_extent_header	*eh;
611	struct ext4_super_block		*es = EXT4_SB(inode->i_sb)->s_es;
612	struct ext4_inode_info		*ei = EXT4_I(inode);
613	struct ext4_extent		*ex;
614	unsigned int			i, len;
615	ext4_lblk_t			start, end;
616	ext4_fsblk_t			blk;
617	handle_t			*handle;
618	int				ret;
619
620	if (!ext4_has_feature_extents(inode->i_sb) ||
621	    (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
622		return -EINVAL;
623
624	if (ext4_has_feature_bigalloc(inode->i_sb))
625		return -EOPNOTSUPP;
626
627	/*
628	 * In order to get correct extent info, force all delayed allocation
629	 * blocks to be allocated, otherwise delayed allocation blocks may not
630	 * be reflected and bypass the checks on extent header.
631	 */
632	if (test_opt(inode->i_sb, DELALLOC))
633		ext4_alloc_da_blocks(inode);
634
635	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
636	if (IS_ERR(handle))
637		return PTR_ERR(handle);
638
639	down_write(&EXT4_I(inode)->i_data_sem);
640	ret = ext4_ext_check_inode(inode);
641	if (ret)
642		goto errout;
643
644	eh = ext_inode_hdr(inode);
645	ex  = EXT_FIRST_EXTENT(eh);
646	if (ext4_blocks_count(es) > EXT4_MAX_BLOCK_FILE_PHYS ||
647	    eh->eh_depth != 0 || le16_to_cpu(eh->eh_entries) > 1) {
648		ret = -EOPNOTSUPP;
649		goto errout;
650	}
651	if (eh->eh_entries == 0)
652		blk = len = start = end = 0;
653	else {
654		len = le16_to_cpu(ex->ee_len);
655		blk = ext4_ext_pblock(ex);
656		start = le32_to_cpu(ex->ee_block);
657		end = start + len - 1;
658		if (end >= EXT4_NDIR_BLOCKS) {
659			ret = -EOPNOTSUPP;
660			goto errout;
661		}
662	}
663
664	ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
665	memset(ei->i_data, 0, sizeof(ei->i_data));
666	for (i = start; i <= end; i++)
667		ei->i_data[i] = cpu_to_le32(blk++);
668	ext4_mark_inode_dirty(handle, inode);
669errout:
670	ext4_journal_stop(handle);
671	up_write(&EXT4_I(inode)->i_data_sem);
672	return ret;
673}