v3.5.6
/*
 * Copyright IBM Corporation, 2007
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/slab.h>
#include "ext4_jbd2.h"

/*
 * Details of the contiguous blocks that can be
 * represented by a single extent
 */
struct migrate_struct {
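	/* The logical range [first_block, last_block] maps to the
	 * physical range [first_pblock, last_pblock]; curr_block is
	 * the next logical block to examine. */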
	ext4_lblk_t first_block, last_block, curr_block;
	ext4_fsblk_t first_pblock, last_pblock;
};

static int finish_range(handle_t *handle, struct inode *inode,
				struct migrate_struct *lb)
{
	int retval = 0, needed;
	struct ext4_extent newext;
	struct ext4_ext_path *path;
	if (lb->first_pblock == 0)
		return 0;

	/* Add the extent to the temp inode */
	newext.ee_block = cpu_to_le32(lb->first_block);
	newext.ee_len   = cpu_to_le16(lb->last_block - lb->first_block + 1);
	ext4_ext_store_pblock(&newext, lb->first_pblock);
	path = ext4_ext_find_extent(inode, lb->first_block, NULL);

	if (IS_ERR(path)) {
		retval = PTR_ERR(path);
		path = NULL;
		goto err_out;
	}

	/*
	 * Calculate the credits needed to insert this extent.
	 * Since we are doing this in a loop we may accumulate extra
	 * credits, but below we try not to accumulate too many
	 * of them by restarting the journal.
	 */
	needed = ext4_ext_calc_credits_for_single_extent(inode,
		    lb->last_block - lb->first_block + 1, path);

	/*
	 * Make sure the credits we accumulated are not really high.
	 */
	if (needed && ext4_handle_has_enough_credits(handle,
						EXT4_RESERVE_TRANS_BLOCKS)) {
		retval = ext4_journal_restart(handle, needed);
		if (retval)
			goto err_out;
	} else if (needed) {
		retval = ext4_journal_extend(handle, needed);
		if (retval) {
			/*
			 * If not able to extend the journal, restart it.
			 */
			retval = ext4_journal_restart(handle, needed);
			if (retval)
				goto err_out;
		}
	}
	retval = ext4_ext_insert_extent(handle, inode, path, &newext, 0);
err_out:
	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}
	lb->first_pblock = 0;
	return retval;
}

static int update_extent_range(handle_t *handle, struct inode *inode,
			       ext4_fsblk_t pblock, struct migrate_struct *lb)
{
	int retval;
	/*
	 * See if we can add on to the existing range (if it exists)
	 */
	if (lb->first_pblock &&
		(lb->last_pblock+1 == pblock) &&
		(lb->last_block+1 == lb->curr_block)) {
		lb->last_pblock = pblock;
		lb->last_block = lb->curr_block;
		lb->curr_block++;
		return 0;
	}
	/*
	 * Start a new range.
	 */
	retval = finish_range(handle, inode, lb);
	lb->first_pblock = lb->last_pblock = pblock;
	lb->first_block = lb->last_block = lb->curr_block;
	lb->curr_block++;
	return retval;
}

static int update_ind_extent_range(handle_t *handle, struct inode *inode,
				   ext4_fsblk_t pblock,
				   struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
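	/* Each indirect block holds blocksize / 4 32-bit block numbers. */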
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, pblock);
	if (!bh)
		return -EIO;

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			lb->curr_block++;
		}
	}
	put_bh(bh);
	return retval;
}

static int update_dind_extent_range(handle_t *handle, struct inode *inode,
				    ext4_fsblk_t pblock,
				    struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, pblock);
	if (!bh)
		return -EIO;

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_ind_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			/* Only update the file block number */
			lb->curr_block += max_entries;
		}
	}
	put_bh(bh);
	return retval;
}

static int update_tind_extent_range(handle_t *handle, struct inode *inode,
				    ext4_fsblk_t pblock,
				    struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, pblock);
	if (!bh)
		return -EIO;

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_dind_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			/* Only update the file block number */
			lb->curr_block += max_entries * max_entries;
		}
	}
	put_bh(bh);
	return retval;
}

static int extend_credit_for_blkdel(handle_t *handle, struct inode *inode)
{
	int retval = 0, needed;

	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
		return 0;
	/*
	 * We are freeing blocks. During this we touch the
	 * superblock, group descriptor and block bitmap,
	 * so allocate a credit of 3. We may also update
	 * quota (user and group).
	 */
	needed = 3 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);

	if (ext4_journal_extend(handle, needed) != 0)
		retval = ext4_journal_restart(handle, needed);

	return retval;
}

static int free_dind_blocks(handle_t *handle,
				struct inode *inode, __le32 i_data)
{
	int i;
	__le32 *tmp_idata;
	struct buffer_head *bh;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
	if (!bh)
		return -EIO;

	tmp_idata = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (tmp_idata[i]) {
			extend_credit_for_blkdel(handle, inode);
			ext4_free_blocks(handle, inode, NULL,
					 le32_to_cpu(tmp_idata[i]), 1,
					 EXT4_FREE_BLOCKS_METADATA |
					 EXT4_FREE_BLOCKS_FORGET);
		}
	}
	put_bh(bh);
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
			 EXT4_FREE_BLOCKS_METADATA |
			 EXT4_FREE_BLOCKS_FORGET);
	return 0;
}

static int free_tind_blocks(handle_t *handle,
				struct inode *inode, __le32 i_data)
{
	int i, retval = 0;
	__le32 *tmp_idata;
	struct buffer_head *bh;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
	if (!bh)
		return -EIO;

	tmp_idata = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (tmp_idata[i]) {
			retval = free_dind_blocks(handle,
					inode, tmp_idata[i]);
			if (retval) {
				put_bh(bh);
				return retval;
			}
		}
	}
	put_bh(bh);
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
			 EXT4_FREE_BLOCKS_METADATA |
			 EXT4_FREE_BLOCKS_FORGET);
	return 0;
}

static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data)
{
	int retval;

	/* ei->i_data[EXT4_IND_BLOCK] */
	if (i_data[0]) {
		extend_credit_for_blkdel(handle, inode);
		ext4_free_blocks(handle, inode, NULL,
				le32_to_cpu(i_data[0]), 1,
				 EXT4_FREE_BLOCKS_METADATA |
				 EXT4_FREE_BLOCKS_FORGET);
	}

	/* ei->i_data[EXT4_DIND_BLOCK] */
	if (i_data[1]) {
		retval = free_dind_blocks(handle, inode, i_data[1]);
		if (retval)
			return retval;
	}

	/* ei->i_data[EXT4_TIND_BLOCK] */
	if (i_data[2]) {
		retval = free_tind_blocks(handle, inode, i_data[2]);
		if (retval)
			return retval;
	}
	return 0;
}

static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
						struct inode *tmp_inode)
{
	int retval;
	__le32	i_data[3];
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_inode_info *tmp_ei = EXT4_I(tmp_inode);

	/*
	 * One credit accounted for writing the
	 * i_data field of the original inode
	 */
	retval = ext4_journal_extend(handle, 1);
	if (retval) {
		retval = ext4_journal_restart(handle, 1);
		if (retval)
			goto err_out;
	}

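	/*
	 * Save the original indirect block pointers; free_ind_block()
	 * releases them below, after the i_data swap.
	 */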
	i_data[0] = ei->i_data[EXT4_IND_BLOCK];
	i_data[1] = ei->i_data[EXT4_DIND_BLOCK];
	i_data[2] = ei->i_data[EXT4_TIND_BLOCK];

	down_write(&EXT4_I(inode)->i_data_sem);
	/*
	 * if EXT4_STATE_EXT_MIGRATE is cleared a block allocation
	 * happened after we started the migrate. We need to
	 * fail the migrate
	 */
	if (!ext4_test_inode_state(inode, EXT4_STATE_EXT_MIGRATE)) {
		retval = -EAGAIN;
		up_write(&EXT4_I(inode)->i_data_sem);
		goto err_out;
	} else
		ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
	/*
	 * We have the extent map built with the tmp inode.
	 * Now copy the i_data across
	 */
	ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
	memcpy(ei->i_data, tmp_ei->i_data, sizeof(ei->i_data));

	/*
	 * Update i_blocks with the new blocks that got
	 * allocated while adding extents for extent index
	 * blocks.
	 *
	 * While converting to extents we need not
	 * update the original inode i_blocks for extent blocks
	 * via quota APIs. The quota update happened via tmp_inode already.
	 */
	spin_lock(&inode->i_lock);
	inode->i_blocks += tmp_inode->i_blocks;
	spin_unlock(&inode->i_lock);
	up_write(&EXT4_I(inode)->i_data_sem);

	/*
	 * We mark the inode dirty after, because we decrement the
	 * i_blocks when freeing the indirect meta-data blocks
	 */
	retval = free_ind_block(handle, inode, i_data);
	ext4_mark_inode_dirty(handle, inode);

err_out:
	return retval;
}

static int free_ext_idx(handle_t *handle, struct inode *inode,
					struct ext4_extent_idx *ix)
{
	int i, retval = 0;
	ext4_fsblk_t block;
	struct buffer_head *bh;
	struct ext4_extent_header *eh;

	block = ext4_idx_pblock(ix);
	bh = sb_bread(inode->i_sb, block);
	if (!bh)
		return -EIO;

	eh = (struct ext4_extent_header *)bh->b_data;
	if (eh->eh_depth != 0) {
		ix = EXT_FIRST_INDEX(eh);
		for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
			retval = free_ext_idx(handle, inode, ix);
			if (retval)
				break;
		}
	}
	put_bh(bh);
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, block, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
	return retval;
}

/*
 * Free the extent meta data blocks only
 */
static int free_ext_block(handle_t *handle, struct inode *inode)
{
	int i, retval = 0;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_extent_header *eh = (struct ext4_extent_header *)ei->i_data;
	struct ext4_extent_idx *ix;
	if (eh->eh_depth == 0)
		/*
		 * No extra blocks allocated for extent meta data
		 */
		return 0;
	ix = EXT_FIRST_INDEX(eh);
	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
		retval = free_ext_idx(handle, inode, ix);
		if (retval)
			return retval;
	}
	return retval;
}

int ext4_ext_migrate(struct inode *inode)
{
	handle_t *handle;
	int retval = 0, i;
	__le32 *i_data;
	struct ext4_inode_info *ei;
	struct inode *tmp_inode = NULL;
	struct migrate_struct lb;
	unsigned long max_entries;
	__u32 goal;
	uid_t owner[2];

	/*
	 * If the filesystem does not support extents, or the inode
	 * already is extent-based, error out.
	 */
	if (!EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
				       EXT4_FEATURE_INCOMPAT_EXTENTS) ||
	    (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return -EINVAL;

	if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
		/*
		 * don't migrate fast symlink
		 */
		return retval;

	handle = ext4_journal_start(inode,
					EXT4_DATA_TRANS_BLOCKS(inode->i_sb) +
					EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
					EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)
					+ 1);
	if (IS_ERR(handle)) {
		retval = PTR_ERR(handle);
		return retval;
	}
	goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
		EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
	owner[0] = i_uid_read(inode);
	owner[1] = i_gid_read(inode);
	tmp_inode = ext4_new_inode(handle, inode->i_sb->s_root->d_inode,
				   S_IFREG, NULL, goal, owner);
	if (IS_ERR(tmp_inode)) {
		retval = PTR_ERR(tmp_inode);
		ext4_journal_stop(handle);
		return retval;
	}
	i_size_write(tmp_inode, i_size_read(inode));
	/*
	 * Set the i_nlink to zero so it will be deleted later
	 * when we drop the inode reference.
	 */
	clear_nlink(tmp_inode);

	ext4_ext_tree_init(handle, tmp_inode);
	ext4_orphan_add(handle, tmp_inode);
	ext4_journal_stop(handle);

	/*
	 * start with one credit accounted for
	 * superblock modification.
	 *
	 * For the tmp_inode we already have committed the
	 * transaction that created the inode. Later, as and
	 * when we add extents, we extend the journal.
	 */
	/*
	 * Even though we take i_mutex we can still cause block
	 * allocation via mmap write to holes. If we have allocated
	 * new blocks we fail migrate.  New block allocation will
	 * clear EXT4_STATE_EXT_MIGRATE flag.  The flag is updated
	 * with i_data_sem held to prevent racing with block
	 * allocation.
	 */
	down_read((&EXT4_I(inode)->i_data_sem));
	ext4_set_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
	up_read((&EXT4_I(inode)->i_data_sem));

	handle = ext4_journal_start(inode, 1);
	if (IS_ERR(handle)) {
		/*
		 * It is impossible to update on-disk structures without
		 * a handle, so just roll back the in-core changes and
		 * leave the rest of the work to orphan_list_cleanup()
		 */
		ext4_orphan_del(NULL, tmp_inode);
		retval = PTR_ERR(handle);
		goto out;
	}

	ei = EXT4_I(inode);
	i_data = ei->i_data;
	memset(&lb, 0, sizeof(lb));

	/* 32 bit block address 4 bytes */
	max_entries = inode->i_sb->s_blocksize >> 2;
	for (i = 0; i < EXT4_NDIR_BLOCKS; i++) {
		if (i_data[i]) {
			retval = update_extent_range(handle, tmp_inode,
						le32_to_cpu(i_data[i]), &lb);
			if (retval)
				goto err_out;
		} else
			lb.curr_block++;
	}
	if (i_data[EXT4_IND_BLOCK]) {
		retval = update_ind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_IND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	} else
		lb.curr_block += max_entries;
	if (i_data[EXT4_DIND_BLOCK]) {
		retval = update_dind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_DIND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	} else
		lb.curr_block += max_entries * max_entries;
	if (i_data[EXT4_TIND_BLOCK]) {
		retval = update_tind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_TIND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	}
	/*
	 * Build the last extent
	 */
	retval = finish_range(handle, tmp_inode, &lb);
err_out:
	if (retval)
		/*
		 * In case of failure, delete the extent information
		 * built in the tmp_inode
		 */
		free_ext_block(handle, tmp_inode);
	else {
		retval = ext4_ext_swap_inode_data(handle, inode, tmp_inode);
		if (retval)
			/*
			 * if we fail to swap inode data free the extent
			 * details of the tmp inode
			 */
			free_ext_block(handle, tmp_inode);
	}

	/* We mark the tmp_inode dirty via ext4_ext_tree_init. */
	if (ext4_journal_extend(handle, 1) != 0)
		ext4_journal_restart(handle, 1);

	/*
	 * Mark the tmp_inode as of size zero
	 */
	i_size_write(tmp_inode, 0);

	/*
	 * set the i_blocks count to zero
	 * so that ext4_delete_inode() does the
	 * right job
	 *
	 * We don't need to take the i_lock because
	 * the inode is not visible to user space.
	 */
	tmp_inode->i_blocks = 0;

	/* Reset the extent details */
	ext4_ext_tree_init(handle, tmp_inode);
	ext4_journal_stop(handle);
out:
	unlock_new_inode(tmp_inode);
	iput(tmp_inode);

	return retval;
}
v4.17
// SPDX-License-Identifier: LGPL-2.1
/*
 * Copyright IBM Corporation, 2007
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 */

#include <linux/slab.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"

/*
 * Details of the contiguous blocks that can be
 * represented by a single extent
 */
struct migrate_struct {
	ext4_lblk_t first_block, last_block, curr_block;
	ext4_fsblk_t first_pblock, last_pblock;
};

static int finish_range(handle_t *handle, struct inode *inode,
				struct migrate_struct *lb)
{
	int retval = 0, needed;
	struct ext4_extent newext;
	struct ext4_ext_path *path;
	if (lb->first_pblock == 0)
		return 0;

	/* Add the extent to the temp inode */
	newext.ee_block = cpu_to_le32(lb->first_block);
	newext.ee_len   = cpu_to_le16(lb->last_block - lb->first_block + 1);
	ext4_ext_store_pblock(&newext, lb->first_pblock);
	/* Locking only for convenience since we are operating on temp inode */
	down_write(&EXT4_I(inode)->i_data_sem);
	path = ext4_find_extent(inode, lb->first_block, NULL, 0);
	if (IS_ERR(path)) {
		retval = PTR_ERR(path);
		path = NULL;
		goto err_out;
	}

	/*
	 * Calculate the credits needed to insert this extent.
	 * Since we are doing this in a loop we may accumulate extra
	 * credits, but below we try not to accumulate too many
	 * of them by restarting the journal.
	 */
	needed = ext4_ext_calc_credits_for_single_extent(inode,
		    lb->last_block - lb->first_block + 1, path);

	/*
	 * Make sure the credits we accumulated are not really high.
	 */
	if (needed && ext4_handle_has_enough_credits(handle,
						EXT4_RESERVE_TRANS_BLOCKS)) {
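		/*
		 * A journal restart may block waiting for the running
		 * transaction to commit, so drop i_data_sem (which only
		 * guards the temp inode here) across the restart.
		 */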
		up_write((&EXT4_I(inode)->i_data_sem));
		retval = ext4_journal_restart(handle, needed);
		down_write((&EXT4_I(inode)->i_data_sem));
		if (retval)
			goto err_out;
	} else if (needed) {
		retval = ext4_journal_extend(handle, needed);
		if (retval) {
			/*
			 * If not able to extend the journal, restart it.
			 */
			up_write((&EXT4_I(inode)->i_data_sem));
			retval = ext4_journal_restart(handle, needed);
			down_write((&EXT4_I(inode)->i_data_sem));
			if (retval)
				goto err_out;
		}
	}
	retval = ext4_ext_insert_extent(handle, inode, &path, &newext, 0);
err_out:
	up_write((&EXT4_I(inode)->i_data_sem));
	ext4_ext_drop_refs(path);
	kfree(path);
	lb->first_pblock = 0;
	return retval;
}

static int update_extent_range(handle_t *handle, struct inode *inode,
			       ext4_fsblk_t pblock, struct migrate_struct *lb)
{
	int retval;
	/*
	 * See if we can add on to the existing range (if it exists)
	 */
	if (lb->first_pblock &&
		(lb->last_pblock+1 == pblock) &&
		(lb->last_block+1 == lb->curr_block)) {
		lb->last_pblock = pblock;
		lb->last_block = lb->curr_block;
		lb->curr_block++;
		return 0;
	}
	/*
	 * Start a new range.
	 */
	retval = finish_range(handle, inode, lb);
	lb->first_pblock = lb->last_pblock = pblock;
	lb->first_block = lb->last_block = lb->curr_block;
	lb->curr_block++;
	return retval;
}

static int update_ind_extent_range(handle_t *handle, struct inode *inode,
				   ext4_fsblk_t pblock,
				   struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, pblock);
	if (!bh)
		return -EIO;

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			lb->curr_block++;
		}
	}
	put_bh(bh);
	return retval;
}

static int update_dind_extent_range(handle_t *handle, struct inode *inode,
				    ext4_fsblk_t pblock,
				    struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, pblock);
	if (!bh)
		return -EIO;

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_ind_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			/* Only update the file block number */
			lb->curr_block += max_entries;
		}
	}
	put_bh(bh);
	return retval;
}

static int update_tind_extent_range(handle_t *handle, struct inode *inode,
				    ext4_fsblk_t pblock,
				    struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, pblock);
	if (!bh)
		return -EIO;

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_dind_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			/* Only update the file block number */
			lb->curr_block += max_entries * max_entries;
		}
	}
	put_bh(bh);
	return retval;
}

static int extend_credit_for_blkdel(handle_t *handle, struct inode *inode)
{
	int retval = 0, needed;

	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
		return 0;
	/*
	 * We are freeing blocks. During this we touch the
	 * superblock, group descriptor and block bitmap,
	 * so allocate a credit of 3. We may also update
	 * quota (user and group).
	 */
	needed = 3 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);

	if (ext4_journal_extend(handle, needed) != 0)
		retval = ext4_journal_restart(handle, needed);

	return retval;
}

static int free_dind_blocks(handle_t *handle,
				struct inode *inode, __le32 i_data)
{
	int i;
	__le32 *tmp_idata;
	struct buffer_head *bh;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
	if (!bh)
		return -EIO;

	tmp_idata = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (tmp_idata[i]) {
			extend_credit_for_blkdel(handle, inode);
			ext4_free_blocks(handle, inode, NULL,
					 le32_to_cpu(tmp_idata[i]), 1,
					 EXT4_FREE_BLOCKS_METADATA |
					 EXT4_FREE_BLOCKS_FORGET);
		}
	}
	put_bh(bh);
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
			 EXT4_FREE_BLOCKS_METADATA |
			 EXT4_FREE_BLOCKS_FORGET);
	return 0;
}

static int free_tind_blocks(handle_t *handle,
				struct inode *inode, __le32 i_data)
{
	int i, retval = 0;
	__le32 *tmp_idata;
	struct buffer_head *bh;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
	if (!bh)
		return -EIO;

	tmp_idata = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (tmp_idata[i]) {
			retval = free_dind_blocks(handle,
					inode, tmp_idata[i]);
			if (retval) {
				put_bh(bh);
				return retval;
			}
		}
	}
	put_bh(bh);
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
			 EXT4_FREE_BLOCKS_METADATA |
			 EXT4_FREE_BLOCKS_FORGET);
	return 0;
}

static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data)
{
	int retval;

	/* ei->i_data[EXT4_IND_BLOCK] */
	if (i_data[0]) {
		extend_credit_for_blkdel(handle, inode);
		ext4_free_blocks(handle, inode, NULL,
				le32_to_cpu(i_data[0]), 1,
				 EXT4_FREE_BLOCKS_METADATA |
				 EXT4_FREE_BLOCKS_FORGET);
	}

	/* ei->i_data[EXT4_DIND_BLOCK] */
	if (i_data[1]) {
		retval = free_dind_blocks(handle, inode, i_data[1]);
		if (retval)
			return retval;
	}

	/* ei->i_data[EXT4_TIND_BLOCK] */
	if (i_data[2]) {
		retval = free_tind_blocks(handle, inode, i_data[2]);
		if (retval)
			return retval;
	}
	return 0;
}

static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
						struct inode *tmp_inode)
{
	int retval;
	__le32	i_data[3];
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_inode_info *tmp_ei = EXT4_I(tmp_inode);

	/*
	 * One credit accounted for writing the
	 * i_data field of the original inode
	 */
	retval = ext4_journal_extend(handle, 1);
	if (retval) {
		retval = ext4_journal_restart(handle, 1);
		if (retval)
			goto err_out;
	}

	i_data[0] = ei->i_data[EXT4_IND_BLOCK];
	i_data[1] = ei->i_data[EXT4_DIND_BLOCK];
	i_data[2] = ei->i_data[EXT4_TIND_BLOCK];

	down_write(&EXT4_I(inode)->i_data_sem);
	/*
	 * if EXT4_STATE_EXT_MIGRATE is cleared a block allocation
	 * happened after we started the migrate. We need to
	 * fail the migrate
	 */
	if (!ext4_test_inode_state(inode, EXT4_STATE_EXT_MIGRATE)) {
		retval = -EAGAIN;
		up_write(&EXT4_I(inode)->i_data_sem);
		goto err_out;
	} else
		ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
	/*
	 * We have the extent map built with the tmp inode.
	 * Now copy the i_data across
	 */
	ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
	memcpy(ei->i_data, tmp_ei->i_data, sizeof(ei->i_data));

	/*
	 * Update i_blocks with the new blocks that got
	 * allocated while adding extents for extent index
	 * blocks.
	 *
	 * While converting to extents we need not
	 * update the original inode i_blocks for extent blocks
	 * via quota APIs. The quota update happened via tmp_inode already.
	 */
	spin_lock(&inode->i_lock);
	inode->i_blocks += tmp_inode->i_blocks;
	spin_unlock(&inode->i_lock);
	up_write(&EXT4_I(inode)->i_data_sem);

	/*
	 * We mark the inode dirty after, because we decrement the
	 * i_blocks when freeing the indirect meta-data blocks
	 */
	retval = free_ind_block(handle, inode, i_data);
	ext4_mark_inode_dirty(handle, inode);

err_out:
	return retval;
}

static int free_ext_idx(handle_t *handle, struct inode *inode,
					struct ext4_extent_idx *ix)
{
	int i, retval = 0;
	ext4_fsblk_t block;
	struct buffer_head *bh;
	struct ext4_extent_header *eh;

	block = ext4_idx_pblock(ix);
	bh = sb_bread(inode->i_sb, block);
	if (!bh)
		return -EIO;

	eh = (struct ext4_extent_header *)bh->b_data;
	if (eh->eh_depth != 0) {
		ix = EXT_FIRST_INDEX(eh);
		for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
			retval = free_ext_idx(handle, inode, ix);
			if (retval)
				break;
		}
	}
	put_bh(bh);
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, block, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
	return retval;
}

/*
 * Free the extent meta data blocks only
 */
static int free_ext_block(handle_t *handle, struct inode *inode)
{
	int i, retval = 0;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_extent_header *eh = (struct ext4_extent_header *)ei->i_data;
	struct ext4_extent_idx *ix;
	if (eh->eh_depth == 0)
		/*
		 * No extra blocks allocated for extent meta data
		 */
		return 0;
	ix = EXT_FIRST_INDEX(eh);
	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
		retval = free_ext_idx(handle, inode, ix);
		if (retval)
			return retval;
	}
	return retval;
}

int ext4_ext_migrate(struct inode *inode)
{
	handle_t *handle;
	int retval = 0, i;
	__le32 *i_data;
	struct ext4_inode_info *ei;
	struct inode *tmp_inode = NULL;
	struct migrate_struct lb;
	unsigned long max_entries;
	__u32 goal;
	uid_t owner[2];

	/*
	 * If the filesystem does not support extents, or the inode
	 * already is extent-based, error out.
	 */
	if (!ext4_has_feature_extents(inode->i_sb) ||
	    (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return -EINVAL;

	if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
		/*
		 * don't migrate fast symlink
		 */
		return retval;

	/*
	 * Worst case we can touch the allocation bitmaps, a bgd
	 * block, and a block to link in the orphan list.  We do need
	 * to worry about credits for modifying the quota inode.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE,
		4 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));

	if (IS_ERR(handle)) {
		retval = PTR_ERR(handle);
		return retval;
	}
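	/*
	 * Allocate the temporary inode in the same block group as the
	 * original inode: goal is the first inode number of that group.
	 */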
	goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
		EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
	owner[0] = i_uid_read(inode);
	owner[1] = i_gid_read(inode);
	tmp_inode = ext4_new_inode(handle, d_inode(inode->i_sb->s_root),
				   S_IFREG, NULL, goal, owner, 0);
	if (IS_ERR(tmp_inode)) {
		retval = PTR_ERR(tmp_inode);
		ext4_journal_stop(handle);
		return retval;
	}
	i_size_write(tmp_inode, i_size_read(inode));
	/*
	 * Set the i_nlink to zero so it will be deleted later
	 * when we drop the inode reference.
	 */
	clear_nlink(tmp_inode);

	ext4_ext_tree_init(handle, tmp_inode);
	ext4_orphan_add(handle, tmp_inode);
	ext4_journal_stop(handle);

	/*
	 * start with one credit accounted for
	 * superblock modification.
	 *
	 * For the tmp_inode we already have committed the
	 * transaction that created the inode. Later, as and
	 * when we add extents, we extend the journal.
	 */
	/*
	 * Even though we take i_mutex we can still cause block
	 * allocation via mmap write to holes. If we have allocated
	 * new blocks we fail migrate.  New block allocation will
	 * clear EXT4_STATE_EXT_MIGRATE flag.  The flag is updated
	 * with i_data_sem held to prevent racing with block
	 * allocation.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	ext4_set_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
	up_read((&EXT4_I(inode)->i_data_sem));

	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
	if (IS_ERR(handle)) {
		/*
		 * It is impossible to update on-disk structures without
		 * a handle, so just roll back the in-core changes and
		 * leave the rest of the work to orphan_list_cleanup()
		 */
		ext4_orphan_del(NULL, tmp_inode);
		retval = PTR_ERR(handle);
		goto out;
	}

	ei = EXT4_I(inode);
	i_data = ei->i_data;
	memset(&lb, 0, sizeof(lb));

	/* 32 bit block address 4 bytes */
	max_entries = inode->i_sb->s_blocksize >> 2;
	for (i = 0; i < EXT4_NDIR_BLOCKS; i++) {
		if (i_data[i]) {
			retval = update_extent_range(handle, tmp_inode,
						le32_to_cpu(i_data[i]), &lb);
			if (retval)
				goto err_out;
		} else
			lb.curr_block++;
	}
	if (i_data[EXT4_IND_BLOCK]) {
		retval = update_ind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_IND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	} else
		lb.curr_block += max_entries;
	if (i_data[EXT4_DIND_BLOCK]) {
		retval = update_dind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_DIND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	} else
		lb.curr_block += max_entries * max_entries;
	if (i_data[EXT4_TIND_BLOCK]) {
		retval = update_tind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_TIND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	}
	/*
	 * Build the last extent
	 */
	retval = finish_range(handle, tmp_inode, &lb);
err_out:
	if (retval)
		/*
		 * In case of failure, delete the extent information
		 * built in the tmp_inode
		 */
		free_ext_block(handle, tmp_inode);
	else {
		retval = ext4_ext_swap_inode_data(handle, inode, tmp_inode);
		if (retval)
			/*
			 * if we fail to swap inode data free the extent
			 * details of the tmp inode
			 */
			free_ext_block(handle, tmp_inode);
	}

	/* We mark the tmp_inode dirty via ext4_ext_tree_init. */
	if (ext4_journal_extend(handle, 1) != 0)
		ext4_journal_restart(handle, 1);

	/*
	 * Mark the tmp_inode as of size zero
	 */
	i_size_write(tmp_inode, 0);

	/*
	 * set the i_blocks count to zero
	 * so that ext4_evict_inode() does the
	 * right job
	 *
	 * We don't need to take the i_lock because
	 * the inode is not visible to user space.
	 */
	tmp_inode->i_blocks = 0;

	/* Reset the extent details */
	ext4_ext_tree_init(handle, tmp_inode);
	ext4_journal_stop(handle);
out:
	unlock_new_inode(tmp_inode);
	iput(tmp_inode);

	return retval;
}

/*
 * Migrate a simple extent-based inode to use the i_blocks[] array
 */
int ext4_ind_migrate(struct inode *inode)
{
	struct ext4_extent_header	*eh;
	struct ext4_super_block		*es = EXT4_SB(inode->i_sb)->s_es;
	struct ext4_inode_info		*ei = EXT4_I(inode);
	struct ext4_extent		*ex;
	unsigned int			i, len;
	ext4_lblk_t			start, end;
	ext4_fsblk_t			blk;
	handle_t			*handle;
	int				ret;

	if (!ext4_has_feature_extents(inode->i_sb) ||
	    (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return -EINVAL;

	if (ext4_has_feature_bigalloc(inode->i_sb))
		return -EOPNOTSUPP;

	/*
	 * In order to get correct extent info, force all delayed allocation
	 * blocks to be allocated; otherwise delayed-allocation blocks may
	 * not be reflected in the extent tree and would bypass the checks
	 * on the extent header below.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		ext4_alloc_da_blocks(inode);

	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	down_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_ext_check_inode(inode);
	if (ret)
		goto errout;

	eh = ext_inode_hdr(inode);
	ex  = EXT_FIRST_EXTENT(eh);
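	/*
	 * The indirect format addresses physical blocks with 32-bit
	 * pointers and, without allocating map blocks, covers only the
	 * first EXT4_NDIR_BLOCKS logical blocks. So only handle an
	 * inode whose data is a single depth-0 extent that (as checked
	 * further below) lies entirely below EXT4_NDIR_BLOCKS.
	 */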
	if (ext4_blocks_count(es) > EXT4_MAX_BLOCK_FILE_PHYS ||
	    eh->eh_depth != 0 || le16_to_cpu(eh->eh_entries) > 1) {
		ret = -EOPNOTSUPP;
		goto errout;
	}
	if (eh->eh_entries == 0)
		blk = len = start = end = 0;
	else {
		len = le16_to_cpu(ex->ee_len);
		blk = ext4_ext_pblock(ex);
		start = le32_to_cpu(ex->ee_block);
		end = start + len - 1;
		if (end >= EXT4_NDIR_BLOCKS) {
			ret = -EOPNOTSUPP;
			goto errout;
		}
	}

	ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
	memset(ei->i_data, 0, sizeof(ei->i_data));
	for (i = start; i <= end; i++)
		ei->i_data[i] = cpu_to_le32(blk++);
	ext4_mark_inode_dirty(handle, inode);
errout:
	ext4_journal_stop(handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	return ret;
}
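
Both entry points are reached from ext4's ioctl handler: EXT4_IOC_MIGRATE dispatches to ext4_ext_migrate(), and in the v4.17 tree clearing the extents flag through FS_IOC_SETFLAGS goes through ext4_ind_migrate(). Below is a minimal userspace sketch of the forward migration, assuming EXT4_IOC_MIGRATE is _IO('f', 9) as in fs/ext4/ext4.h; the program itself (migrate_demo.c) is illustrative and not from the kernel tree.

/*
 * migrate_demo.c — a minimal sketch, not from the kernel tree.
 * Assumes EXT4_IOC_MIGRATE == _IO('f', 9) (fs/ext4/ext4.h) and a
 * file on an ext4 filesystem with the extents feature enabled.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define EXT4_IOC_MIGRATE	_IO('f', 9)

int main(int argc, char **argv)
{
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/*
	 * EINVAL: the inode is already extent-based (or the fs lacks
	 * the extents feature); see ext4_ext_migrate() above.
	 * EAGAIN: a racing block allocation cleared
	 * EXT4_STATE_EXT_MIGRATE, so the migration was abandoned.
	 */
	if (ioctl(fd, EXT4_IOC_MIGRATE) < 0)
		perror("EXT4_IOC_MIGRATE");
	close(fd);
	return 0;
}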