fs/fat/cache.c (Linux v4.10.11)
/*
 *  linux/fs/fat/cache.c
 *
 *  Written 1992,1993 by Werner Almesberger
 *
 *  Mar 1999. AV. Changed cache, so that it uses the starting cluster instead
 *	of inode number.
 *  May 1999. AV. Fixed the bogosity with FAT32 (read "FAT28"). Fscking lusers.
 */

#include <linux/slab.h>
#include "fat.h"

/* this must be > 0. */
#define FAT_MAX_CACHE	8

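/*
 * Each fat_cache entry describes one contiguous run of the cluster chain:
 * file-relative cluster "fcluster" maps to on-disk cluster "dcluster", and
 * the following "nr_contig" clusters are contiguous on disk.  Entries are
 * kept on a per-inode LRU list (MSDOS_I(inode)->cache_lru).
 */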
struct fat_cache {
	struct list_head cache_list;
	int nr_contig;	/* number of contiguous clusters */
	int fcluster;	/* cluster number in the file. */
	int dcluster;	/* cluster number on disk. */
};

struct fat_cache_id {
	unsigned int id;
	int nr_contig;
	int fcluster;
	int dcluster;
};

static inline int fat_max_cache(struct inode *inode)
{
	return FAT_MAX_CACHE;
}

static struct kmem_cache *fat_cache_cachep;

static void init_once(void *foo)
{
	struct fat_cache *cache = (struct fat_cache *)foo;

	INIT_LIST_HEAD(&cache->cache_list);
}

int __init fat_cache_init(void)
{
	fat_cache_cachep = kmem_cache_create("fat_cache",
				sizeof(struct fat_cache),
				0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
				init_once);
	if (fat_cache_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void fat_cache_destroy(void)
{
	kmem_cache_destroy(fat_cache_cachep);
}

static inline struct fat_cache *fat_cache_alloc(struct inode *inode)
{
	return kmem_cache_alloc(fat_cache_cachep, GFP_NOFS);
}

static inline void fat_cache_free(struct fat_cache *cache)
{
	BUG_ON(!list_empty(&cache->cache_list));
	kmem_cache_free(fat_cache_cachep, cache);
}

static inline void fat_cache_update_lru(struct inode *inode,
					struct fat_cache *cache)
{
	if (MSDOS_I(inode)->cache_lru.next != &cache->cache_list)
		list_move(&cache->cache_list, &MSDOS_I(inode)->cache_lru);
}

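/*
 * Find the cached run that is closest to (and does not start beyond) "fclus".
 * On a hit, fill *cid from the entry and advance *cached_fclus/*cached_dclus
 * to the nearest cached position; the return value is the offset of that
 * position within the run.  Returns -1 when nothing cached is usable.
 */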
static int fat_cache_lookup(struct inode *inode, int fclus,
			    struct fat_cache_id *cid,
			    int *cached_fclus, int *cached_dclus)
{
	static struct fat_cache nohit = { .fcluster = 0, };

	struct fat_cache *hit = &nohit, *p;
	int offset = -1;

	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
	list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
		/* Find the cache of "fclus" or nearest cache. */
		if (p->fcluster <= fclus && hit->fcluster < p->fcluster) {
			hit = p;
			if ((hit->fcluster + hit->nr_contig) < fclus) {
				offset = hit->nr_contig;
			} else {
				offset = fclus - hit->fcluster;
				break;
			}
		}
	}
	if (hit != &nohit) {
		fat_cache_update_lru(inode, hit);

		cid->id = MSDOS_I(inode)->cache_valid_id;
		cid->nr_contig = hit->nr_contig;
		cid->fcluster = hit->fcluster;
		cid->dcluster = hit->dcluster;
		*cached_fclus = cid->fcluster + offset;
		*cached_dclus = cid->dcluster + offset;
	}
	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);

	return offset;
}

static struct fat_cache *fat_cache_merge(struct inode *inode,
					 struct fat_cache_id *new)
{
	struct fat_cache *p;

	list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
		/* Find the same part as "new" in cluster-chain. */
		if (p->fcluster == new->fcluster) {
			BUG_ON(p->dcluster != new->dcluster);
			if (new->nr_contig > p->nr_contig)
				p->nr_contig = new->nr_contig;
			return p;
		}
	}
	return NULL;
}

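/*
 * Add the run described by "new" to the inode's cache.  An existing entry
 * for the same starting cluster is extended in place; otherwise a new entry
 * is allocated, or the least-recently-used entry is recycled once
 * FAT_MAX_CACHE entries exist.  If the cache was invalidated after the
 * lookup that produced "new" (the id check below), the run is silently
 * dropped.
 */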
static void fat_cache_add(struct inode *inode, struct fat_cache_id *new)
{
	struct fat_cache *cache, *tmp;

	if (new->fcluster == -1) /* dummy cache */
		return;

	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
	if (new->id != FAT_CACHE_VALID &&
	    new->id != MSDOS_I(inode)->cache_valid_id)
		goto out;	/* this cache was invalidated */

	cache = fat_cache_merge(inode, new);
	if (cache == NULL) {
		if (MSDOS_I(inode)->nr_caches < fat_max_cache(inode)) {
			MSDOS_I(inode)->nr_caches++;
			spin_unlock(&MSDOS_I(inode)->cache_lru_lock);

			tmp = fat_cache_alloc(inode);
			if (!tmp) {
				spin_lock(&MSDOS_I(inode)->cache_lru_lock);
				MSDOS_I(inode)->nr_caches--;
				spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
				return;
			}

			spin_lock(&MSDOS_I(inode)->cache_lru_lock);
			cache = fat_cache_merge(inode, new);
			if (cache != NULL) {
				MSDOS_I(inode)->nr_caches--;
				fat_cache_free(tmp);
				goto out_update_lru;
			}
			cache = tmp;
		} else {
			struct list_head *p = MSDOS_I(inode)->cache_lru.prev;
			cache = list_entry(p, struct fat_cache, cache_list);
		}
		cache->fcluster = new->fcluster;
		cache->dcluster = new->dcluster;
		cache->nr_contig = new->nr_contig;
	}
out_update_lru:
	fat_cache_update_lru(inode, cache);
out:
	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
}

/*
 * Cache invalidation occurs rarely, thus the LRU chain is not updated. It
 * fixes itself after a while.
 */
static void __fat_cache_inval_inode(struct inode *inode)
{
	struct msdos_inode_info *i = MSDOS_I(inode);
	struct fat_cache *cache;

	while (!list_empty(&i->cache_lru)) {
		cache = list_entry(i->cache_lru.next,
				   struct fat_cache, cache_list);
		list_del_init(&cache->cache_list);
		i->nr_caches--;
		fat_cache_free(cache);
	}
	/* Update. The copy of caches before this id is discarded. */
	i->cache_valid_id++;
	if (i->cache_valid_id == FAT_CACHE_VALID)
		i->cache_valid_id++;
}

void fat_cache_inval_inode(struct inode *inode)
{
	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
	__fat_cache_inval_inode(inode);
	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
}

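/*
 * cache_contiguous() bumps nr_contig and reports whether "dclus" directly
 * follows the run in "cid"; when it does not, the caller re-initializes
 * "cid" with cache_init(), so the stale increment is harmless.
 */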
static inline int cache_contiguous(struct fat_cache_id *cid, int dclus)
{
	cid->nr_contig++;
	return ((cid->dcluster + cid->nr_contig) == dclus);
}

static inline void cache_init(struct fat_cache_id *cid, int fclus, int dclus)
{
	cid->id = FAT_CACHE_VALID;
	cid->fcluster = fclus;
	cid->dcluster = dclus;
	cid->nr_contig = 0;
}

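/*
 * Walk the cluster chain to file cluster "cluster", starting from the
 * closest cached position (or from i_start when nothing is cached), and
 * remember the contiguous run containing the final position in the inode's
 * cache.  On success *fclus and *dclus hold the reached file/disk cluster
 * and 0 is returned; FAT_ENT_EOF is returned if the chain ends first, or a
 * negative errno on a corrupted chain.
 */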
int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
{
	struct super_block *sb = inode->i_sb;
	const int limit = sb->s_maxbytes >> MSDOS_SB(sb)->cluster_bits;
	struct fat_entry fatent;
	struct fat_cache_id cid;
	int nr;

	BUG_ON(MSDOS_I(inode)->i_start == 0);

	*fclus = 0;
	*dclus = MSDOS_I(inode)->i_start;
	if (cluster == 0)
		return 0;

	if (fat_cache_lookup(inode, cluster, &cid, fclus, dclus) < 0) {
		/*
		 * dummy, always not contiguous
		 * This is reinitialized by cache_init(), later.
		 */
		cache_init(&cid, -1, -1);
	}

	fatent_init(&fatent);
	while (*fclus < cluster) {
		/* prevent the infinite loop of cluster chain */
		if (*fclus > limit) {
			fat_fs_error_ratelimit(sb,
					"%s: detected the cluster chain loop"
					" (i_pos %lld)", __func__,
					MSDOS_I(inode)->i_pos);
			nr = -EIO;
			goto out;
		}

		nr = fat_ent_read(inode, &fatent, *dclus);
		if (nr < 0)
			goto out;
		else if (nr == FAT_ENT_FREE) {
			fat_fs_error_ratelimit(sb,
				       "%s: invalid cluster chain (i_pos %lld)",
				       __func__,
				       MSDOS_I(inode)->i_pos);
			nr = -EIO;
			goto out;
		} else if (nr == FAT_ENT_EOF) {
			fat_cache_add(inode, &cid);
			goto out;
		}
		(*fclus)++;
		*dclus = nr;
		if (!cache_contiguous(&cid, *dclus))
			cache_init(&cid, *fclus, *dclus);
	}
	nr = 0;
	fat_cache_add(inode, &cid);
out:
	fatent_brelse(&fatent);
	return nr;
}

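/*
 * Resolve a file-relative cluster to its on-disk cluster number: 0 for an
 * empty file, a negative errno on error (including a chain that ends before
 * the requested cluster), otherwise the disk cluster.
 */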
static int fat_bmap_cluster(struct inode *inode, int cluster)
{
	struct super_block *sb = inode->i_sb;
	int ret, fclus, dclus;

	if (MSDOS_I(inode)->i_start == 0)
		return 0;

	ret = fat_get_cluster(inode, cluster, &fclus, &dclus);
	if (ret < 0)
		return ret;
	else if (ret == FAT_ENT_EOF) {
		fat_fs_error(sb, "%s: request beyond EOF (i_pos %lld)",
			     __func__, MSDOS_I(inode)->i_pos);
		return -EIO;
	}
	return dclus;
}

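/*
 * Translate "sector" into the corresponding device block (*bmap) and the
 * number of blocks mapped contiguously from there, capped so the mapping
 * does not extend past "last_block".
 */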
int fat_get_mapped_cluster(struct inode *inode, sector_t sector,
			   sector_t last_block,
			   unsigned long *mapped_blocks, sector_t *bmap)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	int cluster, offset;

	cluster = sector >> (sbi->cluster_bits - sb->s_blocksize_bits);
	offset  = sector & (sbi->sec_per_clus - 1);
	cluster = fat_bmap_cluster(inode, cluster);
	if (cluster < 0)
		return cluster;
	else if (cluster) {
		*bmap = fat_clus_to_blknr(sbi, cluster) + offset;
		*mapped_blocks = sbi->sec_per_clus - offset;
		if (*mapped_blocks > last_block - sector)
			*mapped_blocks = last_block - sector;
	}

	return 0;
}

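/*
 * Return 1 when "sector" lies beyond EOF for this request.  When "create"
 * is set, the limit is ->mmu_private (the already-allocated area) rather
 * than i_size.
 */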
static int is_exceed_eof(struct inode *inode, sector_t sector,
			 sector_t *last_block, int create)
{
	struct super_block *sb = inode->i_sb;
	const unsigned long blocksize = sb->s_blocksize;
	const unsigned char blocksize_bits = sb->s_blocksize_bits;

	*last_block = (i_size_read(inode) + (blocksize - 1)) >> blocksize_bits;
	if (sector >= *last_block) {
		if (!create)
			return 1;

		/*
		 * ->mmu_private can access on only allocation path.
		 * (caller must hold ->i_mutex)
		 */
		*last_block = (MSDOS_I(inode)->mmu_private + (blocksize - 1))
			>> blocksize_bits;
		if (sector >= *last_block)
			return 1;
	}

	return 0;
}

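/*
 * Map file sector "sector" to a device sector in *phys, with *mapped_blocks
 * giving how many blocks are contiguous from there.  *phys is left 0 when
 * the sector is not mapped.  The FAT12/16 root directory is handled
 * specially because it lives outside the data area, and bmap() callers
 * (from_bmap) are limited by i_blocks instead of the EOF checks.
 */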
int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
	     unsigned long *mapped_blocks, int create, bool from_bmap)
{
	struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
	sector_t last_block;

	*phys = 0;
	*mapped_blocks = 0;
	if ((sbi->fat_bits != 32) && (inode->i_ino == MSDOS_ROOT_INO)) {
		if (sector < (sbi->dir_entries >> sbi->dir_per_block_bits)) {
			*phys = sector + sbi->dir_start;
			*mapped_blocks = 1;
		}
		return 0;
	}

	if (!from_bmap) {
		if (is_exceed_eof(inode, sector, &last_block, create))
			return 0;
	} else {
		last_block = inode->i_blocks >>
				(inode->i_sb->s_blocksize_bits - 9);
		if (sector >= last_block)
			return 0;
	}

	return fat_get_mapped_cluster(inode, sector, last_block, mapped_blocks,
				      phys);
}

fs/fat/cache.c (Linux v3.15)
/*
 *  linux/fs/fat/cache.c
 *
 *  Written 1992,1993 by Werner Almesberger
 *
 *  Mar 1999. AV. Changed cache, so that it uses the starting cluster instead
 *	of inode number.
 *  May 1999. AV. Fixed the bogosity with FAT32 (read "FAT28"). Fscking lusers.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include "fat.h"

/* this must be > 0. */
#define FAT_MAX_CACHE	8

struct fat_cache {
	struct list_head cache_list;
	int nr_contig;	/* number of contiguous clusters */
	int fcluster;	/* cluster number in the file. */
	int dcluster;	/* cluster number on disk. */
};

struct fat_cache_id {
	unsigned int id;
	int nr_contig;
	int fcluster;
	int dcluster;
};

static inline int fat_max_cache(struct inode *inode)
{
	return FAT_MAX_CACHE;
}

static struct kmem_cache *fat_cache_cachep;

static void init_once(void *foo)
{
	struct fat_cache *cache = (struct fat_cache *)foo;

	INIT_LIST_HEAD(&cache->cache_list);
}

int __init fat_cache_init(void)
{
	fat_cache_cachep = kmem_cache_create("fat_cache",
				sizeof(struct fat_cache),
				0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
				init_once);
	if (fat_cache_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void fat_cache_destroy(void)
{
	kmem_cache_destroy(fat_cache_cachep);
}

static inline struct fat_cache *fat_cache_alloc(struct inode *inode)
{
	return kmem_cache_alloc(fat_cache_cachep, GFP_NOFS);
}

static inline void fat_cache_free(struct fat_cache *cache)
{
	BUG_ON(!list_empty(&cache->cache_list));
	kmem_cache_free(fat_cache_cachep, cache);
}

static inline void fat_cache_update_lru(struct inode *inode,
					struct fat_cache *cache)
{
	if (MSDOS_I(inode)->cache_lru.next != &cache->cache_list)
		list_move(&cache->cache_list, &MSDOS_I(inode)->cache_lru);
}

static int fat_cache_lookup(struct inode *inode, int fclus,
			    struct fat_cache_id *cid,
			    int *cached_fclus, int *cached_dclus)
{
	static struct fat_cache nohit = { .fcluster = 0, };

	struct fat_cache *hit = &nohit, *p;
	int offset = -1;

	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
	list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
		/* Find the cache of "fclus" or nearest cache. */
		if (p->fcluster <= fclus && hit->fcluster < p->fcluster) {
			hit = p;
			if ((hit->fcluster + hit->nr_contig) < fclus) {
				offset = hit->nr_contig;
			} else {
				offset = fclus - hit->fcluster;
				break;
			}
		}
	}
	if (hit != &nohit) {
		fat_cache_update_lru(inode, hit);

		cid->id = MSDOS_I(inode)->cache_valid_id;
		cid->nr_contig = hit->nr_contig;
		cid->fcluster = hit->fcluster;
		cid->dcluster = hit->dcluster;
		*cached_fclus = cid->fcluster + offset;
		*cached_dclus = cid->dcluster + offset;
	}
	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);

	return offset;
}

static struct fat_cache *fat_cache_merge(struct inode *inode,
					 struct fat_cache_id *new)
{
	struct fat_cache *p;

	list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
		/* Find the same part as "new" in cluster-chain. */
		if (p->fcluster == new->fcluster) {
			BUG_ON(p->dcluster != new->dcluster);
			if (new->nr_contig > p->nr_contig)
				p->nr_contig = new->nr_contig;
			return p;
		}
	}
	return NULL;
}

static void fat_cache_add(struct inode *inode, struct fat_cache_id *new)
{
	struct fat_cache *cache, *tmp;

	if (new->fcluster == -1) /* dummy cache */
		return;

	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
	if (new->id != FAT_CACHE_VALID &&
	    new->id != MSDOS_I(inode)->cache_valid_id)
		goto out;	/* this cache was invalidated */

	cache = fat_cache_merge(inode, new);
	if (cache == NULL) {
		if (MSDOS_I(inode)->nr_caches < fat_max_cache(inode)) {
			MSDOS_I(inode)->nr_caches++;
			spin_unlock(&MSDOS_I(inode)->cache_lru_lock);

			tmp = fat_cache_alloc(inode);
			if (!tmp) {
				spin_lock(&MSDOS_I(inode)->cache_lru_lock);
				MSDOS_I(inode)->nr_caches--;
				spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
				return;
			}

			spin_lock(&MSDOS_I(inode)->cache_lru_lock);
			cache = fat_cache_merge(inode, new);
			if (cache != NULL) {
				MSDOS_I(inode)->nr_caches--;
				fat_cache_free(tmp);
				goto out_update_lru;
			}
			cache = tmp;
		} else {
			struct list_head *p = MSDOS_I(inode)->cache_lru.prev;
			cache = list_entry(p, struct fat_cache, cache_list);
		}
		cache->fcluster = new->fcluster;
		cache->dcluster = new->dcluster;
		cache->nr_contig = new->nr_contig;
	}
out_update_lru:
	fat_cache_update_lru(inode, cache);
out:
	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
}

/*
 * Cache invalidation occurs rarely, thus the LRU chain is not updated. It
 * fixes itself after a while.
 */
static void __fat_cache_inval_inode(struct inode *inode)
{
	struct msdos_inode_info *i = MSDOS_I(inode);
	struct fat_cache *cache;

	while (!list_empty(&i->cache_lru)) {
		cache = list_entry(i->cache_lru.next,
				   struct fat_cache, cache_list);
		list_del_init(&cache->cache_list);
		i->nr_caches--;
		fat_cache_free(cache);
	}
	/* Update. The copy of caches before this id is discarded. */
	i->cache_valid_id++;
	if (i->cache_valid_id == FAT_CACHE_VALID)
		i->cache_valid_id++;
}

void fat_cache_inval_inode(struct inode *inode)
{
	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
	__fat_cache_inval_inode(inode);
	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
}

static inline int cache_contiguous(struct fat_cache_id *cid, int dclus)
{
	cid->nr_contig++;
	return ((cid->dcluster + cid->nr_contig) == dclus);
}

static inline void cache_init(struct fat_cache_id *cid, int fclus, int dclus)
{
	cid->id = FAT_CACHE_VALID;
	cid->fcluster = fclus;
	cid->dcluster = dclus;
	cid->nr_contig = 0;
}

int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
{
	struct super_block *sb = inode->i_sb;
	const int limit = sb->s_maxbytes >> MSDOS_SB(sb)->cluster_bits;
	struct fat_entry fatent;
	struct fat_cache_id cid;
	int nr;

	BUG_ON(MSDOS_I(inode)->i_start == 0);

	*fclus = 0;
	*dclus = MSDOS_I(inode)->i_start;
	if (cluster == 0)
		return 0;

	if (fat_cache_lookup(inode, cluster, &cid, fclus, dclus) < 0) {
		/*
		 * dummy, always not contiguous
		 * This is reinitialized by cache_init(), later.
		 */
		cache_init(&cid, -1, -1);
	}

	fatent_init(&fatent);
	while (*fclus < cluster) {
		/* prevent the infinite loop of cluster chain */
		if (*fclus > limit) {
			fat_fs_error_ratelimit(sb,
					"%s: detected the cluster chain loop"
					" (i_pos %lld)", __func__,
					MSDOS_I(inode)->i_pos);
			nr = -EIO;
			goto out;
		}

		nr = fat_ent_read(inode, &fatent, *dclus);
		if (nr < 0)
			goto out;
		else if (nr == FAT_ENT_FREE) {
			fat_fs_error_ratelimit(sb,
				       "%s: invalid cluster chain (i_pos %lld)",
				       __func__,
				       MSDOS_I(inode)->i_pos);
			nr = -EIO;
			goto out;
		} else if (nr == FAT_ENT_EOF) {
			fat_cache_add(inode, &cid);
			goto out;
		}
		(*fclus)++;
		*dclus = nr;
		if (!cache_contiguous(&cid, *dclus))
			cache_init(&cid, *fclus, *dclus);
	}
	nr = 0;
	fat_cache_add(inode, &cid);
out:
	fatent_brelse(&fatent);
	return nr;
}

static int fat_bmap_cluster(struct inode *inode, int cluster)
{
	struct super_block *sb = inode->i_sb;
	int ret, fclus, dclus;

	if (MSDOS_I(inode)->i_start == 0)
		return 0;

	ret = fat_get_cluster(inode, cluster, &fclus, &dclus);
	if (ret < 0)
		return ret;
	else if (ret == FAT_ENT_EOF) {
		fat_fs_error(sb, "%s: request beyond EOF (i_pos %lld)",
			     __func__, MSDOS_I(inode)->i_pos);
		return -EIO;
	}
	return dclus;
}

int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
	     unsigned long *mapped_blocks, int create)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const unsigned long blocksize = sb->s_blocksize;
	const unsigned char blocksize_bits = sb->s_blocksize_bits;
	sector_t last_block;
	int cluster, offset;

	*phys = 0;
	*mapped_blocks = 0;
	if ((sbi->fat_bits != 32) && (inode->i_ino == MSDOS_ROOT_INO)) {
		if (sector < (sbi->dir_entries >> sbi->dir_per_block_bits)) {
			*phys = sector + sbi->dir_start;
			*mapped_blocks = 1;
		}
		return 0;
	}

	last_block = (i_size_read(inode) + (blocksize - 1)) >> blocksize_bits;
	if (sector >= last_block) {
		if (!create)
			return 0;

		/*
		 * ->mmu_private can access on only allocation path.
		 * (caller must hold ->i_mutex)
		 */
		last_block = (MSDOS_I(inode)->mmu_private + (blocksize - 1))
			>> blocksize_bits;
		if (sector >= last_block)
			return 0;
	}

	cluster = sector >> (sbi->cluster_bits - sb->s_blocksize_bits);
	offset  = sector & (sbi->sec_per_clus - 1);
	cluster = fat_bmap_cluster(inode, cluster);
	if (cluster < 0)
		return cluster;
	else if (cluster) {
		*phys = fat_clus_to_blknr(sbi, cluster) + offset;
		*mapped_blocks = sbi->sec_per_clus - offset;
		if (*mapped_blocks > last_block - sector)
			*mapped_blocks = last_block - sector;
	}
	return 0;
}