fs/fat/cache.c (v6.13.7)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  linux/fs/fat/cache.c
  4 *
  5 *  Written 1992,1993 by Werner Almesberger
  6 *
  7 *  Mar 1999. AV. Changed cache, so that it uses the starting cluster instead
  8 *	of inode number.
  9 *  May 1999. AV. Fixed the bogosity with FAT32 (read "FAT28"). Fscking lusers.
 10 */
 11
 12#include <linux/slab.h>
 13#include "fat.h"
 14
 15/* this must be > 0. */
 16#define FAT_MAX_CACHE	8
 17
 18struct fat_cache {
 19	struct list_head cache_list;
 20	int nr_contig;	/* number of contiguous clusters */
 21	int fcluster;	/* cluster number in the file. */
 22	int dcluster;	/* cluster number on disk. */
 23};
 24
 25struct fat_cache_id {
 26	unsigned int id;
 27	int nr_contig;
 28	int fcluster;
 29	int dcluster;
 30};
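
Each entry caches one contiguous extent of the file's cluster chain: file-relative cluster fcluster lives in on-disk cluster dcluster, and the next nr_contig clusters are contiguous in both spaces, so any file cluster inside the extent can be resolved by simple addition instead of re-walking the FAT. A minimal user-space sketch of that arithmetic (the struct and function names here are hypothetical, not part of the kernel API):

/*
 * Illustrative user-space sketch, not kernel code: resolve a file-relative
 * cluster from a cached extent with the same three fields as struct fat_cache.
 */
#include <stdio.h>

struct cached_extent {
	int nr_contig;	/* clusters contiguous after fcluster/dcluster */
	int fcluster;	/* cluster index within the file */
	int dcluster;	/* cluster number on the volume */
};

/* Return the on-disk cluster for file cluster fclus, or -1 on a miss. */
static int resolve_from_extent(const struct cached_extent *e, int fclus)
{
	if (fclus < e->fcluster || fclus > e->fcluster + e->nr_contig)
		return -1;
	return e->dcluster + (fclus - e->fcluster);
}

int main(void)
{
	/* File clusters 10..14 stored in disk clusters 200..204. */
	struct cached_extent e = { .nr_contig = 4, .fcluster = 10, .dcluster = 200 };

	printf("%d\n", resolve_from_extent(&e, 12));	/* 202 */
	printf("%d\n", resolve_from_extent(&e, 20));	/* -1, not covered */
	return 0;
}
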
 31
 32static inline int fat_max_cache(struct inode *inode)
 33{
 34	return FAT_MAX_CACHE;
 35}
 36
 37static struct kmem_cache *fat_cache_cachep;
 38
 39static void init_once(void *foo)
 40{
 41	struct fat_cache *cache = (struct fat_cache *)foo;
 42
 43	INIT_LIST_HEAD(&cache->cache_list);
 44}
 45
 46int __init fat_cache_init(void)
 47{
 48	fat_cache_cachep = kmem_cache_create("fat_cache",
 49				sizeof(struct fat_cache),
 50				0, SLAB_RECLAIM_ACCOUNT,
 51				init_once);
 52	if (fat_cache_cachep == NULL)
 53		return -ENOMEM;
 54	return 0;
 55}
 56
 57void fat_cache_destroy(void)
 58{
 59	kmem_cache_destroy(fat_cache_cachep);
 60}
 61
 62static inline struct fat_cache *fat_cache_alloc(struct inode *inode)
 63{
 64	return kmem_cache_alloc(fat_cache_cachep, GFP_NOFS);
 65}
 66
 67static inline void fat_cache_free(struct fat_cache *cache)
 68{
 69	BUG_ON(!list_empty(&cache->cache_list));
 70	kmem_cache_free(fat_cache_cachep, cache);
 71}
 72
 73static inline void fat_cache_update_lru(struct inode *inode,
 74					struct fat_cache *cache)
 75{
 76	if (MSDOS_I(inode)->cache_lru.next != &cache->cache_list)
 77		list_move(&cache->cache_list, &MSDOS_I(inode)->cache_lru);
 78}
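
Moving a hit to the head of cache_lru keeps the list in LRU order; that is what lets fat_cache_add() below evict by simply reusing the tail entry (cache_lru.prev) once nr_caches reaches fat_max_cache().
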
 79
 80static int fat_cache_lookup(struct inode *inode, int fclus,
 81			    struct fat_cache_id *cid,
 82			    int *cached_fclus, int *cached_dclus)
 83{
 84	static struct fat_cache nohit = { .fcluster = 0, };
 85
 86	struct fat_cache *hit = &nohit, *p;
 87	int offset = -1;
 88
 89	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
 90	list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
 91		/* Find the cache of "fclus" or nearest cache. */
 92		if (p->fcluster <= fclus && hit->fcluster < p->fcluster) {
 93			hit = p;
 94			if ((hit->fcluster + hit->nr_contig) < fclus) {
 95				offset = hit->nr_contig;
 96			} else {
 97				offset = fclus - hit->fcluster;
 98				break;
 99			}
100		}
101	}
102	if (hit != &nohit) {
103		fat_cache_update_lru(inode, hit);
104
105		cid->id = MSDOS_I(inode)->cache_valid_id;
106		cid->nr_contig = hit->nr_contig;
107		cid->fcluster = hit->fcluster;
108		cid->dcluster = hit->dcluster;
109		*cached_fclus = cid->fcluster + offset;
110		*cached_dclus = cid->dcluster + offset;
111	}
112	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
113
114	return offset;
115}
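
The loop keeps the closest cached extent that starts at or before fclus. On a partial hit, offset is clamped to hit->nr_contig, so the caller gets back the last cluster covered by the extent: for example, with a cached entry {fcluster = 10, dcluster = 200, nr_contig = 4}, a lookup for file cluster 20 returns offset 4 and hands back cached_fclus = 14, cached_dclus = 204, letting fat_get_cluster() resume the FAT walk there instead of from the start of the chain. A return value of -1 means no usable entry was found.
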
116
117static struct fat_cache *fat_cache_merge(struct inode *inode,
118					 struct fat_cache_id *new)
119{
120	struct fat_cache *p;
121
122	list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
123		/* Find the same part as "new" in cluster-chain. */
124		if (p->fcluster == new->fcluster) {
125			BUG_ON(p->dcluster != new->dcluster);
126			if (new->nr_contig > p->nr_contig)
127				p->nr_contig = new->nr_contig;
128			return p;
129		}
130	}
131	return NULL;
132}
133
134static void fat_cache_add(struct inode *inode, struct fat_cache_id *new)
135{
136	struct fat_cache *cache, *tmp;
137
138	if (new->fcluster == -1) /* dummy cache */
139		return;
140
141	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
142	if (new->id != FAT_CACHE_VALID &&
143	    new->id != MSDOS_I(inode)->cache_valid_id)
144		goto out;	/* this cache was invalidated */
145
146	cache = fat_cache_merge(inode, new);
147	if (cache == NULL) {
148		if (MSDOS_I(inode)->nr_caches < fat_max_cache(inode)) {
149			MSDOS_I(inode)->nr_caches++;
150			spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
151
152			tmp = fat_cache_alloc(inode);
153			if (!tmp) {
154				spin_lock(&MSDOS_I(inode)->cache_lru_lock);
155				MSDOS_I(inode)->nr_caches--;
156				spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
157				return;
158			}
159
160			spin_lock(&MSDOS_I(inode)->cache_lru_lock);
161			cache = fat_cache_merge(inode, new);
162			if (cache != NULL) {
163				MSDOS_I(inode)->nr_caches--;
164				fat_cache_free(tmp);
165				goto out_update_lru;
166			}
167			cache = tmp;
168		} else {
169			struct list_head *p = MSDOS_I(inode)->cache_lru.prev;
170			cache = list_entry(p, struct fat_cache, cache_list);
171		}
172		cache->fcluster = new->fcluster;
173		cache->dcluster = new->dcluster;
174		cache->nr_contig = new->nr_contig;
175	}
176out_update_lru:
177	fat_cache_update_lru(inode, cache);
178out:
179	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
180}
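
fat_cache_add() has to drop cache_lru_lock around fat_cache_alloc(), because a GFP_NOFS slab allocation may sleep while a spinlock is held for atomic context only; after re-taking the lock it re-runs fat_cache_merge(), since another task may have inserted the same extent in the window. The same drop-allocate-recheck shape in plain user-space code (a sketch only, with a pthread mutex standing in for the spinlock):

#include <pthread.h>
#include <stdlib.h>

struct node { struct node *next; int key; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

static struct node *find_locked(int key)
{
	struct node *p;

	for (p = head; p; p = p->next)
		if (p->key == key)
			return p;
	return NULL;
}

static struct node *get_or_add(int key)
{
	struct node *n, *tmp;

	pthread_mutex_lock(&lock);
	n = find_locked(key);
	if (!n) {
		pthread_mutex_unlock(&lock);	/* never allocate under the lock */
		tmp = malloc(sizeof(*tmp));
		pthread_mutex_lock(&lock);
		n = find_locked(key);		/* re-check: did we race? */
		if (n || !tmp) {
			free(tmp);		/* lost the race (or OOM) */
		} else {
			tmp->key = key;
			tmp->next = head;
			head = tmp;
			n = tmp;
		}
	}
	pthread_mutex_unlock(&lock);
	return n;
}
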
181
182/*
183 * Cache invalidation occurs rarely, thus the LRU chain is not updated. It
184 * fixes itself after a while.
185 */
186static void __fat_cache_inval_inode(struct inode *inode)
187{
188	struct msdos_inode_info *i = MSDOS_I(inode);
189	struct fat_cache *cache;
190
191	while (!list_empty(&i->cache_lru)) {
192		cache = list_entry(i->cache_lru.next,
193				   struct fat_cache, cache_list);
194		list_del_init(&cache->cache_list);
195		i->nr_caches--;
196		fat_cache_free(cache);
197	}
198	/* Update. The copy of caches before this id is discarded. */
199	i->cache_valid_id++;
200	if (i->cache_valid_id == FAT_CACHE_VALID)
201		i->cache_valid_id++;
202}
203
204void fat_cache_inval_inode(struct inode *inode)
205{
206	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
207	__fat_cache_inval_inode(inode);
208	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
209}
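
Invalidation is a generation bump: every fat_cache_id handed out by fat_cache_lookup() is stamped with the current cache_valid_id, and fat_cache_add() discards a stale stamp, while the FAT_CACHE_VALID sentinel (used by cache_init() for extents just read from the FAT) is always accepted. Skipping FAT_CACHE_VALID when incrementing ensures ids handed out by the lookup never equal the always-accepted sentinel, so they really do expire on the next invalidation. The same idea in isolation (hypothetical names, not the kernel's):

#define GEN_ALWAYS_VALID 1234u	/* stands in for FAT_CACHE_VALID */

struct gen_cache { unsigned int valid_gen; };

/* A snapshot is usable if it carries the sentinel or the current generation. */
static int snapshot_is_usable(const struct gen_cache *c, unsigned int stamp)
{
	return stamp == GEN_ALWAYS_VALID || stamp == c->valid_gen;
}

/* Invalidate everything: bump the generation, never landing on the sentinel. */
static void invalidate_all(struct gen_cache *c)
{
	if (++c->valid_gen == GEN_ALWAYS_VALID)
		c->valid_gen++;
}
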
210
211static inline int cache_contiguous(struct fat_cache_id *cid, int dclus)
212{
213	cid->nr_contig++;
214	return ((cid->dcluster + cid->nr_contig) == dclus);
215}
216
217static inline void cache_init(struct fat_cache_id *cid, int fclus, int dclus)
218{
219	cid->id = FAT_CACHE_VALID;
220	cid->fcluster = fclus;
221	cid->dcluster = dclus;
222	cid->nr_contig = 0;
223}
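
cache_contiguous() bumps nr_contig first and then tests whether the new disk cluster extends the run: with cid = {dcluster = 200, nr_contig = 2}, the next call checks dclus == 203; anything else makes the caller restart the extent with cache_init().
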
224
225int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
226{
227	struct super_block *sb = inode->i_sb;
228	struct msdos_sb_info *sbi = MSDOS_SB(sb);
229	const int limit = sb->s_maxbytes >> sbi->cluster_bits;
230	struct fat_entry fatent;
231	struct fat_cache_id cid;
232	int nr;
233
234	BUG_ON(MSDOS_I(inode)->i_start == 0);
235
236	*fclus = 0;
237	*dclus = MSDOS_I(inode)->i_start;
238	if (!fat_valid_entry(sbi, *dclus)) {
239		fat_fs_error_ratelimit(sb,
240			"%s: invalid start cluster (i_pos %lld, start %08x)",
241			__func__, MSDOS_I(inode)->i_pos, *dclus);
242		return -EIO;
243	}
244	if (cluster == 0)
245		return 0;
246
247	if (fat_cache_lookup(inode, cluster, &cid, fclus, dclus) < 0) {
248		/*
249		 * dummy, always not contiguous
250		 * This is reinitialized by cache_init(), later.
251		 */
252		cache_init(&cid, -1, -1);
253	}
254
255	fatent_init(&fatent);
256	while (*fclus < cluster) {
257		/* prevent the infinite loop of cluster chain */
258		if (*fclus > limit) {
259			fat_fs_error_ratelimit(sb,
260				"%s: detected the cluster chain loop (i_pos %lld)",
261				__func__, MSDOS_I(inode)->i_pos);
262			nr = -EIO;
263			goto out;
264		}
265
266		nr = fat_ent_read(inode, &fatent, *dclus);
267		if (nr < 0)
268			goto out;
269		else if (nr == FAT_ENT_FREE) {
270			fat_fs_error_ratelimit(sb,
271				"%s: invalid cluster chain (i_pos %lld)",
272				__func__, MSDOS_I(inode)->i_pos);
273			nr = -EIO;
274			goto out;
275		} else if (nr == FAT_ENT_EOF) {
276			fat_cache_add(inode, &cid);
277			goto out;
278		}
279		(*fclus)++;
280		*dclus = nr;
281		if (!cache_contiguous(&cid, *dclus))
282			cache_init(&cid, *fclus, *dclus);
283	}
284	nr = 0;
285	fat_cache_add(inode, &cid);
286out:
287	fatent_brelse(&fatent);
288	return nr;
289}
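
fat_get_cluster() resolves file cluster "cluster" to its disk cluster by starting from the best cached extent and then following FAT entries one at a time; the limit derived from sb->s_maxbytes bounds the walk so a cycle in a corrupted chain becomes -EIO instead of an endless loop. A self-contained user-space sketch of the same walk over an in-memory FAT (the array, constants and terminator values are illustrative, not the kernel's):

#include <stdio.h>

#define ENT_FREE 0
#define ENT_EOF  (-1)

/* Return the disk cluster holding file cluster "want", or -1 on error/EOF. */
static int walk_chain(const int *fat, int start, int want, int limit)
{
	int fclus = 0, dclus = start;

	while (fclus < want) {
		int next;

		if (fclus > limit)		/* corrupted, looping chain */
			return -1;
		next = fat[dclus];
		if (next == ENT_FREE)		/* chain points at a free cluster */
			return -1;
		if (next == ENT_EOF)		/* file is shorter than "want" */
			return -1;
		dclus = next;
		fclus++;
	}
	return dclus;
}

int main(void)
{
	/* Chain: 2 -> 9 -> 3 -> EOF (array indices are cluster numbers). */
	int fat[16] = { [2] = 9, [9] = 3, [3] = ENT_EOF };

	printf("%d\n", walk_chain(fat, 2, 2, 16));	/* 3 */
	printf("%d\n", walk_chain(fat, 2, 5, 16));	/* -1 */
	return 0;
}
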
290
291static int fat_bmap_cluster(struct inode *inode, int cluster)
292{
293	struct super_block *sb = inode->i_sb;
294	int ret, fclus, dclus;
295
296	if (MSDOS_I(inode)->i_start == 0)
297		return 0;
298
299	ret = fat_get_cluster(inode, cluster, &fclus, &dclus);
300	if (ret < 0)
301		return ret;
302	else if (ret == FAT_ENT_EOF) {
303		fat_fs_error(sb, "%s: request beyond EOF (i_pos %lld)",
304			     __func__, MSDOS_I(inode)->i_pos);
305		return -EIO;
306	}
307	return dclus;
308}
309
310int fat_get_mapped_cluster(struct inode *inode, sector_t sector,
311			   sector_t last_block,
312			   unsigned long *mapped_blocks, sector_t *bmap)
313{
314	struct super_block *sb = inode->i_sb;
315	struct msdos_sb_info *sbi = MSDOS_SB(sb);
316	int cluster, offset;
317
318	cluster = sector >> (sbi->cluster_bits - sb->s_blocksize_bits);
319	offset  = sector & (sbi->sec_per_clus - 1);
320	cluster = fat_bmap_cluster(inode, cluster);
321	if (cluster < 0)
322		return cluster;
323	else if (cluster) {
324		*bmap = fat_clus_to_blknr(sbi, cluster) + offset;
325		*mapped_blocks = sbi->sec_per_clus - offset;
326		if (*mapped_blocks > last_block - sector)
327			*mapped_blocks = last_block - sector;
328	}
329
330	return 0;
331}
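
The sector-to-cluster split above relies on the cluster size being a power-of-two multiple of the block size: the cluster index is sector >> (cluster_bits - blocksize_bits) and the offset inside the cluster is sector & (sec_per_clus - 1). A tiny standalone illustration (values chosen arbitrarily):

#include <stdio.h>

int main(void)
{
	const unsigned int sec_per_clus = 8;	/* must be a power of two */
	const unsigned int shift = 3;		/* cluster_bits - blocksize_bits */
	unsigned long sector = 21;

	printf("cluster %lu, offset %lu\n",
	       sector >> shift, sector & (sec_per_clus - 1));
	/* prints: cluster 2, offset 5 */
	return 0;
}
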
332
333static int is_exceed_eof(struct inode *inode, sector_t sector,
334			 sector_t *last_block, int create)
335{
336	struct super_block *sb = inode->i_sb;
337	const unsigned long blocksize = sb->s_blocksize;
338	const unsigned char blocksize_bits = sb->s_blocksize_bits;
339
340	*last_block = (i_size_read(inode) + (blocksize - 1)) >> blocksize_bits;
341	if (sector >= *last_block) {
342		if (!create)
343			return 1;
344
345		/*
346		 * ->mmu_private can be accessed only on the allocation path
347		 * (the caller must hold ->i_mutex).
348		 */
349		*last_block = (MSDOS_I(inode)->mmu_private + (blocksize - 1))
350			>> blocksize_bits;
351		if (sector >= *last_block)
352			return 1;
353	}
354
355	return 0;
356}
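
The rounding (size + blocksize - 1) >> blocksize_bits is a ceiling division, so last_block is the number of blocks the current size occupies: with 1024-byte blocks (blocksize_bits = 10), i_size = 1 gives last_block = 1 while i_size = 1025 gives 2. When create is set, the bound is redone against mmu_private, the size up to which blocks have already been allocated, so the allocation path can still map blocks that lie past i_size.
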
357
358int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
359	     unsigned long *mapped_blocks, int create, bool from_bmap)
360{
361	struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
362	sector_t last_block;
363
364	*phys = 0;
365	*mapped_blocks = 0;
366	if (!is_fat32(sbi) && (inode->i_ino == MSDOS_ROOT_INO)) {
367		if (sector < (sbi->dir_entries >> sbi->dir_per_block_bits)) {
368			*phys = sector + sbi->dir_start;
369			*mapped_blocks = 1;
370		}
371		return 0;
372	}
373
374	if (!from_bmap) {
375		if (is_exceed_eof(inode, sector, &last_block, create))
376			return 0;
377	} else {
378		last_block = inode->i_blocks >>
379				(inode->i_sb->s_blocksize_bits - 9);
380		if (sector >= last_block)
381			return 0;
382	}
383
384	return fat_get_mapped_cluster(inode, sector, last_block, mapped_blocks,
385				      phys);
386}
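
A hedged sketch of how a caller might use fat_bmap() (loosely modeled on the way fs/fat/inode.c consumes it; this helper itself is hypothetical): map one logical block of a file, treating phys == 0 as "nothing mapped here".

/* Sketch only: map logical block "iblock" of a FAT file to a device block. */
static int map_one_block(struct inode *inode, sector_t iblock, sector_t *blknr)
{
	unsigned long mapped;
	sector_t phys;
	int err;

	err = fat_bmap(inode, iblock, &phys, &mapped, 0 /* no create */, false);
	if (err)
		return err;
	if (!phys)		/* hole or beyond EOF: nothing to return */
		return -ENOENT;
	*blknr = phys;
	return 0;
}
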
fs/fat/cache.c (v3.5.6)
 
  1/*
  2 *  linux/fs/fat/cache.c
  3 *
  4 *  Written 1992,1993 by Werner Almesberger
  5 *
  6 *  Mar 1999. AV. Changed cache, so that it uses the starting cluster instead
  7 *	of inode number.
  8 *  May 1999. AV. Fixed the bogosity with FAT32 (read "FAT28"). Fscking lusers.
  9 */
 10
 11#include <linux/fs.h>
 12#include <linux/slab.h>
 13#include <linux/buffer_head.h>
 14#include "fat.h"
 15
 16/* this must be > 0. */
 17#define FAT_MAX_CACHE	8
 18
 19struct fat_cache {
 20	struct list_head cache_list;
 21	int nr_contig;	/* number of contiguous clusters */
 22	int fcluster;	/* cluster number in the file. */
 23	int dcluster;	/* cluster number on disk. */
 24};
 25
 26struct fat_cache_id {
 27	unsigned int id;
 28	int nr_contig;
 29	int fcluster;
 30	int dcluster;
 31};
 32
 33static inline int fat_max_cache(struct inode *inode)
 34{
 35	return FAT_MAX_CACHE;
 36}
 37
 38static struct kmem_cache *fat_cache_cachep;
 39
 40static void init_once(void *foo)
 41{
 42	struct fat_cache *cache = (struct fat_cache *)foo;
 43
 44	INIT_LIST_HEAD(&cache->cache_list);
 45}
 46
 47int __init fat_cache_init(void)
 48{
 49	fat_cache_cachep = kmem_cache_create("fat_cache",
 50				sizeof(struct fat_cache),
 51				0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
 52				init_once);
 53	if (fat_cache_cachep == NULL)
 54		return -ENOMEM;
 55	return 0;
 56}
 57
 58void fat_cache_destroy(void)
 59{
 60	kmem_cache_destroy(fat_cache_cachep);
 61}
 62
 63static inline struct fat_cache *fat_cache_alloc(struct inode *inode)
 64{
 65	return kmem_cache_alloc(fat_cache_cachep, GFP_NOFS);
 66}
 67
 68static inline void fat_cache_free(struct fat_cache *cache)
 69{
 70	BUG_ON(!list_empty(&cache->cache_list));
 71	kmem_cache_free(fat_cache_cachep, cache);
 72}
 73
 74static inline void fat_cache_update_lru(struct inode *inode,
 75					struct fat_cache *cache)
 76{
 77	if (MSDOS_I(inode)->cache_lru.next != &cache->cache_list)
 78		list_move(&cache->cache_list, &MSDOS_I(inode)->cache_lru);
 79}
 80
 81static int fat_cache_lookup(struct inode *inode, int fclus,
 82			    struct fat_cache_id *cid,
 83			    int *cached_fclus, int *cached_dclus)
 84{
 85	static struct fat_cache nohit = { .fcluster = 0, };
 86
 87	struct fat_cache *hit = &nohit, *p;
 88	int offset = -1;
 89
 90	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
 91	list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
 92		/* Find the cache of "fclus" or nearest cache. */
 93		if (p->fcluster <= fclus && hit->fcluster < p->fcluster) {
 94			hit = p;
 95			if ((hit->fcluster + hit->nr_contig) < fclus) {
 96				offset = hit->nr_contig;
 97			} else {
 98				offset = fclus - hit->fcluster;
 99				break;
100			}
101		}
102	}
103	if (hit != &nohit) {
104		fat_cache_update_lru(inode, hit);
105
106		cid->id = MSDOS_I(inode)->cache_valid_id;
107		cid->nr_contig = hit->nr_contig;
108		cid->fcluster = hit->fcluster;
109		cid->dcluster = hit->dcluster;
110		*cached_fclus = cid->fcluster + offset;
111		*cached_dclus = cid->dcluster + offset;
112	}
113	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
114
115	return offset;
116}
117
118static struct fat_cache *fat_cache_merge(struct inode *inode,
119					 struct fat_cache_id *new)
120{
121	struct fat_cache *p;
122
123	list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
124		/* Find the same part as "new" in cluster-chain. */
125		if (p->fcluster == new->fcluster) {
126			BUG_ON(p->dcluster != new->dcluster);
127			if (new->nr_contig > p->nr_contig)
128				p->nr_contig = new->nr_contig;
129			return p;
130		}
131	}
132	return NULL;
133}
134
135static void fat_cache_add(struct inode *inode, struct fat_cache_id *new)
136{
137	struct fat_cache *cache, *tmp;
138
139	if (new->fcluster == -1) /* dummy cache */
140		return;
141
142	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
143	if (new->id != FAT_CACHE_VALID &&
144	    new->id != MSDOS_I(inode)->cache_valid_id)
145		goto out;	/* this cache was invalidated */
146
147	cache = fat_cache_merge(inode, new);
148	if (cache == NULL) {
149		if (MSDOS_I(inode)->nr_caches < fat_max_cache(inode)) {
150			MSDOS_I(inode)->nr_caches++;
151			spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
152
153			tmp = fat_cache_alloc(inode);
154			if (!tmp) {
155				spin_lock(&MSDOS_I(inode)->cache_lru_lock);
156				MSDOS_I(inode)->nr_caches--;
157				spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
158				return;
159			}
160
161			spin_lock(&MSDOS_I(inode)->cache_lru_lock);
162			cache = fat_cache_merge(inode, new);
163			if (cache != NULL) {
164				MSDOS_I(inode)->nr_caches--;
165				fat_cache_free(tmp);
166				goto out_update_lru;
167			}
168			cache = tmp;
169		} else {
170			struct list_head *p = MSDOS_I(inode)->cache_lru.prev;
171			cache = list_entry(p, struct fat_cache, cache_list);
172		}
173		cache->fcluster = new->fcluster;
174		cache->dcluster = new->dcluster;
175		cache->nr_contig = new->nr_contig;
176	}
177out_update_lru:
178	fat_cache_update_lru(inode, cache);
179out:
180	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
181}
182
183/*
184 * Cache invalidation occurs rarely, thus the LRU chain is not updated. It
185 * fixes itself after a while.
186 */
187static void __fat_cache_inval_inode(struct inode *inode)
188{
189	struct msdos_inode_info *i = MSDOS_I(inode);
190	struct fat_cache *cache;
191
192	while (!list_empty(&i->cache_lru)) {
193		cache = list_entry(i->cache_lru.next, struct fat_cache, cache_list);
194		list_del_init(&cache->cache_list);
195		i->nr_caches--;
196		fat_cache_free(cache);
197	}
198	/* Update. The copy of caches before this id is discarded. */
199	i->cache_valid_id++;
200	if (i->cache_valid_id == FAT_CACHE_VALID)
201		i->cache_valid_id++;
202}
203
204void fat_cache_inval_inode(struct inode *inode)
205{
206	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
207	__fat_cache_inval_inode(inode);
208	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
209}
210
211static inline int cache_contiguous(struct fat_cache_id *cid, int dclus)
212{
213	cid->nr_contig++;
214	return ((cid->dcluster + cid->nr_contig) == dclus);
215}
216
217static inline void cache_init(struct fat_cache_id *cid, int fclus, int dclus)
218{
219	cid->id = FAT_CACHE_VALID;
220	cid->fcluster = fclus;
221	cid->dcluster = dclus;
222	cid->nr_contig = 0;
223}
224
225int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
226{
227	struct super_block *sb = inode->i_sb;
228	const int limit = sb->s_maxbytes >> MSDOS_SB(sb)->cluster_bits;
229	struct fat_entry fatent;
230	struct fat_cache_id cid;
231	int nr;
232
233	BUG_ON(MSDOS_I(inode)->i_start == 0);
234
235	*fclus = 0;
236	*dclus = MSDOS_I(inode)->i_start;
237	if (cluster == 0)
238		return 0;
239
240	if (fat_cache_lookup(inode, cluster, &cid, fclus, dclus) < 0) {
241		/*
242		 * dummy, always not contiguous
243		 * This is reinitialized by cache_init(), later.
244		 */
245		cache_init(&cid, -1, -1);
246	}
247
248	fatent_init(&fatent);
249	while (*fclus < cluster) {
250		/* prevent the infinite loop of cluster chain */
251		if (*fclus > limit) {
252			fat_fs_error_ratelimit(sb,
253					"%s: detected the cluster chain loop"
254					" (i_pos %lld)", __func__,
255					MSDOS_I(inode)->i_pos);
256			nr = -EIO;
257			goto out;
258		}
259
260		nr = fat_ent_read(inode, &fatent, *dclus);
261		if (nr < 0)
262			goto out;
263		else if (nr == FAT_ENT_FREE) {
264			fat_fs_error_ratelimit(sb, "%s: invalid cluster chain"
265					       " (i_pos %lld)", __func__,
266					       MSDOS_I(inode)->i_pos);
267			nr = -EIO;
268			goto out;
269		} else if (nr == FAT_ENT_EOF) {
270			fat_cache_add(inode, &cid);
271			goto out;
272		}
273		(*fclus)++;
274		*dclus = nr;
275		if (!cache_contiguous(&cid, *dclus))
276			cache_init(&cid, *fclus, *dclus);
277	}
278	nr = 0;
279	fat_cache_add(inode, &cid);
280out:
281	fatent_brelse(&fatent);
282	return nr;
283}
284
285static int fat_bmap_cluster(struct inode *inode, int cluster)
286{
287	struct super_block *sb = inode->i_sb;
288	int ret, fclus, dclus;
289
290	if (MSDOS_I(inode)->i_start == 0)
291		return 0;
292
293	ret = fat_get_cluster(inode, cluster, &fclus, &dclus);
294	if (ret < 0)
295		return ret;
296	else if (ret == FAT_ENT_EOF) {
297		fat_fs_error(sb, "%s: request beyond EOF (i_pos %lld)",
298			     __func__, MSDOS_I(inode)->i_pos);
299		return -EIO;
300	}
301	return dclus;
302}
303
304int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
305	     unsigned long *mapped_blocks, int create)
306{
307	struct super_block *sb = inode->i_sb;
308	struct msdos_sb_info *sbi = MSDOS_SB(sb);
309	const unsigned long blocksize = sb->s_blocksize;
310	const unsigned char blocksize_bits = sb->s_blocksize_bits;
311	sector_t last_block;
312	int cluster, offset;
313
314	*phys = 0;
315	*mapped_blocks = 0;
316	if ((sbi->fat_bits != 32) && (inode->i_ino == MSDOS_ROOT_INO)) {
317		if (sector < (sbi->dir_entries >> sbi->dir_per_block_bits)) {
318			*phys = sector + sbi->dir_start;
319			*mapped_blocks = 1;
320		}
321		return 0;
322	}
323
324	last_block = (i_size_read(inode) + (blocksize - 1)) >> blocksize_bits;
325	if (sector >= last_block) {
326		if (!create)
327			return 0;
328
329		/*
330		 * ->mmu_private can be accessed only on the allocation path
331		 * (the caller must hold ->i_mutex).
332		 */
333		last_block = (MSDOS_I(inode)->mmu_private + (blocksize - 1))
334			>> blocksize_bits;
335		if (sector >= last_block)
336			return 0;
337	}
338
339	cluster = sector >> (sbi->cluster_bits - sb->s_blocksize_bits);
340	offset  = sector & (sbi->sec_per_clus - 1);
341	cluster = fat_bmap_cluster(inode, cluster);
342	if (cluster < 0)
343		return cluster;
344	else if (cluster) {
345		*phys = fat_clus_to_blknr(sbi, cluster) + offset;
346		*mapped_blocks = sbi->sec_per_clus - offset;
347		if (*mapped_blocks > last_block - sector)
348			*mapped_blocks = last_block - sector;
349	}
350	return 0;
351}