/*
 * partition.c
 *
 * PURPOSE
 *      Partition handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *      This file is distributed under the terms of the GNU General Public
 *      License (GPL). Copies of the GPL can be obtained from:
 *              ftp://prep.ai.mit.edu/pub/gnu/GPL
 *      Each contributing author retains all rights to their own work.
 *
 *  (C) 1998-2001 Ben Fennema
 *
 * HISTORY
 *
 * 12/06/98 blf  Created file.
 *
 */

#include "udfdecl.h"
#include "udf_sb.h"
#include "udf_i.h"

#include <linux/fs.h>
#include <linux/string.h>
#include <linux/mutex.h>

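/*
 * udf_get_pblock - map a partition-relative block to a physical block
 *
 * Looks up the partition map for @partition and either delegates to the
 * partition-type specific translation function (virtual, sparable,
 * metadata) or, for a plain physical partition, simply adds the partition
 * root.  Returns 0xFFFFFFFF if @partition is out of range.
 */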
uint32_t udf_get_pblock(struct super_block *sb, uint32_t block,
			uint16_t partition, uint32_t offset)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *map;
	if (partition >= sbi->s_partitions) {
		udf_debug("block=%u, partition=%u, offset=%u: invalid partition\n",
			  block, partition, offset);
		return 0xFFFFFFFF;
	}
	map = &sbi->s_partmaps[partition];
	if (map->s_partition_func)
		return map->s_partition_func(sb, block, partition, offset);
	else
		return map->s_partition_root + block + offset;
}

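/*
 * udf_get_pblock_virt15 - map a block in a UDF 1.50 virtual partition
 *
 * Translates @block through the Virtual Allocation Table (VAT): the VAT
 * entry is read either from the VAT inode's in-ICB data or from the VAT
 * file block that contains it, and the result is then translated through
 * the underlying partition with udf_get_pblock().
 */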
uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block,
			       uint16_t partition, uint32_t offset)
{
	struct buffer_head *bh = NULL;
	uint32_t newblock;
	uint32_t index;
	uint32_t loc;
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *map;
	struct udf_virtual_data *vdata;
	struct udf_inode_info *iinfo = UDF_I(sbi->s_vat_inode);

	map = &sbi->s_partmaps[partition];
	vdata = &map->s_type_specific.s_virtual;

	if (block >= vdata->s_num_entries) {
		udf_debug("Trying to access block beyond end of VAT (%u max %u)\n",
			  block, vdata->s_num_entries);
		return 0xFFFFFFFF;
	}

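	/*
	 * If the VAT data is stored inside the ICB, the entry can be read
	 * straight from i_data; otherwise work out which block of the VAT
	 * file holds the entry and read it through the buffer cache.
	 */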
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
		loc = le32_to_cpu(((__le32 *)(iinfo->i_ext.i_data +
			vdata->s_start_offset))[block]);
		goto translate;
	}
	index = (sb->s_blocksize - vdata->s_start_offset) / sizeof(uint32_t);
	if (block >= index) {
		block -= index;
		newblock = 1 + (block / (sb->s_blocksize / sizeof(uint32_t)));
		index = block % (sb->s_blocksize / sizeof(uint32_t));
	} else {
		newblock = 0;
		index = vdata->s_start_offset / sizeof(uint32_t) + block;
	}

	loc = udf_block_map(sbi->s_vat_inode, newblock);

	bh = sb_bread(sb, loc);
	if (!bh) {
		udf_debug("get_pblock(UDF_VIRTUAL_MAP:%p,%u,%u) VAT: %u[%u]\n",
			  sb, block, partition, loc, index);
		return 0xFFFFFFFF;
	}

	loc = le32_to_cpu(((__le32 *)bh->b_data)[index]);

	brelse(bh);

translate:
	if (iinfo->i_location.partitionReferenceNum == partition) {
		udf_debug("recursive call to udf_get_pblock!\n");
		return 0xFFFFFFFF;
	}

	return udf_get_pblock(sb, loc,
			      iinfo->i_location.partitionReferenceNum,
			      offset);
}

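/*
 * udf_get_pblock_virt20 - map a block in a UDF 2.00 virtual partition
 *
 * The lookup is identical to the 1.50 case; the different VAT header
 * layout is accounted for by the start offset and entry count set up
 * when the VAT is loaded at mount time.
 */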
inline uint32_t udf_get_pblock_virt20(struct super_block *sb, uint32_t block,
				      uint16_t partition, uint32_t offset)
{
	return udf_get_pblock_virt15(sb, block, partition, offset);
}

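/*
 * udf_get_pblock_spar15 - map a block in a UDF 1.50 sparable partition
 *
 * Rounds the block down to its packet and looks the packet up in the
 * sparing table.  A remapped packet is redirected to its spare location;
 * otherwise the block maps 1:1 onto the physical partition.
 */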
uint32_t udf_get_pblock_spar15(struct super_block *sb, uint32_t block,
			       uint16_t partition, uint32_t offset)
{
	int i;
	struct sparingTable *st = NULL;
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *map;
	uint32_t packet;
	struct udf_sparing_data *sdata;

	map = &sbi->s_partmaps[partition];
	sdata = &map->s_type_specific.s_sparing;
	packet = (block + offset) & ~(sdata->s_packet_len - 1);

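	/* Use the first loaded copy of the sparing table (up to four are kept). */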
	for (i = 0; i < 4; i++) {
		if (sdata->s_spar_map[i] != NULL) {
			st = (struct sparingTable *)
					sdata->s_spar_map[i]->b_data;
			break;
		}
	}

	if (st) {
		for (i = 0; i < le16_to_cpu(st->reallocationTableLen); i++) {
			struct sparingEntry *entry = &st->mapEntry[i];
			u32 origLoc = le32_to_cpu(entry->origLocation);
			if (origLoc >= 0xFFFFFFF0)
				break;
			else if (origLoc == packet)
				return le32_to_cpu(entry->mappedLocation) +
					((block + offset) &
						(sdata->s_packet_len - 1));
			else if (origLoc > packet)
				break;
		}
	}

	return map->s_partition_root + block + offset;
}

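/*
 * udf_relocate_blocks - remap a packet through the sparing table
 *
 * Finds (or allocates) the sparing table entry for the packet containing
 * @old_block, updates each loaded copy of the sparing table on disk and
 * returns the relocated block number in *@new_block.  Returns 0 on
 * success and 1 on failure (block outside any sparable partition, no
 * sparing table loaded, or no free spare entry left).
 */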
int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
{
	struct udf_sparing_data *sdata;
	struct sparingTable *st = NULL;
	struct sparingEntry mapEntry;
	uint32_t packet;
	int i, j, k, l;
	struct udf_sb_info *sbi = UDF_SB(sb);
	u16 reallocationTableLen;
	struct buffer_head *bh;
	int ret = 0;

	mutex_lock(&sbi->s_alloc_mutex);
	for (i = 0; i < sbi->s_partitions; i++) {
		struct udf_part_map *map = &sbi->s_partmaps[i];
		if (old_block > map->s_partition_root &&
		    old_block < map->s_partition_root + map->s_partition_len) {
			sdata = &map->s_type_specific.s_sparing;
			packet = (old_block - map->s_partition_root) &
						~(sdata->s_packet_len - 1);

			for (j = 0; j < 4; j++)
				if (sdata->s_spar_map[j] != NULL) {
					st = (struct sparingTable *)
						sdata->s_spar_map[j]->b_data;
					break;
				}

			if (!st) {
				ret = 1;
				goto out;
			}

			reallocationTableLen =
					le16_to_cpu(st->reallocationTableLen);
			for (k = 0; k < reallocationTableLen; k++) {
				struct sparingEntry *entry = &st->mapEntry[k];
				u32 origLoc = le32_to_cpu(entry->origLocation);

				if (origLoc == 0xFFFFFFFF) {
					for (; j < 4; j++) {
						int len;
						bh = sdata->s_spar_map[j];
						if (!bh)
							continue;

						st = (struct sparingTable *)
								bh->b_data;
						entry->origLocation =
							cpu_to_le32(packet);
						len =
						  sizeof(struct sparingTable) +
						  reallocationTableLen *
						  sizeof(struct sparingEntry);
						udf_update_tag((char *)st, len);
						mark_buffer_dirty(bh);
					}
					*new_block = le32_to_cpu(
							entry->mappedLocation) +
						     ((old_block -
							map->s_partition_root) &
						     (sdata->s_packet_len - 1));
					ret = 0;
					goto out;
				} else if (origLoc == packet) {
					*new_block = le32_to_cpu(
							entry->mappedLocation) +
						     ((old_block -
							map->s_partition_root) &
						     (sdata->s_packet_len - 1));
					ret = 0;
					goto out;
				} else if (origLoc > packet)
					break;
			}

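			/*
			 * The packet is not in the table yet: grab the first
			 * free entry (origLocation == 0xFFFFFFFF), assign the
			 * packet to it and move it to position k, so that the
			 * table stays sorted by original location, in every
			 * loaded copy of the sparing table.
			 */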
			for (l = k; l < reallocationTableLen; l++) {
				struct sparingEntry *entry = &st->mapEntry[l];
				u32 origLoc = le32_to_cpu(entry->origLocation);

				if (origLoc != 0xFFFFFFFF)
					continue;

				for (; j < 4; j++) {
					bh = sdata->s_spar_map[j];
					if (!bh)
						continue;

					st = (struct sparingTable *)bh->b_data;
					mapEntry = st->mapEntry[l];
					mapEntry.origLocation =
							cpu_to_le32(packet);
					memmove(&st->mapEntry[k + 1],
						&st->mapEntry[k],
						(l - k) *
						sizeof(struct sparingEntry));
					st->mapEntry[k] = mapEntry;
					udf_update_tag((char *)st,
						sizeof(struct sparingTable) +
						reallocationTableLen *
						sizeof(struct sparingEntry));
					mark_buffer_dirty(bh);
				}
				*new_block =
					le32_to_cpu(
					      st->mapEntry[k].mappedLocation) +
					((old_block - map->s_partition_root) &
					 (sdata->s_packet_len - 1));
				ret = 0;
				goto out;
			}

			ret = 1;
			goto out;
		} /* if old_block */
	}

	if (i == sbi->s_partitions) {
		/* outside of partitions */
		/* for now, fail =) */
		ret = 1;
	}

out:
	mutex_unlock(&sbi->s_alloc_mutex);
	return ret;
}

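/*
 * udf_try_read_meta - map a block through the metadata (or mirror) file
 *
 * Looks up the extent of @inode covering @block and translates the
 * resulting logical block through the underlying sparable/physical
 * partition.  Returns 0xFFFFFFFF if the block is not covered by a
 * recorded and allocated extent.
 */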
static uint32_t udf_try_read_meta(struct inode *inode, uint32_t block,
					uint16_t partition, uint32_t offset)
{
	struct super_block *sb = inode->i_sb;
	struct udf_part_map *map;
	struct kernel_lb_addr eloc;
	uint32_t elen;
	sector_t ext_offset;
	struct extent_position epos = {};
	uint32_t phyblock;

	if (inode_bmap(inode, block, &epos, &eloc, &elen, &ext_offset) !=
						(EXT_RECORDED_ALLOCATED >> 30))
		phyblock = 0xFFFFFFFF;
	else {
		map = &UDF_SB(sb)->s_partmaps[partition];
		/* map to sparable/physical partition desc */
		phyblock = udf_get_pblock(sb, eloc.logicalBlockNum,
			map->s_type_specific.s_metadata.s_phys_partition_ref,
			ext_offset + offset);
	}

	brelse(epos.bh);
	return phyblock;
}

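/*
 * udf_get_pblock_meta25 - map a block in a UDF 2.50 metadata partition
 *
 * Maps @block through the metadata file.  If that fails, the mirror
 * metadata file is loaded on demand and tried as a fallback.
 */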
uint32_t udf_get_pblock_meta25(struct super_block *sb, uint32_t block,
				uint16_t partition, uint32_t offset)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *map;
	struct udf_meta_data *mdata;
	uint32_t retblk;
	struct inode *inode;

	udf_debug("READING from METADATA\n");

	map = &sbi->s_partmaps[partition];
	mdata = &map->s_type_specific.s_metadata;
	inode = mdata->s_metadata_fe ? : mdata->s_mirror_fe;

	if (!inode)
		return 0xFFFFFFFF;

	retblk = udf_try_read_meta(inode, block, partition, offset);
	if (retblk == 0xFFFFFFFF && mdata->s_metadata_fe) {
		udf_warn(sb, "error reading from METADATA, trying to read from MIRROR\n");
		if (!(mdata->s_flags & MF_MIRROR_FE_LOADED)) {
			mdata->s_mirror_fe = udf_find_metadata_inode_efe(sb,
				mdata->s_mirror_file_loc,
				mdata->s_phys_partition_ref);
			if (IS_ERR(mdata->s_mirror_fe))
				mdata->s_mirror_fe = NULL;
			mdata->s_flags |= MF_MIRROR_FE_LOADED;
		}

		inode = mdata->s_mirror_fe;
		if (!inode)
			return 0xFFFFFFFF;
		retblk = udf_try_read_meta(inode, block, partition, offset);
	}

	return retblk;
}