// SPDX-License-Identifier: GPL-2.0-only
/*
 * partition.c
 *
 * PURPOSE
 *	Partition handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *	(C) 1998-2001 Ben Fennema
 *
 * HISTORY
 *
 *	12/06/98 blf  Created file.
 *
 */

#include "udfdecl.h"
#include "udf_sb.h"
#include "udf_i.h"

#include <linux/fs.h>
#include <linux/string.h>
#include <linux/mutex.h>

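/*
 * udf_get_pblock() - translate a block number relative to a UDF partition
 * into an absolute block number on the medium. If the partition map has a
 * translation helper registered (virtual, sparable or metadata partition),
 * defer to it; otherwise the mapping is a plain offset from the partition
 * root. Returns 0xFFFFFFFF for an out-of-range partition reference.
 */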
uint32_t udf_get_pblock(struct super_block *sb, uint32_t block,
			uint16_t partition, uint32_t offset)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *map;
	if (partition >= sbi->s_partitions) {
		udf_debug("block=%u, partition=%u, offset=%u: invalid partition\n",
			  block, partition, offset);
		return 0xFFFFFFFF;
	}
	map = &sbi->s_partmaps[partition];
	if (map->s_partition_func)
		return map->s_partition_func(sb, block, partition, offset);
	else
		return map->s_partition_root + block + offset;
}

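/*
 * udf_get_pblock_virt15() - look a block up in the Virtual Allocation Table
 * (VAT) used by UDF 1.50 virtual partitions on write-once media. The VAT
 * entry for 'block' gives its location inside the underlying partition,
 * which is then translated with udf_get_pblock(). Returns 0xFFFFFFFF if the
 * block lies beyond the VAT or the VAT cannot be read.
 */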
uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block,
			       uint16_t partition, uint32_t offset)
{
	struct buffer_head *bh = NULL;
	uint32_t newblock;
	uint32_t index;
	uint32_t loc;
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *map;
	struct udf_virtual_data *vdata;
	struct udf_inode_info *iinfo = UDF_I(sbi->s_vat_inode);
	int err;

	map = &sbi->s_partmaps[partition];
	vdata = &map->s_type_specific.s_virtual;

	if (block > vdata->s_num_entries) {
		udf_debug("Trying to access block beyond end of VAT (%u max %u)\n",
			  block, vdata->s_num_entries);
		return 0xFFFFFFFF;
	}

	/* VAT stored entirely in the VAT inode's in-ICB data? */
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
		loc = le32_to_cpu(((__le32 *)(iinfo->i_data +
				vdata->s_start_offset))[block]);
		goto translate;
	}

	/* Work out which block of the VAT file holds the entry and the
	 * entry's index within that block. */
	index = (sb->s_blocksize - vdata->s_start_offset) / sizeof(uint32_t);
	if (block >= index) {
		block -= index;
		newblock = 1 + (block / (sb->s_blocksize / sizeof(uint32_t)));
		index = block % (sb->s_blocksize / sizeof(uint32_t));
	} else {
		newblock = 0;
		index = vdata->s_start_offset / sizeof(uint32_t) + block;
	}

	bh = udf_bread(sbi->s_vat_inode, newblock, 0, &err);
	if (!bh) {
		udf_debug("get_pblock(UDF_VIRTUAL_MAP:%p,%u,%u)\n",
			  sb, block, partition);
		return 0xFFFFFFFF;
	}

	loc = le32_to_cpu(((__le32 *)bh->b_data)[index]);

	brelse(bh);

translate:
	if (iinfo->i_location.partitionReferenceNum == partition) {
		udf_debug("recursive call to udf_get_pblock!\n");
		return 0xFFFFFFFF;
	}

	return udf_get_pblock(sb, loc,
			      iinfo->i_location.partitionReferenceNum,
			      offset);
}

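/*
 * udf_get_pblock_virt20() - UDF 2.00 virtual partitions use the same VAT
 * lookup as UDF 1.50, so simply reuse udf_get_pblock_virt15().
 */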
inline uint32_t udf_get_pblock_virt20(struct super_block *sb, uint32_t block,
				      uint16_t partition, uint32_t offset)
{
	return udf_get_pblock_virt15(sb, block, partition, offset);
}

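/*
 * udf_get_pblock_spar15() - map a block in a sparable partition. The packet
 * containing the block is looked up in the sparing table; if the packet has
 * been remapped (e.g. around a defective area on rewritable media), return
 * the corresponding block inside the spare packet, otherwise fall back to
 * the plain partition-root offset.
 */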
uint32_t udf_get_pblock_spar15(struct super_block *sb, uint32_t block,
			       uint16_t partition, uint32_t offset)
{
	int i;
	struct sparingTable *st = NULL;
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *map;
	uint32_t packet;
	struct udf_sparing_data *sdata;

	map = &sbi->s_partmaps[partition];
	sdata = &map->s_type_specific.s_sparing;
	packet = (block + offset) & ~(sdata->s_packet_len - 1);

	for (i = 0; i < 4; i++) {
		if (sdata->s_spar_map[i] != NULL) {
			st = (struct sparingTable *)
				sdata->s_spar_map[i]->b_data;
			break;
		}
	}

	if (st) {
		for (i = 0; i < le16_to_cpu(st->reallocationTableLen); i++) {
			struct sparingEntry *entry = &st->mapEntry[i];
			u32 origLoc = le32_to_cpu(entry->origLocation);
			if (origLoc >= 0xFFFFFFF0)
				break;
			else if (origLoc == packet)
				return le32_to_cpu(entry->mappedLocation) +
					((block + offset) &
					 (sdata->s_packet_len - 1));
			else if (origLoc > packet)
				break;
		}
	}

	return map->s_partition_root + block + offset;
}

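/*
 * udf_relocate_blocks() - remap the packet containing old_block into a spare
 * area of a sparable partition. Finds (or claims a free) sparing-table entry
 * for the packet, updates every loaded copy of the sparing table on the
 * medium, and returns the new physical location through *new_block. Returns
 * 0 on success and 1 if old_block falls outside all partitions, no sparing
 * table is loaded, or no table entry is available.
 */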
int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
{
	struct udf_sparing_data *sdata;
	struct sparingTable *st = NULL;
	struct sparingEntry mapEntry;
	uint32_t packet;
	int i, j, k, l;
	struct udf_sb_info *sbi = UDF_SB(sb);
	u16 reallocationTableLen;
	struct buffer_head *bh;
	int ret = 0;

	mutex_lock(&sbi->s_alloc_mutex);
	for (i = 0; i < sbi->s_partitions; i++) {
		struct udf_part_map *map = &sbi->s_partmaps[i];
		if (old_block > map->s_partition_root &&
		    old_block < map->s_partition_root + map->s_partition_len) {
			sdata = &map->s_type_specific.s_sparing;
			packet = (old_block - map->s_partition_root) &
						~(sdata->s_packet_len - 1);

			/* Use the first loaded copy of the sparing table
			 * for the lookup. */
			for (j = 0; j < 4; j++)
				if (sdata->s_spar_map[j] != NULL) {
					st = (struct sparingTable *)
						sdata->s_spar_map[j]->b_data;
					break;
				}

			if (!st) {
				ret = 1;
				goto out;
			}

			/* Entries are sorted by origLocation; look for this
			 * packet or stop at the first free entry. */
			reallocationTableLen =
					le16_to_cpu(st->reallocationTableLen);
			for (k = 0; k < reallocationTableLen; k++) {
				struct sparingEntry *entry = &st->mapEntry[k];
				u32 origLoc = le32_to_cpu(entry->origLocation);

				if (origLoc == 0xFFFFFFFF) {
					for (; j < 4; j++) {
						int len;
						bh = sdata->s_spar_map[j];
						if (!bh)
							continue;

						st = (struct sparingTable *)
								bh->b_data;
						entry->origLocation =
							cpu_to_le32(packet);
						len =
						  sizeof(struct sparingTable) +
						  reallocationTableLen *
						  sizeof(struct sparingEntry);
						udf_update_tag((char *)st, len);
						mark_buffer_dirty(bh);
					}
					*new_block = le32_to_cpu(
							entry->mappedLocation) +
						((old_block -
							map->s_partition_root) &
						(sdata->s_packet_len - 1));
					ret = 0;
					goto out;
				} else if (origLoc == packet) {
					*new_block = le32_to_cpu(
							entry->mappedLocation) +
						((old_block -
							map->s_partition_root) &
						(sdata->s_packet_len - 1));
					ret = 0;
					goto out;
				} else if (origLoc > packet)
					break;
			}

			/* Packet not in the table; find a free entry further
			 * down and move it to position k so the table stays
			 * sorted by origLocation. */
			for (l = k; l < reallocationTableLen; l++) {
				struct sparingEntry *entry = &st->mapEntry[l];
				u32 origLoc = le32_to_cpu(entry->origLocation);

				if (origLoc != 0xFFFFFFFF)
					continue;

				for (; j < 4; j++) {
					bh = sdata->s_spar_map[j];
					if (!bh)
						continue;

					st = (struct sparingTable *)bh->b_data;
					mapEntry = st->mapEntry[l];
					mapEntry.origLocation =
							cpu_to_le32(packet);
					memmove(&st->mapEntry[k + 1],
						&st->mapEntry[k],
						(l - k) *
						sizeof(struct sparingEntry));
					st->mapEntry[k] = mapEntry;
					udf_update_tag((char *)st,
						sizeof(struct sparingTable) +
						reallocationTableLen *
						sizeof(struct sparingEntry));
					mark_buffer_dirty(bh);
				}
				*new_block =
					le32_to_cpu(
						st->mapEntry[k].mappedLocation) +
					((old_block - map->s_partition_root) &
					 (sdata->s_packet_len - 1));
				ret = 0;
				goto out;
			}

			ret = 1;
			goto out;
		} /* if old_block */
	}

	if (i == sbi->s_partitions) {
		/* outside of partitions */
		/* for now, fail =) */
		ret = 1;
	}

out:
	mutex_unlock(&sbi->s_alloc_mutex);
	return ret;
}

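/*
 * udf_try_read_meta() - translate a block of the metadata (or mirror) file
 * into a physical block: map the block to an extent of the file and then
 * translate the extent's location through the underlying sparable or
 * physical partition. Returns 0xFFFFFFFF if the extent cannot be read or is
 * not a recorded, allocated extent.
 */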
static uint32_t udf_try_read_meta(struct inode *inode, uint32_t block,
				  uint16_t partition, uint32_t offset)
{
	struct super_block *sb = inode->i_sb;
	struct udf_part_map *map;
	struct kernel_lb_addr eloc;
	uint32_t elen;
	sector_t ext_offset;
	struct extent_position epos = {};
	uint32_t phyblock;
	int8_t etype;
	int err = 0;

	err = inode_bmap(inode, block, &epos, &eloc, &elen, &ext_offset, &etype);
	if (err <= 0 || etype != (EXT_RECORDED_ALLOCATED >> 30))
		phyblock = 0xFFFFFFFF;
	else {
		map = &UDF_SB(sb)->s_partmaps[partition];
		/* map to sparable/physical partition desc */
		phyblock = udf_get_pblock(sb, eloc.logicalBlockNum,
			map->s_type_specific.s_metadata.s_phys_partition_ref,
			ext_offset + offset);
	}

	brelse(epos.bh);
	return phyblock;
}

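/*
 * udf_get_pblock_meta25() - map a block of a UDF 2.50 metadata partition.
 * Reads through the metadata file; if that fails and a metadata file was in
 * use, loads the mirror file (once) and retries the lookup through it.
 * Returns 0xFFFFFFFF if neither file yields a valid mapping.
 */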
uint32_t udf_get_pblock_meta25(struct super_block *sb, uint32_t block,
			       uint16_t partition, uint32_t offset)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *map;
	struct udf_meta_data *mdata;
	uint32_t retblk;
	struct inode *inode;

	udf_debug("READING from METADATA\n");

	map = &sbi->s_partmaps[partition];
	mdata = &map->s_type_specific.s_metadata;
	inode = mdata->s_metadata_fe ? : mdata->s_mirror_fe;

	if (!inode)
		return 0xFFFFFFFF;

	retblk = udf_try_read_meta(inode, block, partition, offset);
	if (retblk == 0xFFFFFFFF && mdata->s_metadata_fe) {
		udf_warn(sb, "error reading from METADATA, trying to read from MIRROR\n");
		if (!(mdata->s_flags & MF_MIRROR_FE_LOADED)) {
			mdata->s_mirror_fe = udf_find_metadata_inode_efe(sb,
				mdata->s_mirror_file_loc,
				mdata->s_phys_partition_ref);
			if (IS_ERR(mdata->s_mirror_fe))
				mdata->s_mirror_fe = NULL;
			mdata->s_flags |= MF_MIRROR_FE_LOADED;
		}

		inode = mdata->s_mirror_fe;
		if (!inode)
			return 0xFFFFFFFF;
		retblk = udf_try_read_meta(inode, block, partition, offset);
	}

	return retblk;
}