/*
 * linux/fs/hpfs/buffer.c
 *
 * Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
 *
 * general buffer i/o
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include "hpfs_fn.h"

/* Map a sector into a buffer and return pointers to it and to the buffer. */

void *hpfs_map_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp,
                      int ahead)
{
        struct buffer_head *bh;

        hpfs_lock_assert(s);

        cond_resched();

        *bhp = bh = sb_bread(s, secno);
        if (bh != NULL)
                return bh->b_data;
        else {
                printk("HPFS: hpfs_map_sector: read error\n");
                return NULL;
        }
}

/* Like hpfs_map_sector but don't read anything */

void *hpfs_get_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp)
{
        struct buffer_head *bh;
        /*return hpfs_map_sector(s, secno, bhp, 0);*/

        hpfs_lock_assert(s);

        cond_resched();

        if ((*bhp = bh = sb_getblk(s, secno)) != NULL) {
                if (!buffer_uptodate(bh)) wait_on_buffer(bh);
                set_buffer_uptodate(bh);
                return bh->b_data;
        } else {
                printk("HPFS: hpfs_get_sector: getblk failed\n");
                return NULL;
        }
}

/* Map four sectors into a quad buffer and return pointers to the data and to the buffers. */

void *hpfs_map_4sectors(struct super_block *s, unsigned secno, struct quad_buffer_head *qbh,
                        int ahead)
{
        struct buffer_head *bh;
        char *data;

        hpfs_lock_assert(s);

        cond_resched();

        if (secno & 3) {
                printk("HPFS: hpfs_map_4sectors: unaligned read\n");
                return NULL;
        }

        qbh->data = data = kmalloc(2048, GFP_NOFS);
        if (!data) {
                printk("HPFS: hpfs_map_4sectors: out of memory\n");
                goto bail;
        }

        qbh->bh[0] = bh = sb_bread(s, secno);
        if (!bh)
                goto bail0;
        memcpy(data, bh->b_data, 512);

        qbh->bh[1] = bh = sb_bread(s, secno + 1);
        if (!bh)
                goto bail1;
        memcpy(data + 512, bh->b_data, 512);

        qbh->bh[2] = bh = sb_bread(s, secno + 2);
        if (!bh)
                goto bail2;
        memcpy(data + 2 * 512, bh->b_data, 512);

        qbh->bh[3] = bh = sb_bread(s, secno + 3);
        if (!bh)
                goto bail3;
        memcpy(data + 3 * 512, bh->b_data, 512);

        return data;

 bail3:
        brelse(qbh->bh[2]);
 bail2:
        brelse(qbh->bh[1]);
 bail1:
        brelse(qbh->bh[0]);
 bail0:
        kfree(data);
        printk("HPFS: hpfs_map_4sectors: read error\n");
 bail:
        return NULL;
}

/* Don't read sectors */

void *hpfs_get_4sectors(struct super_block *s, unsigned secno,
                        struct quad_buffer_head *qbh)
{
        cond_resched();

        hpfs_lock_assert(s);

        if (secno & 3) {
                printk("HPFS: hpfs_get_4sectors: unaligned read\n");
                return NULL;
        }

        /*return hpfs_map_4sectors(s, secno, qbh, 0);*/
        if (!(qbh->data = kmalloc(2048, GFP_NOFS))) {
                printk("HPFS: hpfs_get_4sectors: out of memory\n");
                return NULL;
        }
        if (!(hpfs_get_sector(s, secno, &qbh->bh[0]))) goto bail0;
        if (!(hpfs_get_sector(s, secno + 1, &qbh->bh[1]))) goto bail1;
        if (!(hpfs_get_sector(s, secno + 2, &qbh->bh[2]))) goto bail2;
        if (!(hpfs_get_sector(s, secno + 3, &qbh->bh[3]))) goto bail3;
        memcpy(qbh->data, qbh->bh[0]->b_data, 512);
        memcpy(qbh->data + 512, qbh->bh[1]->b_data, 512);
        memcpy(qbh->data + 2*512, qbh->bh[2]->b_data, 512);
        memcpy(qbh->data + 3*512, qbh->bh[3]->b_data, 512);
        return qbh->data;

        bail3:  brelse(qbh->bh[2]);
        bail2:  brelse(qbh->bh[1]);
        bail1:  brelse(qbh->bh[0]);
        bail0:
        kfree(qbh->data);       /* don't leak the scratch buffer on a failed get */
        return NULL;
}

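/*
 * Release the four buffers and free the 2048-byte scratch copy allocated by
 * hpfs_map_4sectors / hpfs_get_4sectors.
 */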
void hpfs_brelse4(struct quad_buffer_head *qbh)
{
        brelse(qbh->bh[3]);
        brelse(qbh->bh[2]);
        brelse(qbh->bh[1]);
        brelse(qbh->bh[0]);
        kfree(qbh->data);
}

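/*
 * Copy the scratch buffer back into the four underlying buffers and mark
 * them dirty so the block layer writes them out.
 */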
void hpfs_mark_4buffers_dirty(struct quad_buffer_head *qbh)
{
        memcpy(qbh->bh[0]->b_data, qbh->data, 512);
        memcpy(qbh->bh[1]->b_data, qbh->data + 512, 512);
        memcpy(qbh->bh[2]->b_data, qbh->data + 2 * 512, 512);
        memcpy(qbh->bh[3]->b_data, qbh->data + 3 * 512, 512);
        mark_buffer_dirty(qbh->bh[0]);
        mark_buffer_dirty(qbh->bh[1]);
        mark_buffer_dirty(qbh->bh[2]);
        mark_buffer_dirty(qbh->bh[3]);
}
/*
 * linux/fs/hpfs/buffer.c
 *
 * Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
 *
 * general buffer i/o
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include "hpfs_fn.h"

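/*
 * HPFS keeps a hotfix map of sectors that were remapped because the original
 * sectors went bad.  Translate a sector number through that map; sectors that
 * are not hotfixed are returned unchanged.
 */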
secno hpfs_search_hotfix_map(struct super_block *s, secno sec)
{
        unsigned i;
        struct hpfs_sb_info *sbi = hpfs_sb(s);
        for (i = 0; unlikely(i < sbi->n_hotfixes); i++) {
                if (sbi->hotfix_from[i] == sec) {
                        return sbi->hotfix_to[i];
                }
        }
        return sec;
}

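/*
 * Clip a run of n sectors starting at sec so that it stops before the first
 * hotfixed sector; the returned count can be read contiguously without
 * consulting the hotfix map.
 */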
unsigned hpfs_search_hotfix_map_for_range(struct super_block *s, secno sec, unsigned n)
{
        unsigned i;
        struct hpfs_sb_info *sbi = hpfs_sb(s);
        for (i = 0; unlikely(i < sbi->n_hotfixes); i++) {
                if (sbi->hotfix_from[i] >= sec && sbi->hotfix_from[i] < sec + n) {
                        n = sbi->hotfix_from[i] - sec;
                }
        }
        return n;
}

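/*
 * Issue readahead for up to n sectors starting at secno.  Nothing is done if
 * the range runs past the end of the filesystem, crosses a hotfixed sector,
 * or if the first sector is already uptodate in the buffer cache.  The
 * readahead requests are issued under a block plug so they can be merged
 * before submission.
 */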
void hpfs_prefetch_sectors(struct super_block *s, unsigned secno, int n)
{
        struct buffer_head *bh;
        struct blk_plug plug;

        if (n <= 0 || unlikely(secno >= hpfs_sb(s)->sb_fs_size))
                return;

        if (unlikely(hpfs_search_hotfix_map_for_range(s, secno, n) != n))
                return;

        bh = sb_find_get_block(s, secno);
        if (bh) {
                if (buffer_uptodate(bh)) {
                        brelse(bh);
                        return;
                }
                brelse(bh);
        }

        blk_start_plug(&plug);
        while (n > 0) {
                if (unlikely(secno >= hpfs_sb(s)->sb_fs_size))
                        break;
                sb_breadahead(s, secno);
                secno++;
                n--;
        }
        blk_finish_plug(&plug);
}

/* Map a sector into a buffer and return pointers to it and to the buffer. */

void *hpfs_map_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp,
                      int ahead)
{
        struct buffer_head *bh;

        hpfs_lock_assert(s);

        hpfs_prefetch_sectors(s, secno, ahead);

        cond_resched();

        *bhp = bh = sb_bread(s, hpfs_search_hotfix_map(s, secno));
        if (bh != NULL)
                return bh->b_data;
        else {
                pr_err("%s(): read error\n", __func__);
                return NULL;
        }
}

/* Like hpfs_map_sector but don't read anything */

void *hpfs_get_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp)
{
        struct buffer_head *bh;
        /*return hpfs_map_sector(s, secno, bhp, 0);*/

        hpfs_lock_assert(s);

        cond_resched();

        if ((*bhp = bh = sb_getblk(s, hpfs_search_hotfix_map(s, secno))) != NULL) {
                if (!buffer_uptodate(bh)) wait_on_buffer(bh);
                set_buffer_uptodate(bh);
                return bh->b_data;
        } else {
                pr_err("%s(): getblk failed\n", __func__);
                return NULL;
        }
}

/* Map four sectors into a quad buffer and return pointers to the data and to the buffers. */

void *hpfs_map_4sectors(struct super_block *s, unsigned secno, struct quad_buffer_head *qbh,
                        int ahead)
{
        char *data;

        hpfs_lock_assert(s);

        cond_resched();

        if (secno & 3) {
                pr_err("%s(): unaligned read\n", __func__);
                return NULL;
        }

        hpfs_prefetch_sectors(s, secno, 4 + ahead);

        if (!hpfs_map_sector(s, secno + 0, &qbh->bh[0], 0)) goto bail0;
        if (!hpfs_map_sector(s, secno + 1, &qbh->bh[1], 0)) goto bail1;
        if (!hpfs_map_sector(s, secno + 2, &qbh->bh[2], 0)) goto bail2;
        if (!hpfs_map_sector(s, secno + 3, &qbh->bh[3], 0)) goto bail3;

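        /*
         * If the four 512-byte buffers happen to sit back to back in memory
         * (adjacent blocks within the same page of the buffer cache), the
         * data can be used in place; otherwise fall back to a separately
         * allocated bounce buffer and copy the sectors into it.
         */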
        if (likely(qbh->bh[1]->b_data == qbh->bh[0]->b_data + 1 * 512) &&
            likely(qbh->bh[2]->b_data == qbh->bh[0]->b_data + 2 * 512) &&
            likely(qbh->bh[3]->b_data == qbh->bh[0]->b_data + 3 * 512)) {
                return qbh->data = qbh->bh[0]->b_data;
        }

        qbh->data = data = kmalloc(2048, GFP_NOFS);
        if (!data) {
                pr_err("%s(): out of memory\n", __func__);
                goto bail4;
        }

        memcpy(data + 0 * 512, qbh->bh[0]->b_data, 512);
        memcpy(data + 1 * 512, qbh->bh[1]->b_data, 512);
        memcpy(data + 2 * 512, qbh->bh[2]->b_data, 512);
        memcpy(data + 3 * 512, qbh->bh[3]->b_data, 512);

        return data;

 bail4:
        brelse(qbh->bh[3]);
 bail3:
        brelse(qbh->bh[2]);
 bail2:
        brelse(qbh->bh[1]);
 bail1:
        brelse(qbh->bh[0]);
 bail0:
        return NULL;
}

/* Don't read sectors */

void *hpfs_get_4sectors(struct super_block *s, unsigned secno,
                        struct quad_buffer_head *qbh)
{
        cond_resched();

        hpfs_lock_assert(s);

        if (secno & 3) {
                pr_err("%s(): unaligned read\n", __func__);
                return NULL;
        }

        if (!hpfs_get_sector(s, secno + 0, &qbh->bh[0])) goto bail0;
        if (!hpfs_get_sector(s, secno + 1, &qbh->bh[1])) goto bail1;
        if (!hpfs_get_sector(s, secno + 2, &qbh->bh[2])) goto bail2;
        if (!hpfs_get_sector(s, secno + 3, &qbh->bh[3])) goto bail3;

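        /* Same in-place optimisation as in hpfs_map_4sectors above. */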
        if (likely(qbh->bh[1]->b_data == qbh->bh[0]->b_data + 1 * 512) &&
            likely(qbh->bh[2]->b_data == qbh->bh[0]->b_data + 2 * 512) &&
            likely(qbh->bh[3]->b_data == qbh->bh[0]->b_data + 3 * 512)) {
                return qbh->data = qbh->bh[0]->b_data;
        }

        if (!(qbh->data = kmalloc(2048, GFP_NOFS))) {
                pr_err("%s(): out of memory\n", __func__);
                goto bail4;
        }
        return qbh->data;

bail4:
        brelse(qbh->bh[3]);
bail3:
        brelse(qbh->bh[2]);
bail2:
        brelse(qbh->bh[1]);
bail1:
        brelse(qbh->bh[0]);
bail0:
        return NULL;
}

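/*
 * Release a quad buffer: drop the four buffer_head references and free the
 * bounce buffer, but only if one was actually allocated (qbh->data may
 * simply alias bh[0]->b_data).
 */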
void hpfs_brelse4(struct quad_buffer_head *qbh)
{
        if (unlikely(qbh->data != qbh->bh[0]->b_data))
                kfree(qbh->data);
        brelse(qbh->bh[0]);
        brelse(qbh->bh[1]);
        brelse(qbh->bh[2]);
        brelse(qbh->bh[3]);
}

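/*
 * Copy the (possibly modified) bounce buffer back into the four buffers if
 * one is in use, then mark all four buffers dirty so they get written out.
 */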
void hpfs_mark_4buffers_dirty(struct quad_buffer_head *qbh)
{
        if (unlikely(qbh->data != qbh->bh[0]->b_data)) {
                memcpy(qbh->bh[0]->b_data, qbh->data + 0 * 512, 512);
                memcpy(qbh->bh[1]->b_data, qbh->data + 1 * 512, 512);
                memcpy(qbh->bh[2]->b_data, qbh->data + 2 * 512, 512);
                memcpy(qbh->bh[3]->b_data, qbh->data + 3 * 512, 512);
        }
        mark_buffer_dirty(qbh->bh[0]);
        mark_buffer_dirty(qbh->bh[1]);
        mark_buffer_dirty(qbh->bh[2]);
        mark_buffer_dirty(qbh->bh[3]);
}
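
/*
 * Illustrative sketch (not part of the original file): the intended usage
 * pattern for the quad-buffer helpers above.  The function and variable
 * names are hypothetical; the sector number must be a multiple of 4 and the
 * caller must hold the HPFS lock.
 */
#if 0
static void example_patch_4sectors(struct super_block *s, unsigned secno)
{
        struct quad_buffer_head qbh;
        char *data;

        data = hpfs_map_4sectors(s, secno, &qbh, 0);    /* read 4 sectors */
        if (!data)
                return;
        data[0] ^= 1;                                   /* modify the mapped copy */
        hpfs_mark_4buffers_dirty(&qbh);                 /* copy back & mark dirty */
        hpfs_brelse4(&qbh);                             /* release the buffers */
}
#endif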