/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/jffs2.h>
#include "nodelist.h"

/* These are initialised to NULL in the kernel startup code.
   If you're porting to other operating systems, beware */
static struct kmem_cache *full_dnode_slab;
static struct kmem_cache *raw_dirent_slab;
static struct kmem_cache *raw_inode_slab;
static struct kmem_cache *tmp_dnode_info_slab;
static struct kmem_cache *raw_node_ref_slab;
static struct kmem_cache *node_frag_slab;
static struct kmem_cache *inode_cache_slab;
#ifdef CONFIG_JFFS2_FS_XATTR
static struct kmem_cache *xattr_datum_cache;
static struct kmem_cache *xattr_ref_cache;
#endif

int __init jffs2_create_slab_caches(void)
{
	full_dnode_slab = kmem_cache_create("jffs2_full_dnode",
					    sizeof(struct jffs2_full_dnode),
					    0, 0, NULL);
	if (!full_dnode_slab)
		goto err;

	raw_dirent_slab = kmem_cache_create("jffs2_raw_dirent",
					    sizeof(struct jffs2_raw_dirent),
					    0, SLAB_HWCACHE_ALIGN, NULL);
	if (!raw_dirent_slab)
		goto err;

	raw_inode_slab = kmem_cache_create("jffs2_raw_inode",
					   sizeof(struct jffs2_raw_inode),
					   0, SLAB_HWCACHE_ALIGN, NULL);
	if (!raw_inode_slab)
		goto err;

	tmp_dnode_info_slab = kmem_cache_create("jffs2_tmp_dnode",
						sizeof(struct jffs2_tmp_dnode_info),
						0, 0, NULL);
	if (!tmp_dnode_info_slab)
		goto err;

	raw_node_ref_slab = kmem_cache_create("jffs2_refblock",
					      sizeof(struct jffs2_raw_node_ref) * (REFS_PER_BLOCK + 1),
					      0, 0, NULL);
	if (!raw_node_ref_slab)
		goto err;

	node_frag_slab = kmem_cache_create("jffs2_node_frag",
					   sizeof(struct jffs2_node_frag),
					   0, 0, NULL);
	if (!node_frag_slab)
		goto err;

	inode_cache_slab = kmem_cache_create("jffs2_inode_cache",
					     sizeof(struct jffs2_inode_cache),
					     0, 0, NULL);
	if (!inode_cache_slab)
		goto err;

#ifdef CONFIG_JFFS2_FS_XATTR
	xattr_datum_cache = kmem_cache_create("jffs2_xattr_datum",
					      sizeof(struct jffs2_xattr_datum),
					      0, 0, NULL);
	if (!xattr_datum_cache)
		goto err;

	xattr_ref_cache = kmem_cache_create("jffs2_xattr_ref",
					    sizeof(struct jffs2_xattr_ref),
					    0, 0, NULL);
	if (!xattr_ref_cache)
		goto err;
#endif

	return 0;
 err:
	jffs2_destroy_slab_caches();
	return -ENOMEM;
}
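
/*
 * A minimal sketch of how these two helpers are typically paired at module
 * init/exit time: create all the slab caches up front, and tear them down
 * again if any later initialisation step fails.  The names
 * example_init_jffs2(), example_exit_jffs2() and example_register_fs() are
 * hypothetical and only illustrate the pattern; the real registration code
 * lives elsewhere (fs/jffs2/super.c).
 */
#if 0	/* illustrative sketch only, not compiled */
static int __init example_init_jffs2(void)
{
	int ret;

	ret = jffs2_create_slab_caches();	/* allocate every cache up front */
	if (ret)
		return ret;			/* -ENOMEM, nothing to undo */

	ret = example_register_fs();		/* hypothetical follow-up step */
	if (ret)
		jffs2_destroy_slab_caches();	/* undo on failure */
	return ret;
}

static void __exit example_exit_jffs2(void)
{
	example_unregister_fs();		/* hypothetical */
	jffs2_destroy_slab_caches();
}
#endif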

void jffs2_destroy_slab_caches(void)
{
	if (full_dnode_slab)
		kmem_cache_destroy(full_dnode_slab);
	if (raw_dirent_slab)
		kmem_cache_destroy(raw_dirent_slab);
	if (raw_inode_slab)
		kmem_cache_destroy(raw_inode_slab);
	if (tmp_dnode_info_slab)
		kmem_cache_destroy(tmp_dnode_info_slab);
	if (raw_node_ref_slab)
		kmem_cache_destroy(raw_node_ref_slab);
	if (node_frag_slab)
		kmem_cache_destroy(node_frag_slab);
	if (inode_cache_slab)
		kmem_cache_destroy(inode_cache_slab);
#ifdef CONFIG_JFFS2_FS_XATTR
	if (xattr_datum_cache)
		kmem_cache_destroy(xattr_datum_cache);
	if (xattr_ref_cache)
		kmem_cache_destroy(xattr_ref_cache);
#endif
}

struct jffs2_full_dirent *jffs2_alloc_full_dirent(int namesize)
{
	struct jffs2_full_dirent *ret;
	ret = kmalloc(sizeof(struct jffs2_full_dirent) + namesize, GFP_KERNEL);
	dbg_memalloc("%p\n", ret);
	return ret;
}

void jffs2_free_full_dirent(struct jffs2_full_dirent *x)
{
	dbg_memalloc("%p\n", x);
	kfree(x);
}
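
/*
 * jffs2_alloc_full_dirent() deliberately over-allocates so the variable-length
 * name can live directly behind the struct (struct jffs2_full_dirent ends in a
 * flexible name[] array in nodelist.h).  A hedged sketch of the usual calling
 * pattern; example_make_dirent() is a made-up helper name, and the +1 assumes
 * the caller wants a trailing NUL as the directory-entry readers do:
 */
#if 0	/* illustrative sketch only, not compiled */
static struct jffs2_full_dirent *example_make_dirent(const unsigned char *name,
						     int namelen, uint32_t ino)
{
	struct jffs2_full_dirent *fd;

	fd = jffs2_alloc_full_dirent(namelen + 1);	/* +1 for the trailing NUL */
	if (!fd)
		return NULL;
	fd->ino = ino;
	memcpy(fd->name, name, namelen);
	fd->name[namelen] = 0;
	return fd;		/* released later with jffs2_free_full_dirent() */
}
#endif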

struct jffs2_full_dnode *jffs2_alloc_full_dnode(void)
{
	struct jffs2_full_dnode *ret;
	ret = kmem_cache_alloc(full_dnode_slab, GFP_KERNEL);
	dbg_memalloc("%p\n", ret);
	return ret;
}

void jffs2_free_full_dnode(struct jffs2_full_dnode *x)
{
	dbg_memalloc("%p\n", x);
	kmem_cache_free(full_dnode_slab, x);
}

struct jffs2_raw_dirent *jffs2_alloc_raw_dirent(void)
{
	struct jffs2_raw_dirent *ret;
	ret = kmem_cache_alloc(raw_dirent_slab, GFP_KERNEL);
	dbg_memalloc("%p\n", ret);
	return ret;
}

void jffs2_free_raw_dirent(struct jffs2_raw_dirent *x)
{
	dbg_memalloc("%p\n", x);
	kmem_cache_free(raw_dirent_slab, x);
}

struct jffs2_raw_inode *jffs2_alloc_raw_inode(void)
{
	struct jffs2_raw_inode *ret;
	ret = kmem_cache_alloc(raw_inode_slab, GFP_KERNEL);
	dbg_memalloc("%p\n", ret);
	return ret;
}

void jffs2_free_raw_inode(struct jffs2_raw_inode *x)
{
	dbg_memalloc("%p\n", x);
	kmem_cache_free(raw_inode_slab, x);
}

struct jffs2_tmp_dnode_info *jffs2_alloc_tmp_dnode_info(void)
{
	struct jffs2_tmp_dnode_info *ret;
	ret = kmem_cache_alloc(tmp_dnode_info_slab, GFP_KERNEL);
	dbg_memalloc("%p\n", ret);
	return ret;
}

void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *x)
{
	dbg_memalloc("%p\n", x);
	kmem_cache_free(tmp_dnode_info_slab, x);
}

static struct jffs2_raw_node_ref *jffs2_alloc_refblock(void)
{
	struct jffs2_raw_node_ref *ret;

	ret = kmem_cache_alloc(raw_node_ref_slab, GFP_KERNEL);
	if (ret) {
		int i = 0;
		for (i = 0; i < REFS_PER_BLOCK; i++) {
			ret[i].flash_offset = REF_EMPTY_NODE;
			ret[i].next_in_ino = NULL;
		}
		ret[i].flash_offset = REF_LINK_NODE;
		ret[i].next_in_ino = NULL;
	}
	return ret;
}
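
/*
 * Each "refblock" is really an array of REFS_PER_BLOCK usable entries
 * (flash_offset == REF_EMPTY_NODE when unused) followed by one terminator
 * entry (flash_offset == REF_LINK_NODE) whose next_in_ino either points at
 * the next refblock in the chain or is NULL at the end.  A hedged sketch of
 * walking such a chain and counting the empty slots; count_empty_refs() is a
 * made-up helper used only to illustrate the layout:
 */
#if 0	/* illustrative sketch only, not compiled */
static int count_empty_refs(struct jffs2_raw_node_ref *ref)
{
	int n = 0;

	while (ref) {
		if (ref->flash_offset == REF_LINK_NODE) {
			/* follow the link to the next refblock; NULL ends the chain */
			ref = ref->next_in_ino;
			continue;
		}
		if (ref->flash_offset == REF_EMPTY_NODE)
			n++;
		ref++;	/* next entry within the same refblock */
	}
	return n;
}
#endif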

int jffs2_prealloc_raw_node_refs(struct jffs2_sb_info *c,
				 struct jffs2_eraseblock *jeb, int nr)
{
	struct jffs2_raw_node_ref **p, *ref;
	int i = nr;

	dbg_memalloc("%d\n", nr);

	p = &jeb->last_node;
	ref = *p;

	dbg_memalloc("Reserving %d refs for block @0x%08x\n", nr, jeb->offset);

	/* If jeb->last_node is really a valid node then skip over it */
	if (ref && ref->flash_offset != REF_EMPTY_NODE)
		ref++;

	while (i) {
		if (!ref) {
			dbg_memalloc("Allocating new refblock linked from %p\n", p);
			ref = *p = jffs2_alloc_refblock();
			if (!ref)
				return -ENOMEM;
		}
		if (ref->flash_offset == REF_LINK_NODE) {
			p = &ref->next_in_ino;
			ref = *p;
			continue;
		}
		i--;
		ref++;
	}
	jeb->allocated_refs = nr;

	dbg_memalloc("Reserved %d refs for block @0x%08x, last_node is %p (%08x,%p)\n",
		     nr, jeb->offset, jeb->last_node, jeb->last_node->flash_offset,
		     jeb->last_node->next_in_ino);

	return 0;
}
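
/*
 * The point of preallocating refs is that a later jffs2_link_node_ref() call
 * can consume a slot without allocating memory, which matters when it runs
 * under a spinlock.  A hedged sketch of that caller pattern, assuming the
 * caller already has c, jeb, ofs, totlen and ic in hand; the function name
 * example_reserve_and_link() is made up, and the REF_PRISTINE flag is just
 * one possible node state:
 */
#if 0	/* illustrative sketch only, not compiled */
static int example_reserve_and_link(struct jffs2_sb_info *c,
				    struct jffs2_eraseblock *jeb,
				    uint32_t ofs, uint32_t totlen,
				    struct jffs2_inode_cache *ic)
{
	int ret;

	/* May sleep, so do it before taking the spinlock below */
	ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
	if (ret)
		return ret;				/* -ENOMEM */

	spin_lock(&c->erase_completion_lock);
	/* Consumes one preallocated slot; never allocates at this point */
	jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, ic);
	spin_unlock(&c->erase_completion_lock);
	return 0;
}
#endif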

void jffs2_free_refblock(struct jffs2_raw_node_ref *x)
{
	dbg_memalloc("%p\n", x);
	kmem_cache_free(raw_node_ref_slab, x);
}

struct jffs2_node_frag *jffs2_alloc_node_frag(void)
{
	struct jffs2_node_frag *ret;
	ret = kmem_cache_alloc(node_frag_slab, GFP_KERNEL);
	dbg_memalloc("%p\n", ret);
	return ret;
}

void jffs2_free_node_frag(struct jffs2_node_frag *x)
{
	dbg_memalloc("%p\n", x);
	kmem_cache_free(node_frag_slab, x);
}

struct jffs2_inode_cache *jffs2_alloc_inode_cache(void)
{
	struct jffs2_inode_cache *ret;
	ret = kmem_cache_alloc(inode_cache_slab, GFP_KERNEL);
	dbg_memalloc("%p\n", ret);
	return ret;
}

void jffs2_free_inode_cache(struct jffs2_inode_cache *x)
{
	dbg_memalloc("%p\n", x);
	kmem_cache_free(inode_cache_slab, x);
}

#ifdef CONFIG_JFFS2_FS_XATTR
struct jffs2_xattr_datum *jffs2_alloc_xattr_datum(void)
{
	struct jffs2_xattr_datum *xd;
	xd = kmem_cache_zalloc(xattr_datum_cache, GFP_KERNEL);
	dbg_memalloc("%p\n", xd);
	/* kmem_cache_zalloc() can fail; don't touch xd until we know it didn't */
	if (!xd)
		return NULL;

	xd->class = RAWNODE_CLASS_XATTR_DATUM;
	xd->node = (void *)xd;
	INIT_LIST_HEAD(&xd->xindex);
	return xd;
}

void jffs2_free_xattr_datum(struct jffs2_xattr_datum *xd)
{
	dbg_memalloc("%p\n", xd);
	kmem_cache_free(xattr_datum_cache, xd);
}

struct jffs2_xattr_ref *jffs2_alloc_xattr_ref(void)
{
	struct jffs2_xattr_ref *ref;
	ref = kmem_cache_zalloc(xattr_ref_cache, GFP_KERNEL);
	dbg_memalloc("%p\n", ref);
	if (!ref)
		return NULL;

	ref->class = RAWNODE_CLASS_XATTR_REF;
	ref->node = (void *)ref;
	return ref;
}

void jffs2_free_xattr_ref(struct jffs2_xattr_ref *ref)
{
	dbg_memalloc("%p\n", ref);
	kmem_cache_free(xattr_ref_cache, ref);
}
#endif