linux/fs/ufs/cylinder.c — shown twice below, as shipped in two kernel releases.
First copy: v6.9.4
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  linux/fs/ufs/cylinder.c
  4 *
  5 * Copyright (C) 1998
  6 * Daniel Pirkl <daniel.pirkl@email.cz>
  7 * Charles University, Faculty of Mathematics and Physics
  8 *
  9 *  ext2 - inode (block) bitmap caching inspired
 10 */
 11
 12#include <linux/fs.h>
 13#include <linux/time.h>
 14#include <linux/stat.h>
 15#include <linux/string.h>
 16#include <linux/bitops.h>
 17
 18#include <asm/byteorder.h>
 19
 20#include "ufs_fs.h"
 21#include "ufs.h"
 22#include "swab.h"
 23#include "util.h"
 24
 25/*
 26 * Read cylinder group into cache. The memory space for ufs_cg_private_info
 27 * structure is already allocated during ufs_read_super.
 28 */
 29static void ufs_read_cylinder (struct super_block * sb,
 30	unsigned cgno, unsigned bitmap_nr)
 31{
 32	struct ufs_sb_info * sbi = UFS_SB(sb);
 33	struct ufs_sb_private_info * uspi;
 34	struct ufs_cg_private_info * ucpi;
 35	struct ufs_cylinder_group * ucg;
 36	unsigned i, j;
 37
 38	UFSD("ENTER, cgno %u, bitmap_nr %u\n", cgno, bitmap_nr);
 39	uspi = sbi->s_uspi;
 40	ucpi = sbi->s_ucpi[bitmap_nr];
 41	ucg = (struct ufs_cylinder_group *)sbi->s_ucg[cgno]->b_data;
 42
 43	UCPI_UBH(ucpi)->fragment = ufs_cgcmin(cgno);
 44	UCPI_UBH(ucpi)->count = uspi->s_cgsize >> sb->s_blocksize_bits;
 45	/*
 46	 * We have already the first fragment of cylinder group block in buffer
 47	 */
 48	UCPI_UBH(ucpi)->bh[0] = sbi->s_ucg[cgno];
 49	for (i = 1; i < UCPI_UBH(ucpi)->count; i++)
 50		if (!(UCPI_UBH(ucpi)->bh[i] = sb_bread(sb, UCPI_UBH(ucpi)->fragment + i)))
 
 51			goto failed;
 
 52	sbi->s_cgno[bitmap_nr] = cgno;
 53			
 54	ucpi->c_cgx	= fs32_to_cpu(sb, ucg->cg_cgx);
 55	ucpi->c_ncyl	= fs16_to_cpu(sb, ucg->cg_ncyl);
 56	ucpi->c_niblk	= fs16_to_cpu(sb, ucg->cg_niblk);
 57	ucpi->c_ndblk	= fs32_to_cpu(sb, ucg->cg_ndblk);
 58	ucpi->c_rotor	= fs32_to_cpu(sb, ucg->cg_rotor);
 59	ucpi->c_frotor	= fs32_to_cpu(sb, ucg->cg_frotor);
 60	ucpi->c_irotor	= fs32_to_cpu(sb, ucg->cg_irotor);
 61	ucpi->c_btotoff	= fs32_to_cpu(sb, ucg->cg_btotoff);
 62	ucpi->c_boff	= fs32_to_cpu(sb, ucg->cg_boff);
 63	ucpi->c_iusedoff = fs32_to_cpu(sb, ucg->cg_iusedoff);
 64	ucpi->c_freeoff	= fs32_to_cpu(sb, ucg->cg_freeoff);
 65	ucpi->c_nextfreeoff = fs32_to_cpu(sb, ucg->cg_nextfreeoff);
 66	ucpi->c_clustersumoff = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_clustersumoff);
 67	ucpi->c_clusteroff = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_clusteroff);
 68	ucpi->c_nclusterblks = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_nclusterblks);
 69	UFSD("EXIT\n");
 70	return;	
 71	
 72failed:
 73	for (j = 1; j < i; j++)
 74		brelse (sbi->s_ucg[j]);
 75	sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY;
 76	ufs_error (sb, "ufs_read_cylinder", "can't read cylinder group block %u", cgno);
 
 77}
 78
 79/*
 80 * Remove cylinder group from cache, doesn't release memory
 81 * allocated for cylinder group (this is done at ufs_put_super only).
 82 */
 83void ufs_put_cylinder (struct super_block * sb, unsigned bitmap_nr)
 84{
 85	struct ufs_sb_info * sbi = UFS_SB(sb);
 86	struct ufs_sb_private_info * uspi; 
 87	struct ufs_cg_private_info * ucpi;
 88	struct ufs_cylinder_group * ucg;
 89	unsigned i;
 90
 91	UFSD("ENTER, bitmap_nr %u\n", bitmap_nr);
 92
 93	uspi = sbi->s_uspi;
 94	if (sbi->s_cgno[bitmap_nr] == UFS_CGNO_EMPTY) {
 95		UFSD("EXIT\n");
 96		return;
 97	}
 98	ucpi = sbi->s_ucpi[bitmap_nr];
 99	ucg = ubh_get_ucg(UCPI_UBH(ucpi));
100
101	if (uspi->s_ncg > UFS_MAX_GROUP_LOADED && bitmap_nr >= sbi->s_cg_loaded) {
102		ufs_panic (sb, "ufs_put_cylinder", "internal error");
103		return;
104	}
105	/*
106	 * rotor is not so important data, so we put it to disk 
107	 * at the end of working with cylinder
108	 */
109	ucg->cg_rotor = cpu_to_fs32(sb, ucpi->c_rotor);
110	ucg->cg_frotor = cpu_to_fs32(sb, ucpi->c_frotor);
111	ucg->cg_irotor = cpu_to_fs32(sb, ucpi->c_irotor);
112	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
113	for (i = 1; i < UCPI_UBH(ucpi)->count; i++) {
114		brelse (UCPI_UBH(ucpi)->bh[i]);
115	}
116
117	sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY;
118	UFSD("EXIT\n");
119}
120
121/*
122 * Find cylinder group in cache and return it as pointer.
123 * If cylinder group is not in cache, we will load it from disk.
124 *
125 * The cache is managed by LRU algorithm. 
126 */
/*
 * Return the in-core info for cylinder group @cgno, loading it from
 * disk on a cache miss.  The cache (sbi->s_ucpi[]/s_cgno[]) is kept in
 * LRU order with the most recently used group at index 0.
 *
 * Returns NULL on an internal-consistency panic.
 * NOTE(review): ufs_read_cylinder() returns void in this version, so a
 * disk read failure cannot be detected here — on failure the slot is
 * left marked UFS_CGNO_EMPTY by ufs_read_cylinder() yet its ucpi is
 * still returned to the caller.
 */
struct ufs_cg_private_info * ufs_load_cylinder (
	struct super_block * sb, unsigned cgno)
{
	struct ufs_sb_info * sbi = UFS_SB(sb);
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	unsigned cg, i, j;

	UFSD("ENTER, cgno %u\n", cgno);

	uspi = sbi->s_uspi;
	/* Group number beyond the filesystem's group count is a caller bug. */
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_load_cylinder", "internal error, high number of cg");
		return NULL;
	}
	/*
	 * Cylinder group number cg it in cache and it was last used
	 */
	if (sbi->s_cgno[0] == cgno) {
		UFSD("EXIT\n");
		return sbi->s_ucpi[0];
	}
	/*
	 * Number of cylinder groups is not higher than UFS_MAX_GROUP_LOADED:
	 * every group has its own fixed slot (slot index == group number),
	 * so no LRU shuffling is needed.
	 */
	if (uspi->s_ncg <= UFS_MAX_GROUP_LOADED) {
		if (sbi->s_cgno[cgno] != UFS_CGNO_EMPTY) {
			if (sbi->s_cgno[cgno] != cgno) {
				ufs_panic (sb, "ufs_load_cylinder", "internal error, wrong number of cg in cache");
				UFSD("EXIT (FAILED)\n");
				return NULL;
			}
			else {
				UFSD("EXIT\n");
				return sbi->s_ucpi[cgno];
			}
		} else {
			/* Cold slot: read the group into its own slot. */
			ufs_read_cylinder (sb, cgno, cgno);
			UFSD("EXIT\n");
			return sbi->s_ucpi[cgno];
		}
	}
	/*
	 * Cylinder group number cg is in cache but it was not last used, 
	 * we will move to the first position
	 */
	for (i = 0; i < sbi->s_cg_loaded && sbi->s_cgno[i] != cgno; i++);
	if (i < sbi->s_cg_loaded && sbi->s_cgno[i] == cgno) {
		/* Found at slot i: rotate slots [0..i] right by one so the
		 * hit entry lands at the MRU position (index 0). */
		cg = sbi->s_cgno[i];
		ucpi = sbi->s_ucpi[i];
		for (j = i; j > 0; j--) {
			sbi->s_cgno[j] = sbi->s_cgno[j-1];
			sbi->s_ucpi[j] = sbi->s_ucpi[j-1];
		}
		sbi->s_cgno[0] = cg;
		sbi->s_ucpi[0] = ucpi;
	/*
	 * Cylinder group number cg is not in cache, we will read it from disk
	 * and put it to the first position
	 */
	} else {
		/* Grow the window if possible, otherwise evict the LRU slot. */
		if (sbi->s_cg_loaded < UFS_MAX_GROUP_LOADED)
			sbi->s_cg_loaded++;
		else
			ufs_put_cylinder (sb, UFS_MAX_GROUP_LOADED-1);
		/* Reuse the ucpi of the (now free) last slot and rotate the
		 * rest right so index 0 is vacant for the new group. */
		ucpi = sbi->s_ucpi[sbi->s_cg_loaded - 1];
		for (j = sbi->s_cg_loaded - 1; j > 0; j--) {
			sbi->s_cgno[j] = sbi->s_cgno[j-1];
			sbi->s_ucpi[j] = sbi->s_ucpi[j-1];
		}
		sbi->s_ucpi[0] = ucpi;
		ufs_read_cylinder (sb, cgno, 0);
	}
	UFSD("EXIT\n");
	return sbi->s_ucpi[0];
}
Second copy: v6.13.7 — same file in a later release; ufs_read_cylinder now returns bool, releases the correct buffers on failure, and callers propagate the error.
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  linux/fs/ufs/cylinder.c
  4 *
  5 * Copyright (C) 1998
  6 * Daniel Pirkl <daniel.pirkl@email.cz>
  7 * Charles University, Faculty of Mathematics and Physics
  8 *
  9 *  ext2 - inode (block) bitmap caching inspired
 10 */
 11
 12#include <linux/fs.h>
 13#include <linux/time.h>
 14#include <linux/stat.h>
 15#include <linux/string.h>
 16#include <linux/bitops.h>
 17
 18#include <asm/byteorder.h>
 19
 20#include "ufs_fs.h"
 21#include "ufs.h"
 22#include "swab.h"
 23#include "util.h"
 24
 25/*
 26 * Read cylinder group into cache. The memory space for ufs_cg_private_info
 27 * structure is already allocated during ufs_read_super.
 28 */
/*
 * Read cylinder group @cgno from disk into cache slot @bitmap_nr.
 * The ufs_cg_private_info for the slot was allocated in ufs_read_super;
 * this routine fills it from the on-disk group descriptor.
 *
 * Returns true on success; false on a read error, in which case all
 * buffers read so far are released and the slot is marked empty.
 */
static bool ufs_read_cylinder(struct super_block *sb,
	unsigned cgno, unsigned bitmap_nr)
{
	struct ufs_sb_info * sbi = UFS_SB(sb);
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned i, j;

	UFSD("ENTER, cgno %u, bitmap_nr %u\n", cgno, bitmap_nr);
	uspi = sbi->s_uspi;
	ucpi = sbi->s_ucpi[bitmap_nr];
	ucg = (struct ufs_cylinder_group *)sbi->s_ucg[cgno]->b_data;

	UCPI_UBH(ucpi)->fragment = ufs_cgcmin(cgno);
	UCPI_UBH(ucpi)->count = uspi->s_cgsize >> sb->s_blocksize_bits;
	/*
	 * We have already the first fragment of cylinder group block in buffer
	 */
	UCPI_UBH(ucpi)->bh[0] = sbi->s_ucg[cgno];
	/* Read the remaining fragments of the group descriptor block. */
	for (i = 1; i < UCPI_UBH(ucpi)->count; i++) {
		UCPI_UBH(ucpi)->bh[i] = sb_bread(sb, UCPI_UBH(ucpi)->fragment + i);
		if (!UCPI_UBH(ucpi)->bh[i])
			goto failed;
	}
	sbi->s_cgno[bitmap_nr] = cgno;

	/* Decode the on-disk (fs-endian) header into host-endian fields. */
	ucpi->c_cgx	= fs32_to_cpu(sb, ucg->cg_cgx);
	ucpi->c_ncyl	= fs16_to_cpu(sb, ucg->cg_ncyl);
	ucpi->c_niblk	= fs16_to_cpu(sb, ucg->cg_niblk);
	ucpi->c_ndblk	= fs32_to_cpu(sb, ucg->cg_ndblk);
	ucpi->c_rotor	= fs32_to_cpu(sb, ucg->cg_rotor);
	ucpi->c_frotor	= fs32_to_cpu(sb, ucg->cg_frotor);
	ucpi->c_irotor	= fs32_to_cpu(sb, ucg->cg_irotor);
	ucpi->c_btotoff	= fs32_to_cpu(sb, ucg->cg_btotoff);
	ucpi->c_boff	= fs32_to_cpu(sb, ucg->cg_boff);
	ucpi->c_iusedoff = fs32_to_cpu(sb, ucg->cg_iusedoff);
	ucpi->c_freeoff	= fs32_to_cpu(sb, ucg->cg_freeoff);
	ucpi->c_nextfreeoff = fs32_to_cpu(sb, ucg->cg_nextfreeoff);
	ucpi->c_clustersumoff = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_clustersumoff);
	ucpi->c_clusteroff = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_clusteroff);
	ucpi->c_nclusterblks = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_nclusterblks);
	UFSD("EXIT\n");
	return true;

failed:
	/* Release only bh[1..i-1]; bh[0] aliases the long-lived s_ucg buffer. */
	for (j = 1; j < i; j++)
		brelse(UCPI_UBH(ucpi)->bh[j]);
	sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY;
	ufs_error (sb, "ufs_read_cylinder", "can't read cylinder group block %u", cgno);
	return false;
}
 81
 82/*
 83 * Remove cylinder group from cache, doesn't release memory
 84 * allocated for cylinder group (this is done at ufs_put_super only).
 85 */
 86void ufs_put_cylinder (struct super_block * sb, unsigned bitmap_nr)
 87{
 88	struct ufs_sb_info * sbi = UFS_SB(sb);
 89	struct ufs_sb_private_info * uspi; 
 90	struct ufs_cg_private_info * ucpi;
 91	struct ufs_cylinder_group * ucg;
 92	unsigned i;
 93
 94	UFSD("ENTER, bitmap_nr %u\n", bitmap_nr);
 95
 96	uspi = sbi->s_uspi;
 97	if (sbi->s_cgno[bitmap_nr] == UFS_CGNO_EMPTY) {
 98		UFSD("EXIT\n");
 99		return;
100	}
101	ucpi = sbi->s_ucpi[bitmap_nr];
102	ucg = ubh_get_ucg(UCPI_UBH(ucpi));
103
104	if (uspi->s_ncg > UFS_MAX_GROUP_LOADED && bitmap_nr >= sbi->s_cg_loaded) {
105		ufs_panic (sb, "ufs_put_cylinder", "internal error");
106		return;
107	}
108	/*
109	 * rotor is not so important data, so we put it to disk 
110	 * at the end of working with cylinder
111	 */
112	ucg->cg_rotor = cpu_to_fs32(sb, ucpi->c_rotor);
113	ucg->cg_frotor = cpu_to_fs32(sb, ucpi->c_frotor);
114	ucg->cg_irotor = cpu_to_fs32(sb, ucpi->c_irotor);
115	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
116	for (i = 1; i < UCPI_UBH(ucpi)->count; i++) {
117		brelse (UCPI_UBH(ucpi)->bh[i]);
118	}
119
120	sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY;
121	UFSD("EXIT\n");
122}
123
124/*
125 * Find cylinder group in cache and return it as pointer.
126 * If cylinder group is not in cache, we will load it from disk.
127 *
128 * The cache is managed by LRU algorithm. 
129 */
/*
 * Return the in-core info for cylinder group @cgno, loading it from
 * disk on a cache miss.  The cache (sbi->s_ucpi[]/s_cgno[]) is kept in
 * LRU order with the most recently used group at index 0.
 *
 * Returns NULL on an internal-consistency panic or if reading the
 * group from disk fails.
 */
struct ufs_cg_private_info * ufs_load_cylinder (
	struct super_block * sb, unsigned cgno)
{
	struct ufs_sb_info * sbi = UFS_SB(sb);
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	unsigned cg, i, j;

	UFSD("ENTER, cgno %u\n", cgno);

	uspi = sbi->s_uspi;
	/* Group number beyond the filesystem's group count is a caller bug. */
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_load_cylinder", "internal error, high number of cg");
		return NULL;
	}
	/*
	 * Cylinder group number cg it in cache and it was last used
	 */
	if (sbi->s_cgno[0] == cgno) {
		UFSD("EXIT\n");
		return sbi->s_ucpi[0];
	}
	/*
	 * Number of cylinder groups is not higher than UFS_MAX_GROUP_LOADED:
	 * every group has its own fixed slot (slot index == group number),
	 * so no LRU shuffling is needed.
	 */
	if (uspi->s_ncg <= UFS_MAX_GROUP_LOADED) {
		if (sbi->s_cgno[cgno] != UFS_CGNO_EMPTY) {
			if (sbi->s_cgno[cgno] != cgno) {
				ufs_panic (sb, "ufs_load_cylinder", "internal error, wrong number of cg in cache");
				UFSD("EXIT (FAILED)\n");
				return NULL;
			}
		} else {
			/* Cold slot: read the group into its own slot,
			 * propagating a disk read failure to the caller. */
			if (unlikely(!ufs_read_cylinder (sb, cgno, cgno))) {
				UFSD("EXIT (FAILED)\n");
				return NULL;
			}
		}
		UFSD("EXIT\n");
		return sbi->s_ucpi[cgno];
	}
	/*
	 * Cylinder group number cg is in cache but it was not last used, 
	 * we will move to the first position
	 */
	for (i = 0; i < sbi->s_cg_loaded && sbi->s_cgno[i] != cgno; i++);
	if (i < sbi->s_cg_loaded && sbi->s_cgno[i] == cgno) {
		/* Found at slot i: rotate slots [0..i] right by one so the
		 * hit entry lands at the MRU position (index 0). */
		cg = sbi->s_cgno[i];
		ucpi = sbi->s_ucpi[i];
		for (j = i; j > 0; j--) {
			sbi->s_cgno[j] = sbi->s_cgno[j-1];
			sbi->s_ucpi[j] = sbi->s_ucpi[j-1];
		}
		sbi->s_cgno[0] = cg;
		sbi->s_ucpi[0] = ucpi;
	/*
	 * Cylinder group number cg is not in cache, we will read it from disk
	 * and put it to the first position
	 */
	} else {
		/* Grow the window if possible, otherwise evict the LRU slot. */
		if (sbi->s_cg_loaded < UFS_MAX_GROUP_LOADED)
			sbi->s_cg_loaded++;
		else
			ufs_put_cylinder (sb, UFS_MAX_GROUP_LOADED-1);
		/* Reuse the ucpi of the (now free) last slot and rotate the
		 * rest right so index 0 is vacant for the new group. */
		ucpi = sbi->s_ucpi[sbi->s_cg_loaded - 1];
		for (j = sbi->s_cg_loaded - 1; j > 0; j--) {
			sbi->s_cgno[j] = sbi->s_cgno[j-1];
			sbi->s_ucpi[j] = sbi->s_ucpi[j-1];
		}
		sbi->s_ucpi[0] = ucpi;
		/* Note: on failure ufs_read_cylinder() already marked slot 0
		 * UFS_CGNO_EMPTY and released its extra buffers. */
		if (unlikely(!ufs_read_cylinder (sb, cgno, 0))) {
			UFSD("EXIT (FAILED)\n");
			return NULL;
		}
	}
	UFSD("EXIT\n");
	return sbi->s_ucpi[0];
}