v5.9 — arch/ia64/kernel/topology.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * This file contains NUMA specific variables and functions which can
 * be split away from DISCONTIGMEM and are used on NUMA machines with
 * contiguous memory.
 * 		2002/08/07 Erich Focht <efocht@ess.nec.de>
 * Populate cpu entries in sysfs for non-numa systems as well
 *  	Intel Corporation - Ashok Raj
 * 02/27/2006 Zhang, Yanmin
 *	Populate cpu cache entries in sysfs for cpu cache info
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/node.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/nodemask.h>
#include <linux/notifier.h>
#include <linux/export.h>
#include <asm/mmzone.h>
#include <asm/numa.h>
#include <asm/cpu.h>

static struct ia64_cpu *sysfs_cpus;

void arch_fix_phys_package_id(int num, u32 slot)
{
#ifdef CONFIG_SMP
	if (cpu_data(num)->socket_id == -1)
		cpu_data(num)->socket_id = slot;
#endif
}
EXPORT_SYMBOL_GPL(arch_fix_phys_package_id);


#ifdef CONFIG_HOTPLUG_CPU
int __ref arch_register_cpu(int num)
{
	/*
	 * If CPEI can be re-targeted or if this is not the
	 * CPEI target, then it is hotpluggable.
	 */
	if (can_cpei_retarget() || !is_cpu_cpei_target(num))
		sysfs_cpus[num].cpu.hotpluggable = 1;
	map_cpu_to_node(num, node_cpuid[num].nid);
	return register_cpu(&sysfs_cpus[num].cpu, num);
}
EXPORT_SYMBOL(arch_register_cpu);

void __ref arch_unregister_cpu(int num)
{
	unregister_cpu(&sysfs_cpus[num].cpu);
	unmap_cpu_from_node(num, cpu_to_node(num));
}
EXPORT_SYMBOL(arch_unregister_cpu);
#else
static int __init arch_register_cpu(int num)
{
	return register_cpu(&sysfs_cpus[num].cpu, num);
}
#endif /*CONFIG_HOTPLUG_CPU*/


static int __init topology_init(void)
{
	int i, err = 0;

#ifdef CONFIG_NUMA
	/*
	 * MCD - Do we want to register all ONLINE nodes, or all POSSIBLE nodes?
	 */
	for_each_online_node(i) {
		if ((err = register_one_node(i)))
			goto out;
	}
#endif

	sysfs_cpus = kcalloc(NR_CPUS, sizeof(struct ia64_cpu), GFP_KERNEL);
	if (!sysfs_cpus)
		panic("kcalloc in topology_init failed - NR_CPUS too big?");

	for_each_present_cpu(i) {
		if ((err = arch_register_cpu(i)))
			goto out;
	}
out:
	return err;
}

subsys_initcall(topology_init);
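/*
 * Note that subsys_initcall() runs before device_initcall(), so the
 * per-CPU devices registered above already exist by the time
 * cache_sysfs_init() below runs and hangs its "cache" kobjects off
 * them via get_cpu_device().
 */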


/*
 * Export cpu cache information through sysfs
 */

/*
 * String arrays used for pretty printing
 */
static const char *cache_types[] = {
	"",			/* not used */
	"Instruction",
	"Data",
	"Unified"	/* unified */
};

static const char *cache_mattrib[] = {
	"WriteThrough",
	"WriteBack",
	"",		/* reserved */
	""		/* reserved */
};

struct cache_info {
	pal_cache_config_info_t cci;
	cpumask_t shared_cpu_map;
	int level;
	int type;
	struct kobject kobj;
};

struct cpu_cache_info {
	struct cache_info *cache_leaves;
	int num_cache_leaves;
	struct kobject kobj;
};

static struct cpu_cache_info all_cpu_cache_info[NR_CPUS];
#define LEAF_KOBJECT_PTR(x, y)	(&all_cpu_cache_info[x].cache_leaves[y])
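/*
 * For example, LEAF_KOBJECT_PTR(2, 0) expands to
 * &all_cpu_cache_info[2].cache_leaves[0], i.e. the first cache leaf
 * recorded for CPU 2.
 */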

#ifdef CONFIG_SMP
static void cache_shared_cpu_map_setup(unsigned int cpu,
		struct cache_info *this_leaf)
{
	pal_cache_shared_info_t csi;
	int num_shared, i = 0;
	unsigned int j;

	if (cpu_data(cpu)->threads_per_core <= 1 &&
		cpu_data(cpu)->cores_per_socket <= 1) {
		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		return;
	}

	if (ia64_pal_cache_shared_info(this_leaf->level,
					this_leaf->type,
					0,
					&csi) != PAL_STATUS_SUCCESS)
		return;

	num_shared = (int) csi.num_shared;
	do {
		for_each_possible_cpu(j)
			if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id
				&& cpu_data(j)->core_id == csi.log1_cid
				&& cpu_data(j)->thread_id == csi.log1_tid)
				cpumask_set_cpu(j, &this_leaf->shared_cpu_map);

		i++;
	} while (i < num_shared &&
		ia64_pal_cache_shared_info(this_leaf->level,
				this_leaf->type,
				i,
				&csi) == PAL_STATUS_SUCCESS);
}
#else
static void cache_shared_cpu_map_setup(unsigned int cpu,
		struct cache_info *this_leaf)
{
	cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
}
#endif
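/*
 * In the SMP variant above, each successful PAL_CACHE_SHARED_INFO call
 * describes one logical processor sharing this cache leaf
 * (csi.num_shared gives the total); the for_each_possible_cpu() scan
 * maps the reported (log1_cid, log1_tid) pair back to a logical CPU on
 * the same socket and sets its bit in shared_cpu_map.
 */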

static ssize_t show_coherency_line_size(struct cache_info *this_leaf,
					char *buf)
{
	return sprintf(buf, "%u\n", 1 << this_leaf->cci.pcci_line_size);
}

static ssize_t show_ways_of_associativity(struct cache_info *this_leaf,
					char *buf)
{
	return sprintf(buf, "%u\n", this_leaf->cci.pcci_assoc);
}

static ssize_t show_attributes(struct cache_info *this_leaf, char *buf)
{
	return sprintf(buf,
			"%s\n",
			cache_mattrib[this_leaf->cci.pcci_cache_attr]);
}

static ssize_t show_size(struct cache_info *this_leaf, char *buf)
{
	return sprintf(buf, "%uK\n", this_leaf->cci.pcci_cache_size / 1024);
}

static ssize_t show_number_of_sets(struct cache_info *this_leaf, char *buf)
{
	unsigned number_of_sets = this_leaf->cci.pcci_cache_size;
	number_of_sets /= this_leaf->cci.pcci_assoc;
	number_of_sets /= 1 << this_leaf->cci.pcci_line_size;

	return sprintf(buf, "%u\n", number_of_sets);
}
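/*
 * Worked example with hypothetical values: pcci_cache_size is in bytes
 * and pcci_line_size is log2 of the line size, so a 256 KiB, 8-way
 * cache with 64-byte lines (pcci_line_size == 6) reports
 * 262144 / 8 / (1 << 6) = 512 sets.
 */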

static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
{
	cpumask_t shared_cpu_map;

	cpumask_and(&shared_cpu_map,
				&this_leaf->shared_cpu_map, cpu_online_mask);
	return scnprintf(buf, PAGE_SIZE, "%*pb\n",
			 cpumask_pr_args(&shared_cpu_map));
}

static ssize_t show_type(struct cache_info *this_leaf, char *buf)
{
	int type = this_leaf->type + this_leaf->cci.pcci_unified;
	return sprintf(buf, "%s\n", cache_types[type]);
}
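/*
 * this_leaf->type records the PAL query type (1 = instruction,
 * 2 = data), so a unified cache, reported under the data query with
 * pcci_unified set, indexes cache_types[3] ("Unified") instead of
 * cache_types[2] ("Data").
 */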

static ssize_t show_level(struct cache_info *this_leaf, char *buf)
{
	return sprintf(buf, "%u\n", this_leaf->level);
}

struct cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct cache_info *, char *);
	ssize_t (*store)(struct cache_info *, const char *, size_t count);
};

#ifdef define_one_ro
	#undef define_one_ro
#endif
#define define_one_ro(_name) \
	static struct cache_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)
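/*
 * Given the kernel's __ATTR() helper from <linux/sysfs.h>, a line such
 * as define_one_ro(size); expands to roughly:
 *
 *	static struct cache_attr size = {
 *		.attr  = { .name = "size", .mode = 0444 },
 *		.show  = show_size,
 *		.store = NULL,
 *	};
 */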

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(ways_of_associativity);
define_one_ro(size);
define_one_ro(number_of_sets);
define_one_ro(shared_cpu_map);
define_one_ro(attributes);

static struct attribute *cache_default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&ways_of_associativity.attr,
	&attributes.attr,
	&size.attr,
	&number_of_sets.attr,
	&shared_cpu_map.attr,
	NULL
};

#define to_object(k) container_of(k, struct cache_info, kobj)
#define to_attr(a) container_of(a, struct cache_attr, attr)

static ssize_t ia64_cache_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cache_attr *fattr = to_attr(attr);
	struct cache_info *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ? fattr->show(this_leaf, buf) : 0;
	return ret;
}
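/*
 * Every read of an attribute file below .../cache/indexN/ funnels
 * through ia64_cache_show(): container_of() (via to_object()/to_attr())
 * recovers the enclosing cache_info and cache_attr from the raw kobject
 * and attribute, and the matching show_*() routine formats the value;
 * reading "size", for instance, ends up in show_size().
 */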

static const struct sysfs_ops cache_sysfs_ops = {
	.show   = ia64_cache_show
};

static struct kobj_type cache_ktype = {
	.sysfs_ops	= &cache_sysfs_ops,
	.default_attrs	= cache_default_attrs,
};

static struct kobj_type cache_ktype_percpu_entry = {
	.sysfs_ops	= &cache_sysfs_ops,
};

static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	kfree(all_cpu_cache_info[cpu].cache_leaves);
	all_cpu_cache_info[cpu].cache_leaves = NULL;
	all_cpu_cache_info[cpu].num_cache_leaves = 0;
	memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
	unsigned long i, levels, unique_caches;
	pal_cache_config_info_t cci;
	int j;
	long status;
	struct cache_info *this_cache;
	int num_cache_leaves = 0;

	if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
		printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
		return -1;
	}

	this_cache = kcalloc(unique_caches, sizeof(struct cache_info),
			     GFP_KERNEL);
	if (this_cache == NULL)
		return -ENOMEM;

	for (i = 0; i < levels; i++) {
		for (j = 2; j > 0; j--) {
			if ((status = ia64_pal_cache_config_info(i, j, &cci)) !=
					PAL_STATUS_SUCCESS)
				continue;

			this_cache[num_cache_leaves].cci = cci;
			this_cache[num_cache_leaves].level = i + 1;
			this_cache[num_cache_leaves].type = j;

			cache_shared_cpu_map_setup(cpu,
					&this_cache[num_cache_leaves]);
			num_cache_leaves++;
		}
	}

	all_cpu_cache_info[cpu].cache_leaves = this_cache;
	all_cpu_cache_info[cpu].num_cache_leaves = num_cache_leaves;

	memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));

	return 0;
}
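/*
 * Leaf enumeration: for each cache level 1..levels reported by
 * PAL_CACHE_SUMMARY, both the data/unified (j == 2) and instruction
 * (j == 1) configurations are queried; combinations that do not exist
 * fail the PAL call and are skipped, so num_cache_leaves never exceeds
 * unique_caches.
 */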

/* Add cache interface for CPU device */
static int cache_add_dev(unsigned int cpu)
{
	struct device *sys_dev = get_cpu_device(cpu);
	unsigned long i, j;
	struct cache_info *this_object;
	int retval = 0;

	if (all_cpu_cache_info[cpu].kobj.parent)
		return 0;

	retval = cpu_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	retval = kobject_init_and_add(&all_cpu_cache_info[cpu].kobj,
				      &cache_ktype_percpu_entry, &sys_dev->kobj,
				      "%s", "cache");
	if (unlikely(retval < 0)) {
		cpu_cache_sysfs_exit(cpu);
		return retval;
	}

	for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) {
		this_object = LEAF_KOBJECT_PTR(cpu, i);
		retval = kobject_init_and_add(&(this_object->kobj),
					      &cache_ktype,
					      &all_cpu_cache_info[cpu].kobj,
					      "index%1lu", i);
		if (unlikely(retval)) {
			for (j = 0; j < i; j++) {
				kobject_put(&(LEAF_KOBJECT_PTR(cpu, j)->kobj));
			}
			kobject_put(&all_cpu_cache_info[cpu].kobj);
			cpu_cache_sysfs_exit(cpu);
			return retval;
		}
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
	}
	kobject_uevent(&all_cpu_cache_info[cpu].kobj, KOBJ_ADD);
	return retval;
}
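/*
 * For a CPU with, e.g., two cache leaves this produces the sysfs tree
 * (one file per entry in cache_default_attrs[] above):
 *
 *	/sys/devices/system/cpu/cpu0/cache/index0/{type,level,size,...}
 *	/sys/devices/system/cpu/cpu0/cache/index1/{type,level,size,...}
 */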

/* Remove cache interface for CPU device */
static int cache_remove_dev(unsigned int cpu)
{
	unsigned long i;

	for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++)
		kobject_put(&(LEAF_KOBJECT_PTR(cpu, i)->kobj));

	if (all_cpu_cache_info[cpu].kobj.parent) {
		kobject_put(&all_cpu_cache_info[cpu].kobj);
		memset(&all_cpu_cache_info[cpu].kobj, 0,
		       sizeof(struct kobject));
	}

	cpu_cache_sysfs_exit(cpu);

	return 0;
}

static int __init cache_sysfs_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ia64/topology:online",
				cache_add_dev, cache_remove_dev);
	WARN_ON(ret < 0);
	return 0;
}

device_initcall(cache_sysfs_init);
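/*
 * CPUHP_AP_ONLINE_DYN allocates a dynamic hotplug state, so
 * cpuhp_setup_state() runs cache_add_dev() on every CPU that is
 * already online and again for each CPU brought online later, with
 * cache_remove_dev() as the teardown callback. Compare this with the
 * open-coded notifier in the v4.6 version below.
 */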
v4.6 — arch/ia64/kernel/topology.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * This file contains NUMA specific variables and functions which can
 * be split away from DISCONTIGMEM and are used on NUMA machines with
 * contiguous memory.
 * 		2002/08/07 Erich Focht <efocht@ess.nec.de>
 * Populate cpu entries in sysfs for non-numa systems as well
 *  	Intel Corporation - Ashok Raj
 * 02/27/2006 Zhang, Yanmin
 *	Populate cpu cache entries in sysfs for cpu cache info
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/node.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/nodemask.h>
#include <linux/notifier.h>
#include <linux/export.h>
#include <asm/mmzone.h>
#include <asm/numa.h>
#include <asm/cpu.h>

static struct ia64_cpu *sysfs_cpus;

void arch_fix_phys_package_id(int num, u32 slot)
{
#ifdef CONFIG_SMP
	if (cpu_data(num)->socket_id == -1)
		cpu_data(num)->socket_id = slot;
#endif
}
EXPORT_SYMBOL_GPL(arch_fix_phys_package_id);


#ifdef CONFIG_HOTPLUG_CPU
int __ref arch_register_cpu(int num)
{
#ifdef CONFIG_ACPI
	/*
	 * If CPEI can be re-targeted or if this is not the
	 * CPEI target, then it is hotpluggable.
	 */
	if (can_cpei_retarget() || !is_cpu_cpei_target(num))
		sysfs_cpus[num].cpu.hotpluggable = 1;
	map_cpu_to_node(num, node_cpuid[num].nid);
#endif
	return register_cpu(&sysfs_cpus[num].cpu, num);
}
EXPORT_SYMBOL(arch_register_cpu);

void __ref arch_unregister_cpu(int num)
{
	unregister_cpu(&sysfs_cpus[num].cpu);
#ifdef CONFIG_ACPI
	unmap_cpu_from_node(num, cpu_to_node(num));
#endif
}
EXPORT_SYMBOL(arch_unregister_cpu);
#else
static int __init arch_register_cpu(int num)
{
	return register_cpu(&sysfs_cpus[num].cpu, num);
}
#endif /*CONFIG_HOTPLUG_CPU*/


static int __init topology_init(void)
{
	int i, err = 0;

#ifdef CONFIG_NUMA
	/*
	 * MCD - Do we want to register all ONLINE nodes, or all POSSIBLE nodes?
	 */
	for_each_online_node(i) {
		if ((err = register_one_node(i)))
			goto out;
	}
#endif

	sysfs_cpus = kzalloc(sizeof(struct ia64_cpu) * NR_CPUS, GFP_KERNEL);
	if (!sysfs_cpus)
		panic("kzalloc in topology_init failed - NR_CPUS too big?");

	for_each_present_cpu(i) {
		if ((err = arch_register_cpu(i)))
			goto out;
	}
out:
	return err;
}

subsys_initcall(topology_init);


/*
 * Export cpu cache information through sysfs
 */

/*
 * String arrays used for pretty printing
 */
static const char *cache_types[] = {
	"",			/* not used */
	"Instruction",
	"Data",
	"Unified"	/* unified */
};

static const char *cache_mattrib[] = {
	"WriteThrough",
	"WriteBack",
	"",		/* reserved */
	""		/* reserved */
};

struct cache_info {
	pal_cache_config_info_t cci;
	cpumask_t shared_cpu_map;
	int level;
	int type;
	struct kobject kobj;
};

struct cpu_cache_info {
	struct cache_info *cache_leaves;
	int num_cache_leaves;
	struct kobject kobj;
};

static struct cpu_cache_info all_cpu_cache_info[NR_CPUS];
#define LEAF_KOBJECT_PTR(x, y)	(&all_cpu_cache_info[x].cache_leaves[y])

#ifdef CONFIG_SMP
static void cache_shared_cpu_map_setup(unsigned int cpu,
		struct cache_info *this_leaf)
{
	pal_cache_shared_info_t csi;
	int num_shared, i = 0;
	unsigned int j;

	if (cpu_data(cpu)->threads_per_core <= 1 &&
		cpu_data(cpu)->cores_per_socket <= 1) {
		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		return;
	}

	if (ia64_pal_cache_shared_info(this_leaf->level,
					this_leaf->type,
					0,
					&csi) != PAL_STATUS_SUCCESS)
		return;

	num_shared = (int) csi.num_shared;
	do {
		for_each_possible_cpu(j)
			if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id
				&& cpu_data(j)->core_id == csi.log1_cid
				&& cpu_data(j)->thread_id == csi.log1_tid)
				cpumask_set_cpu(j, &this_leaf->shared_cpu_map);

		i++;
	} while (i < num_shared &&
		ia64_pal_cache_shared_info(this_leaf->level,
				this_leaf->type,
				i,
				&csi) == PAL_STATUS_SUCCESS);
}
#else
static void cache_shared_cpu_map_setup(unsigned int cpu,
		struct cache_info *this_leaf)
{
	cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
}
#endif

static ssize_t show_coherency_line_size(struct cache_info *this_leaf,
					char *buf)
{
	return sprintf(buf, "%u\n", 1 << this_leaf->cci.pcci_line_size);
}

static ssize_t show_ways_of_associativity(struct cache_info *this_leaf,
					char *buf)
{
	return sprintf(buf, "%u\n", this_leaf->cci.pcci_assoc);
}

static ssize_t show_attributes(struct cache_info *this_leaf, char *buf)
{
	return sprintf(buf,
			"%s\n",
			cache_mattrib[this_leaf->cci.pcci_cache_attr]);
}

static ssize_t show_size(struct cache_info *this_leaf, char *buf)
{
	return sprintf(buf, "%uK\n", this_leaf->cci.pcci_cache_size / 1024);
}

static ssize_t show_number_of_sets(struct cache_info *this_leaf, char *buf)
{
	unsigned number_of_sets = this_leaf->cci.pcci_cache_size;
	number_of_sets /= this_leaf->cci.pcci_assoc;
	number_of_sets /= 1 << this_leaf->cci.pcci_line_size;

	return sprintf(buf, "%u\n", number_of_sets);
}

static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
{
	cpumask_t shared_cpu_map;

	cpumask_and(&shared_cpu_map,
				&this_leaf->shared_cpu_map, cpu_online_mask);
	return scnprintf(buf, PAGE_SIZE, "%*pb\n",
			 cpumask_pr_args(&shared_cpu_map));
}

static ssize_t show_type(struct cache_info *this_leaf, char *buf)
{
	int type = this_leaf->type + this_leaf->cci.pcci_unified;
	return sprintf(buf, "%s\n", cache_types[type]);
}

static ssize_t show_level(struct cache_info *this_leaf, char *buf)
{
	return sprintf(buf, "%u\n", this_leaf->level);
}

struct cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct cache_info *, char *);
	ssize_t (*store)(struct cache_info *, const char *, size_t count);
};

#ifdef define_one_ro
	#undef define_one_ro
#endif
#define define_one_ro(_name) \
	static struct cache_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(ways_of_associativity);
define_one_ro(size);
define_one_ro(number_of_sets);
define_one_ro(shared_cpu_map);
define_one_ro(attributes);

static struct attribute *cache_default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&ways_of_associativity.attr,
	&attributes.attr,
	&size.attr,
	&number_of_sets.attr,
	&shared_cpu_map.attr,
	NULL
};

#define to_object(k) container_of(k, struct cache_info, kobj)
#define to_attr(a) container_of(a, struct cache_attr, attr)

static ssize_t ia64_cache_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cache_attr *fattr = to_attr(attr);
	struct cache_info *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ? fattr->show(this_leaf, buf) : 0;
	return ret;
}

static const struct sysfs_ops cache_sysfs_ops = {
	.show   = ia64_cache_show
};

static struct kobj_type cache_ktype = {
	.sysfs_ops	= &cache_sysfs_ops,
	.default_attrs	= cache_default_attrs,
};

static struct kobj_type cache_ktype_percpu_entry = {
	.sysfs_ops	= &cache_sysfs_ops,
};

static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	kfree(all_cpu_cache_info[cpu].cache_leaves);
	all_cpu_cache_info[cpu].cache_leaves = NULL;
	all_cpu_cache_info[cpu].num_cache_leaves = 0;
	memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
	unsigned long i, levels, unique_caches;
	pal_cache_config_info_t cci;
	int j;
	long status;
	struct cache_info *this_cache;
	int num_cache_leaves = 0;

	if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
		printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
		return -1;
	}

	this_cache = kzalloc(sizeof(struct cache_info) * unique_caches,
			     GFP_KERNEL);
	if (this_cache == NULL)
		return -ENOMEM;

	for (i = 0; i < levels; i++) {
		for (j = 2; j > 0; j--) {
			if ((status = ia64_pal_cache_config_info(i, j, &cci)) !=
					PAL_STATUS_SUCCESS)
				continue;

			this_cache[num_cache_leaves].cci = cci;
			this_cache[num_cache_leaves].level = i + 1;
			this_cache[num_cache_leaves].type = j;

			cache_shared_cpu_map_setup(cpu,
					&this_cache[num_cache_leaves]);
			num_cache_leaves++;
		}
	}

	all_cpu_cache_info[cpu].cache_leaves = this_cache;
	all_cpu_cache_info[cpu].num_cache_leaves = num_cache_leaves;

	memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));

	return 0;
}

/* Add cache interface for CPU device */
static int cache_add_dev(struct device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct cache_info *this_object;
	int retval = 0;
	cpumask_t oldmask;

	if (all_cpu_cache_info[cpu].kobj.parent)
		return 0;

	oldmask = current->cpus_allowed;
	retval = set_cpus_allowed_ptr(current, cpumask_of(cpu));
	if (unlikely(retval))
		return retval;

	retval = cpu_cache_sysfs_init(cpu);
	set_cpus_allowed_ptr(current, &oldmask);
	if (unlikely(retval < 0))
		return retval;

	retval = kobject_init_and_add(&all_cpu_cache_info[cpu].kobj,
				      &cache_ktype_percpu_entry, &sys_dev->kobj,
				      "%s", "cache");
	if (unlikely(retval < 0)) {
		cpu_cache_sysfs_exit(cpu);
		return retval;
	}

	for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) {
		this_object = LEAF_KOBJECT_PTR(cpu, i);
		retval = kobject_init_and_add(&(this_object->kobj),
					      &cache_ktype,
					      &all_cpu_cache_info[cpu].kobj,
					      "index%1lu", i);
		if (unlikely(retval)) {
			for (j = 0; j < i; j++) {
				kobject_put(&(LEAF_KOBJECT_PTR(cpu, j)->kobj));
			}
			kobject_put(&all_cpu_cache_info[cpu].kobj);
			cpu_cache_sysfs_exit(cpu);
			return retval;
		}
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
	}
	kobject_uevent(&all_cpu_cache_info[cpu].kobj, KOBJ_ADD);
	return retval;
}
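/*
 * The set_cpus_allowed_ptr() dance above temporarily pins the calling
 * task to the target CPU so that the PAL firmware queries issued from
 * cpu_cache_sysfs_init() execute on that CPU, then restores the saved
 * affinity mask. The v5.9 version above drops this, since its cpuhp
 * callback already runs on the CPU being brought online.
 */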

/* Remove cache interface for CPU device */
static int cache_remove_dev(struct device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

	for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++)
		kobject_put(&(LEAF_KOBJECT_PTR(cpu, i)->kobj));

	if (all_cpu_cache_info[cpu].kobj.parent) {
		kobject_put(&all_cpu_cache_info[cpu].kobj);
		memset(&all_cpu_cache_info[cpu].kobj, 0,
		       sizeof(struct kobject));
	}

	cpu_cache_sysfs_exit(cpu);

	return 0;
}

/*
 * When a cpu is hot-plugged, check and initialize the
 * cache kobject if necessary
 */
static int cache_cpu_callback(struct notifier_block *nfb,
		unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *sys_dev;

	sys_dev = get_cpu_device(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cache_add_dev(sys_dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(sys_dev);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block cache_cpu_notifier = {
	.notifier_call = cache_cpu_callback
};

static int __init cache_sysfs_init(void)
{
	int i;

	cpu_notifier_register_begin();

	for_each_online_cpu(i) {
		struct device *sys_dev = get_cpu_device((unsigned int)i);
		cache_add_dev(sys_dev);
	}

	__register_hotcpu_notifier(&cache_cpu_notifier);

	cpu_notifier_register_done();

	return 0;
}

device_initcall(cache_sysfs_init);
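/*
 * This notifier-based wiring (cpu_notifier_register_begin(), the
 * explicit loop over online CPUs, and __register_hotcpu_notifier())
 * is what the single cpuhp_setup_state() call in the v5.9 version
 * above replaces.
 */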