arch/s390/kernel/topology.c (v3.1)
 
/*
 *    Copyright IBM Corp. 2007
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/cpuset.h>
#include <asm/delay.h>

#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)
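
/*
 * Note: PTF_* are the function codes of the s390 PTF ("Perform
 * Topology Function") instruction issued by ptf() below: request
 * horizontal or vertical CPU polarization, or check whether a
 * topology-change report is pending.
 */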
struct mask_info {
	struct mask_info *next;
	unsigned char id;
	cpumask_t mask;
};

static int topology_enabled = 1;
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;
static struct timer_list topology_timer;
static void set_topology_timer(void);
static DECLARE_WORK(topology_work, topology_work_fn);
/* topology_lock protects the core linked list */
static DEFINE_SPINLOCK(topology_lock);

static struct mask_info core_info;
cpumask_t cpu_core_map[NR_CPUS];
unsigned char cpu_core_id[NR_CPUS];
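
/*
 * Note: each mask_info list has one node per container reported at
 * its nesting level; the lists are sized up front by alloc_masks()
 * from the STSI 15.1.x magnitude values.
 */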

#ifdef CONFIG_SCHED_BOOK
static struct mask_info book_info;
cpumask_t cpu_book_map[NR_CPUS];
unsigned char cpu_book_id[NR_CPUS];
#endif

static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
	cpumask_t mask;

	cpumask_clear(&mask);
	if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) {
		cpumask_copy(&mask, cpumask_of(cpu));
		return mask;
	}
	while (info) {
		if (cpumask_test_cpu(cpu, &info->mask)) {
			mask = info->mask;
			break;
		}
		info = info->next;
	}
	if (cpumask_empty(&mask))
		cpumask_copy(&mask, cpumask_of(cpu));
	return mask;
}
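
/*
 * Note: in the topology list entry handled below, the CPU mask is
 * numbered with bit 0 as the leftmost bit, so each set bit is
 * converted to a physical CPU address (rcpu) relative to the entry's
 * origin field and then matched against the logical CPU map.
 */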
static void add_cpus_to_mask(struct topology_cpu *tl_cpu,
			     struct mask_info *book, struct mask_info *core)
{
	unsigned int cpu;

	for (cpu = find_first_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS);
	     cpu < TOPOLOGY_CPU_BITS;
	     cpu = find_next_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS, cpu + 1))
	{
		unsigned int rcpu, lcpu;

		rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
		for_each_present_cpu(lcpu) {
			if (cpu_logical_map(lcpu) != rcpu)
				continue;
#ifdef CONFIG_SCHED_BOOK
			cpumask_set_cpu(lcpu, &book->mask);
			cpu_book_id[lcpu] = book->id;
#endif
			cpumask_set_cpu(lcpu, &core->mask);
			cpu_core_id[lcpu] = core->id;
			smp_cpu_polarization[lcpu] = tl_cpu->pp;
		}
	}
}

static void clear_masks(void)
{
	struct mask_info *info;

	info = &core_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
#ifdef CONFIG_SCHED_BOOK
	info = &book_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
#endif
}

static union topology_entry *next_tle(union topology_entry *tle)
{
	if (!tle->nl)
		return (union topology_entry *)((struct topology_cpu *)tle + 1);
	return (union topology_entry *)((struct topology_container *)tle + 1);
}
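
/*
 * Note: tle->nl is the nesting level of a topology list entry:
 * 0 is a CPU entry, higher values are containers (1 = core/socket
 * container, 2 = book when CONFIG_SCHED_BOOK is set).
 */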

static void tl_to_cores(struct sysinfo_15_1_x *info)
{
#ifdef CONFIG_SCHED_BOOK
	struct mask_info *book = &book_info;
#else
	struct mask_info *book = NULL;
#endif
	struct mask_info *core = &core_info;
	union topology_entry *tle, *end;

	spin_lock_irq(&topology_lock);
	clear_masks();
	tle = info->tle;
	end = (union topology_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
#ifdef CONFIG_SCHED_BOOK
		case 2:
			book = book->next;
			book->id = tle->container.id;
			break;
#endif
		case 1:
			core = core->next;
			core->id = tle->container.id;
			break;
		case 0:
			add_cpus_to_mask(&tle->cpu, book, core);
			break;
		default:
			clear_masks();
			goto out;
		}
		tle = next_tle(tle);
	}
out:
	spin_unlock_irq(&topology_lock);
}

static void topology_update_polarization_simple(void)
{
	int cpu;

	mutex_lock(&smp_cpu_state_mutex);
	for_each_possible_cpu(cpu)
		smp_cpu_polarization[cpu] = POLARIZATION_HRZ;
	mutex_unlock(&smp_cpu_state_mutex);
}

static int ptf(unsigned long fc)
{
	int rc;

	asm volatile(
		"	.insn	rre,0xb9a20000,%1,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (rc)
		: "d" (fc)  : "cc");
	return rc;
}
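
/*
 * Note: in ptf() above, "ipm" inserts the program mask (which
 * includes the condition code) into the top bits of rc, and
 * "srl rc,28" shifts the condition code down, so ptf() returns the
 * PTF condition code (0 on success).
 */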

int topology_set_cpu_management(int fc)
{
	int cpu;
	int rc;

	if (!MACHINE_HAS_TOPOLOGY)
		return -EOPNOTSUPP;
	if (fc)
		rc = ptf(PTF_VERTICAL);
	else
		rc = ptf(PTF_HORIZONTAL);
	if (rc)
		return -EBUSY;
	for_each_possible_cpu(cpu)
		smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
	return rc;
}

static void update_cpu_core_map(void)
{
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&topology_lock, flags);
	for_each_possible_cpu(cpu) {
		cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
#ifdef CONFIG_SCHED_BOOK
		cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
#endif
	}
	spin_unlock_irqrestore(&topology_lock, flags);
}

void store_topology(struct sysinfo_15_1_x *info)
{
#ifdef CONFIG_SCHED_BOOK
	int rc;

	rc = stsi(info, 15, 1, 3);
	if (rc != -ENOSYS)
		return;
#endif
	stsi(info, 15, 1, 2);
}
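
/*
 * Note: stsi(info, 15, 1, x) is the s390 "Store System Information"
 * call for SYSIB 15.1.x; selector 3 also reports the book level,
 * with a fallback to the two-level SYSIB 15.1.2 when it is not
 * supported.
 */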

int arch_update_cpu_topology(void)
{
	struct sysinfo_15_1_x *info = tl_info;
	struct sys_device *sysdev;
	int cpu;

	if (!MACHINE_HAS_TOPOLOGY) {
		update_cpu_core_map();
		topology_update_polarization_simple();
		return 0;
	}
	store_topology(info);
	tl_to_cores(info);
	update_cpu_core_map();
	for_each_online_cpu(cpu) {
		sysdev = get_cpu_sysdev(cpu);
		kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
	}
	return 1;
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

static void topology_timer_fn(unsigned long ignored)
{
	if (ptf(PTF_CHECK))
		topology_schedule_update();
	set_topology_timer();
}

static void set_topology_timer(void)
{
	topology_timer.function = topology_timer_fn;
	topology_timer.data = 0;
	topology_timer.expires = jiffies + 60 * HZ;
	add_timer(&topology_timer);
}
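
/*
 * Note: the timer polls PTF_CHECK once a minute; a pending topology
 * change report schedules a sched-domain rebuild via the workqueue.
 */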

static int __init early_parse_topology(char *p)
{
	if (strncmp(p, "off", 3))
		return 0;
	topology_enabled = 0;
	return 0;
}
early_param("topology", early_parse_topology);

static int __init init_topology_update(void)
{
	int rc;

	rc = 0;
	if (!MACHINE_HAS_TOPOLOGY) {
		topology_update_polarization_simple();
		goto out;
	}
	init_timer_deferrable(&topology_timer);
	set_topology_timer();
out:
	update_cpu_core_map();
	return rc;
}
__initcall(init_topology_update);

static void alloc_masks(struct sysinfo_15_1_x *info, struct mask_info *mask,
			int offset)
{
	int i, nr_masks;

	nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
	for (i = 0; i < info->mnest - offset; i++)
		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
	nr_masks = max(nr_masks, 1);
	for (i = 0; i < nr_masks; i++) {
		mask->next = alloc_bootmem(sizeof(struct mask_info));
		mask = mask->next;
	}
}
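
/*
 * Note: info->mag[] holds the number of topology containers at each
 * nesting level, so the product computed above is an upper bound on
 * the number of containers at the requested level; one mask_info
 * node is allocated per container.
 */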

void __init s390_init_cpu_topology(void)
{
	struct sysinfo_15_1_x *info;
	int i;

	if (!MACHINE_HAS_TOPOLOGY)
		return;
	tl_info = alloc_bootmem_pages(PAGE_SIZE);
	info = tl_info;
	store_topology(info);
	pr_info("The CPU configuration topology of the machine is:");
	for (i = 0; i < TOPOLOGY_NR_MAG; i++)
		printk(" %d", info->mag[i]);
	printk(" / %d\n", info->mnest);
	alloc_masks(info, &core_info, 2);
#ifdef CONFIG_SCHED_BOOK
	alloc_masks(info, &book_info, 3);
#endif
}
arch/s390/kernel/topology.c (v4.17)
// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 2007, 2011
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/bootmem.h>
#include <linux/uaccess.h>
#include <linux/sysctl.h>
#include <linux/cpuset.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/node.h>
#include <asm/sysinfo.h>
#include <asm/numa.h>

#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)

enum {
	TOPOLOGY_MODE_HW,
	TOPOLOGY_MODE_SINGLE,
	TOPOLOGY_MODE_PACKAGE,
	TOPOLOGY_MODE_UNINITIALIZED
};

struct mask_info {
	struct mask_info *next;
	unsigned char id;
	cpumask_t mask;
};
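
/*
 * Note on the modes above: TOPOLOGY_MODE_HW uses the topology
 * reported by the machine, TOPOLOGY_MODE_SINGLE treats every CPU as
 * its own group (topology=off), and TOPOLOGY_MODE_PACKAGE fakes one
 * package containing all present CPUs when the machine reports no
 * topology.
 */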

static int topology_mode = TOPOLOGY_MODE_UNINITIALIZED;
static void set_topology_timer(void);
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;

static DECLARE_WORK(topology_work, topology_work_fn);

/*
 * Socket/Book linked lists and cpu_topology updates are
 * protected by "sched_domains_mutex".
 */
static struct mask_info socket_info;
static struct mask_info book_info;
static struct mask_info drawer_info;

struct cpu_topology_s390 cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

cpumask_t cpus_with_topology;

static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
	cpumask_t mask;

	cpumask_copy(&mask, cpumask_of(cpu));
	switch (topology_mode) {
	case TOPOLOGY_MODE_HW:
		while (info) {
			if (cpumask_test_cpu(cpu, &info->mask)) {
				mask = info->mask;
				break;
			}
			info = info->next;
		}
		if (cpumask_empty(&mask))
			cpumask_copy(&mask, cpumask_of(cpu));
		break;
	case TOPOLOGY_MODE_PACKAGE:
		cpumask_copy(&mask, cpu_present_mask);
		break;
	default:
		/* fallthrough */
	case TOPOLOGY_MODE_SINGLE:
		cpumask_copy(&mask, cpumask_of(cpu));
		break;
	}
	return mask;
}

static cpumask_t cpu_thread_map(unsigned int cpu)
{
	cpumask_t mask;
	int i;

	cpumask_copy(&mask, cpumask_of(cpu));
	if (topology_mode != TOPOLOGY_MODE_HW)
		return mask;
	cpu -= cpu % (smp_cpu_mtid + 1);
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_present(cpu + i))
			cpumask_set_cpu(cpu + i, &mask);
	return mask;
}
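
/*
 * Note: smp_cpu_mtid is the highest thread id per core (0 without
 * SMT), so cpu_thread_map() above rounds the cpu number down to the
 * first thread of its core and adds all present sibling threads.
 */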

#define TOPOLOGY_CORE_BITS	64

static void add_cpus_to_mask(struct topology_core *tl_core,
			     struct mask_info *drawer,
			     struct mask_info *book,
			     struct mask_info *socket)
{
	struct cpu_topology_s390 *topo;
	unsigned int core;

	for_each_set_bit(core, &tl_core->mask, TOPOLOGY_CORE_BITS) {
		unsigned int rcore;
		int lcpu, i;

		rcore = TOPOLOGY_CORE_BITS - 1 - core + tl_core->origin;
		lcpu = smp_find_processor_id(rcore << smp_cpu_mt_shift);
		if (lcpu < 0)
			continue;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			topo = &cpu_topology[lcpu + i];
			topo->drawer_id = drawer->id;
			topo->book_id = book->id;
			topo->socket_id = socket->id;
			topo->core_id = rcore;
			topo->thread_id = lcpu + i;
			topo->dedicated = tl_core->d;
			cpumask_set_cpu(lcpu + i, &drawer->mask);
			cpumask_set_cpu(lcpu + i, &book->mask);
			cpumask_set_cpu(lcpu + i, &socket->mask);
			cpumask_set_cpu(lcpu + i, &cpus_with_topology);
			smp_cpu_set_polarization(lcpu + i, tl_core->pp);
		}
	}
}
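
/*
 * Note: rcore is the physical core number (the TLE mask is numbered
 * from the leftmost bit); smp_find_processor_id() maps it to the
 * first logical CPU of that core, and the inner loop fills in all of
 * its SMT siblings.
 */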

static void clear_masks(void)
{
	struct mask_info *info;

	info = &socket_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
	info = &book_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
	info = &drawer_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
}

static union topology_entry *next_tle(union topology_entry *tle)
{
	if (!tle->nl)
		return (union topology_entry *)((struct topology_core *)tle + 1);
	return (union topology_entry *)((struct topology_container *)tle + 1);
}

static void tl_to_masks(struct sysinfo_15_1_x *info)
{
	struct mask_info *socket = &socket_info;
	struct mask_info *book = &book_info;
	struct mask_info *drawer = &drawer_info;
	union topology_entry *tle, *end;

	clear_masks();
	tle = info->tle;
	end = (union topology_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 3:
			drawer = drawer->next;
			drawer->id = tle->container.id;
			break;
		case 2:
			book = book->next;
			book->id = tle->container.id;
			break;
		case 1:
			socket = socket->next;
			socket->id = tle->container.id;
			break;
		case 0:
			add_cpus_to_mask(&tle->cpu, drawer, book, socket);
			break;
		default:
			clear_masks();
			return;
		}
		tle = next_tle(tle);
	}
}
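
/*
 * Note: compared to the v3.1 tl_to_cores() above, nesting level 3
 * (drawer) is handled here as an additional container level above
 * book and socket.
 */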

static void topology_update_polarization_simple(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		smp_cpu_set_polarization(cpu, POLARIZATION_HRZ);
}

static int ptf(unsigned long fc)
{
	int rc;

	asm volatile(
		"	.insn	rre,0xb9a20000,%1,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (rc)
		: "d" (fc)  : "cc");
	return rc;
}

int topology_set_cpu_management(int fc)
{
	int cpu, rc;

	if (!MACHINE_HAS_TOPOLOGY)
		return -EOPNOTSUPP;
	if (fc)
		rc = ptf(PTF_VERTICAL);
	else
		rc = ptf(PTF_HORIZONTAL);
	if (rc)
		return -EBUSY;
	for_each_possible_cpu(cpu)
		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
	return rc;
}
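
/*
 * Note: fc=1 requests vertical polarization and fc=0 horizontal; the
 * per-CPU polarization is reset to "unknown" until the next topology
 * update reports the new values.
 */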

static void update_cpu_masks(void)
{
	struct cpu_topology_s390 *topo;
	int cpu, id;

	for_each_possible_cpu(cpu) {
		topo = &cpu_topology[cpu];
		topo->thread_mask = cpu_thread_map(cpu);
		topo->core_mask = cpu_group_map(&socket_info, cpu);
		topo->book_mask = cpu_group_map(&book_info, cpu);
		topo->drawer_mask = cpu_group_map(&drawer_info, cpu);
		if (topology_mode != TOPOLOGY_MODE_HW) {
			id = topology_mode == TOPOLOGY_MODE_PACKAGE ? 0 : cpu;
			topo->thread_id = cpu;
			topo->core_id = cpu;
			topo->socket_id = id;
			topo->book_id = id;
			topo->drawer_id = id;
			if (cpu_present(cpu))
				cpumask_set_cpu(cpu, &cpus_with_topology);
		}
	}
	numa_update_cpu_topology();
}

void store_topology(struct sysinfo_15_1_x *info)
{
	stsi(info, 15, 1, topology_mnest_limit());
}

static void __arch_update_dedicated_flag(void *arg)
{
	if (topology_cpu_dedicated(smp_processor_id()))
		set_cpu_flag(CIF_DEDICATED_CPU);
	else
		clear_cpu_flag(CIF_DEDICATED_CPU);
}

static int __arch_update_cpu_topology(void)
{
	struct sysinfo_15_1_x *info = tl_info;
	int rc = 0;

	mutex_lock(&smp_cpu_state_mutex);
	cpumask_clear(&cpus_with_topology);
	if (MACHINE_HAS_TOPOLOGY) {
		rc = 1;
		store_topology(info);
		tl_to_masks(info);
	}
	update_cpu_masks();
	if (!MACHINE_HAS_TOPOLOGY)
		topology_update_polarization_simple();
	mutex_unlock(&smp_cpu_state_mutex);
	return rc;
}

int arch_update_cpu_topology(void)
{
	struct device *dev;
	int cpu, rc;

	rc = __arch_update_cpu_topology();
	on_each_cpu(__arch_update_dedicated_flag, NULL, 0);
	for_each_online_cpu(cpu) {
		dev = get_cpu_device(cpu);
		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
	}
	return rc;
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

static void topology_flush_work(void)
{
	flush_work(&topology_work);
}

static void topology_timer_fn(struct timer_list *unused)
{
	if (ptf(PTF_CHECK))
		topology_schedule_update();
	set_topology_timer();
}

static struct timer_list topology_timer;

static atomic_t topology_poll = ATOMIC_INIT(0);

static void set_topology_timer(void)
{
	if (atomic_add_unless(&topology_poll, -1, 0))
		mod_timer(&topology_timer, jiffies + HZ / 10);
	else
		mod_timer(&topology_timer, jiffies + HZ * 60);
}
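
/*
 * Note: while topology_poll is positive the timer fires every 100ms
 * (HZ / 10); otherwise it falls back to the slow one-minute poll.
 * topology_expect_change() below tops the counter up to 60 fast
 * polls.
 */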

void topology_expect_change(void)
{
	if (!MACHINE_HAS_TOPOLOGY)
		return;
	/* This is racy, but it doesn't matter since it is just a heuristic.
	 * Worst case is that we poll in a higher frequency for a bit longer.
	 */
	if (atomic_read(&topology_poll) > 60)
		return;
	atomic_add(60, &topology_poll);
	set_topology_timer();
}

static int cpu_management;

static ssize_t dispatching_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", cpu_management);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t dispatching_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t count)
{
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	rc = 0;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	if (cpu_management == val)
		goto out;
	rc = topology_set_cpu_management(val);
	if (rc)
		goto out;
	cpu_management = val;
	topology_expect_change();
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR_RW(dispatching);
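
/*
 * Note: this provides /sys/devices/system/cpu/dispatching; writing 1
 * switches the machine to vertical CPU polarization, writing 0 back
 * to horizontal.
 */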

static ssize_t cpu_polarization_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	switch (smp_cpu_get_polarization(cpu)) {
	case POLARIZATION_HRZ:
		count = sprintf(buf, "horizontal\n");
		break;
	case POLARIZATION_VL:
		count = sprintf(buf, "vertical:low\n");
		break;
	case POLARIZATION_VM:
		count = sprintf(buf, "vertical:medium\n");
		break;
	case POLARIZATION_VH:
		count = sprintf(buf, "vertical:high\n");
		break;
	default:
		count = sprintf(buf, "unknown\n");
		break;
	}
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL);

static struct attribute *topology_cpu_attrs[] = {
	&dev_attr_polarization.attr,
	NULL,
};

static struct attribute_group topology_cpu_attr_group = {
	.attrs = topology_cpu_attrs,
};

static ssize_t cpu_dedicated_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", topology_cpu_dedicated(cpu));
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static DEVICE_ATTR(dedicated, 0444, cpu_dedicated_show, NULL);

static struct attribute *topology_extra_cpu_attrs[] = {
	&dev_attr_dedicated.attr,
	NULL,
};

static struct attribute_group topology_extra_cpu_attr_group = {
	.attrs = topology_extra_cpu_attrs,
};

int topology_cpu_init(struct cpu *cpu)
{
	int rc;

	rc = sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
	if (rc || !MACHINE_HAS_TOPOLOGY)
		return rc;
	rc = sysfs_create_group(&cpu->dev.kobj, &topology_extra_cpu_attr_group);
	if (rc)
		sysfs_remove_group(&cpu->dev.kobj, &topology_cpu_attr_group);
	return rc;
}

static const struct cpumask *cpu_thread_mask(int cpu)
{
	return &cpu_topology[cpu].thread_mask;
}

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_topology[cpu].core_mask;
}

static const struct cpumask *cpu_book_mask(int cpu)
{
	return &cpu_topology[cpu].book_mask;
}

static const struct cpumask *cpu_drawer_mask(int cpu)
{
	return &cpu_topology[cpu].drawer_mask;
}

static struct sched_domain_topology_level s390_topology[] = {
	{ cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
	{ cpu_book_mask, SD_INIT_NAME(BOOK) },
	{ cpu_drawer_mask, SD_INIT_NAME(DRAWER) },
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};
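
/*
 * Note: this table gives the scheduler a five-level domain hierarchy
 * (SMT thread -> core group -> book -> drawer -> die) built from the
 * masks computed in update_cpu_masks().
 */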

static void __init alloc_masks(struct sysinfo_15_1_x *info,
			       struct mask_info *mask, int offset)
{
	int i, nr_masks;

	nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
	for (i = 0; i < info->mnest - offset; i++)
		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
	nr_masks = max(nr_masks, 1);
	for (i = 0; i < nr_masks; i++) {
		mask->next = memblock_virt_alloc(sizeof(*mask->next), 8);
		mask = mask->next;
	}
}

void __init topology_init_early(void)
{
	struct sysinfo_15_1_x *info;

	set_sched_topology(s390_topology);
	if (topology_mode == TOPOLOGY_MODE_UNINITIALIZED) {
		if (MACHINE_HAS_TOPOLOGY)
			topology_mode = TOPOLOGY_MODE_HW;
		else
			topology_mode = TOPOLOGY_MODE_SINGLE;
	}
	if (!MACHINE_HAS_TOPOLOGY)
		goto out;
	tl_info = memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE);
	info = tl_info;
	store_topology(info);
	pr_info("The CPU configuration topology of the machine is: %d %d %d %d %d %d / %d\n",
		info->mag[0], info->mag[1], info->mag[2], info->mag[3],
		info->mag[4], info->mag[5], info->mnest);
	alloc_masks(info, &socket_info, 1);
	alloc_masks(info, &book_info, 2);
	alloc_masks(info, &drawer_info, 3);
out:
	__arch_update_cpu_topology();
	__arch_update_dedicated_flag(NULL);
}

static inline int topology_get_mode(int enabled)
{
	if (!enabled)
		return TOPOLOGY_MODE_SINGLE;
	return MACHINE_HAS_TOPOLOGY ? TOPOLOGY_MODE_HW : TOPOLOGY_MODE_PACKAGE;
}

static inline int topology_is_enabled(void)
{
	return topology_mode != TOPOLOGY_MODE_SINGLE;
}

static int __init topology_setup(char *str)
{
	bool enabled;
	int rc;

	rc = kstrtobool(str, &enabled);
	if (rc)
		return rc;
	topology_mode = topology_get_mode(enabled);
	return 0;
}
early_param("topology", topology_setup);
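
/*
 * Note: the "topology=" early parameter now accepts any kstrtobool
 * value (on/off/0/1/...), whereas the v3.1 code above only
 * recognized "off".
 */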

static int topology_ctl_handler(struct ctl_table *ctl, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned int len;
	int new_mode;
	char buf[2];

	if (!*lenp || *ppos) {
		*lenp = 0;
		return 0;
	}
	if (!write) {
		strncpy(buf, topology_is_enabled() ? "1\n" : "0\n",
			ARRAY_SIZE(buf));
		len = strnlen(buf, ARRAY_SIZE(buf));
		if (len > *lenp)
			len = *lenp;
		if (copy_to_user(buffer, buf, len))
			return -EFAULT;
		goto out;
	}
	len = *lenp;
	if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
		return -EFAULT;
	if (buf[0] != '0' && buf[0] != '1')
		return -EINVAL;
	mutex_lock(&smp_cpu_state_mutex);
	new_mode = topology_get_mode(buf[0] == '1');
	if (topology_mode != new_mode) {
		topology_mode = new_mode;
		topology_schedule_update();
	}
	mutex_unlock(&smp_cpu_state_mutex);
	topology_flush_work();
out:
	*lenp = len;
	*ppos += len;
	return 0;
}
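
/*
 * Note: together with the ctl_table entries below, this implements
 * the /proc/sys/s390/topology sysctl, which switches topology
 * support on and off at runtime and triggers a sched-domain rebuild
 * on change.
 */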

static struct ctl_table topology_ctl_table[] = {
	{
		.procname	= "topology",
		.mode		= 0644,
		.proc_handler	= topology_ctl_handler,
	},
	{ },
};

static struct ctl_table topology_dir_table[] = {
	{
		.procname	= "s390",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= topology_ctl_table,
	},
	{ },
};

static int __init topology_init(void)
{
	timer_setup(&topology_timer, topology_timer_fn, TIMER_DEFERRABLE);
	if (MACHINE_HAS_TOPOLOGY)
		set_topology_timer();
	else
		topology_update_polarization_simple();
	register_sysctl_table(topology_dir_table);
	return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
}
device_initcall(topology_init);