/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * TILE SMP support routines.
 */

#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irq_work.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/homecache.h>

/*
 * We write to width and height with a single store in head_NN.S,
 * so make the variable aligned to "long".
 */
HV_Topology smp_topology __write_once __aligned(sizeof(long));
EXPORT_SYMBOL(smp_topology);

#if CHIP_HAS_IPI()
static unsigned long __iomem *ipi_mappings[NR_CPUS];
#endif

/* Does messaging work correctly to the local cpu? */
bool self_interrupt_ok;

/*
 * Top-level send_IPI*() functions to send messages to other cpus.
 */

/* Set by smp_send_stop() to avoid recursive panics. */
static int stopping_cpus;

static void __send_IPI_many(HV_Recipient *recip, int nrecip, int tag)
{
	int sent = 0;
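	/*
	 * hv_send_message() may deliver to only a subset of the
	 * recipients; it returns how many it serviced, so keep
	 * retrying until every recipient has been messaged.
	 */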
	while (sent < nrecip) {
		int rc = hv_send_message(recip, nrecip,
					 (HV_VirtAddr)&tag, sizeof(tag));
		if (rc < 0) {
			if (!stopping_cpus)  /* avoid recursive panic */
				panic("hv_send_message returned %d", rc);
			break;
		}
		WARN_ONCE(rc == 0, "hv_send_message() returned zero\n");
		sent += rc;
	}
}
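
/*
 * The hypervisor addresses tiles by (x, y) grid position; convert the
 * linear cpu number using the grid width.
 */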
void send_IPI_single(int cpu, int tag)
{
	HV_Recipient recip = {
		.y = cpu / smp_width,
		.x = cpu % smp_width,
		.state = HV_TO_BE_SENT
	};
	__send_IPI_many(&recip, 1, tag);
}

void send_IPI_many(const struct cpumask *mask, int tag)
{
	HV_Recipient recip[NR_CPUS];
	int cpu;
	int nrecip = 0;
	int my_cpu = smp_processor_id();
	for_each_cpu(cpu, mask) {
		HV_Recipient *r;
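		/* Callers must exclude the sending cpu from the mask. */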
		BUG_ON(cpu == my_cpu);
		r = &recip[nrecip++];
		r->y = cpu / smp_width;
		r->x = cpu % smp_width;
		r->state = HV_TO_BE_SENT;
	}
	__send_IPI_many(recip, nrecip, tag);
}

void send_IPI_allbutself(int tag)
{
	struct cpumask mask;
	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	send_IPI_many(&mask, tag);
}

/*
 * Functions related to starting/stopping cpus.
 */

/* Handler to start the current cpu. */
static void smp_start_cpu_interrupt(void)
{
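	/*
	 * Rewriting the saved PC means that when this interrupt
	 * returns, the cpu jumps to the secondary-start code rather
	 * than resuming where it was interrupted.
	 */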
	get_irq_regs()->pc = start_cpu_function_addr;
}

/* Handler to stop the current cpu. */
static void smp_stop_cpu_interrupt(void)
{
	arch_local_irq_disable_all();
	set_cpu_online(smp_processor_id(), 0);
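	/* Interrupts are now disabled, so "nap" sleeps this core forever. */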
	for (;;)
		asm("nap; nop");
}

/* This function calls the 'stop' function on all other CPUs in the system. */
void smp_send_stop(void)
{
	stopping_cpus = 1;
	send_IPI_allbutself(MSG_TAG_STOP_CPU);
}

/* On panic, just wait; we may get an smp_send_stop() later on. */
void panic_smp_self_stop(void)
{
	while (1)
		asm("nap; nop");
}

/*
 * Dispatch code called from hv_message_intr() for HV_MSG_TILE hv messages.
 */
void evaluate_message(int tag)
{
	switch (tag) {
	case MSG_TAG_START_CPU:		/* Start up a cpu */
		smp_start_cpu_interrupt();
		break;

	case MSG_TAG_STOP_CPU:		/* Sent to shut down slave CPUs */
		smp_stop_cpu_interrupt();
		break;

	case MSG_TAG_CALL_FUNCTION_MANY:	/* Call function on cpumask */
		generic_smp_call_function_interrupt();
		break;

	case MSG_TAG_CALL_FUNCTION_SINGLE:	/* Call function on one other CPU */
		generic_smp_call_function_single_interrupt();
		break;

	case MSG_TAG_IRQ_WORK:		/* Invoke IRQ work */
		irq_work_run();
		break;

	default:
		panic("Unknown IPI message tag %d", tag);
		break;
	}
}


/*
 * flush_icache_range() code uses smp_call_function().
 */

struct ipi_flush {
	unsigned long start;
	unsigned long end;
};

static void ipi_flush_icache_range(void *info)
{
	struct ipi_flush *flush = (struct ipi_flush *)info;
	__flush_icache_range(flush->start, flush->end);
}

void flush_icache_range(unsigned long start, unsigned long end)
{
	struct ipi_flush flush = { start, end };

	/*
	 * If invoked with irqs disabled, we cannot issue IPIs;
	 * fall back to a hypervisor-assisted L1 icache eviction.
	 */
	if (irqs_disabled())
		flush_remote(0, HV_FLUSH_EVICT_L1I, NULL, 0, 0, 0,
			     NULL, NULL, 0);
	else {
		preempt_disable();
		on_each_cpu(ipi_flush_icache_range, &flush, 1);
		preempt_enable();
	}
}
EXPORT_SYMBOL(flush_icache_range);

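/*
 * Raise irq_work on this cpu by sending ourselves a message; this is
 * enabled only when the hypervisor supports self-directed messages
 * (probed at boot in ipi_init(), which sets self_interrupt_ok).
 */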
#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	if (arch_irq_work_has_interrupt())
		send_IPI_single(smp_processor_id(), MSG_TAG_IRQ_WORK);
}
#endif


/* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */
static irqreturn_t handle_reschedule_ipi(int irq, void *token)
{
	__this_cpu_inc(irq_stat.irq_resched_count);
	scheduler_ipi();

	return IRQ_HANDLED;
}

static struct irqaction resched_action = {
	.handler = handle_reschedule_ipi,
	.name = "resched",
	.dev_id = handle_reschedule_ipi /* unique token */,
};

void __init ipi_init(void)
{
	int cpu = smp_processor_id();
	HV_Recipient recip = { .y = cpu_y(cpu), .x = cpu_x(cpu),
			       .state = HV_TO_BE_SENT };
	int tag = MSG_TAG_CALL_FUNCTION_SINGLE;

	/*
	 * Test whether we can message ourselves, as needed by
	 * arch_irq_work_raise(); this only works on Tilera hypervisor
	 * versions 4.3.4 and later.
	 */
	if (hv_send_message(&recip, 1, (HV_VirtAddr)&tag, sizeof(tag)) == 1)
		self_interrupt_ok = true;
	else
		pr_warn("Older hypervisor: disabling fast irq_work_raise\n");

#if CHIP_HAS_IPI()
	/* Map IPI trigger MMIO addresses. */
	for_each_possible_cpu(cpu) {
		HV_Coord tile;
		HV_PTE pte;
		unsigned long offset;

		tile.x = cpu_x(cpu);
		tile.y = cpu_y(cpu);
		if (hv_get_ipi_pte(tile, KERNEL_PL, &pte) != 0)
			panic("Failed to initialize IPI for cpu %d\n", cpu);

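		/* Map the tile's IPI trigger page using the HV-provided PTE. */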
		offset = PFN_PHYS(pte_pfn(pte));
		ipi_mappings[cpu] = ioremap_prot(offset, PAGE_SIZE, pte);
	}
#endif

	/* Bind handle_reschedule_ipi() to IRQ_RESCHEDULE. */
	tile_irq_activate(IRQ_RESCHEDULE, TILE_IRQ_PERCPU);
	BUG_ON(setup_irq(IRQ_RESCHEDULE, &resched_action));
}

#if CHIP_HAS_IPI()

void smp_send_reschedule(int cpu)
{
	WARN_ON(cpu_is_offline(cpu));

	/*
	 * We just want to do an MMIO store. The traditional writeq()
	 * functions aren't really correct here, since they're always
	 * directed at the PCI shim. For now, just do a raw store,
	 * casting away the __iomem attribute.
	 */
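	/* The stored value is not significant; the store itself raises the IPI. */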
	((unsigned long __force *)ipi_mappings[cpu])[IRQ_RESCHEDULE] = 0;
}

#else

void smp_send_reschedule(int cpu)
{
	HV_Coord coord;

	WARN_ON(cpu_is_offline(cpu));

	coord.y = cpu_y(cpu);
	coord.x = cpu_x(cpu);
	hv_trigger_ipi(coord, IRQ_RESCHEDULE);
}

#endif /* CHIP_HAS_IPI() */