/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/pid.h>
#include <asm/cputable.h>
#include <misc/cxl-base.h>

#include "cxl.h"
#include "trace.h"

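/*
 * First IRQ range that holds per-context interrupts: range 1 on
 * bare-metal, where range 0 only contains the multiplexed PSL
 * interrupt, and range 0 in a guest, where the PSL interrupt is
 * per-context.
 */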
static int afu_irq_range_start(void)
{
	if (cpu_has_feature(CPU_FTR_HVMODE))
		return 1;
	return 0;
}

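/*
 * Stash the fault information in the context and defer handling to
 * the fault work queue, as resolving the fault may need to sleep.
 */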
static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 dar)
{
	ctx->dsisr = dsisr;
	ctx->dar = dar;
	schedule_work(&ctx->fault_work);
	return IRQ_HANDLED;
}

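/*
 * Main PSL interrupt handler: decode DSISR, defer segment and page
 * faults to the fault work queue, report AFU and PSL slice errors, and
 * log the remaining conditions.
 */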
irqreturn_t cxl_irq(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info)
{
	u64 dsisr, dar;

	dsisr = irq_info->dsisr;
	dar = irq_info->dar;

	trace_cxl_psl_irq(ctx, irq, dsisr, dar);

	pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar);

	if (dsisr & CXL_PSL_DSISR_An_DS) {
		/*
		 * We don't inherently need to sleep to handle this, but we do
		 * need to get a ref to the task's mm, which we can't do from
		 * irq context without the potential for a deadlock since it
		 * takes the task_lock. An alternate option would be to keep a
		 * reference to the task's mm the entire time it has cxl open,
		 * but to do that we need to solve the issue where we hold a
		 * ref to the mm, but the mm can hold a ref to the fd after an
		 * mmap preventing anything from being cleaned up.
		 */
		pr_devel("Scheduling segment miss handling for later pe: %i\n", ctx->pe);
		return schedule_cxl_fault(ctx, dsisr, dar);
	}

	if (dsisr & CXL_PSL_DSISR_An_M)
		pr_devel("CXL interrupt: PTE not found\n");
	if (dsisr & CXL_PSL_DSISR_An_P)
		pr_devel("CXL interrupt: Storage protection violation\n");
	if (dsisr & CXL_PSL_DSISR_An_A)
		pr_devel("CXL interrupt: AFU lock access to write through or cache inhibited storage\n");
	if (dsisr & CXL_PSL_DSISR_An_S)
		pr_devel("CXL interrupt: Access was afu_wr or afu_zero\n");
	if (dsisr & CXL_PSL_DSISR_An_K)
		pr_devel("CXL interrupt: Access not permitted by virtual page class key protection\n");

	if (dsisr & CXL_PSL_DSISR_An_DM) {
		/*
		 * In some cases we might be able to handle the fault
		 * immediately if hash_page would succeed, but we still need
		 * the task's mm, which as above we can't get without a lock
		 */
		pr_devel("Scheduling page fault handling for later pe: %i\n", ctx->pe);
		return schedule_cxl_fault(ctx, dsisr, dar);
	}
	if (dsisr & CXL_PSL_DSISR_An_ST)
		WARN(1, "CXL interrupt: Segment Table PTE not found\n");
	if (dsisr & CXL_PSL_DSISR_An_UR)
		pr_devel("CXL interrupt: AURP PTE not found\n");
	if (dsisr & CXL_PSL_DSISR_An_PE)
		return cxl_ops->handle_psl_slice_error(ctx, dsisr,
						irq_info->errstat);
	if (dsisr & CXL_PSL_DSISR_An_AE) {
		pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err);

		if (ctx->pending_afu_err) {
			/*
			 * This shouldn't happen - the PSL treats these errors
			 * as fatal and will have reset the AFU, so there's not
			 * much point buffering multiple AFU errors.
			 * OTOH if we DO ever see a storm of these come in it's
			 * probably best that we log them somewhere:
			 */
			dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error "
					    "undelivered to pe %i: 0x%016llx\n",
					    ctx->pe, irq_info->afu_err);
		} else {
			spin_lock(&ctx->lock);
			ctx->afu_err = irq_info->afu_err;
			ctx->pending_afu_err = true;
			spin_unlock(&ctx->lock);

			wake_up_all(&ctx->wq);
		}

		cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
		return IRQ_HANDLED;
	}
	if (dsisr & CXL_PSL_DSISR_An_OC)
		pr_devel("CXL interrupt: OS Context Warning\n");

	WARN(1, "Unhandled CXL PSL IRQ\n");
	return IRQ_HANDLED;
}

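/*
 * Handler for AFU-directed interrupts: translate the hardware IRQ back
 * into an AFU interrupt number, mark it pending in the context's
 * bitmap and wake up any waiters.
 */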
static irqreturn_t cxl_irq_afu(int irq, void *data)
{
	struct cxl_context *ctx = data;
	irq_hw_number_t hwirq = irqd_to_hwirq(irq_get_irq_data(irq));
	int irq_off, afu_irq = 0;
	__u16 range;
	int r;

	/*
	 * Look for the interrupt number.
	 * On bare-metal, we know range 0 only contains the PSL
	 * interrupt so we could start counting at range 1 and initialize
	 * afu_irq at 1.
	 * In a guest, range 0 also contains AFU interrupts, so it must
	 * be accounted for. Therefore we initialize afu_irq at 0 to take into
	 * account the PSL interrupt.
	 *
	 * For code-readability, it just seems easier to go over all
	 * the ranges on bare-metal and guest. The end result is the same.
	 */
	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		irq_off = hwirq - ctx->irqs.offset[r];
		range = ctx->irqs.range[r];
		if (irq_off >= 0 && irq_off < range) {
			afu_irq += irq_off;
			break;
		}
		afu_irq += range;
	}
	if (unlikely(r >= CXL_IRQ_RANGES)) {
		WARN(1, "Received AFU IRQ out of range for pe %i (virq %i hwirq %lx)\n",
		     ctx->pe, irq, hwirq);
		return IRQ_HANDLED;
	}

	trace_cxl_afu_irq(ctx, afu_irq, irq, hwirq);
	pr_devel("Received AFU interrupt %i for pe: %i (virq %i hwirq %lx)\n",
	       afu_irq, ctx->pe, irq, hwirq);

	if (unlikely(!ctx->irq_bitmap)) {
		WARN(1, "Received AFU IRQ for context with no IRQ bitmap\n");
		return IRQ_HANDLED;
	}
	spin_lock(&ctx->lock);
	set_bit(afu_irq - 1, ctx->irq_bitmap);
	ctx->pending_irq = true;
	spin_unlock(&ctx->lock);

	wake_up_all(&ctx->wq);

	return IRQ_HANDLED;
}

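/*
 * Map a hardware IRQ to a virtual IRQ and request it with the given
 * handler. Returns the virtual IRQ number, or 0 on failure.
 */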
unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
			 irq_handler_t handler, void *cookie, const char *name)
{
	unsigned int virq;
	int result;

	/* IRQ Domain? */
	virq = irq_create_mapping(NULL, hwirq);
	if (!virq) {
		dev_warn(&adapter->dev, "cxl_map_irq: irq_create_mapping failed\n");
		return 0;
	}

	if (cxl_ops->setup_irq)
		cxl_ops->setup_irq(adapter, hwirq, virq);

	pr_devel("hwirq %#lx mapped to virq %u\n", hwirq, virq);

	result = request_irq(virq, handler, 0, name, cookie);
	if (result) {
		dev_warn(&adapter->dev, "cxl_map_irq: request_irq failed: %i\n", result);
		return 0;
	}

	return virq;
}

void cxl_unmap_irq(unsigned int virq, void *cookie)
{
	free_irq(virq, cookie);
}

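/*
 * Allocate a single hardware IRQ from the adapter, map it and register
 * the handler. On success the hardware and virtual IRQ numbers are
 * returned through dest_hwirq and dest_virq.
 */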
int cxl_register_one_irq(struct cxl *adapter,
			irq_handler_t handler,
			void *cookie,
			irq_hw_number_t *dest_hwirq,
			unsigned int *dest_virq,
			const char *name)
{
	int hwirq, virq;

	if ((hwirq = cxl_ops->alloc_one_irq(adapter)) < 0)
		return hwirq;

	if (!(virq = cxl_map_irq(adapter, hwirq, handler, cookie, name)))
		goto err;

	*dest_hwirq = hwirq;
	*dest_virq = virq;

	return 0;

err:
	cxl_ops->release_one_irq(adapter, hwirq);
	return -ENOMEM;
}

void afu_irq_name_free(struct cxl_context *ctx)
{
	struct cxl_irq_name *irq_name, *tmp;

	list_for_each_entry_safe(irq_name, tmp, &ctx->irq_names, list) {
		kfree(irq_name->name);
		list_del(&irq_name->list);
		kfree(irq_name);
	}
}

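/*
 * Allocate the hardware IRQ ranges, the pending-IRQ bitmap and the
 * per-interrupt names for a context. The interrupts are mapped and
 * their handlers registered later, in afu_register_hwirqs().
 */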
int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
{
	int rc, r, i, j = 1;
	struct cxl_irq_name *irq_name;
	int alloc_count;

	/*
	 * In native mode, range 0 is reserved for the multiplexed
	 * PSL interrupt. It has been allocated when the AFU was initialized.
	 *
	 * In a guest, the PSL interrupt is not multiplexed, but per-context,
	 * and is the first interrupt from range 0. It still needs to be
	 * allocated, so bump the count by one.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE))
		alloc_count = count;
	else
		alloc_count = count + 1;

	if ((rc = cxl_ops->alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter,
							alloc_count)))
		return rc;

	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		/* Multiplexed PSL Interrupt */
		ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
		ctx->irqs.range[0] = 1;
	}

	ctx->irq_count = count;
	ctx->irq_bitmap = kcalloc(BITS_TO_LONGS(count),
				  sizeof(*ctx->irq_bitmap), GFP_KERNEL);
	if (!ctx->irq_bitmap)
		goto out;

	/*
	 * Allocate names first.  If any fail, bail out before allocating
	 * actual hardware IRQs.
	 */
	for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
		for (i = 0; i < ctx->irqs.range[r]; i++) {
			irq_name = kmalloc(sizeof(struct cxl_irq_name),
					   GFP_KERNEL);
			if (!irq_name)
				goto out;
			irq_name->name = kasprintf(GFP_KERNEL, "cxl-%s-pe%i-%i",
						   dev_name(&ctx->afu->dev),
						   ctx->pe, j);
			if (!irq_name->name) {
				kfree(irq_name);
				goto out;
			}
			/* Add to tail so the next loop gets the correct order */
			list_add_tail(&irq_name->list, &ctx->irq_names);
			j++;
		}
	}
	return 0;

out:
	cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
	afu_irq_name_free(ctx);
	return -ENOMEM;
}

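/* Map the previously allocated hardware IRQs and register their handlers. */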
static void afu_register_hwirqs(struct cxl_context *ctx)
{
	irq_hw_number_t hwirq;
	struct cxl_irq_name *irq_name;
	int r, i;
	irqreturn_t (*handler)(int irq, void *data);

	/* We've allocated all memory now, so let's do the irq allocations */
	irq_name = list_first_entry(&ctx->irq_names, struct cxl_irq_name, list);
	for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
		hwirq = ctx->irqs.offset[r];
		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
			if (r == 0 && i == 0)
				/*
				 * The very first interrupt of range 0 is
				 * always the PSL interrupt, but we only
				 * need to connect a handler for guests,
				 * because there's one PSL interrupt per
				 * context.
				 * On bare-metal, the PSL interrupt is
				 * multiplexed and was set up when the AFU
				 * was configured.
				 */
				handler = cxl_ops->psl_interrupt;
			else
				handler = cxl_irq_afu;
			cxl_map_irq(ctx->afu->adapter, hwirq, handler, ctx,
				irq_name->name);
			irq_name = list_next_entry(irq_name, list);
		}
	}
}

int afu_register_irqs(struct cxl_context *ctx, u32 count)
{
	int rc;

	rc = afu_allocate_irqs(ctx, count);
	if (rc)
		return rc;

	afu_register_hwirqs(ctx);
	return 0;
}

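/*
 * Undo afu_register_irqs(): unmap every interrupt of the context, free
 * the interrupt names and release the hardware IRQ ranges.
 */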
void afu_release_irqs(struct cxl_context *ctx, void *cookie)
{
	irq_hw_number_t hwirq;
	unsigned int virq;
	int r, i;

	for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
		hwirq = ctx->irqs.offset[r];
		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
			virq = irq_find_mapping(NULL, hwirq);
			if (virq)
				cxl_unmap_irq(virq, cookie);
		}
	}

	afu_irq_name_free(ctx);
	cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);

	ctx->irq_count = 0;
}

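/* Decode and log the individual error bits of the PSL_SERR_An register. */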
void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr)
{
	dev_crit(&afu->dev,
		 "PSL Slice error received. Check AFU for root cause.\n");
	dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
	if (serr & CXL_PSL_SERR_An_afuto)
		dev_crit(&afu->dev, "AFU MMIO Timeout\n");
	if (serr & CXL_PSL_SERR_An_afudis)
		dev_crit(&afu->dev,
			 "MMIO targeted Accelerator that was not enabled\n");
	if (serr & CXL_PSL_SERR_An_afuov)
		dev_crit(&afu->dev, "AFU CTAG Overflow\n");
	if (serr & CXL_PSL_SERR_An_badsrc)
		dev_crit(&afu->dev, "Bad Interrupt Source\n");
	if (serr & CXL_PSL_SERR_An_badctx)
		dev_crit(&afu->dev, "Bad Context Handle\n");
	if (serr & CXL_PSL_SERR_An_llcmdis)
		dev_crit(&afu->dev, "LLCMD to Disabled AFU\n");
	if (serr & CXL_PSL_SERR_An_llcmdto)
		dev_crit(&afu->dev, "LLCMD Timeout to AFU\n");
	if (serr & CXL_PSL_SERR_An_afupar)
		dev_crit(&afu->dev, "AFU MMIO Parity Error\n");
	if (serr & CXL_PSL_SERR_An_afudup)
		dev_crit(&afu->dev, "AFU MMIO Duplicate CTAG Error\n");
	if (serr & CXL_PSL_SERR_An_AE)
		dev_crit(&afu->dev,
			 "AFU asserted JDONE with JERROR in AFU Directed Mode\n");
}