// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_reg_sr.h"

#include <kunit/visibility.h>
#include <linux/align.h>
#include <linux/string_helpers.h>
#include <linux/xarray.h>

#include <drm/drm_managed.h>
#include <drm/drm_print.h>

#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "xe_device.h"
#include "xe_device_types.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_mcr.h"
#include "xe_gt_printk.h"
#include "xe_hw_engine_types.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_reg_whitelist.h"
#include "xe_rtp_types.h"

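/* drmm release action: free all entries and destroy the backing xarray */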
static void reg_sr_fini(struct drm_device *drm, void *arg)
{
	struct xe_reg_sr *sr = arg;
	struct xe_reg_sr_entry *entry;
	unsigned long reg;

	xa_for_each(&sr->xa, reg, entry)
		kfree(entry);

	xa_destroy(&sr->xa);
}

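/**
 * xe_reg_sr_init - Initialize a save/restore set of register entries
 * @sr: Save/restore entries
 * @name: Name used when printing/debugging the entries
 * @xe: Device owning the entries
 *
 * Initialize the backing xarray and register a drm-managed action so the
 * entries are released together with the device.
 *
 * Return: 0 on success, negative error code on failure.
 */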
int xe_reg_sr_init(struct xe_reg_sr *sr, const char *name, struct xe_device *xe)
{
	xa_init(&sr->xa);
	sr->name = name;

	return drmm_add_action_or_reset(&xe->drm, reg_sr_fini, sr);
}
EXPORT_SYMBOL_IF_KUNIT(xe_reg_sr_init);

static bool compatible_entries(const struct xe_reg_sr_entry *e1,
			       const struct xe_reg_sr_entry *e2)
{
	/*
	 * Don't allow overwriting values: clr_bits/set_bits should be disjoint
	 * when operating in the same register
	 */
	if (e1->clr_bits & e2->clr_bits || e1->set_bits & e2->set_bits ||
	    e1->clr_bits & e2->set_bits || e1->set_bits & e2->clr_bits)
		return false;

	if (e1->reg.raw != e2->reg.raw)
		return false;

	return true;
}

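/* Error accounting is only compiled in when the KUnit tests are enabled */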
static void reg_sr_inc_error(struct xe_reg_sr *sr)
{
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
	sr->errors++;
#endif
}

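/**
 * xe_reg_sr_add - Add a new save/restore entry, keyed by register address
 * @sr: Save/restore entries
 * @e: Entry to add
 * @gt: GT used for error reporting
 *
 * If an entry for the same register already exists, the clear/set bits and
 * read mask are merged, provided the two entries don't touch the same bits.
 * Conflicting or failing entries are discarded and reported.
 *
 * Return: 0 on success, -EINVAL for a conflicting entry, or another negative
 * error code on allocation/store failure.
 */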
int xe_reg_sr_add(struct xe_reg_sr *sr,
		  const struct xe_reg_sr_entry *e,
		  struct xe_gt *gt)
{
	unsigned long idx = e->reg.addr;
	struct xe_reg_sr_entry *pentry = xa_load(&sr->xa, idx);
	int ret;

	if (pentry) {
		if (!compatible_entries(pentry, e)) {
			ret = -EINVAL;
			goto fail;
		}

		pentry->clr_bits |= e->clr_bits;
		pentry->set_bits |= e->set_bits;
		pentry->read_mask |= e->read_mask;

		return 0;
	}

	pentry = kmalloc(sizeof(*pentry), GFP_KERNEL);
	if (!pentry) {
		ret = -ENOMEM;
		goto fail;
	}

	*pentry = *e;
	ret = xa_err(xa_store(&sr->xa, idx, pentry, GFP_KERNEL));
	if (ret)
		goto fail;

	return 0;

fail:
	xe_gt_err(gt,
		  "discarding save-restore reg %04lx (clear: %08x, set: %08x, masked: %s, mcr: %s): ret=%d\n",
		  idx, e->clr_bits, e->set_bits,
		  str_yes_no(e->reg.masked),
		  str_yes_no(e->reg.mcr),
		  ret);
	reg_sr_inc_error(sr);

	return ret;
}

/*
 * Convert back from encoded value to type-safe, only to be used when reg.mcr
 * is true
 */
static struct xe_reg_mcr to_xe_reg_mcr(const struct xe_reg reg)
{
	return (const struct xe_reg_mcr){.__reg.raw = reg.raw };
}

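/* Compute the final value for one entry and write it to the HW register */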
static void apply_one_mmio(struct xe_gt *gt, struct xe_reg_sr_entry *entry)
{
	struct xe_reg reg = entry->reg;
	struct xe_reg_mcr reg_mcr = to_xe_reg_mcr(reg);
	u32 val;

	/*
	 * If this is a masked register, need to set the upper 16 bits.
	 * Set them to clr_bits since that is always a superset of the bits
	 * being modified.
	 *
	 * When it's not masked, we have to read it from hardware, unless we are
	 * supposed to set all bits.
	 */
	if (reg.masked)
		val = entry->clr_bits << 16;
	else if (entry->clr_bits + 1)
		val = (reg.mcr ?
		       xe_gt_mcr_unicast_read_any(gt, reg_mcr) :
		       xe_mmio_read32(&gt->mmio, reg)) & (~entry->clr_bits);
	else
		val = 0;

	/*
	 * TODO: add selftest to validate all tables, regardless of platform:
	 *   - Masked registers can't have set_bits with upper bits set
	 *   - set_bits must be contained in clr_bits
	 */
	val |= entry->set_bits;

	xe_gt_dbg(gt, "REG[0x%x] = 0x%08x", reg.addr, val);

	if (entry->reg.mcr)
		xe_gt_mcr_multicast_write(gt, reg_mcr, val);
	else
		xe_mmio_write32(&gt->mmio, reg, val);
}

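/**
 * xe_reg_sr_apply_mmio - Apply all save/restore entries to the hardware
 * @sr: Save/restore entries
 * @gt: GT to apply the entries on
 *
 * Take forcewake on all domains and write every entry to its register,
 * using multicast writes for MCR registers.
 */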
void xe_reg_sr_apply_mmio(struct xe_reg_sr *sr, struct xe_gt *gt)
{
	struct xe_reg_sr_entry *entry;
	unsigned long reg;
	unsigned int fw_ref;

	if (xa_empty(&sr->xa))
		return;

	xe_gt_dbg(gt, "Applying %s save-restore MMIOs\n", sr->name);

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
		goto err_force_wake;

	xa_for_each(&sr->xa, reg, entry)
		apply_one_mmio(gt, entry);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);

	return;

err_force_wake:
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	xe_gt_err(gt, "Failed to apply, err=-ETIMEDOUT\n");
}

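/**
 * xe_reg_sr_apply_whitelist - Program an engine's user-visible register whitelist
 * @hwe: Hardware engine whose RING_FORCE_TO_NONPRIV slots are programmed
 *
 * Write each whitelist entry into one of the engine's RING_FORCE_TO_NONPRIV
 * slots and point any remaining slots at a harmless default (RING_NOPID).
 * Entries beyond RING_MAX_NONPRIV_SLOTS are dropped with an error.
 */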
void xe_reg_sr_apply_whitelist(struct xe_hw_engine *hwe)
{
	struct xe_reg_sr *sr = &hwe->reg_whitelist;
	struct xe_gt *gt = hwe->gt;
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_reg_sr_entry *entry;
	struct drm_printer p;
	u32 mmio_base = hwe->mmio_base;
	unsigned long reg;
	unsigned int slot = 0;
	unsigned int fw_ref;

	if (xa_empty(&sr->xa))
		return;

	drm_dbg(&xe->drm, "Whitelisting %s registers\n", sr->name);

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
		goto err_force_wake;

	p = drm_dbg_printer(&xe->drm, DRM_UT_DRIVER, NULL);
	xa_for_each(&sr->xa, reg, entry) {
		if (slot == RING_MAX_NONPRIV_SLOTS) {
			xe_gt_err(gt,
				  "hwe %s: maximum register whitelist slots (%d) reached, refusing to add more\n",
				  hwe->name, RING_MAX_NONPRIV_SLOTS);
			break;
		}

		xe_reg_whitelist_print_entry(&p, 0, reg, entry);
		xe_mmio_write32(&gt->mmio, RING_FORCE_TO_NONPRIV(mmio_base, slot),
				reg | entry->set_bits);
		slot++;
	}

	/* And clear the rest just in case of garbage */
	for (; slot < RING_MAX_NONPRIV_SLOTS; slot++) {
		u32 addr = RING_NOPID(mmio_base).addr;

		xe_mmio_write32(&gt->mmio, RING_FORCE_TO_NONPRIV(mmio_base, slot), addr);
	}

	xe_force_wake_put(gt_to_fw(gt), fw_ref);

	return;

err_force_wake:
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	drm_err(&xe->drm, "Failed to apply, err=-ETIMEDOUT\n");
}

/**
 * xe_reg_sr_dump - print all save/restore entries
 * @sr: Save/restore entries
 * @p: DRM printer
 */
void xe_reg_sr_dump(struct xe_reg_sr *sr, struct drm_printer *p)
{
	struct xe_reg_sr_entry *entry;
	unsigned long reg;

	if (!sr->name || xa_empty(&sr->xa))
		return;

	drm_printf(p, "%s\n", sr->name);
	xa_for_each(&sr->xa, reg, entry)
		drm_printf(p, "\tREG[0x%lx] clr=0x%08x set=0x%08x masked=%s mcr=%s\n",
			   reg, entry->clr_bits, entry->set_bits,
			   str_yes_no(entry->reg.masked),
			   str_yes_no(entry->reg.mcr));
}