v6.13.7
// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_cccb.h"
#include "pvr_context.h"
#include "pvr_device.h"
#include "pvr_drv.h"
#include "pvr_gem.h"
#include "pvr_job.h"
#include "pvr_power.h"
#include "pvr_rogue_fwif.h"
#include "pvr_rogue_fwif_common.h"
#include "pvr_rogue_fwif_resetframework.h"
#include "pvr_stream.h"
#include "pvr_stream_defs.h"
#include "pvr_vm.h"

#include <drm/drm_auth.h>
#include <drm/drm_managed.h>

#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/xarray.h>

static int
remap_priority(struct pvr_file *pvr_file, s32 uapi_priority,
	       enum pvr_context_priority *priority_out)
{
	switch (uapi_priority) {
	case DRM_PVR_CTX_PRIORITY_LOW:
		*priority_out = PVR_CTX_PRIORITY_LOW;
		break;
	case DRM_PVR_CTX_PRIORITY_NORMAL:
		*priority_out = PVR_CTX_PRIORITY_MEDIUM;
		break;
	case DRM_PVR_CTX_PRIORITY_HIGH:
		if (!capable(CAP_SYS_NICE) && !drm_is_current_master(from_pvr_file(pvr_file)))
			return -EACCES;
		*priority_out = PVR_CTX_PRIORITY_HIGH;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int get_fw_obj_size(enum drm_pvr_ctx_type type)
{
	switch (type) {
	case DRM_PVR_CTX_TYPE_RENDER:
		return sizeof(struct rogue_fwif_fwrendercontext);
	case DRM_PVR_CTX_TYPE_COMPUTE:
		return sizeof(struct rogue_fwif_fwcomputecontext);
	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
		return sizeof(struct rogue_fwif_fwtransfercontext);
	}

	return -EINVAL;
}

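/*
 * The static context state arrives from userspace as a serialized stream;
 * pvr_stream_process() validates it against @cmd_defs and writes the decoded
 * state into the FW structure at @dest.
 */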
static int
process_static_context_state(struct pvr_device *pvr_dev, const struct pvr_stream_cmd_defs *cmd_defs,
			     u64 stream_user_ptr, u32 stream_size, void *dest)
{
	void *stream;
	int err;

	stream = memdup_user(u64_to_user_ptr(stream_user_ptr), stream_size);
	if (IS_ERR(stream))
		return PTR_ERR(stream);

	err = pvr_stream_process(pvr_dev, cmd_defs, stream, stream_size, dest);

	kfree(stream);

	return err;
}

static int init_render_fw_objs(struct pvr_context *ctx,
			       struct drm_pvr_ioctl_create_context_args *args,
			       void *fw_ctx_map)
{
	struct rogue_fwif_static_rendercontext_state *static_rendercontext_state;
	struct rogue_fwif_fwrendercontext *fw_render_context = fw_ctx_map;

	if (!args->static_context_state_len)
		return -EINVAL;

	static_rendercontext_state = &fw_render_context->static_render_context_state;

	/* Copy static render context state from userspace. */
	return process_static_context_state(ctx->pvr_dev,
					    &pvr_static_render_context_state_stream,
					    args->static_context_state,
					    args->static_context_state_len,
					    &static_rendercontext_state->ctxswitch_regs[0]);
}

static int init_compute_fw_objs(struct pvr_context *ctx,
				struct drm_pvr_ioctl_create_context_args *args,
				void *fw_ctx_map)
{
	struct rogue_fwif_fwcomputecontext *fw_compute_context = fw_ctx_map;
	struct rogue_fwif_cdm_registers_cswitch *ctxswitch_regs;

	if (!args->static_context_state_len)
		return -EINVAL;

	ctxswitch_regs = &fw_compute_context->static_compute_context_state.ctxswitch_regs;

	/* Copy static compute context state from userspace. */
	return process_static_context_state(ctx->pvr_dev,
					    &pvr_static_compute_context_state_stream,
					    args->static_context_state,
					    args->static_context_state_len,
					    ctxswitch_regs);
}

static int init_transfer_fw_objs(struct pvr_context *ctx,
				 struct drm_pvr_ioctl_create_context_args *args,
				 void *fw_ctx_map)
{
	if (args->static_context_state_len)
		return -EINVAL;

	return 0;
}

static int init_fw_objs(struct pvr_context *ctx,
			struct drm_pvr_ioctl_create_context_args *args,
			void *fw_ctx_map)
{
	switch (ctx->type) {
	case DRM_PVR_CTX_TYPE_RENDER:
		return init_render_fw_objs(ctx, args, fw_ctx_map);
	case DRM_PVR_CTX_TYPE_COMPUTE:
		return init_compute_fw_objs(ctx, args, fw_ctx_map);
	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
		return init_transfer_fw_objs(ctx, args, fw_ctx_map);
	}

	return -EINVAL;
}

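/*
 * Init callback passed to pvr_fw_object_create(): runs with the CPU mapping
 * of the newly created FW object and seeds it with the context data staged
 * in ctx->data.
 */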
static void
ctx_fw_data_init(void *cpu_ptr, void *priv)
{
	struct pvr_context *ctx = priv;

	memcpy(cpu_ptr, ctx->data, ctx->data_size);
}

/**
 * pvr_context_destroy_queues() - Destroy all queues attached to a context.
 * @ctx: Context to destroy queues on.
 *
 * Should be called when the last reference to a context object is dropped.
 * It releases all resources attached to the queues bound to this context.
 */
static void pvr_context_destroy_queues(struct pvr_context *ctx)
{
	switch (ctx->type) {
	case DRM_PVR_CTX_TYPE_RENDER:
		pvr_queue_destroy(ctx->queues.fragment);
		pvr_queue_destroy(ctx->queues.geometry);
		break;
	case DRM_PVR_CTX_TYPE_COMPUTE:
		pvr_queue_destroy(ctx->queues.compute);
		break;
	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
		pvr_queue_destroy(ctx->queues.transfer);
		break;
	}
}

/**
 * pvr_context_create_queues() - Create all queues attached to a context.
 * @ctx: Context to create queues on.
 * @args: Context creation arguments passed by userspace.
 * @fw_ctx_map: CPU mapping of the FW context object.
 *
 * Return:
 *  * 0 on success, or
 *  * A negative error code otherwise.
 */
static int pvr_context_create_queues(struct pvr_context *ctx,
				     struct drm_pvr_ioctl_create_context_args *args,
				     void *fw_ctx_map)
{
	int err;

	switch (ctx->type) {
	case DRM_PVR_CTX_TYPE_RENDER:
		ctx->queues.geometry = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_GEOMETRY,
							args, fw_ctx_map);
		if (IS_ERR(ctx->queues.geometry)) {
			err = PTR_ERR(ctx->queues.geometry);
			ctx->queues.geometry = NULL;
			goto err_destroy_queues;
		}

		ctx->queues.fragment = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_FRAGMENT,
							args, fw_ctx_map);
		if (IS_ERR(ctx->queues.fragment)) {
			err = PTR_ERR(ctx->queues.fragment);
			ctx->queues.fragment = NULL;
			goto err_destroy_queues;
		}
		return 0;

	case DRM_PVR_CTX_TYPE_COMPUTE:
		ctx->queues.compute = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_COMPUTE,
						       args, fw_ctx_map);
		if (IS_ERR(ctx->queues.compute)) {
			err = PTR_ERR(ctx->queues.compute);
			ctx->queues.compute = NULL;
			goto err_destroy_queues;
		}
		return 0;

	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
		ctx->queues.transfer = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_TRANSFER_FRAG,
							args, fw_ctx_map);
		if (IS_ERR(ctx->queues.transfer)) {
			err = PTR_ERR(ctx->queues.transfer);
			ctx->queues.transfer = NULL;
			goto err_destroy_queues;
		}
		return 0;
	}

	return -EINVAL;

err_destroy_queues:
	pvr_context_destroy_queues(ctx);
	return err;
}

/**
 * pvr_context_kill_queues() - Kill queues attached to context.
 * @ctx: Context to kill queues on.
 *
 * Killing the queues implies making them unusable for future jobs, while still
 * giving the currently submitted jobs a chance to finish. Queue resources will
 * stay around until pvr_context_destroy_queues() is called.
 */
static void pvr_context_kill_queues(struct pvr_context *ctx)
{
	switch (ctx->type) {
	case DRM_PVR_CTX_TYPE_RENDER:
		pvr_queue_kill(ctx->queues.fragment);
		pvr_queue_kill(ctx->queues.geometry);
		break;
	case DRM_PVR_CTX_TYPE_COMPUTE:
		pvr_queue_kill(ctx->queues.compute);
		break;
	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
		pvr_queue_kill(ctx->queues.transfer);
		break;
	}
}

/**
 * pvr_context_create() - Create a context.
 * @pvr_file: File to attach the created context to.
 * @args: Context creation arguments.
 *
 * Return:
 *  * 0 on success, or
 *  * A negative error code on failure.
 */
int pvr_context_create(struct pvr_file *pvr_file, struct drm_pvr_ioctl_create_context_args *args)
{
	struct pvr_device *pvr_dev = pvr_file->pvr_dev;
	struct pvr_context *ctx;
	int ctx_size;
	int err;

	/* Context creation flags are currently unused and must be zero. */
	if (args->flags)
		return -EINVAL;

	ctx_size = get_fw_obj_size(args->type);
	if (ctx_size < 0)
		return ctx_size;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->data_size = ctx_size;
	ctx->type = args->type;
	ctx->flags = args->flags;
	ctx->pvr_dev = pvr_dev;
	kref_init(&ctx->ref_count);

	err = remap_priority(pvr_file, args->priority, &ctx->priority);
	if (err)
		goto err_free_ctx;

	ctx->vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
	if (IS_ERR(ctx->vm_ctx)) {
		err = PTR_ERR(ctx->vm_ctx);
		goto err_free_ctx;
	}

	ctx->data = kzalloc(ctx_size, GFP_KERNEL);
	if (!ctx->data) {
		err = -ENOMEM;
		goto err_put_vm;
	}

	err = pvr_context_create_queues(ctx, args, ctx->data);
	if (err)
		goto err_free_ctx_data;

	err = init_fw_objs(ctx, args, ctx->data);
	if (err)
		goto err_destroy_queues;

	err = pvr_fw_object_create(pvr_dev, ctx_size, PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
				   ctx_fw_data_init, ctx, &ctx->fw_obj);
	if (err)
		goto err_destroy_queues;

	err = xa_alloc(&pvr_dev->ctx_ids, &ctx->ctx_id, ctx, xa_limit_32b, GFP_KERNEL);
	if (err)
		goto err_destroy_fw_obj;

	err = xa_alloc(&pvr_file->ctx_handles, &args->handle, ctx, xa_limit_32b, GFP_KERNEL);
	if (err) {
		/*
		 * It's possible that another thread could have taken a reference on the context at
		 * this point as it is in the ctx_ids xarray. Therefore instead of directly
		 * destroying the context, drop a reference instead.
		 */
		pvr_context_put(ctx);
		return err;
	}

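	/*
	 * Add the context to the owning file's list so that
	 * pvr_destroy_contexts_for_file() can reach contexts which are still
	 * referenced when the file is closed.
	 */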
	spin_lock(&pvr_dev->ctx_list_lock);
	list_add_tail(&ctx->file_link, &pvr_file->contexts);
	spin_unlock(&pvr_dev->ctx_list_lock);

	return 0;

err_destroy_fw_obj:
	pvr_fw_object_destroy(ctx->fw_obj);

err_destroy_queues:
	pvr_context_destroy_queues(ctx);

err_free_ctx_data:
	kfree(ctx->data);

err_put_vm:
	pvr_vm_context_put(ctx->vm_ctx);

err_free_ctx:
	kfree(ctx);
	return err;
}

static void
pvr_context_release(struct kref *ref_count)
{
	struct pvr_context *ctx =
		container_of(ref_count, struct pvr_context, ref_count);
	struct pvr_device *pvr_dev = ctx->pvr_dev;

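	/*
	 * ctx_list_lock is taken without disabling interrupts, so the final
	 * context reference must not be dropped from interrupt context.
	 */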
	WARN_ON(in_interrupt());
	spin_lock(&pvr_dev->ctx_list_lock);
	list_del(&ctx->file_link);
	spin_unlock(&pvr_dev->ctx_list_lock);

	xa_erase(&pvr_dev->ctx_ids, ctx->ctx_id);
	pvr_context_destroy_queues(ctx);
	pvr_fw_object_destroy(ctx->fw_obj);
	kfree(ctx->data);
	pvr_vm_context_put(ctx->vm_ctx);
	kfree(ctx);
}

/**
 * pvr_context_put() - Release reference on context
 * @ctx: Target context.
 */
void
pvr_context_put(struct pvr_context *ctx)
{
	if (ctx)
		kref_put(&ctx->ref_count, pvr_context_release);
}

/**
 * pvr_context_destroy() - Destroy context
 * @pvr_file: Pointer to pvr_file structure.
 * @handle: Userspace context handle.
 *
 * Removes context from context list and drops initial reference. Context will
 * then be destroyed once all outstanding references are dropped.
 *
 * Return:
 *  * 0 on success, or
 *  * -%EINVAL if context not in context list.
 */
int
pvr_context_destroy(struct pvr_file *pvr_file, u32 handle)
{
	struct pvr_context *ctx = xa_erase(&pvr_file->ctx_handles, handle);

	if (!ctx)
		return -EINVAL;

	/* Make sure nothing can be queued to the queues after that point. */
	pvr_context_kill_queues(ctx);

	/* Release the reference held by the handle set. */
	pvr_context_put(ctx);

	return 0;
}

/**
 * pvr_destroy_contexts_for_file() - Destroy any contexts associated with the given file
 * @pvr_file: Pointer to pvr_file structure.
 *
 * Removes all contexts associated with @pvr_file from the device context list and drops initial
 * references. Contexts will then be destroyed once all outstanding references are dropped.
 */
void pvr_destroy_contexts_for_file(struct pvr_file *pvr_file)
{
	struct pvr_device *pvr_dev = pvr_file->pvr_dev;
	struct pvr_context *ctx;
	unsigned long handle;

	xa_for_each(&pvr_file->ctx_handles, handle, ctx)
		pvr_context_destroy(pvr_file, handle);

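	/*
	 * Contexts that are still referenced elsewhere (e.g. by in-flight
	 * jobs) survive the handle teardown above. Unlink them from the file
	 * list here, and unmap their VM context so its mappings stop holding
	 * references while the context itself remains alive.
	 */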
	spin_lock(&pvr_dev->ctx_list_lock);
	ctx = list_first_entry(&pvr_file->contexts, struct pvr_context, file_link);

	while (!list_entry_is_head(ctx, &pvr_file->contexts, file_link)) {
		list_del_init(&ctx->file_link);

		if (pvr_context_get_if_referenced(ctx)) {
			spin_unlock(&pvr_dev->ctx_list_lock);

			pvr_vm_unmap_all(ctx->vm_ctx);

			pvr_context_put(ctx);
			spin_lock(&pvr_dev->ctx_list_lock);
		}
		ctx = list_first_entry(&pvr_file->contexts, struct pvr_context, file_link);
	}
	spin_unlock(&pvr_dev->ctx_list_lock);
}

/**
 * pvr_context_device_init() - Device level initialization for queue related resources.
 * @pvr_dev: The device to initialize.
 */
void pvr_context_device_init(struct pvr_device *pvr_dev)
{
	xa_init_flags(&pvr_dev->ctx_ids, XA_FLAGS_ALLOC1);
	spin_lock_init(&pvr_dev->ctx_list_lock);
}

/**
 * pvr_context_device_fini() - Device level cleanup for queue related resources.
 * @pvr_dev: The device to cleanup.
 */
void pvr_context_device_fini(struct pvr_device *pvr_dev)
{
	WARN_ON(!xa_empty(&pvr_dev->ctx_ids));
	xa_destroy(&pvr_dev->ctx_ids);
}
v6.9.4
// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_cccb.h"
#include "pvr_context.h"
#include "pvr_device.h"
#include "pvr_drv.h"
#include "pvr_gem.h"
#include "pvr_job.h"
#include "pvr_power.h"
#include "pvr_rogue_fwif.h"
#include "pvr_rogue_fwif_common.h"
#include "pvr_rogue_fwif_resetframework.h"
#include "pvr_stream.h"
#include "pvr_stream_defs.h"
#include "pvr_vm.h"

#include <drm/drm_auth.h>
#include <drm/drm_managed.h>

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/xarray.h>

static int
remap_priority(struct pvr_file *pvr_file, s32 uapi_priority,
	       enum pvr_context_priority *priority_out)
{
	switch (uapi_priority) {
	case DRM_PVR_CTX_PRIORITY_LOW:
		*priority_out = PVR_CTX_PRIORITY_LOW;
		break;
	case DRM_PVR_CTX_PRIORITY_NORMAL:
		*priority_out = PVR_CTX_PRIORITY_MEDIUM;
		break;
	case DRM_PVR_CTX_PRIORITY_HIGH:
		if (!capable(CAP_SYS_NICE) && !drm_is_current_master(from_pvr_file(pvr_file)))
			return -EACCES;
		*priority_out = PVR_CTX_PRIORITY_HIGH;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int get_fw_obj_size(enum drm_pvr_ctx_type type)
{
	switch (type) {
	case DRM_PVR_CTX_TYPE_RENDER:
		return sizeof(struct rogue_fwif_fwrendercontext);
	case DRM_PVR_CTX_TYPE_COMPUTE:
		return sizeof(struct rogue_fwif_fwcomputecontext);
	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
		return sizeof(struct rogue_fwif_fwtransfercontext);
	}

	return -EINVAL;
}

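/*
 * Note: later kernels (see the v6.13.7 copy above) collapse the kzalloc() +
 * copy_from_user() sequence below into a single memdup_user() call; the
 * behaviour is unchanged.
 */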
static int
process_static_context_state(struct pvr_device *pvr_dev, const struct pvr_stream_cmd_defs *cmd_defs,
			     u64 stream_user_ptr, u32 stream_size, void *dest)
{
	void *stream;
	int err;

	stream = kzalloc(stream_size, GFP_KERNEL);
	if (!stream)
		return -ENOMEM;

	if (copy_from_user(stream, u64_to_user_ptr(stream_user_ptr), stream_size)) {
		err = -EFAULT;
		goto err_free;
	}

	err = pvr_stream_process(pvr_dev, cmd_defs, stream, stream_size, dest);
	if (err)
		goto err_free;

	kfree(stream);

	return 0;

err_free:
	kfree(stream);

	return err;
}

static int init_render_fw_objs(struct pvr_context *ctx,
			       struct drm_pvr_ioctl_create_context_args *args,
			       void *fw_ctx_map)
{
	struct rogue_fwif_static_rendercontext_state *static_rendercontext_state;
	struct rogue_fwif_fwrendercontext *fw_render_context = fw_ctx_map;

	if (!args->static_context_state_len)
		return -EINVAL;

	static_rendercontext_state = &fw_render_context->static_render_context_state;

	/* Copy static render context state from userspace. */
	return process_static_context_state(ctx->pvr_dev,
					    &pvr_static_render_context_state_stream,
					    args->static_context_state,
					    args->static_context_state_len,
					    &static_rendercontext_state->ctxswitch_regs[0]);
}

static int init_compute_fw_objs(struct pvr_context *ctx,
				struct drm_pvr_ioctl_create_context_args *args,
				void *fw_ctx_map)
{
	struct rogue_fwif_fwcomputecontext *fw_compute_context = fw_ctx_map;
	struct rogue_fwif_cdm_registers_cswitch *ctxswitch_regs;

	if (!args->static_context_state_len)
		return -EINVAL;

	ctxswitch_regs = &fw_compute_context->static_compute_context_state.ctxswitch_regs;

	/* Copy static compute context state from userspace. */
	return process_static_context_state(ctx->pvr_dev,
					    &pvr_static_compute_context_state_stream,
					    args->static_context_state,
					    args->static_context_state_len,
					    ctxswitch_regs);
}

static int init_transfer_fw_objs(struct pvr_context *ctx,
				 struct drm_pvr_ioctl_create_context_args *args,
				 void *fw_ctx_map)
{
	if (args->static_context_state_len)
		return -EINVAL;

	return 0;
}

static int init_fw_objs(struct pvr_context *ctx,
			struct drm_pvr_ioctl_create_context_args *args,
			void *fw_ctx_map)
{
	switch (ctx->type) {
	case DRM_PVR_CTX_TYPE_RENDER:
		return init_render_fw_objs(ctx, args, fw_ctx_map);
	case DRM_PVR_CTX_TYPE_COMPUTE:
		return init_compute_fw_objs(ctx, args, fw_ctx_map);
	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
		return init_transfer_fw_objs(ctx, args, fw_ctx_map);
	}

	return -EINVAL;
}

static void
ctx_fw_data_init(void *cpu_ptr, void *priv)
{
	struct pvr_context *ctx = priv;

	memcpy(cpu_ptr, ctx->data, ctx->data_size);
}

/**
 * pvr_context_destroy_queues() - Destroy all queues attached to a context.
 * @ctx: Context to destroy queues on.
 *
 * Should be called when the last reference to a context object is dropped.
 * It releases all resources attached to the queues bound to this context.
 */
static void pvr_context_destroy_queues(struct pvr_context *ctx)
{
	switch (ctx->type) {
	case DRM_PVR_CTX_TYPE_RENDER:
		pvr_queue_destroy(ctx->queues.fragment);
		pvr_queue_destroy(ctx->queues.geometry);
		break;
	case DRM_PVR_CTX_TYPE_COMPUTE:
		pvr_queue_destroy(ctx->queues.compute);
		break;
	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
		pvr_queue_destroy(ctx->queues.transfer);
		break;
	}
}

/**
 * pvr_context_create_queues() - Create all queues attached to a context.
 * @ctx: Context to create queues on.
 * @args: Context creation arguments passed by userspace.
 * @fw_ctx_map: CPU mapping of the FW context object.
 *
 * Return:
 *  * 0 on success, or
 *  * A negative error code otherwise.
 */
static int pvr_context_create_queues(struct pvr_context *ctx,
				     struct drm_pvr_ioctl_create_context_args *args,
				     void *fw_ctx_map)
{
	int err;

	switch (ctx->type) {
	case DRM_PVR_CTX_TYPE_RENDER:
		ctx->queues.geometry = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_GEOMETRY,
							args, fw_ctx_map);
		if (IS_ERR(ctx->queues.geometry)) {
			err = PTR_ERR(ctx->queues.geometry);
			ctx->queues.geometry = NULL;
			goto err_destroy_queues;
		}

		ctx->queues.fragment = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_FRAGMENT,
							args, fw_ctx_map);
		if (IS_ERR(ctx->queues.fragment)) {
			err = PTR_ERR(ctx->queues.fragment);
			ctx->queues.fragment = NULL;
			goto err_destroy_queues;
		}
		return 0;

	case DRM_PVR_CTX_TYPE_COMPUTE:
		ctx->queues.compute = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_COMPUTE,
						       args, fw_ctx_map);
		if (IS_ERR(ctx->queues.compute)) {
			err = PTR_ERR(ctx->queues.compute);
			ctx->queues.compute = NULL;
			goto err_destroy_queues;
		}
		return 0;

	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
		ctx->queues.transfer = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_TRANSFER_FRAG,
							args, fw_ctx_map);
		if (IS_ERR(ctx->queues.transfer)) {
			err = PTR_ERR(ctx->queues.transfer);
			ctx->queues.transfer = NULL;
			goto err_destroy_queues;
		}
		return 0;
	}

	return -EINVAL;

err_destroy_queues:
	pvr_context_destroy_queues(ctx);
	return err;
}

/**
 * pvr_context_kill_queues() - Kill queues attached to context.
 * @ctx: Context to kill queues on.
 *
 * Killing the queues implies making them unusable for future jobs, while still
 * giving the currently submitted jobs a chance to finish. Queue resources will
 * stay around until pvr_context_destroy_queues() is called.
 */
static void pvr_context_kill_queues(struct pvr_context *ctx)
{
	switch (ctx->type) {
	case DRM_PVR_CTX_TYPE_RENDER:
		pvr_queue_kill(ctx->queues.fragment);
		pvr_queue_kill(ctx->queues.geometry);
		break;
	case DRM_PVR_CTX_TYPE_COMPUTE:
		pvr_queue_kill(ctx->queues.compute);
		break;
	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
		pvr_queue_kill(ctx->queues.transfer);
		break;
	}
}

/**
 * pvr_context_create() - Create a context.
 * @pvr_file: File to attach the created context to.
 * @args: Context creation arguments.
 *
 * Return:
 *  * 0 on success, or
 *  * A negative error code on failure.
 */
int pvr_context_create(struct pvr_file *pvr_file, struct drm_pvr_ioctl_create_context_args *args)
{
	struct pvr_device *pvr_dev = pvr_file->pvr_dev;
	struct pvr_context *ctx;
	int ctx_size;
	int err;

	/* Context creation flags are currently unused and must be zero. */
	if (args->flags)
		return -EINVAL;

	ctx_size = get_fw_obj_size(args->type);
	if (ctx_size < 0)
		return ctx_size;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->data_size = ctx_size;
	ctx->type = args->type;
	ctx->flags = args->flags;
	ctx->pvr_dev = pvr_dev;
	kref_init(&ctx->ref_count);

	err = remap_priority(pvr_file, args->priority, &ctx->priority);
	if (err)
		goto err_free_ctx;

	ctx->vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
	if (IS_ERR(ctx->vm_ctx)) {
		err = PTR_ERR(ctx->vm_ctx);
		goto err_free_ctx;
	}

	ctx->data = kzalloc(ctx_size, GFP_KERNEL);
	if (!ctx->data) {
		err = -ENOMEM;
		goto err_put_vm;
	}

	err = pvr_context_create_queues(ctx, args, ctx->data);
	if (err)
		goto err_free_ctx_data;

	err = init_fw_objs(ctx, args, ctx->data);
	if (err)
		goto err_destroy_queues;

	err = pvr_fw_object_create(pvr_dev, ctx_size, PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
				   ctx_fw_data_init, ctx, &ctx->fw_obj);
	if (err)
		goto err_destroy_queues;

	err = xa_alloc(&pvr_dev->ctx_ids, &ctx->ctx_id, ctx, xa_limit_32b, GFP_KERNEL);
	if (err)
		goto err_destroy_fw_obj;

	err = xa_alloc(&pvr_file->ctx_handles, &args->handle, ctx, xa_limit_32b, GFP_KERNEL);
	if (err) {
		/*
		 * It's possible that another thread could have taken a reference on the context at
		 * this point as it is in the ctx_ids xarray. Therefore instead of directly
		 * destroying the context, drop a reference instead.
		 */
		pvr_context_put(ctx);
		return err;
	}

	return 0;

err_destroy_fw_obj:
	pvr_fw_object_destroy(ctx->fw_obj);

err_destroy_queues:
	pvr_context_destroy_queues(ctx);

err_free_ctx_data:
	kfree(ctx->data);

err_put_vm:
	pvr_vm_context_put(ctx->vm_ctx);

err_free_ctx:
	kfree(ctx);
	return err;
}

static void
pvr_context_release(struct kref *ref_count)
{
	struct pvr_context *ctx =
		container_of(ref_count, struct pvr_context, ref_count);
	struct pvr_device *pvr_dev = ctx->pvr_dev;

	xa_erase(&pvr_dev->ctx_ids, ctx->ctx_id);
	pvr_context_destroy_queues(ctx);
	pvr_fw_object_destroy(ctx->fw_obj);
	kfree(ctx->data);
	pvr_vm_context_put(ctx->vm_ctx);
	kfree(ctx);
}

/**
 * pvr_context_put() - Release reference on context
 * @ctx: Target context.
 */
void
pvr_context_put(struct pvr_context *ctx)
{
	if (ctx)
		kref_put(&ctx->ref_count, pvr_context_release);
}

/**
 * pvr_context_destroy() - Destroy context
 * @pvr_file: Pointer to pvr_file structure.
 * @handle: Userspace context handle.
 *
 * Removes context from context list and drops initial reference. Context will
 * then be destroyed once all outstanding references are dropped.
 *
 * Return:
 *  * 0 on success, or
 *  * -%EINVAL if context not in context list.
 */
int
pvr_context_destroy(struct pvr_file *pvr_file, u32 handle)
{
	struct pvr_context *ctx = xa_erase(&pvr_file->ctx_handles, handle);

	if (!ctx)
		return -EINVAL;

	/* Make sure nothing can be queued to the queues after that point. */
	pvr_context_kill_queues(ctx);

	/* Release the reference held by the handle set. */
	pvr_context_put(ctx);

	return 0;
}

/**
 * pvr_destroy_contexts_for_file() - Destroy any contexts associated with the given file
 * @pvr_file: Pointer to pvr_file structure.
 *
 * Removes all contexts associated with @pvr_file from the device context list and drops initial
 * references. Contexts will then be destroyed once all outstanding references are dropped.
 */
void pvr_destroy_contexts_for_file(struct pvr_file *pvr_file)
{
	struct pvr_context *ctx;
	unsigned long handle;

	xa_for_each(&pvr_file->ctx_handles, handle, ctx)
		pvr_context_destroy(pvr_file, handle);
}

/**
 * pvr_context_device_init() - Device level initialization for queue related resources.
 * @pvr_dev: The device to initialize.
 */
void pvr_context_device_init(struct pvr_device *pvr_dev)
{
	xa_init_flags(&pvr_dev->ctx_ids, XA_FLAGS_ALLOC1);
}

/**
 * pvr_context_device_fini() - Device level cleanup for queue related resources.
 * @pvr_dev: The device to cleanup.
 */
void pvr_context_device_fini(struct pvr_device *pvr_dev)
{
	WARN_ON(!xa_empty(&pvr_dev->ctx_ids));
	xa_destroy(&pvr_dev->ctx_ids);
}
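
Usage note: pvr_context_create() above is the kernel-side handler behind the PowerVR create-context ioctl. The sketch below is a minimal, hypothetical userspace caller, not kernel code. It assumes the uAPI names exported by include/uapi/drm/pvr_drm.h (DRM_IOCTL_PVR_CREATE_CONTEXT and struct drm_pvr_ioctl_create_context_args, whose field names mirror the args accessed throughout this file) and a VM context handle obtained beforehand. A transfer context is used because init_transfer_fw_objs() requires static_context_state_len to be zero, so no serialized context-switch state has to be supplied.

#include <errno.h>
#include <sys/ioctl.h>

#include <drm/pvr_drm.h>

/* Hypothetical helper: create a transfer context on an open PowerVR DRM fd.
 * On success, returns 0 and stores the context handle (written back by the
 * kernel through args.handle) in *handle_out; otherwise returns -errno.
 */
static int pvr_create_transfer_context(int fd, __u32 vm_context_handle,
				       __u32 *handle_out)
{
	struct drm_pvr_ioctl_create_context_args args = {
		.type = DRM_PVR_CTX_TYPE_TRANSFER_FRAG,
		.priority = DRM_PVR_CTX_PRIORITY_NORMAL,
		.flags = 0,	/* must be zero; the kernel rejects anything else */
		.vm_context_handle = vm_context_handle,
		/* Transfer contexts must not carry static context state. */
		.static_context_state = 0,
		.static_context_state_len = 0,
	};

	if (ioctl(fd, DRM_IOCTL_PVR_CREATE_CONTEXT, &args))
		return -errno;

	*handle_out = args.handle;
	return 0;
}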