v5.4
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */

#ifndef __LIMA_SCHED_H__
#define __LIMA_SCHED_H__

#include <drm/gpu_scheduler.h>

struct lima_vm;

struct lima_sched_task {
	struct drm_sched_job base;

	struct lima_vm *vm;
	void *frame;

	struct xarray deps;
	unsigned long last_dep;

	struct lima_bo **bos;
	int num_bos;

	/* pipe fence */
	struct dma_fence *fence;
};

struct lima_sched_context {
	struct drm_sched_entity base;
};

#define LIMA_SCHED_PIPE_MAX_MMU       8
#define LIMA_SCHED_PIPE_MAX_L2_CACHE  2
#define LIMA_SCHED_PIPE_MAX_PROCESSOR 8

struct lima_ip;

struct lima_sched_pipe {
	struct drm_gpu_scheduler base;

	u64 fence_context;
	u32 fence_seqno;
	spinlock_t fence_lock;

	struct lima_sched_task *current_task;
	struct lima_vm *current_vm;

	struct lima_ip *mmu[LIMA_SCHED_PIPE_MAX_MMU];
	int num_mmu;

	struct lima_ip *l2_cache[LIMA_SCHED_PIPE_MAX_L2_CACHE];
	int num_l2_cache;

	struct lima_ip *processor[LIMA_SCHED_PIPE_MAX_PROCESSOR];
	int num_processor;

	struct lima_ip *bcast_processor;
	struct lima_ip *bcast_mmu;

	u32 done;
	bool error;
	atomic_t task;

	int frame_size;
	struct kmem_cache *task_slab;

	int (*task_validate)(struct lima_sched_pipe *pipe, struct lima_sched_task *task);
	void (*task_run)(struct lima_sched_pipe *pipe, struct lima_sched_task *task);
	void (*task_fini)(struct lima_sched_pipe *pipe);
	void (*task_error)(struct lima_sched_pipe *pipe);
	void (*task_mmu_error)(struct lima_sched_pipe *pipe);

	struct work_struct error_work;
};

int lima_sched_task_init(struct lima_sched_task *task,
			 struct lima_sched_context *context,
			 struct lima_bo **bos, int num_bos,
			 struct lima_vm *vm);
void lima_sched_task_fini(struct lima_sched_task *task);

int lima_sched_context_init(struct lima_sched_pipe *pipe,
			    struct lima_sched_context *context,
			    atomic_t *guilty);
void lima_sched_context_fini(struct lima_sched_pipe *pipe,
			     struct lima_sched_context *context);
struct dma_fence *lima_sched_context_queue_task(struct lima_sched_context *context,
						struct lima_sched_task *task);

int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name);
void lima_sched_pipe_fini(struct lima_sched_pipe *pipe);
void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe);

static inline void lima_sched_pipe_mmu_error(struct lima_sched_pipe *pipe)
{
	pipe->error = true;
	pipe->task_mmu_error(pipe);
}

int lima_sched_slab_init(void);
void lima_sched_slab_fini(void);

#endif
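
A usage sketch of the v5.4 interface (hypothetical helper, not code from the kernel tree): the declarations above imply an allocate / init / queue flow, with the returned pipe fence dropped by the caller once it is done with it. The function name example_submit and its exact error handling are invented for illustration.

/*
 * Hypothetical illustration only: call order suggested by the v5.4
 * declarations above, not actual lima driver code.
 */
static int example_submit(struct lima_sched_context *ctx,
			  struct lima_sched_pipe *pipe,
			  struct lima_bo **bos, int num_bos,
			  struct lima_vm *vm)
{
	struct lima_sched_task *task;
	struct dma_fence *fence;
	int err;

	/* Tasks come from the pipe's slab cache (see task_slab above). */
	task = kmem_cache_zalloc(pipe->task_slab, GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	err = lima_sched_task_init(task, ctx, bos, num_bos, vm);
	if (err) {
		kmem_cache_free(pipe->task_slab, task);
		return err;
	}

	/* Queuing hands the job to the DRM GPU scheduler and returns the
	 * pipe fence a caller can wait on; drop our reference here. */
	fence = lima_sched_context_queue_task(ctx, task);
	dma_fence_put(fence);
	return 0;
}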
v6.13.7
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */

#ifndef __LIMA_SCHED_H__
#define __LIMA_SCHED_H__

#include <drm/gpu_scheduler.h>
#include <linux/list.h>
#include <linux/xarray.h>

struct lima_device;
struct lima_vm;

struct lima_sched_error_task {
	struct list_head list;
	void *data;
	u32 size;
};

struct lima_sched_task {
	struct drm_sched_job base;

	struct lima_vm *vm;
	void *frame;

	struct lima_bo **bos;
	int num_bos;

	bool recoverable;
	struct lima_bo *heap;

	/* pipe fence */
	struct dma_fence *fence;
};

struct lima_sched_context {
	struct drm_sched_entity base;
};

#define LIMA_SCHED_PIPE_MAX_MMU       8
#define LIMA_SCHED_PIPE_MAX_L2_CACHE  2
#define LIMA_SCHED_PIPE_MAX_PROCESSOR 8

struct lima_ip;

struct lima_sched_pipe {
	struct drm_gpu_scheduler base;

	u64 fence_context;
	u32 fence_seqno;
	spinlock_t fence_lock;

	struct lima_device *ldev;

	struct lima_sched_task *current_task;
	struct lima_vm *current_vm;

	struct lima_ip *mmu[LIMA_SCHED_PIPE_MAX_MMU];
	int num_mmu;

	struct lima_ip *l2_cache[LIMA_SCHED_PIPE_MAX_L2_CACHE];
	int num_l2_cache;

	struct lima_ip *processor[LIMA_SCHED_PIPE_MAX_PROCESSOR];
	int num_processor;

	struct lima_ip *bcast_processor;
	struct lima_ip *bcast_mmu;

	u32 done;
	bool error;
	atomic_t task;

	int frame_size;
	struct kmem_cache *task_slab;

	int (*task_validate)(struct lima_sched_pipe *pipe, struct lima_sched_task *task);
	void (*task_run)(struct lima_sched_pipe *pipe, struct lima_sched_task *task);
	void (*task_fini)(struct lima_sched_pipe *pipe);
	void (*task_error)(struct lima_sched_pipe *pipe);
	void (*task_mmu_error)(struct lima_sched_pipe *pipe);
	int (*task_recover)(struct lima_sched_pipe *pipe);
	void (*task_mask_irq)(struct lima_sched_pipe *pipe);

	struct work_struct recover_work;
};

int lima_sched_task_init(struct lima_sched_task *task,
			 struct lima_sched_context *context,
			 struct lima_bo **bos, int num_bos,
			 struct lima_vm *vm);
void lima_sched_task_fini(struct lima_sched_task *task);

int lima_sched_context_init(struct lima_sched_pipe *pipe,
			    struct lima_sched_context *context);
void lima_sched_context_fini(struct lima_sched_pipe *pipe,
			     struct lima_sched_context *context);
struct dma_fence *lima_sched_context_queue_task(struct lima_sched_task *task);

int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name);
void lima_sched_pipe_fini(struct lima_sched_pipe *pipe);
void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe);

static inline void lima_sched_pipe_mmu_error(struct lima_sched_pipe *pipe)
{
	pipe->error = true;
	pipe->task_mmu_error(pipe);
}

int lima_sched_slab_init(void);
void lima_sched_slab_fini(void);

#endif
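
Compared with v5.4, the newer header drops the per-task deps xarray and last_dep counter (later kernels track job dependencies in the common drm_sched_job infrastructure), adds recoverable and heap to the task (used for recovering a faulted job by growing its heap buffer), adds a lima_device back-pointer and task_recover/task_mask_irq hooks to the pipe with recover_work replacing error_work, and introduces lima_sched_error_task, which from its fields holds a saved data blob of a failed task. On the API side, lima_sched_context_init no longer takes a guilty pointer and lima_sched_context_queue_task now takes only the task. A sketch of the same submission step against the v6.13.7 declarations (again a hypothetical helper, not driver code):

/*
 * Hypothetical illustration only: with the v6.13.7 interface the queue
 * call takes just the task, since the scheduler entity is already bound
 * to the embedded drm_sched_job during task initialization.
 */
static struct dma_fence *example_submit_v6(struct lima_sched_context *ctx,
					   struct lima_sched_task *task,
					   struct lima_bo **bos, int num_bos,
					   struct lima_vm *vm)
{
	int err;

	err = lima_sched_task_init(task, ctx, bos, num_bos, vm);
	if (err)
		return ERR_PTR(err);

	/* The context is reachable through task->base, so it is not
	 * passed here. */
	return lima_sched_context_queue_task(task);
}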