/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Broadcom
 */
#ifndef _VC4_DRV_H_
#define _VC4_DRV_H_

#include <linux/delay.h>
#include <linux/of.h>
#include <linux/refcount.h>
#include <linux/uaccess.h>

#include <drm/drm_atomic.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mm.h>
#include <drm/drm_modeset_lock.h>

#include "uapi/drm/vc4_drm.h"

struct drm_device;
struct drm_gem_object;

/* Don't forget to update vc4_bo.c: bo_type_names[] when adding to
 * this.
 */
enum vc4_kernel_bo_type {
        /* Any kernel allocation (gem_create_object hook) before it
         * gets another type set.
         */
        VC4_BO_TYPE_KERNEL,
        VC4_BO_TYPE_V3D,
        VC4_BO_TYPE_V3D_SHADER,
        VC4_BO_TYPE_DUMB,
        VC4_BO_TYPE_BIN,
        VC4_BO_TYPE_RCL,
        VC4_BO_TYPE_BCL,
        VC4_BO_TYPE_KERNEL_CACHE,
        VC4_BO_TYPE_COUNT
};

/* Performance monitor object. The perfmon lifetime is controlled by userspace
 * using perfmon-related ioctls. A perfmon can be attached to a submit_cl
 * request, and when this is the case, HW perf counters will be activated just
 * before the submit_cl is submitted to the GPU and disabled when the job is
 * done. This way, only events related to a specific job will be counted.
 */
struct vc4_perfmon {
        struct vc4_dev *dev;

        /* Tracks the number of users of the perfmon; when this counter
         * reaches zero the perfmon is destroyed.
         */
        refcount_t refcnt;

        /* Number of counters activated in this perfmon instance
         * (should be less than DRM_VC4_MAX_PERF_COUNTERS).
         */
        u8 ncounters;

        /* Events counted by the HW perf counters. */
        u8 events[DRM_VC4_MAX_PERF_COUNTERS];

        /* Storage for counter values. Counters are incremented by the HW
         * perf counter values every time the perfmon is attached to a GPU job.
         * This way, perfmon users don't have to retrieve the results after
         * each job if they want to track events covering several submissions.
         * Note that counter values can't be reset, but you can fake a reset by
         * destroying the perfmon and creating a new one.
         */
        u64 counters[];
};
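
/*
 * Illustrative lifetime sketch (the ioctls are defined in uapi vc4_drm.h,
 * the helpers are declared at the bottom of this header): userspace creates
 * a perfmon through DRM_IOCTL_VC4_PERFMON_CREATE, and attaching it to a job
 * takes an extra reference so the perfmon outlives an intervening
 * DRM_IOCTL_VC4_PERFMON_DESTROY:
 *
 *      vc4_perfmon_get(perfmon);       // the job now holds a reference
 *      // ... job runs, HW counters accumulate into counters[] ...
 *      vc4_perfmon_put(perfmon);       // freed once refcnt drops to zero
 */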

struct vc4_dev {
        struct drm_device base;
        struct device *dev;

        bool is_vc5;

        unsigned int irq;

        struct vc4_hvs *hvs;
        struct vc4_v3d *v3d;

        struct vc4_hang_state *hang_state;

        /* The kernel-space BO cache. Tracks buffers that have been
         * unreferenced by all other users (refcounts of 0!) but not
         * yet freed, so we can do cheap allocations.
         */
        struct vc4_bo_cache {
                /* Array of list heads for entries in the BO cache,
                 * based on number of pages, so we can do O(1) lookups
                 * in the cache when allocating.
                 */
                struct list_head *size_list;
                uint32_t size_list_size;

                /* List of all BOs in the cache, ordered by age, so we
                 * can do O(1) lookups when trying to free old
                 * buffers.
                 */
                struct list_head time_list;
                struct work_struct time_work;
                struct timer_list time_timer;
        } bo_cache;

        u32 num_labels;
        struct vc4_label {
                const char *name;
                u32 num_allocated;
                u32 size_allocated;
        } *bo_labels;

        /* Protects bo_cache and bo_labels. */
        struct mutex bo_lock;

        /* Purgeable BO pool. All BOs in this pool can have their memory
         * reclaimed if the driver is unable to allocate new BOs. We also
         * keep stats related to the purge mechanism here.
         */
        struct {
                struct list_head list;
                unsigned int num;
                size_t size;
                unsigned int purged_num;
                size_t purged_size;
                struct mutex lock;
        } purgeable;

        uint64_t dma_fence_context;

        /* Sequence number for the last job queued in bin_job_list.
         * Starts at 0 (no jobs emitted).
         */
        uint64_t emit_seqno;

        /* Sequence number for the last completed job on the GPU.
         * Starts at 0 (no jobs completed).
         */
        uint64_t finished_seqno;

        /* List of all struct vc4_exec_info for jobs to be executed in
         * the binner. The first job in the list is the one currently
         * programmed into ct0ca for execution.
         */
        struct list_head bin_job_list;

        /* List of all struct vc4_exec_info for jobs that have
         * completed binning and are ready for rendering. The first
         * job in the list is the one currently programmed into ct1ca
         * for execution.
         */
        struct list_head render_job_list;

        /* List of the finished vc4_exec_infos waiting to be freed by
         * job_done_work.
         */
        struct list_head job_done_list;
        /* Spinlock used to synchronize the job_list and seqno
         * accesses between the IRQ handler and GEM ioctls.
         */
        spinlock_t job_lock;
        wait_queue_head_t job_wait_queue;
        struct work_struct job_done_work;

        /* Used to track the active perfmon if any. Access to this field is
         * protected by job_lock.
         */
        struct vc4_perfmon *active_perfmon;

        /* List of struct vc4_seqno_cb for callbacks to be made from a
         * workqueue when the given seqno is passed.
         */
        struct list_head seqno_cb_list;

        /* The memory used for storing binner tile alloc, tile state,
         * and overflow memory allocations. This is freed when V3D
         * powers down.
         */
        struct vc4_bo *bin_bo;

        /* Size of blocks allocated within bin_bo. */
        uint32_t bin_alloc_size;

        /* Bitmask of the bin_alloc_size chunks in bin_bo that are
         * used.
         */
        uint32_t bin_alloc_used;

        /* Bitmask of the current bin_alloc used for overflow memory. */
        uint32_t bin_alloc_overflow;

        /* Incremented when an underrun error happens after an atomic commit.
         * This is particularly useful to detect when a specific modeset is too
         * demanding in terms of memory or HVS bandwidth, which is hard to
         * guess at atomic check time.
         */
        atomic_t underrun;

        struct work_struct overflow_mem_work;

        int power_refcount;

        /* Set to true when the load tracker is active. */
        bool load_tracker_enabled;

        /* Mutex controlling the power refcount. */
        struct mutex power_lock;

        struct {
                struct timer_list timer;
                struct work_struct reset_work;
        } hangcheck;

        struct drm_modeset_lock ctm_state_lock;
        struct drm_private_obj ctm_manager;
        struct drm_private_obj hvs_channels;
        struct drm_private_obj load_tracker;

        /* List of vc4_debugfs_info_entry for adding to debugfs once
         * the minor is available (after drm_dev_register()).
         */
        struct list_head debugfs_list;

        /* Mutex for binner bo allocation. */
        struct mutex bin_bo_lock;
        /* Reference count for our binner bo. */
        struct kref bin_bo_kref;
};

static inline struct vc4_dev *
to_vc4_dev(struct drm_device *dev)
{
        return container_of(dev, struct vc4_dev, base);
}
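
/*
 * Illustrative use: code that is handed a struct drm_device (e.g. an ioctl
 * implementation) recovers the enclosing vc4_dev through the embedded @base:
 *
 *      struct vc4_dev *vc4 = to_vc4_dev(dev);
 */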

struct vc4_bo {
        struct drm_gem_dma_object base;

        /* seqno of the last job to render using this BO. */
        uint64_t seqno;

        /* seqno of the last job to use the RCL to write to this BO.
         *
         * Note that this doesn't include binner overflow memory
         * writes.
         */
        uint64_t write_seqno;

        bool t_format;

        /* List entry for the BO's position in either
         * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
         */
        struct list_head unref_head;

        /* Time in jiffies when the BO was put in vc4->bo_cache. */
        unsigned long free_time;

        /* List entry for the BO's position in vc4_dev->bo_cache.size_list */
        struct list_head size_head;

        /* Struct for shader validation state, if created by
         * DRM_IOCTL_VC4_CREATE_SHADER_BO.
         */
        struct vc4_validated_shader_info *validated_shader;

        /* One of enum vc4_kernel_bo_type, or VC4_BO_TYPE_COUNT + i
         * for user-allocated labels.
         */
        int label;

        /* Count the number of active users. This is needed to determine
         * whether we can move the BO to the purgeable list or not (when the BO
         * is used by the GPU or the display engine we can't purge it).
         */
        refcount_t usecnt;

        /* Store purgeable/purged state here */
        u32 madv;
        struct mutex madv_lock;
};
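
/*
 * Sketch of the purgeable flow described above, using the usecnt helpers
 * declared later in this header:
 *
 *      vc4_bo_inc_usecnt(bo);  // in use by GPU/display: cannot be purged
 *      // ... scanout or job execution ...
 *      vc4_bo_dec_usecnt(bo);  // last user gone; a BO marked DONTNEED via
 *                              // vc4_gem_madvise_ioctl() may now be purged
 */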

static inline struct vc4_bo *
to_vc4_bo(struct drm_gem_object *bo)
{
        return container_of(to_drm_gem_dma_obj(bo), struct vc4_bo, base);
}

struct vc4_fence {
        struct dma_fence base;
        struct drm_device *dev;
        /* vc4 seqno for signaled() test */
        uint64_t seqno;
};

static inline struct vc4_fence *
to_vc4_fence(struct dma_fence *fence)
{
        return container_of(fence, struct vc4_fence, base);
}

struct vc4_seqno_cb {
        struct work_struct work;
        uint64_t seqno;
        void (*func)(struct vc4_seqno_cb *cb);
};

struct vc4_v3d {
        struct vc4_dev *vc4;
        struct platform_device *pdev;
        void __iomem *regs;
        struct clk *clk;
        struct debugfs_regset32 regset;
};

struct vc4_hvs {
        struct vc4_dev *vc4;
        struct platform_device *pdev;
        void __iomem *regs;
        u32 __iomem *dlist;

        struct clk *core_clk;

        unsigned long max_core_rate;

        /* Memory manager for CRTCs to allocate space in the display
         * list. Units are dwords.
         */
        struct drm_mm dlist_mm;
        /* Memory manager for the LBM memory used by HVS scaling. */
        struct drm_mm lbm_mm;
        spinlock_t mm_lock;

        struct drm_mm_node mitchell_netravali_filter;

        struct debugfs_regset32 regset;

        /*
         * Even though HDMI0 on the RPi4 can output modes requiring a
         * pixel rate higher than 297MHz, doing so needs adjustments in
         * the config.txt file, so it won't always be available.
         */
        bool vc5_hdmi_enable_hdmi_20;

        /*
         * 4096x2160@60 requires a core overclock to work, so record
         * whether the core clock is sufficient for it.
         */
        bool vc5_hdmi_enable_4096by2160;
};

struct vc4_plane {
        struct drm_plane base;
};

static inline struct vc4_plane *
to_vc4_plane(struct drm_plane *plane)
{
        return container_of(plane, struct vc4_plane, base);
}

enum vc4_scaling_mode {
        VC4_SCALING_NONE,
        VC4_SCALING_TPZ,
        VC4_SCALING_PPF,
};

struct vc4_plane_state {
        struct drm_plane_state base;
        /* System memory copy of the display list for this element, computed
         * at atomic_check time.
         */
        u32 *dlist;
        u32 dlist_size; /* Number of dwords allocated for the display list */
        u32 dlist_count; /* Number of used dwords in the display list. */

        /* Offset in the dlist to various words, for pageflip or
         * cursor updates.
         */
        u32 pos0_offset;
        u32 pos2_offset;
        u32 ptr0_offset;
        u32 lbm_offset;

        /* Offset where the plane's dlist was last stored in the
         * hardware at vc4_crtc_atomic_flush() time.
         */
        u32 __iomem *hw_dlist;

        /* Clipped coordinates of the plane on the display. */
        int crtc_x, crtc_y, crtc_w, crtc_h;
        /* Clipped area being scanned from in the FB. */
        u32 src_x, src_y;

        u32 src_w[2], src_h[2];

        /* Scaling selection for the RGB/Y plane and the Cb/Cr planes. */
        enum vc4_scaling_mode x_scaling[2], y_scaling[2];
        bool is_unity;
        bool is_yuv;

        /* Offset to start scanning out from the start of the plane's
         * BO.
         */
        u32 offsets[3];

        /* Our allocation in LBM for temporary storage during scaling. */
        struct drm_mm_node lbm;

        /* Set when the plane has per-pixel alpha content or does not cover
         * the entire screen. This is a hint to the CRTC that it might need
         * to enable background color fill.
         */
        bool needs_bg_fill;

        /* Mark the dlist as initialized. Useful to avoid initializing it twice
         * when async update is not possible.
         */
        bool dlist_initialized;

        /* Load of this plane on the HVS block. The load is expressed in HVS
         * cycles/sec.
         */
        u64 hvs_load;

        /* Memory bandwidth needed for this plane. This is expressed in
         * bytes/sec.
         */
        u64 membus_load;
};

static inline struct vc4_plane_state *
to_vc4_plane_state(struct drm_plane_state *state)
{
        return container_of(state, struct vc4_plane_state, base);
}

enum vc4_encoder_type {
        VC4_ENCODER_TYPE_NONE,
        VC4_ENCODER_TYPE_HDMI0,
        VC4_ENCODER_TYPE_HDMI1,
        VC4_ENCODER_TYPE_VEC,
        VC4_ENCODER_TYPE_DSI0,
        VC4_ENCODER_TYPE_DSI1,
        VC4_ENCODER_TYPE_SMI,
        VC4_ENCODER_TYPE_DPI,
};

struct vc4_encoder {
        struct drm_encoder base;
        enum vc4_encoder_type type;
        u32 clock_select;

        void (*pre_crtc_configure)(struct drm_encoder *encoder, struct drm_atomic_state *state);
        void (*pre_crtc_enable)(struct drm_encoder *encoder, struct drm_atomic_state *state);
        void (*post_crtc_enable)(struct drm_encoder *encoder, struct drm_atomic_state *state);

        void (*post_crtc_disable)(struct drm_encoder *encoder, struct drm_atomic_state *state);
        void (*post_crtc_powerdown)(struct drm_encoder *encoder, struct drm_atomic_state *state);
};

static inline struct vc4_encoder *
to_vc4_encoder(struct drm_encoder *encoder)
{
        return container_of(encoder, struct vc4_encoder, base);
}

struct vc4_crtc_data {
        const char *debugfs_name;

        /* Bitmask of channels (FIFOs) of the HVS that the output can source from */
        unsigned int hvs_available_channels;

        /* Which output of the HVS this pixelvalve sources from. */
        int hvs_output;
};

struct vc4_pv_data {
        struct vc4_crtc_data base;

        /* Depth of the PixelValve FIFO in bytes */
        unsigned int fifo_depth;

        /* Number of pixels output per clock period */
        u8 pixels_per_clock;

        enum vc4_encoder_type encoder_types[4];
};

struct vc4_crtc {
        struct drm_crtc base;
        struct platform_device *pdev;
        const struct vc4_crtc_data *data;
        void __iomem *regs;

        /* Timestamp at start of vblank irq - unaffected by lock delays. */
        ktime_t t_vblank;

        u8 lut_r[256];
        u8 lut_g[256];
        u8 lut_b[256];

        struct drm_pending_vblank_event *event;

        struct debugfs_regset32 regset;

        /**
         * @feeds_txp: True if the CRTC feeds our writeback controller.
         */
        bool feeds_txp;

        /**
         * @irq_lock: Spinlock protecting the resources shared between
         * the atomic code and our vblank handler.
         */
        spinlock_t irq_lock;

        /**
         * @current_dlist: Start offset of the display list currently
         * set in the HVS for that CRTC. Protected by @irq_lock, and
         * copied in vc4_hvs_update_dlist() for the CRTC interrupt
         * handler to have access to that value.
         */
        unsigned int current_dlist;

        /**
         * @current_hvs_channel: HVS channel currently assigned to the
         * CRTC. Protected by @irq_lock, and copied in
         * vc4_hvs_atomic_begin() for the CRTC interrupt handler to have
         * access to that value.
         */
        unsigned int current_hvs_channel;
};

static inline struct vc4_crtc *
to_vc4_crtc(struct drm_crtc *crtc)
{
        return container_of(crtc, struct vc4_crtc, base);
}

static inline const struct vc4_crtc_data *
vc4_crtc_to_vc4_crtc_data(const struct vc4_crtc *crtc)
{
        return crtc->data;
}

static inline const struct vc4_pv_data *
vc4_crtc_to_vc4_pv_data(const struct vc4_crtc *crtc)
{
        const struct vc4_crtc_data *data = vc4_crtc_to_vc4_crtc_data(crtc);

        return container_of(data, struct vc4_pv_data, base);
}

struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc,
                                         struct drm_crtc_state *state);

struct vc4_crtc_state {
        struct drm_crtc_state base;
        /* Dlist area for this CRTC configuration. */
        struct drm_mm_node mm;
        bool txp_armed;
        unsigned int assigned_channel;

        struct {
                unsigned int left;
                unsigned int right;
                unsigned int top;
                unsigned int bottom;
        } margins;

        unsigned long hvs_load;

        /* Transitional state below, only valid during atomic commits */
        bool update_muxing;
};

#define VC4_HVS_CHANNEL_DISABLED ((unsigned int)-1)

static inline struct vc4_crtc_state *
to_vc4_crtc_state(struct drm_crtc_state *crtc_state)
{
        return container_of(crtc_state, struct vc4_crtc_state, base);
}

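/*
 * Register accessors. Note that these macros expect a local variable named
 * vc4 (for the V3D accessors) or hvs (for the HVS accessors) to be in scope
 * at the call site.
 */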
#define V3D_READ(offset) readl(vc4->v3d->regs + offset)
#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset)
#define HVS_READ(offset) readl(hvs->regs + offset)
#define HVS_WRITE(offset, val) writel(val, hvs->regs + offset)

#define VC4_REG32(reg) { .name = #reg, .offset = reg }
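
/*
 * Illustrative use of VC4_REG32() to build a register table for a
 * struct debugfs_regset32 (register names as defined in vc4_regs.h):
 *
 *      static const struct debugfs_reg32 v3d_regs[] = {
 *              VC4_REG32(V3D_IDENT0),
 *              VC4_REG32(V3D_IDENT1),
 *      };
 */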

struct vc4_exec_info {
        struct vc4_dev *dev;

        /* Sequence number for this bin/render job. */
        uint64_t seqno;

        /* Latest write_seqno of any BO that binning depends on. */
        uint64_t bin_dep_seqno;

        struct dma_fence *fence;

        /* Last current addresses the hardware was processing when the
         * hangcheck timer checked on us.
         */
        uint32_t last_ct0ca, last_ct1ca;

        /* Kernel-space copy of the ioctl arguments */
        struct drm_vc4_submit_cl *args;

        /* This is the array of BOs that were looked up at the start of exec.
         * Command validation will use indices into this array.
         */
        struct drm_gem_dma_object **bo;
        uint32_t bo_count;

        /* List of BOs that are being written by the RCL. Other than
         * the binner temporary storage, this is all the BOs written
         * by the job.
         */
        struct drm_gem_dma_object *rcl_write_bo[4];
        uint32_t rcl_write_bo_count;

        /* Pointers for our position in vc4->job_list */
        struct list_head head;

        /* List of other BOs used in the job that need to be released
         * once the job is complete.
         */
        struct list_head unref_list;

        /* Current unvalidated indices into @bo loaded by the non-hardware
         * VC4_PACKET_GEM_HANDLES.
         */
        uint32_t bo_index[2];

        /* This is the BO where we store the validated command lists, shader
         * records, and uniforms.
         */
        struct drm_gem_dma_object *exec_bo;

        /**
         * This tracks the per-shader-record state (packet 64) that
         * determines the length of the shader record and the offset
         * it's expected to be found at. It gets read in from the
         * command lists.
         */
        struct vc4_shader_state {
                uint32_t addr;
                /* Maximum vertex index referenced by any primitive using this
                 * shader state.
                 */
                uint32_t max_index;
        } *shader_state;

        /** How many shader states the user declared they were using. */
        uint32_t shader_state_size;
        /** How many shader state records the validator has seen. */
        uint32_t shader_state_count;

        bool found_tile_binning_mode_config_packet;
        bool found_start_tile_binning_packet;
        bool found_increment_semaphore_packet;
        bool found_flush;
        uint8_t bin_tiles_x, bin_tiles_y;
        /* Physical address of the start of the tile alloc array
         * (where each tile's binned CL will start)
         */
        uint32_t tile_alloc_offset;
        /* Bitmask of which binner slots are freed when this job completes. */
        uint32_t bin_slots;

        /**
         * Computed addresses pointing into exec_bo where we start the
         * bin thread (ct0) and render thread (ct1).
         */
        uint32_t ct0ca, ct0ea;
        uint32_t ct1ca, ct1ea;

        /* Pointer to the unvalidated bin CL (if present). */
        void *bin_u;

        /* Pointers to the shader recs. The paddr gets incremented as CL
         * packets are relocated in validate_gl_shader_state, and the vaddrs
         * (u and v) get incremented and size decremented as the shader recs
         * themselves are validated.
         */
        void *shader_rec_u;
        void *shader_rec_v;
        uint32_t shader_rec_p;
        uint32_t shader_rec_size;

        /* Pointers to the uniform data. These pointers are incremented, and
         * size decremented, as each batch of uniforms is uploaded.
         */
        void *uniforms_u;
        void *uniforms_v;
        uint32_t uniforms_p;
        uint32_t uniforms_size;

        /* Pointer to a performance monitor object if the user requested it,
         * NULL otherwise.
         */
        struct vc4_perfmon *perfmon;

        /* Whether the exec has taken a reference to the binner BO, which should
         * happen with a VC4_PACKET_TILE_BINNING_MODE_CONFIG packet.
         */
        bool bin_bo_used;
};

/* Per-open file private data. Any driver-specific resource that has to be
 * released when the DRM file is closed should be placed here.
 */
struct vc4_file {
        struct vc4_dev *dev;

        struct {
                struct idr idr;
                struct mutex lock;
        } perfmon;

        bool bin_bo_used;
};

static inline struct vc4_exec_info *
vc4_first_bin_job(struct vc4_dev *vc4)
{
        return list_first_entry_or_null(&vc4->bin_job_list,
                                        struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_first_render_job(struct vc4_dev *vc4)
{
        return list_first_entry_or_null(&vc4->render_job_list,
                                        struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_last_render_job(struct vc4_dev *vc4)
{
        if (list_empty(&vc4->render_job_list))
                return NULL;
        return list_last_entry(&vc4->render_job_list,
                               struct vc4_exec_info, head);
}
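
/*
 * Usage sketch: the job lists are shared with the IRQ handler, so peek at
 * them with job_lock held, e.g.:
 *
 *      spin_lock_irqsave(&vc4->job_lock, irqflags);
 *      exec = vc4_first_bin_job(vc4);
 *      spin_unlock_irqrestore(&vc4->job_lock, irqflags);
 */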

/**
 * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
 * setup parameters.
 *
 * This will be used at draw time to relocate the reference to the texture
 * contents in p0, and validate that the offset combined with
 * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO.
 * Note that the hardware treats unprovided config parameters as 0, so not all
 * of them need to be set up for every texture sample, and we'll store ~0 as
 * the offset to mark the unused ones.
 *
 * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit
 * Setup") for definitions of the texture parameters.
 */
struct vc4_texture_sample_info {
        bool is_direct;
        uint32_t p_offset[4];
};

/**
 * struct vc4_validated_shader_info - information about validated shaders that
 * needs to be used from command list validation.
 *
 * For a given shader, each time a shader state record references it, we need
 * to verify that the shader doesn't read more uniforms than the shader state
 * record's uniform BO pointer can provide, and we need to apply relocations
 * and validate the shader state record's uniforms that define the texture
 * samples.
 */
struct vc4_validated_shader_info {
        uint32_t uniforms_size;
        uint32_t uniforms_src_size;
        uint32_t num_texture_samples;
        struct vc4_texture_sample_info *texture_samples;

        uint32_t num_uniform_addr_offsets;
        uint32_t *uniform_addr_offsets;

        bool is_threaded;
};

/**
 * __wait_for - magic wait macro
 *
 * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
 * important that we check the condition again after having timed out, since the
 * timeout could be due to preemption or similar and we've never had a chance to
 * check the condition before the timeout.
 */
#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
        const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
        long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
        int ret__; \
        might_sleep(); \
        for (;;) { \
                const bool expired__ = ktime_after(ktime_get_raw(), end__); \
                OP; \
                /* Guarantee COND check prior to timeout */ \
                barrier(); \
                if (COND) { \
                        ret__ = 0; \
                        break; \
                } \
                if (expired__) { \
                        ret__ = -ETIMEDOUT; \
                        break; \
                } \
                usleep_range(wait__, wait__ * 2); \
                if (wait__ < (Wmax)) \
                        wait__ <<= 1; \
        } \
        ret__; \
})

#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
                                                   (Wmax))
#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
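
/*
 * Minimal usage sketch for wait_for() (REG_STATUS and STATUS_READY are
 * hypothetical): poll the condition for up to 10 ms, sleeping between
 * reads, and get -ETIMEDOUT on failure:
 *
 *      int ret = wait_for(readl(base + REG_STATUS) & STATUS_READY, 10);
 */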

/* vc4_bo.c */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
                             bool from_cache, enum vc4_kernel_bo_type type);
int vc4_bo_dumb_create(struct drm_file *file_priv,
                       struct drm_device *dev,
                       struct drm_mode_create_dumb *args);
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file_priv);
int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv);
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv);
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
int vc4_bo_cache_init(struct drm_device *dev);
int vc4_bo_inc_usecnt(struct vc4_bo *bo);
void vc4_bo_dec_usecnt(struct vc4_bo *bo);
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);
int vc4_bo_debugfs_init(struct drm_minor *minor);

/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
int vc4_crtc_disable_at_boot(struct drm_crtc *crtc);
int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc,
                  const struct drm_crtc_funcs *crtc_funcs,
                  const struct drm_crtc_helper_funcs *crtc_helper_funcs);
int vc4_page_flip(struct drm_crtc *crtc,
                  struct drm_framebuffer *fb,
                  struct drm_pending_vblank_event *event,
                  uint32_t flags,
                  struct drm_modeset_acquire_ctx *ctx);
struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc);
void vc4_crtc_destroy_state(struct drm_crtc *crtc,
                            struct drm_crtc_state *state);
void vc4_crtc_reset(struct drm_crtc *crtc);
void vc4_crtc_handle_vblank(struct vc4_crtc *crtc);
void vc4_crtc_send_vblank(struct drm_crtc *crtc);
int vc4_crtc_late_register(struct drm_crtc *crtc);
void vc4_crtc_get_margins(struct drm_crtc_state *state,
                          unsigned int *left, unsigned int *right,
                          unsigned int *top, unsigned int *bottom);

/* vc4_debugfs.c */
void vc4_debugfs_init(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
int vc4_debugfs_add_file(struct drm_minor *minor,
                         const char *filename,
                         int (*show)(struct seq_file*, void*),
                         void *data);
int vc4_debugfs_add_regset32(struct drm_minor *minor,
                             const char *filename,
                             struct debugfs_regset32 *regset);
#else
static inline int vc4_debugfs_add_file(struct drm_minor *minor,
                                       const char *filename,
                                       int (*show)(struct seq_file*, void*),
                                       void *data)
{
        return 0;
}

static inline int vc4_debugfs_add_regset32(struct drm_minor *minor,
                                           const char *filename,
                                           struct debugfs_regset32 *regset)
{
        return 0;
}
#endif

/* vc4_drv.c */
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);
int vc4_dumb_fixup_args(struct drm_mode_create_dumb *args);

/* vc4_dpi.c */
extern struct platform_driver vc4_dpi_driver;

/* vc4_dsi.c */
extern struct platform_driver vc4_dsi_driver;

/* vc4_fence.c */
extern const struct dma_fence_ops vc4_fence_ops;

/* vc4_gem.c */
int vc4_gem_init(struct drm_device *dev);
int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv);
void vc4_submit_next_bin_job(struct drm_device *dev);
void vc4_submit_next_render_job(struct drm_device *dev);
void vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec);
int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
                       uint64_t timeout_ns, bool interruptible);
void vc4_job_handle_completed(struct vc4_dev *vc4);
int vc4_queue_seqno_cb(struct drm_device *dev,
                       struct vc4_seqno_cb *cb, uint64_t seqno,
                       void (*func)(struct vc4_seqno_cb *cb));
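
/*
 * Sketch of queuing a seqno callback (my_obj and my_done are hypothetical):
 * embed a struct vc4_seqno_cb in your object, then:
 *
 *      vc4_queue_seqno_cb(dev, &my_obj->cb, exec->seqno, my_done);
 *
 * my_done() is called from a workqueue once finished_seqno passes the seqno.
 */
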
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv);

/* vc4_hdmi.c */
extern struct platform_driver vc4_hdmi_driver;

/* vc4_vec.c */
extern struct platform_driver vc4_vec_driver;

/* vc4_txp.c */
extern struct platform_driver vc4_txp_driver;

/* vc4_irq.c */
void vc4_irq_enable(struct drm_device *dev);
void vc4_irq_disable(struct drm_device *dev);
int vc4_irq_install(struct drm_device *dev, int irq);
void vc4_irq_uninstall(struct drm_device *dev);
void vc4_irq_reset(struct drm_device *dev);

/* vc4_hvs.c */
extern struct platform_driver vc4_hvs_driver;
void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int output);
int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output);
u8 vc4_hvs_get_fifo_frame_count(struct vc4_hvs *hvs, unsigned int fifo);
int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_dump_state(struct vc4_hvs *hvs);
void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel);
void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel);
int vc4_hvs_debugfs_init(struct drm_minor *minor);

/* vc4_kms.c */
int vc4_kms_load(struct drm_device *dev);

/* vc4_plane.c */
struct drm_plane *vc4_plane_init(struct drm_device *dev,
                                 enum drm_plane_type type,
                                 uint32_t possible_crtcs);
int vc4_plane_create_additional_planes(struct drm_device *dev);
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
u32 vc4_plane_dlist_size(const struct drm_plane_state *state);
void vc4_plane_async_set_fb(struct drm_plane *plane,
                            struct drm_framebuffer *fb);

/* vc4_v3d.c */
extern struct platform_driver vc4_v3d_driver;
extern const struct of_device_id vc4_v3d_dt_match[];
int vc4_v3d_get_bin_slot(struct vc4_dev *vc4);
int vc4_v3d_bin_bo_get(struct vc4_dev *vc4, bool *used);
void vc4_v3d_bin_bo_put(struct vc4_dev *vc4);
int vc4_v3d_pm_get(struct vc4_dev *vc4);
void vc4_v3d_pm_put(struct vc4_dev *vc4);
int vc4_v3d_debugfs_init(struct drm_minor *minor);

/* vc4_validate.c */
int
vc4_validate_bin_cl(struct drm_device *dev,
                    void *validated,
                    void *unvalidated,
                    struct vc4_exec_info *exec);

int
vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);

struct drm_gem_dma_object *vc4_use_bo(struct vc4_exec_info *exec,
                                      uint32_t hindex);

int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);

bool vc4_check_tex_size(struct vc4_exec_info *exec,
                        struct drm_gem_dma_object *fbo,
                        uint32_t offset, uint8_t tiling_format,
                        uint32_t width, uint32_t height, uint8_t cpp);

/* vc4_validate_shader.c */
struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_dma_object *shader_obj);

/* vc4_perfmon.c */
void vc4_perfmon_get(struct vc4_perfmon *perfmon);
void vc4_perfmon_put(struct vc4_perfmon *perfmon);
void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon);
void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon,
                      bool capture);
struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id);
void vc4_perfmon_open_file(struct vc4_file *vc4file);
void vc4_perfmon_close_file(struct vc4_file *vc4file);
int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv);
int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv);
int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv);

#endif /* _VC4_DRV_H_ */