/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Broadcom
 */
#ifndef _VC4_DRV_H_
#define _VC4_DRV_H_

#include <linux/delay.h>
#include <linux/refcount.h>
#include <linux/uaccess.h>

#include <drm/drm_atomic.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mm.h>
#include <drm/drm_modeset_lock.h>

#include "uapi/drm/vc4_drm.h"

struct drm_device;
struct drm_gem_object;

/* Don't forget to update vc4_bo.c: bo_type_names[] when adding to
 * this.
 */
enum vc4_kernel_bo_type {
	/* Any kernel allocation (gem_create_object hook) before it
	 * gets another type set.
	 */
	VC4_BO_TYPE_KERNEL,
	VC4_BO_TYPE_V3D,
	VC4_BO_TYPE_V3D_SHADER,
	VC4_BO_TYPE_DUMB,
	VC4_BO_TYPE_BIN,
	VC4_BO_TYPE_RCL,
	VC4_BO_TYPE_BCL,
	VC4_BO_TYPE_KERNEL_CACHE,
	VC4_BO_TYPE_COUNT
};
/* Performance monitor object. The perfmon lifetime is controlled by userspace
 * using perfmon related ioctls. A perfmon can be attached to a submit_cl
 * request, and when this is the case, HW perf counters will be activated just
 * before the submit_cl is submitted to the GPU and disabled when the job is
 * done. This way, only events related to a specific job will be counted.
 */
struct vc4_perfmon {
	/* Tracks the number of users of the perfmon, when this counter reaches
	 * zero the perfmon is destroyed.
	 */
	refcount_t refcnt;

	/* Number of counters activated in this perfmon instance
	 * (should be less than DRM_VC4_MAX_PERF_COUNTERS).
	 */
	u8 ncounters;

	/* Events counted by the HW perf counters. */
	u8 events[DRM_VC4_MAX_PERF_COUNTERS];

	/* Storage for counter values. Counters are incremented by the HW
	 * perf counter values every time the perfmon is attached to a GPU job.
	 * This way, perfmon users don't have to retrieve the results after
	 * each job if they want to track events covering several submissions.
	 * Note that counter values can't be reset, but you can fake a reset by
	 * destroying the perfmon and creating a new one.
	 */
	u64 counters[];
};
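
/*
 * Illustrative refcounting sketch (not part of the original header), using
 * the vc4_perfmon_find()/vc4_perfmon_put() helpers declared near the bottom
 * of this file; the attach point and error handling here are assumptions:
 *
 *	exec->perfmon = vc4_perfmon_find(vc4file, args->perfmonid);
 *	if (!exec->perfmon)
 *		return -ENOENT;		// lookup takes a reference on success
 *	... the job runs; HW counters accumulate into perfmon->counters[] ...
 *	vc4_perfmon_put(exec->perfmon);	// frees the perfmon at refcnt == 0
 */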

struct vc4_dev {
	struct drm_device base;

	struct vc4_hvs *hvs;
	struct vc4_v3d *v3d;
	struct vc4_dpi *dpi;
	struct vc4_vec *vec;
	struct vc4_txp *txp;

	struct vc4_hang_state *hang_state;

	/* The kernel-space BO cache. Tracks buffers that have been
	 * unreferenced by all other users (refcounts of 0!) but not
	 * yet freed, so we can do cheap allocations.
	 */
	struct vc4_bo_cache {
		/* Array of list heads for entries in the BO cache,
		 * based on number of pages, so we can do O(1) lookups
		 * in the cache when allocating.
		 */
		struct list_head *size_list;
		uint32_t size_list_size;

		/* List of all BOs in the cache, ordered by age, so we
		 * can do O(1) lookups when trying to free old
		 * buffers.
		 */
		struct list_head time_list;
		struct work_struct time_work;
		struct timer_list time_timer;
	} bo_cache;

	u32 num_labels;
	struct vc4_label {
		const char *name;
		u32 num_allocated;
		u32 size_allocated;
	} *bo_labels;

	/* Protects bo_cache and bo_labels. */
	struct mutex bo_lock;

	/* Purgeable BO pool. All BOs in this pool can have their memory
	 * reclaimed if the driver is unable to allocate new BOs. We also
	 * keep stats related to the purge mechanism here.
	 */
	struct {
		struct list_head list;
		unsigned int num;
		size_t size;
		unsigned int purged_num;
		size_t purged_size;
		struct mutex lock;
	} purgeable;

	uint64_t dma_fence_context;

	/* Sequence number for the last job queued in bin_job_list.
	 * Starts at 0 (no jobs emitted).
	 */
	uint64_t emit_seqno;

	/* Sequence number for the last completed job on the GPU.
	 * Starts at 0 (no jobs completed).
	 */
	uint64_t finished_seqno;

	/* List of all struct vc4_exec_info for jobs to be executed in
	 * the binner. The first job in the list is the one currently
	 * programmed into ct0ca for execution.
	 */
	struct list_head bin_job_list;

	/* List of all struct vc4_exec_info for jobs that have
	 * completed binning and are ready for rendering. The first
	 * job in the list is the one currently programmed into ct1ca
	 * for execution.
	 */
	struct list_head render_job_list;

	/* List of the finished vc4_exec_infos waiting to be freed by
	 * job_done_work.
	 */
	struct list_head job_done_list;
	/* Spinlock used to synchronize the job_list and seqno
	 * accesses between the IRQ handler and GEM ioctls.
	 */
	spinlock_t job_lock;
	wait_queue_head_t job_wait_queue;
	struct work_struct job_done_work;

	/* Used to track the active perfmon if any. Access to this field is
	 * protected by job_lock.
	 */
	struct vc4_perfmon *active_perfmon;

	/* List of struct vc4_seqno_cb for callbacks to be made from a
	 * workqueue when the given seqno is passed.
	 */
	struct list_head seqno_cb_list;

	/* The memory used for storing binner tile alloc, tile state,
	 * and overflow memory allocations. This is freed when V3D
	 * powers down.
	 */
	struct vc4_bo *bin_bo;

	/* Size of blocks allocated within bin_bo. */
	uint32_t bin_alloc_size;

	/* Bitmask of the bin_alloc_size chunks in bin_bo that are
	 * used.
	 */
	uint32_t bin_alloc_used;

	/* Bitmask of the current bin_alloc used for overflow memory. */
	uint32_t bin_alloc_overflow;

	/* Incremented when an underrun error has happened after an atomic
	 * commit. This is particularly useful for detecting when a specific
	 * modeset is too demanding in terms of memory or HVS bandwidth,
	 * which is hard to guess at atomic check time.
	 */
	atomic_t underrun;

	struct work_struct overflow_mem_work;

	int power_refcount;

	/* Set to true when the load tracker is supported. */
	bool load_tracker_available;

	/* Set to true when the load tracker is active. */
	bool load_tracker_enabled;

	/* Mutex controlling the power refcount. */
	struct mutex power_lock;

	struct {
		struct timer_list timer;
		struct work_struct reset_work;
	} hangcheck;

	struct drm_modeset_lock ctm_state_lock;
	struct drm_private_obj ctm_manager;
	struct drm_private_obj hvs_channels;
	struct drm_private_obj load_tracker;

	/* List of vc4_debugfs_info_entry for adding to debugfs once
	 * the minor is available (after drm_dev_register()).
	 */
	struct list_head debugfs_list;

	/* Mutex for binner bo allocation. */
	struct mutex bin_bo_lock;
	/* Reference count for our binner bo. */
	struct kref bin_bo_kref;
};

static inline struct vc4_dev *
to_vc4_dev(struct drm_device *dev)
{
	return container_of(dev, struct vc4_dev, base);
}

struct vc4_bo {
	struct drm_gem_cma_object base;

	/* seqno of the last job to render using this BO. */
	uint64_t seqno;

	/* seqno of the last job to use the RCL to write to this BO.
	 *
	 * Note that this doesn't include binner overflow memory
	 * writes.
	 */
	uint64_t write_seqno;

	bool t_format;

	/* List entry for the BO's position in either
	 * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
	 */
	struct list_head unref_head;

	/* Time in jiffies when the BO was put in vc4->bo_cache. */
	unsigned long free_time;

	/* List entry for the BO's position in vc4_dev->bo_cache.size_list */
	struct list_head size_head;

	/* Struct for shader validation state, if created by
	 * DRM_IOCTL_VC4_CREATE_SHADER_BO.
	 */
	struct vc4_validated_shader_info *validated_shader;

	/* One of enum vc4_kernel_bo_type, or VC4_BO_TYPE_COUNT + i
	 * for user-allocated labels.
	 */
	int label;

	/* Count the number of active users. This is needed to determine
	 * whether we can move the BO to the purgeable list or not (when the BO
	 * is used by the GPU or the display engine we can't purge it).
	 */
	refcount_t usecnt;

	/* Store purgeable/purged state here */
	u32 madv;
	struct mutex madv_lock;
};
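
/*
 * Sketch of the usecnt/madv interplay described above (a hypothetical
 * caller, using the vc4_bo_inc_usecnt()/vc4_bo_dec_usecnt() helpers
 * declared below): take a usage reference before handing the BO to the
 * GPU or display engine so it can't be purged mid-use, then drop it:
 *
 *	ret = vc4_bo_inc_usecnt(bo);	// fails if the BO was already purged
 *	if (ret)
 *		return ret;
 *	... BO is in use by the HW ...
 *	vc4_bo_dec_usecnt(bo);		// BO may rejoin the purgeable pool
 */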

static inline struct vc4_bo *
to_vc4_bo(struct drm_gem_object *bo)
{
	return container_of(to_drm_gem_cma_obj(bo), struct vc4_bo, base);
}

struct vc4_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* vc4 seqno for signaled() test */
	uint64_t seqno;
};

static inline struct vc4_fence *
to_vc4_fence(struct dma_fence *fence)
{
	return container_of(fence, struct vc4_fence, base);
}

struct vc4_seqno_cb {
	struct work_struct work;
	uint64_t seqno;
	void (*func)(struct vc4_seqno_cb *cb);
};
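
/*
 * Typical usage (illustrative; the wrapper struct and callback below are
 * made up for the example): embed a vc4_seqno_cb in your own state and
 * queue it with vc4_queue_seqno_cb() (declared below). The callback runs
 * from a workqueue once finished_seqno passes the requested seqno.
 *
 *	struct my_flip_state {			// hypothetical wrapper
 *		struct vc4_seqno_cb cb;
 *	};
 *
 *	static void my_flip_done(struct vc4_seqno_cb *cb)
 *	{
 *		struct my_flip_state *state =
 *			container_of(cb, struct my_flip_state, cb);
 *		... complete the flip ...
 *	}
 *
 *	vc4_queue_seqno_cb(dev, &state->cb, bo->seqno, my_flip_done);
 */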

struct vc4_v3d {
	struct vc4_dev *vc4;
	struct platform_device *pdev;
	void __iomem *regs;
	struct clk *clk;
	struct debugfs_regset32 regset;
};

struct vc4_hvs {
	struct platform_device *pdev;
	void __iomem *regs;
	u32 __iomem *dlist;

	struct clk *core_clk;

	/* Memory manager for CRTCs to allocate space in the display
	 * list. Units are dwords.
	 */
	struct drm_mm dlist_mm;
	/* Memory manager for the LBM memory used by HVS scaling. */
	struct drm_mm lbm_mm;
	spinlock_t mm_lock;

	struct drm_mm_node mitchell_netravali_filter;

	struct debugfs_regset32 regset;

	/* Set if this is an HVS version 5, which requires updated dlist
	 * structures.
	 */
	bool hvs5;
};
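
/*
 * Illustrative dlist allocation (a sketch of drm_mm usage under mm_lock,
 * not a verbatim excerpt from the driver; @node and @dlist_count are
 * assumed to come from the caller). Units are dwords, and the lock is a
 * spinlock because allocations can happen from atomic commit paths:
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	spin_lock_irqsave(&hvs->mm_lock, flags);
 *	ret = drm_mm_insert_node(&hvs->dlist_mm, &node, dlist_count);
 *	spin_unlock_irqrestore(&hvs->mm_lock, flags);
 *	if (ret)
 *		return ret;	// no dlist space left
 */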

struct vc4_plane {
	struct drm_plane base;
};

static inline struct vc4_plane *
to_vc4_plane(struct drm_plane *plane)
{
	return container_of(plane, struct vc4_plane, base);
}

enum vc4_scaling_mode {
	VC4_SCALING_NONE,
	VC4_SCALING_TPZ,
	VC4_SCALING_PPF,
};

struct vc4_plane_state {
	struct drm_plane_state base;
	/* System memory copy of the display list for this element, computed
	 * at atomic_check time.
	 */
	u32 *dlist;
	u32 dlist_size; /* Number of dwords allocated for the display list. */
	u32 dlist_count; /* Number of used dwords in the display list. */

	/* Offset in the dlist to various words, for pageflip or
	 * cursor updates.
	 */
	u32 pos0_offset;
	u32 pos2_offset;
	u32 ptr0_offset;
	u32 lbm_offset;

	/* Offset where the plane's dlist was last stored in the
	 * hardware at vc4_crtc_atomic_flush() time.
	 */
	u32 __iomem *hw_dlist;

	/* Clipped coordinates of the plane on the display. */
	int crtc_x, crtc_y, crtc_w, crtc_h;
	/* Clipped area being scanned from in the FB. */
	u32 src_x, src_y;

	u32 src_w[2], src_h[2];

	/* Scaling selection for the RGB/Y plane and the Cb/Cr planes. */
	enum vc4_scaling_mode x_scaling[2], y_scaling[2];
	bool is_unity;
	bool is_yuv;

	/* Offset to start scanning out from the start of the plane's
	 * BO.
	 */
	u32 offsets[3];

	/* Our allocation in LBM for temporary storage during scaling. */
	struct drm_mm_node lbm;

	/* Set when the plane has per-pixel alpha content or does not cover
	 * the entire screen. This is a hint to the CRTC that it might need
	 * to enable background color fill.
	 */
	bool needs_bg_fill;

	/* Mark the dlist as initialized. Useful to avoid initializing it twice
	 * when async update is not possible.
	 */
	bool dlist_initialized;

	/* Load of this plane on the HVS block. The load is expressed in HVS
	 * cycles/sec.
	 */
	u64 hvs_load;

	/* Memory bandwidth needed for this plane. This is expressed in
	 * bytes/sec.
	 */
	u64 membus_load;
};

static inline struct vc4_plane_state *
to_vc4_plane_state(struct drm_plane_state *state)
{
	return container_of(state, struct vc4_plane_state, base);
}

enum vc4_encoder_type {
	VC4_ENCODER_TYPE_NONE,
	VC4_ENCODER_TYPE_HDMI0,
	VC4_ENCODER_TYPE_HDMI1,
	VC4_ENCODER_TYPE_VEC,
	VC4_ENCODER_TYPE_DSI0,
	VC4_ENCODER_TYPE_DSI1,
	VC4_ENCODER_TYPE_SMI,
	VC4_ENCODER_TYPE_DPI,
};

struct vc4_encoder {
	struct drm_encoder base;
	enum vc4_encoder_type type;
	u32 clock_select;

	void (*pre_crtc_configure)(struct drm_encoder *encoder, struct drm_atomic_state *state);
	void (*pre_crtc_enable)(struct drm_encoder *encoder, struct drm_atomic_state *state);
	void (*post_crtc_enable)(struct drm_encoder *encoder, struct drm_atomic_state *state);

	void (*post_crtc_disable)(struct drm_encoder *encoder, struct drm_atomic_state *state);
	void (*post_crtc_powerdown)(struct drm_encoder *encoder, struct drm_atomic_state *state);
};

static inline struct vc4_encoder *
to_vc4_encoder(struct drm_encoder *encoder)
{
	return container_of(encoder, struct vc4_encoder, base);
}

struct vc4_crtc_data {
	/* Bitmask of channels (FIFOs) of the HVS that the output can source from */
	unsigned int hvs_available_channels;

	/* Which output of the HVS this pixelvalve sources from. */
	int hvs_output;
};

struct vc4_pv_data {
	struct vc4_crtc_data base;

	/* Depth of the PixelValve FIFO in bytes */
	unsigned int fifo_depth;

	/* Number of pixels output per clock period */
	u8 pixels_per_clock;

	enum vc4_encoder_type encoder_types[4];
	const char *debugfs_name;
};

struct vc4_crtc {
	struct drm_crtc base;
	struct platform_device *pdev;
	const struct vc4_crtc_data *data;
	void __iomem *regs;

	/* Timestamp at start of vblank irq - unaffected by lock delays. */
	ktime_t t_vblank;

	u8 lut_r[256];
	u8 lut_g[256];
	u8 lut_b[256];

	struct drm_pending_vblank_event *event;

	struct debugfs_regset32 regset;
};

static inline struct vc4_crtc *
to_vc4_crtc(struct drm_crtc *crtc)
{
	return container_of(crtc, struct vc4_crtc, base);
}

static inline const struct vc4_crtc_data *
vc4_crtc_to_vc4_crtc_data(const struct vc4_crtc *crtc)
{
	return crtc->data;
}

static inline const struct vc4_pv_data *
vc4_crtc_to_vc4_pv_data(const struct vc4_crtc *crtc)
{
	const struct vc4_crtc_data *data = vc4_crtc_to_vc4_crtc_data(crtc);

	return container_of(data, struct vc4_pv_data, base);
}

struct vc4_crtc_state {
	struct drm_crtc_state base;
	/* Dlist area for this CRTC configuration. */
	struct drm_mm_node mm;
	bool feed_txp;
	bool txp_armed;
	unsigned int assigned_channel;

	struct {
		unsigned int left;
		unsigned int right;
		unsigned int top;
		unsigned int bottom;
	} margins;

	/* Transitional state below, only valid during atomic commits */
	bool update_muxing;
};

#define VC4_HVS_CHANNEL_DISABLED ((unsigned int)-1)

static inline struct vc4_crtc_state *
to_vc4_crtc_state(struct drm_crtc_state *crtc_state)
{
	return container_of(crtc_state, struct vc4_crtc_state, base);
}

#define V3D_READ(offset) readl(vc4->v3d->regs + offset)
#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset)
#define HVS_READ(offset) readl(vc4->hvs->regs + offset)
#define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset)

#define VC4_REG32(reg) { .name = #reg, .offset = reg }
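
/*
 * Example usage (hypothetical: the register and bit names below are
 * placeholders, and a "vc4" pointer must be in scope for the accessor
 * macros): a read-modify-write through the accessors, and a debugfs
 * regset entry built with VC4_REG32():
 *
 *	HVS_WRITE(SCALER_DISPCTRL,
 *		  HVS_READ(SCALER_DISPCTRL) | SCALER_DISPCTRL_ENABLE);
 *
 *	static const struct debugfs_reg32 hvs_regs[] = {
 *		VC4_REG32(SCALER_DISPCTRL),
 *		// expands to { .name = "SCALER_DISPCTRL", .offset = SCALER_DISPCTRL }
 *	};
 */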

struct vc4_exec_info {
	/* Sequence number for this bin/render job. */
	uint64_t seqno;

	/* Latest write_seqno of any BO that binning depends on. */
	uint64_t bin_dep_seqno;

	struct dma_fence *fence;

	/* Last current addresses the hardware was processing when the
	 * hangcheck timer checked on us.
	 */
	uint32_t last_ct0ca, last_ct1ca;

	/* Kernel-space copy of the ioctl arguments */
	struct drm_vc4_submit_cl *args;

	/* This is the array of BOs that were looked up at the start of exec.
	 * Command validation will use indices into this array.
	 */
	struct drm_gem_cma_object **bo;
	uint32_t bo_count;

	/* List of BOs that are being written by the RCL. Other than
	 * the binner temporary storage, this is all the BOs written
	 * by the job.
	 */
	struct drm_gem_cma_object *rcl_write_bo[4];
	uint32_t rcl_write_bo_count;

	/* Pointers for our position in vc4->job_list */
	struct list_head head;

	/* List of other BOs used in the job that need to be released
	 * once the job is complete.
	 */
	struct list_head unref_list;

	/* Current unvalidated indices into @bo loaded by the non-hardware
	 * VC4_PACKET_GEM_HANDLES.
	 */
	uint32_t bo_index[2];

	/* This is the BO where we store the validated command lists, shader
	 * records, and uniforms.
	 */
	struct drm_gem_cma_object *exec_bo;

	/**
	 * This tracks the per-shader-record state (packet 64) that
	 * determines the length of the shader record and the offset
	 * it's expected to be found at. It gets read in from the
	 * command lists.
	 */
	struct vc4_shader_state {
		uint32_t addr;
		/* Maximum vertex index referenced by any primitive using this
		 * shader state.
		 */
		uint32_t max_index;
	} *shader_state;

	/** How many shader states the user declared they were using. */
	uint32_t shader_state_size;
	/** How many shader state records the validator has seen. */
	uint32_t shader_state_count;

	bool found_tile_binning_mode_config_packet;
	bool found_start_tile_binning_packet;
	bool found_increment_semaphore_packet;
	bool found_flush;
	uint8_t bin_tiles_x, bin_tiles_y;
	/* Physical address of the start of the tile alloc array
	 * (where each tile's binned CL will start)
	 */
	uint32_t tile_alloc_offset;
	/* Bitmask of which binner slots are freed when this job completes. */
	uint32_t bin_slots;

	/**
	 * Computed addresses pointing into exec_bo where we start the
	 * bin thread (ct0) and render thread (ct1).
	 */
	uint32_t ct0ca, ct0ea;
	uint32_t ct1ca, ct1ea;

	/* Pointer to the unvalidated bin CL (if present). */
	void *bin_u;
	/* Pointers to the shader recs. The paddr (shader_rec_p) is
	 * incremented as CL packets are relocated in
	 * validate_gl_shader_state, and the vaddrs (shader_rec_u and
	 * shader_rec_v) are incremented and the size decremented as the
	 * shader recs themselves are validated.
	 */
	void *shader_rec_u;
	void *shader_rec_v;
	uint32_t shader_rec_p;
	uint32_t shader_rec_size;

	/* Pointers to the uniform data. These pointers are incremented, and
	 * size decremented, as each batch of uniforms is uploaded.
	 */
	void *uniforms_u;
	void *uniforms_v;
	uint32_t uniforms_p;
	uint32_t uniforms_size;

	/* Pointer to a performance monitor object if the user requested it,
	 * NULL otherwise.
	 */
	struct vc4_perfmon *perfmon;

	/* Whether the exec has taken a reference to the binner BO, which should
	 * happen with a VC4_PACKET_TILE_BINNING_MODE_CONFIG packet.
	 */
	bool bin_bo_used;
};

/* Per-open file private data. Any driver-specific resource that has to be
 * released when the DRM file is closed should be placed here.
 */
struct vc4_file {
	struct {
		struct idr idr;
		struct mutex lock;
	} perfmon;

	bool bin_bo_used;
};

static inline struct vc4_exec_info *
vc4_first_bin_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->bin_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_first_render_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->render_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_last_render_job(struct vc4_dev *vc4)
{
	if (list_empty(&vc4->render_job_list))
		return NULL;
	return list_last_entry(&vc4->render_job_list,
			       struct vc4_exec_info, head);
}
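
/*
 * These helpers peek at lists that the IRQ handler also modifies, so per
 * the job_lock comment in struct vc4_dev, callers are expected to hold
 * that lock. A minimal sketch (illustrative, not a driver excerpt):
 *
 *	unsigned long irqflags;
 *	struct vc4_exec_info *exec;
 *
 *	spin_lock_irqsave(&vc4->job_lock, irqflags);
 *	exec = vc4_first_bin_job(vc4);
 *	... inspect exec, which may be NULL if the list is empty ...
 *	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
 */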

/**
 * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
 * setup parameters.
 *
 * This will be used at draw time to relocate the reference to the texture
 * contents in p0, and validate that the offset combined with
 * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO.
 * Note that the hardware treats unprovided config parameters as 0, so not all
 * of them need to be set up for every texture sample, and we'll store ~0 as
 * the offset to mark the unused ones.
 *
 * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit
 * Setup") for definitions of the texture parameters.
 */
struct vc4_texture_sample_info {
	bool is_direct;
	uint32_t p_offset[4];
};

/**
 * struct vc4_validated_shader_info - information about validated shaders that
 * needs to be used from command list validation.
 *
 * For a given shader, each time a shader state record references it, we need
 * to verify that the shader doesn't read more uniforms than the shader state
 * record's uniform BO pointer can provide, and we need to apply relocations
 * and validate the shader state record's uniforms that define the texture
 * samples.
 */
struct vc4_validated_shader_info {
	uint32_t uniforms_size;
	uint32_t uniforms_src_size;
	uint32_t num_texture_samples;
	struct vc4_texture_sample_info *texture_samples;

	uint32_t num_uniform_addr_offsets;
	uint32_t *uniform_addr_offsets;

	bool is_threaded;
};

/**
 * __wait_for - magic wait macro
 *
 * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
 * important that we check the condition again after having timed out, since the
 * timeout could be due to preemption or similar and we've never had a chance to
 * check the condition before the timeout.
 */
#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
	const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
	long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
	int ret__; \
	might_sleep(); \
	for (;;) { \
		const bool expired__ = ktime_after(ktime_get_raw(), end__); \
		OP; \
		/* Guarantee COND check prior to timeout */ \
		barrier(); \
		if (COND) { \
			ret__ = 0; \
			break; \
		} \
		if (expired__) { \
			ret__ = -ETIMEDOUT; \
			break; \
		} \
		usleep_range(wait__, wait__ * 2); \
		if (wait__ < (Wmax)) \
			wait__ <<= 1; \
	} \
	ret__; \
})

#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
						   (Wmax))
#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
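
/*
 * Usage sketch (hypothetical: SOME_REG and SOME_REG_BUSY are placeholder
 * names): poll a register until a bit clears, giving up after 1 ms. The
 * macro re-checks COND one last time after the deadline, as explained
 * above, so a late success is not misreported as a timeout.
 *
 *	int ret = wait_for(!(V3D_READ(SOME_REG) & SOME_REG_BUSY), 1);
 *	if (ret == -ETIMEDOUT)
 *		DRM_ERROR("timed out waiting for V3D to go idle\n");
 */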

/* vc4_bo.c */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
			     bool from_cache, enum vc4_kernel_bo_type type);
int vc4_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv);
int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);
int vc4_bo_cache_init(struct drm_device *dev);
int vc4_bo_inc_usecnt(struct vc4_bo *bo);
void vc4_bo_dec_usecnt(struct vc4_bo *bo);
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);

/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
int vc4_crtc_disable_at_boot(struct drm_crtc *crtc);
int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc,
		  const struct drm_crtc_funcs *crtc_funcs,
		  const struct drm_crtc_helper_funcs *crtc_helper_funcs);
void vc4_crtc_destroy(struct drm_crtc *crtc);
int vc4_page_flip(struct drm_crtc *crtc,
		  struct drm_framebuffer *fb,
		  struct drm_pending_vblank_event *event,
		  uint32_t flags,
		  struct drm_modeset_acquire_ctx *ctx);
struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc);
void vc4_crtc_destroy_state(struct drm_crtc *crtc,
			    struct drm_crtc_state *state);
void vc4_crtc_reset(struct drm_crtc *crtc);
void vc4_crtc_handle_vblank(struct vc4_crtc *crtc);
void vc4_crtc_get_margins(struct drm_crtc_state *state,
			  unsigned int *left, unsigned int *right,
			  unsigned int *top, unsigned int *bottom);

/* vc4_debugfs.c */
void vc4_debugfs_init(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
void vc4_debugfs_add_file(struct drm_device *drm,
			  const char *filename,
			  int (*show)(struct seq_file*, void*),
			  void *data);
void vc4_debugfs_add_regset32(struct drm_device *drm,
			      const char *filename,
			      struct debugfs_regset32 *regset);
#else
static inline void vc4_debugfs_add_file(struct drm_device *drm,
					const char *filename,
					int (*show)(struct seq_file*, void*),
					void *data)
{
}

static inline void vc4_debugfs_add_regset32(struct drm_device *drm,
					    const char *filename,
					    struct debugfs_regset32 *regset)
{
}
#endif
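
/*
 * Example registration (illustrative; the file name and show() body are
 * made up): entries added before drm_dev_register() are queued on
 * vc4->debugfs_list and materialized by vc4_debugfs_init() once the DRM
 * minor exists.
 *
 *	static int my_state_show(struct seq_file *m, void *unused)	// hypothetical
 *	{
 *		seq_puts(m, "...\n");
 *		return 0;
 *	}
 *
 *	vc4_debugfs_add_file(drm, "my_state", my_state_show, NULL);
 */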

/* vc4_drv.c */
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);

/* vc4_dpi.c */
extern struct platform_driver vc4_dpi_driver;

/* vc4_dsi.c */
extern struct platform_driver vc4_dsi_driver;

/* vc4_fence.c */
extern const struct dma_fence_ops vc4_fence_ops;

/* vc4_gem.c */
int vc4_gem_init(struct drm_device *dev);
int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
void vc4_submit_next_bin_job(struct drm_device *dev);
void vc4_submit_next_render_job(struct drm_device *dev);
void vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec);
int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
		       uint64_t timeout_ns, bool interruptible);
void vc4_job_handle_completed(struct vc4_dev *vc4);
int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb));
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);

/* vc4_hdmi.c */
extern struct platform_driver vc4_hdmi_driver;

/* vc4_vec.c */
extern struct platform_driver vc4_vec_driver;

/* vc4_txp.c */
extern struct platform_driver vc4_txp_driver;

/* vc4_irq.c */
irqreturn_t vc4_irq(int irq, void *arg);
void vc4_irq_preinstall(struct drm_device *dev);
int vc4_irq_postinstall(struct drm_device *dev);
void vc4_irq_uninstall(struct drm_device *dev);
void vc4_irq_reset(struct drm_device *dev);

/* vc4_hvs.c */
extern struct platform_driver vc4_hvs_driver;
void vc4_hvs_stop_channel(struct drm_device *dev, unsigned int output);
int vc4_hvs_get_fifo_from_output(struct drm_device *dev, unsigned int output);
int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_dump_state(struct drm_device *dev);
void vc4_hvs_unmask_underrun(struct drm_device *dev, int channel);
void vc4_hvs_mask_underrun(struct drm_device *dev, int channel);

/* vc4_kms.c */
int vc4_kms_load(struct drm_device *dev);

/* vc4_plane.c */
struct drm_plane *vc4_plane_init(struct drm_device *dev,
				 enum drm_plane_type type);
int vc4_plane_create_additional_planes(struct drm_device *dev);
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
u32 vc4_plane_dlist_size(const struct drm_plane_state *state);
void vc4_plane_async_set_fb(struct drm_plane *plane,
			    struct drm_framebuffer *fb);

/* vc4_v3d.c */
extern struct platform_driver vc4_v3d_driver;
extern const struct of_device_id vc4_v3d_dt_match[];
int vc4_v3d_get_bin_slot(struct vc4_dev *vc4);
int vc4_v3d_bin_bo_get(struct vc4_dev *vc4, bool *used);
void vc4_v3d_bin_bo_put(struct vc4_dev *vc4);
int vc4_v3d_pm_get(struct vc4_dev *vc4);
void vc4_v3d_pm_put(struct vc4_dev *vc4);

/* vc4_validate.c */
int
vc4_validate_bin_cl(struct drm_device *dev,
		    void *validated,
		    void *unvalidated,
		    struct vc4_exec_info *exec);

int
vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);

struct drm_gem_cma_object *vc4_use_bo(struct vc4_exec_info *exec,
				      uint32_t hindex);

int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);

bool vc4_check_tex_size(struct vc4_exec_info *exec,
			struct drm_gem_cma_object *fbo,
			uint32_t offset, uint8_t tiling_format,
			uint32_t width, uint32_t height, uint8_t cpp);

/* vc4_validate_shader.c */
struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_cma_object *shader_obj);

/* vc4_perfmon.c */
void vc4_perfmon_get(struct vc4_perfmon *perfmon);
void vc4_perfmon_put(struct vc4_perfmon *perfmon);
void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon);
void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon,
		      bool capture);
struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id);
void vc4_perfmon_open_file(struct vc4_file *vc4file);
void vc4_perfmon_close_file(struct vc4_file *vc4file);
int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv);

#endif /* _VC4_DRV_H_ */