/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2012-2015, NVIDIA Corporation.
 */

#ifndef HOST1X_DEV_H
#define HOST1X_DEV_H

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irqreturn.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

#include "cdma.h"
#include "channel.h"
#include "context.h"
#include "intr.h"
#include "job.h"
#include "syncpt.h"

struct host1x_syncpt;
struct host1x_syncpt_base;
struct host1x_channel;
struct host1x_cdma;
struct host1x_job;
struct push_buffer;
struct output;
struct dentry;

struct host1x_channel_ops {
	int (*init)(struct host1x_channel *channel, struct host1x *host,
		    unsigned int id);
	int (*submit)(struct host1x_job *job);
};

struct host1x_cdma_ops {
	void (*start)(struct host1x_cdma *cdma);
	void (*stop)(struct host1x_cdma *cdma);
	void (*flush)(struct host1x_cdma *cdma);
	int (*timeout_init)(struct host1x_cdma *cdma);
	void (*timeout_destroy)(struct host1x_cdma *cdma);
	void (*freeze)(struct host1x_cdma *cdma);
	void (*resume)(struct host1x_cdma *cdma, u32 getptr);
	void (*timeout_cpu_incr)(struct host1x_cdma *cdma, u32 getptr,
				 u32 syncpt_incrs, u32 syncval, u32 nr_slots);
};

struct host1x_pushbuffer_ops {
	void (*init)(struct push_buffer *pb);
};

struct host1x_debug_ops {
	void (*debug_init)(struct dentry *de);
	void (*show_channel_cdma)(struct host1x *host,
				  struct host1x_channel *ch,
				  struct output *o);
	void (*show_channel_fifo)(struct host1x *host,
				  struct host1x_channel *ch,
				  struct output *o);
	void (*show_mlocks)(struct host1x *host, struct output *output);
};

struct host1x_syncpt_ops {
	void (*restore)(struct host1x_syncpt *syncpt);
	void (*restore_wait_base)(struct host1x_syncpt *syncpt);
	void (*load_wait_base)(struct host1x_syncpt *syncpt);
	u32 (*load)(struct host1x_syncpt *syncpt);
	int (*cpu_incr)(struct host1x_syncpt *syncpt);
	void (*assign_to_channel)(struct host1x_syncpt *syncpt,
				  struct host1x_channel *channel);
	void (*enable_protection)(struct host1x *host);
};

struct host1x_intr_ops {
	int (*init_host_sync)(struct host1x *host, u32 cpm);
	void (*set_syncpt_threshold)(
		struct host1x *host, unsigned int id, u32 thresh);
	void (*enable_syncpt_intr)(struct host1x *host, unsigned int id);
	void (*disable_syncpt_intr)(struct host1x *host, unsigned int id);
	void (*disable_all_syncpt_intrs)(struct host1x *host);
	int (*free_syncpt_irq)(struct host1x *host);
	irqreturn_t (*isr)(int irq, void *dev_id);
};

struct host1x_sid_entry {
	unsigned int base;
	unsigned int offset;
	unsigned int limit;
};

struct host1x_table_desc {
	unsigned int base;
	unsigned int count;
};

struct host1x_info {
	unsigned int nb_channels; /* host1x: number of channels supported */
	unsigned int nb_pts; /* host1x: number of syncpoints supported */
	unsigned int nb_bases; /* host1x: number of syncpoint bases supported */
	unsigned int nb_mlocks; /* host1x: number of mlocks supported */
	int (*init)(struct host1x *host1x); /* initialize per SoC ops */
	unsigned int sync_offset; /* offset of syncpoint registers */
	u64 dma_mask; /* mask of addressable memory */
	bool has_wide_gather; /* supports GATHER_W opcode */
	bool has_hypervisor; /* has hypervisor registers */
	bool has_common; /* has common registers separate from hypervisor */
	unsigned int num_sid_entries;
	const struct host1x_sid_entry *sid_table;
	struct host1x_table_desc streamid_vm_table;
	struct host1x_table_desc classid_vm_table;
	struct host1x_table_desc mmio_vm_table;
	/*
	 * On T20-T148, the boot chain may set up DC to increment syncpoints
	 * 26/27 on VBLANK. As such, we cannot use these syncpoints until
	 * the display driver disables VBLANK increments.
	 */
	bool reserve_vblank_syncpts;
	/*
	 * On Tegra186, secure world applications may require access to
	 * host1x during suspend/resume. To allow this, we need to leave
	 * host1x out of reset.
	 */
	bool skip_reset_assert;
};
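
/*
 * Illustrative sketch (not taken from any specific SoC): a per-SoC backend
 * typically provides a host1x_info instance whose ->init() callback installs
 * its ops tables into struct host1x below. The names host1x_example_init and
 * host1x_example_info, as well as the field values, are hypothetical
 * placeholders, not a description of real hardware.
 *
 *	static int host1x_example_init(struct host1x *host)
 *	{
 *		host->channel_op = &host1x_channel_ops;
 *		host->cdma_op = &host1x_cdma_ops;
 *		host->cdma_pb_op = &host1x_pushbuffer_ops;
 *		host->syncpt_op = &host1x_syncpt_ops;
 *		host->intr_op = &host1x_intr_ops;
 *		host->debug_op = &host1x_debug_ops;
 *
 *		return 0;
 *	}
 *
 *	static const struct host1x_info host1x_example_info = {
 *		.nb_channels = 8,
 *		.nb_pts = 32,
 *		.nb_bases = 8,
 *		.nb_mlocks = 16,
 *		.init = host1x_example_init,
 *		.sync_offset = 0x3000,
 *		.dma_mask = DMA_BIT_MASK(32),
 *	};
 */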

struct host1x {
	const struct host1x_info *info;

	void __iomem *regs;
	void __iomem *hv_regs; /* hypervisor region */
	void __iomem *common_regs;
	int syncpt_irqs[8];
	int num_syncpt_irqs;
	struct host1x_syncpt *syncpt;
	struct host1x_syncpt_base *bases;
	struct device *dev;
	struct clk *clk;
	struct reset_control_bulk_data resets[2];
	unsigned int nresets;

	struct iommu_group *group;
	struct iommu_domain *domain;
	struct iova_domain iova;
	dma_addr_t iova_end;

	struct mutex intr_mutex;

	const struct host1x_syncpt_ops *syncpt_op;
	const struct host1x_intr_ops *intr_op;
	const struct host1x_channel_ops *channel_op;
	const struct host1x_cdma_ops *cdma_op;
	const struct host1x_pushbuffer_ops *cdma_pb_op;
	const struct host1x_debug_ops *debug_op;

	struct host1x_syncpt *nop_sp;

	struct mutex syncpt_mutex;

	struct host1x_channel_list channel_list;
	struct host1x_memory_context_list context_list;

	struct dentry *debugfs;

	struct mutex devices_lock;
	struct list_head devices;

	struct list_head list;

	struct device_dma_parameters dma_parms;

	struct host1x_bo_cache cache;
};

void host1x_common_writel(struct host1x *host1x, u32 v, u32 r);
void host1x_hypervisor_writel(struct host1x *host1x, u32 v, u32 r);
u32 host1x_hypervisor_readl(struct host1x *host1x, u32 r);
void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r);
u32 host1x_sync_readl(struct host1x *host1x, u32 r);
void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r);
u32 host1x_ch_readl(struct host1x_channel *ch, u32 r);

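/*
 * Usage sketch (illustrative): the accessors above take the value before the
 * register offset. A per-SoC backend might load or restore a syncpoint value
 * roughly like this; HOST1X_SYNC_SYNCPT() stands in for whatever register
 * macro the backend actually uses and is an assumption here.
 *
 *	u32 live = host1x_sync_readl(host, HOST1X_SYNC_SYNCPT(sp->id));
 *	host1x_sync_writel(host, value, HOST1X_SYNC_SYNCPT(sp->id));
 */
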
static inline void host1x_hw_syncpt_restore(struct host1x *host,
					    struct host1x_syncpt *sp)
{
	host->syncpt_op->restore(sp);
}

static inline void host1x_hw_syncpt_restore_wait_base(struct host1x *host,
						       struct host1x_syncpt *sp)
{
	host->syncpt_op->restore_wait_base(sp);
}

static inline void host1x_hw_syncpt_load_wait_base(struct host1x *host,
						    struct host1x_syncpt *sp)
{
	host->syncpt_op->load_wait_base(sp);
}

static inline u32 host1x_hw_syncpt_load(struct host1x *host,
					struct host1x_syncpt *sp)
{
	return host->syncpt_op->load(sp);
}

static inline int host1x_hw_syncpt_cpu_incr(struct host1x *host,
					     struct host1x_syncpt *sp)
{
	return host->syncpt_op->cpu_incr(sp);
}

static inline void host1x_hw_syncpt_assign_to_channel(
	struct host1x *host, struct host1x_syncpt *sp,
	struct host1x_channel *ch)
{
	host->syncpt_op->assign_to_channel(sp, ch);
}

static inline void host1x_hw_syncpt_enable_protection(struct host1x *host)
{
	host->syncpt_op->enable_protection(host);
}
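
/*
 * Usage sketch (illustrative): core code dispatches to the per-SoC backend
 * exclusively through these wrappers. For example, a resume path might
 * rewrite the hardware syncpoint state from the shadow values:
 *
 *	for (i = 0; i < host1x_syncpt_nb_pts(host); i++)
 *		host1x_hw_syncpt_restore(host, &host->syncpt[i]);
 */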

static inline int host1x_hw_intr_init_host_sync(struct host1x *host, u32 cpm)
{
	return host->intr_op->init_host_sync(host, cpm);
}

static inline void host1x_hw_intr_set_syncpt_threshold(struct host1x *host,
							unsigned int id,
							u32 thresh)
{
	host->intr_op->set_syncpt_threshold(host, id, thresh);
}

static inline void host1x_hw_intr_enable_syncpt_intr(struct host1x *host,
						      unsigned int id)
{
	host->intr_op->enable_syncpt_intr(host, id);
}

static inline void host1x_hw_intr_disable_syncpt_intr(struct host1x *host,
						       unsigned int id)
{
	host->intr_op->disable_syncpt_intr(host, id);
}

static inline void host1x_hw_intr_disable_all_syncpt_intrs(struct host1x *host)
{
	host->intr_op->disable_all_syncpt_intrs(host);
}

static inline int host1x_hw_intr_free_syncpt_irq(struct host1x *host)
{
	return host->intr_op->free_syncpt_irq(host);
}

static inline int host1x_hw_channel_init(struct host1x *host,
					 struct host1x_channel *channel,
					 unsigned int id)
{
	return host->channel_op->init(channel, host, id);
}

static inline int host1x_hw_channel_submit(struct host1x *host,
					   struct host1x_job *job)
{
	return host->channel_op->submit(job);
}
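
/*
 * Illustrative call path (simplified): a client fills in a struct host1x_job
 * and submits it through the core, which dispatches to the per-SoC backend
 * via the wrapper above:
 *
 *	err = host1x_hw_channel_submit(host, job);
 *	if (err < 0)
 *		return err;
 */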

static inline void host1x_hw_cdma_start(struct host1x *host,
					struct host1x_cdma *cdma)
{
	host->cdma_op->start(cdma);
}

static inline void host1x_hw_cdma_stop(struct host1x *host,
				       struct host1x_cdma *cdma)
{
	host->cdma_op->stop(cdma);
}

static inline void host1x_hw_cdma_flush(struct host1x *host,
					struct host1x_cdma *cdma)
{
	host->cdma_op->flush(cdma);
}

static inline int host1x_hw_cdma_timeout_init(struct host1x *host,
					      struct host1x_cdma *cdma)
{
	return host->cdma_op->timeout_init(cdma);
}

static inline void host1x_hw_cdma_timeout_destroy(struct host1x *host,
						   struct host1x_cdma *cdma)
{
	host->cdma_op->timeout_destroy(cdma);
}

static inline void host1x_hw_cdma_freeze(struct host1x *host,
					 struct host1x_cdma *cdma)
{
	host->cdma_op->freeze(cdma);
}

static inline void host1x_hw_cdma_resume(struct host1x *host,
					 struct host1x_cdma *cdma, u32 getptr)
{
	host->cdma_op->resume(cdma, getptr);
}

static inline void host1x_hw_cdma_timeout_cpu_incr(struct host1x *host,
						    struct host1x_cdma *cdma,
						    u32 getptr,
						    u32 syncpt_incrs,
						    u32 syncval, u32 nr_slots)
{
	host->cdma_op->timeout_cpu_incr(cdma, getptr, syncpt_incrs, syncval,
					nr_slots);
}

static inline void host1x_hw_pushbuffer_init(struct host1x *host,
					     struct push_buffer *pb)
{
	host->cdma_pb_op->init(pb);
}

static inline void host1x_hw_debug_init(struct host1x *host, struct dentry *de)
{
	if (host->debug_op && host->debug_op->debug_init)
		host->debug_op->debug_init(de);
}

static inline void host1x_hw_show_channel_cdma(struct host1x *host,
					       struct host1x_channel *channel,
					       struct output *o)
{
	host->debug_op->show_channel_cdma(host, channel, o);
}

static inline void host1x_hw_show_channel_fifo(struct host1x *host,
					       struct host1x_channel *channel,
					       struct output *o)
{
	host->debug_op->show_channel_fifo(host, channel, o);
}

static inline void host1x_hw_show_mlocks(struct host1x *host, struct output *o)
{
	host->debug_op->show_mlocks(host, o);
}

extern struct platform_driver tegra_mipi_driver;

#endif