// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Job
 *
 * Copyright (c) 2010-2015, NVIDIA Corporation.
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <trace/events/host1x.h>

#include "channel.h"
#include "dev.h"
#include "job.h"
#include "syncpt.h"

#define HOST1X_WAIT_SYNCPT_OFFSET 0x8

struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
				    u32 num_cmdbufs, u32 num_relocs)
{
	struct host1x_job *job = NULL;
	unsigned int num_unpins = num_relocs;
	u64 total;
	void *mem;

	if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
		num_unpins += num_cmdbufs;

	/* Check that we're not going to overflow */
	total = sizeof(struct host1x_job) +
		(u64)num_relocs * sizeof(struct host1x_reloc) +
		(u64)num_unpins * sizeof(struct host1x_job_unpin_data) +
		(u64)num_cmdbufs * sizeof(struct host1x_job_gather) +
		(u64)num_unpins * sizeof(dma_addr_t) +
		(u64)num_unpins * sizeof(u32 *);
	if (total > ULONG_MAX)
		return NULL;

	mem = job = kzalloc(total, GFP_KERNEL);
	if (!job)
		return NULL;

	kref_init(&job->ref);
	job->channel = ch;

	/* Redistribute memory to the structs */
	mem += sizeof(struct host1x_job);
	job->relocs = num_relocs ? mem : NULL;
	mem += num_relocs * sizeof(struct host1x_reloc);
	job->unpins = num_unpins ? mem : NULL;
	mem += num_unpins * sizeof(struct host1x_job_unpin_data);
	job->gathers = num_cmdbufs ? mem : NULL;
	mem += num_cmdbufs * sizeof(struct host1x_job_gather);
	job->addr_phys = num_unpins ? mem : NULL;

	job->reloc_addr_phys = job->addr_phys;
	job->gather_addr_phys = &job->addr_phys[num_relocs];

	return job;
}
EXPORT_SYMBOL(host1x_job_alloc);
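
/*
 * Note on the allocation above: a single kzalloc() backs the job and all of
 * its variable-length arrays, carved up in the order
 *
 *	[ struct host1x_job | relocs | unpins | gathers | addr_phys ]
 *
 * reloc_addr_phys aliases the first num_relocs entries of addr_phys and
 * gather_addr_phys the remainder, which is why addr_phys is sized by
 * num_unpins (num_relocs plus, without the firewall, num_cmdbufs).
 */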

struct host1x_job *host1x_job_get(struct host1x_job *job)
{
	kref_get(&job->ref);
	return job;
}
EXPORT_SYMBOL(host1x_job_get);

static void job_free(struct kref *ref)
{
	struct host1x_job *job = container_of(ref, struct host1x_job, ref);

	kfree(job);
}

void host1x_job_put(struct host1x_job *job)
{
	kref_put(&job->ref, job_free);
}
EXPORT_SYMBOL(host1x_job_put);

void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
			   unsigned int words, unsigned int offset)
{
	struct host1x_job_gather *gather = &job->gathers[job->num_gathers];

	gather->words = words;
	gather->bo = bo;
	gather->offset = offset;

	job->num_gathers++;
}
EXPORT_SYMBOL(host1x_job_add_gather);
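
/*
 * A minimal sketch of how the entry points above chain together; "chan",
 * "bo", "num_words" and "dev" are hypothetical values owned by the
 * submitting driver:
 *
 *	struct host1x_job *job;
 *	int err;
 *
 *	job = host1x_job_alloc(chan, 1, num_relocs);
 *	if (!job)
 *		return -ENOMEM;
 *
 *	host1x_job_add_gather(job, bo, num_words, 0);
 *
 *	err = host1x_job_pin(job, dev);
 *	if (err) {
 *		host1x_job_put(job);
 *		return err;
 *	}
 *
 *	... submit to the channel, then drop the reference ...
 *
 *	host1x_job_put(job);
 */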

static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
{
	struct host1x_client *client = job->client;
	struct device *dev = client->dev;
	struct host1x_job_gather *g;
	struct iommu_domain *domain;
	unsigned int i;
	int err;

	domain = iommu_get_domain_for_dev(dev);
	job->num_unpins = 0;

	for (i = 0; i < job->num_relocs; i++) {
		struct host1x_reloc *reloc = &job->relocs[i];
		dma_addr_t phys_addr, *phys;
		struct sg_table *sgt;

		reloc->target.bo = host1x_bo_get(reloc->target.bo);
		if (!reloc->target.bo) {
			err = -EINVAL;
			goto unpin;
		}

		/*
		 * If the client device is not attached to an IOMMU, the
		 * physical address of the buffer object can be used.
		 *
		 * Similarly, when an IOMMU domain is shared between all
		 * host1x clients, the IOVA is already available, so no
		 * need to map the buffer object again.
		 *
		 * XXX Note that this isn't always safe to do because it
		 * relies on an assumption that no cache maintenance is
		 * needed on the buffer objects.
		 */
		if (!domain || client->group)
			phys = &phys_addr;
		else
			phys = NULL;

		sgt = host1x_bo_pin(dev, reloc->target.bo, phys);
		if (IS_ERR(sgt)) {
			err = PTR_ERR(sgt);
			goto unpin;
		}

		if (sgt) {
			unsigned long mask = HOST1X_RELOC_READ |
					     HOST1X_RELOC_WRITE;
			enum dma_data_direction dir;

			switch (reloc->flags & mask) {
			case HOST1X_RELOC_READ:
				dir = DMA_TO_DEVICE;
				break;

			case HOST1X_RELOC_WRITE:
				dir = DMA_FROM_DEVICE;
				break;

			case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE:
				dir = DMA_BIDIRECTIONAL;
				break;

			default:
				err = -EINVAL;
				goto unpin;
			}

			err = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
			if (!err) {
				err = -ENOMEM;
				goto unpin;
			}

			job->unpins[job->num_unpins].dev = dev;
			job->unpins[job->num_unpins].dir = dir;
			phys_addr = sg_dma_address(sgt->sgl);
		}

		job->addr_phys[job->num_unpins] = phys_addr;
		job->unpins[job->num_unpins].bo = reloc->target.bo;
		job->unpins[job->num_unpins].sgt = sgt;
		job->num_unpins++;
	}

	/*
	 * The contents of the gather buffers will be copied later, so there
	 * is no need to hold and pin them here.
	 */
	if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
		return 0;

	for (i = 0; i < job->num_gathers; i++) {
		size_t gather_size = 0;
		struct scatterlist *sg;
		struct sg_table *sgt;
		dma_addr_t phys_addr;
		unsigned long shift;
		struct iova *alloc;
		dma_addr_t *phys;
		unsigned int j;

		g = &job->gathers[i];
		g->bo = host1x_bo_get(g->bo);
		if (!g->bo) {
			err = -EINVAL;
			goto unpin;
		}

		/*
		 * If the host1x is not attached to an IOMMU, there is no need
		 * to map the buffer object for the host1x, since the physical
		 * address can simply be used.
		 */
		if (!iommu_get_domain_for_dev(host->dev))
			phys = &phys_addr;
		else
			phys = NULL;

		sgt = host1x_bo_pin(host->dev, g->bo, phys);
		if (IS_ERR(sgt)) {
			err = PTR_ERR(sgt);
			goto put;
		}

		if (host->domain) {
			for_each_sg(sgt->sgl, sg, sgt->nents, j)
				gather_size += sg->length;
			gather_size = iova_align(&host->iova, gather_size);

			shift = iova_shift(&host->iova);
			alloc = alloc_iova(&host->iova, gather_size >> shift,
					   host->iova_end >> shift, true);
			if (!alloc) {
				err = -ENOMEM;
				goto put;
			}

			err = iommu_map_sg(host->domain,
					   iova_dma_addr(&host->iova, alloc),
					   sgt->sgl, sgt->nents, IOMMU_READ);
			if (err == 0) {
				__free_iova(&host->iova, alloc);
				err = -EINVAL;
				goto put;
			}

			job->unpins[job->num_unpins].size = gather_size;
			phys_addr = iova_dma_addr(&host->iova, alloc);
		} else if (sgt) {
			err = dma_map_sg(host->dev, sgt->sgl, sgt->nents,
					 DMA_TO_DEVICE);
			if (!err) {
				err = -ENOMEM;
				goto put;
			}

			job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
			job->unpins[job->num_unpins].dev = host->dev;
			phys_addr = sg_dma_address(sgt->sgl);
		}

		job->addr_phys[job->num_unpins] = phys_addr;
		job->gather_addr_phys[i] = phys_addr;

		job->unpins[job->num_unpins].bo = g->bo;
		job->unpins[job->num_unpins].sgt = sgt;
		job->num_unpins++;
	}

	return 0;

put:
	host1x_bo_put(g->bo);
unpin:
	host1x_job_unpin(job);
	return err;
}

static int do_relocs(struct host1x_job *job, struct host1x_job_gather *g)
{
	void *cmdbuf_addr = NULL;
	struct host1x_bo *cmdbuf = g->bo;
	unsigned int i;

	/* pin & patch the relocs for one gather */
	for (i = 0; i < job->num_relocs; i++) {
		struct host1x_reloc *reloc = &job->relocs[i];
		u32 reloc_addr = (job->reloc_addr_phys[i] +
				  reloc->target.offset) >> reloc->shift;
		u32 *target;

		/* skip all other gathers */
		if (cmdbuf != reloc->cmdbuf.bo)
			continue;

		if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
			target = (u32 *)job->gather_copy_mapped +
					reloc->cmdbuf.offset / sizeof(u32) +
					g->offset / sizeof(u32);
			goto patch_reloc;
		}

		if (!cmdbuf_addr) {
			cmdbuf_addr = host1x_bo_mmap(cmdbuf);

			if (unlikely(!cmdbuf_addr)) {
				pr_err("Could not map cmdbuf for relocation\n");
				return -ENOMEM;
			}
		}

		target = cmdbuf_addr + reloc->cmdbuf.offset;
patch_reloc:
		*target = reloc_addr;
	}

	if (cmdbuf_addr)
		host1x_bo_munmap(cmdbuf, cmdbuf_addr);

	return 0;
}

static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
			unsigned int offset)
{
	offset *= sizeof(u32);

	if (reloc->cmdbuf.bo != cmdbuf || reloc->cmdbuf.offset != offset)
		return false;

	/* relocation shift value validation isn't implemented yet */
	if (reloc->shift)
		return false;

	return true;
}

struct host1x_firewall {
	struct host1x_job *job;
	struct device *dev;

	unsigned int num_relocs;
	struct host1x_reloc *reloc;

	struct host1x_bo *cmdbuf;
	unsigned int offset;

	u32 words;
	u32 class;
	u32 reg;
	u32 mask;
	u32 count;
};

static int check_register(struct host1x_firewall *fw, unsigned long offset)
{
	if (!fw->job->is_addr_reg)
		return 0;

	if (fw->job->is_addr_reg(fw->dev, fw->class, offset)) {
		if (!fw->num_relocs)
			return -EINVAL;

		if (!check_reloc(fw->reloc, fw->cmdbuf, fw->offset))
			return -EINVAL;

		fw->num_relocs--;
		fw->reloc++;
	}

	return 0;
}

static int check_class(struct host1x_firewall *fw, u32 class)
{
	if (!fw->job->is_valid_class) {
		if (fw->class != class)
			return -EINVAL;
	} else {
		if (!fw->job->is_valid_class(fw->class))
			return -EINVAL;
	}

	return 0;
}

static int check_mask(struct host1x_firewall *fw)
{
	u32 mask = fw->mask;
	u32 reg = fw->reg;
	int ret;

	while (mask) {
		if (fw->words == 0)
			return -EINVAL;

		if (mask & 1) {
			ret = check_register(fw, reg);
			if (ret < 0)
				return ret;

			fw->words--;
			fw->offset++;
		}
		mask >>= 1;
		reg++;
	}

	return 0;
}

static int check_incr(struct host1x_firewall *fw)
{
	u32 count = fw->count;
	u32 reg = fw->reg;
	int ret;

	while (count) {
		if (fw->words == 0)
			return -EINVAL;

		ret = check_register(fw, reg);
		if (ret < 0)
			return ret;

		reg++;
		fw->words--;
		fw->offset++;
		count--;
	}

	return 0;
}

static int check_nonincr(struct host1x_firewall *fw)
{
	u32 count = fw->count;
	int ret;

	while (count) {
		if (fw->words == 0)
			return -EINVAL;

		ret = check_register(fw, fw->reg);
		if (ret < 0)
			return ret;

		fw->words--;
		fw->offset++;
		count--;
	}

	return 0;
}

static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
{
	u32 *cmdbuf_base = (u32 *)fw->job->gather_copy_mapped +
		(g->offset / sizeof(u32));
	u32 job_class = fw->class;
	int err = 0;

	fw->words = g->words;
	fw->cmdbuf = g->bo;
	fw->offset = 0;

	while (fw->words && !err) {
		u32 word = cmdbuf_base[fw->offset];
		u32 opcode = (word & 0xf0000000) >> 28;

		fw->mask = 0;
		fw->reg = 0;
		fw->count = 0;
		fw->words--;
		fw->offset++;

		switch (opcode) {
		case 0:
			/* SETCLASS */
			fw->class = word >> 6 & 0x3ff;
			fw->mask = word & 0x3f;
			fw->reg = word >> 16 & 0xfff;
			err = check_class(fw, job_class);
			if (!err)
				err = check_mask(fw);
			if (err)
				goto out;
			break;
		case 1:
			/* INCR */
			fw->reg = word >> 16 & 0xfff;
			fw->count = word & 0xffff;
			err = check_incr(fw);
			if (err)
				goto out;
			break;

		case 2:
			/* NONINCR */
			fw->reg = word >> 16 & 0xfff;
			fw->count = word & 0xffff;
			err = check_nonincr(fw);
			if (err)
				goto out;
			break;

		case 3:
			/* MASK */
			fw->mask = word & 0xffff;
			fw->reg = word >> 16 & 0xfff;
			err = check_mask(fw);
			if (err)
				goto out;
			break;
		case 4:
		case 14:
			/* IMM and EXTEND: single-word opcodes, nothing to validate */
			break;
		default:
			err = -EINVAL;
			break;
		}
	}

out:
	return err;
}
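
/*
 * Worked example of the decode above: the word 0x10010003 has opcode 1
 * (INCR), reg 0x001 and count 3, so check_incr() validates the three
 * following data words as writes to registers 0x001, 0x002 and 0x003.
 */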

static inline int copy_gathers(struct device *host, struct host1x_job *job,
			       struct device *dev)
{
	struct host1x_firewall fw;
	size_t size = 0;
	size_t offset = 0;
	unsigned int i;

	fw.job = job;
	fw.dev = dev;
	fw.reloc = job->relocs;
	fw.num_relocs = job->num_relocs;
	fw.class = job->class;

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];

		size += g->words * sizeof(u32);
	}

	/*
	 * Try a non-blocking allocation from the higher-priority pools first,
	 * since waiting for an allocation here is a major performance hit.
	 */
	job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy,
					       GFP_NOWAIT);

	/* the higher-priority allocation failed, try the generic, blocking one */
	if (!job->gather_copy_mapped)
		job->gather_copy_mapped = dma_alloc_wc(host, size,
						       &job->gather_copy,
						       GFP_KERNEL);
	if (!job->gather_copy_mapped)
		return -ENOMEM;

	job->gather_copy_size = size;

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];
		void *gather;

		/* Copy the gather */
		gather = host1x_bo_mmap(g->bo);
		memcpy(job->gather_copy_mapped + offset, gather + g->offset,
		       g->words * sizeof(u32));
		host1x_bo_munmap(g->bo, gather);

		/* Store the location in the buffer */
		g->base = job->gather_copy;
		g->offset = offset;

		/* Validate the job */
		if (validate(&fw, g))
			return -EINVAL;

		offset += g->words * sizeof(u32);
	}

	/* No relocs should remain at this point */
	if (fw.num_relocs)
		return -EINVAL;

	return 0;
}

int host1x_job_pin(struct host1x_job *job, struct device *dev)
{
	int err;
	unsigned int i, j;
	struct host1x *host = dev_get_drvdata(dev->parent);

	/* pin memory */
	err = pin_job(host, job);
	if (err)
		goto out;

	if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
		err = copy_gathers(host->dev, job, dev);
		if (err)
			goto out;
	}

	/* patch gathers */
	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];

		/* process each gather mem only once */
		if (g->handled)
			continue;

		/* copy_gathers() sets gathers base if firewall is enabled */
		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
			g->base = job->gather_addr_phys[i];

		for (j = i + 1; j < job->num_gathers; j++) {
			if (job->gathers[j].bo == g->bo) {
				job->gathers[j].handled = true;
				job->gathers[j].base = g->base;
			}
		}

		err = do_relocs(job, g);
		if (err)
			break;
	}

out:
	if (err)
		host1x_job_unpin(job);
	wmb();

	return err;
}
EXPORT_SYMBOL(host1x_job_pin);

void host1x_job_unpin(struct host1x_job *job)
{
	struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
	unsigned int i;

	for (i = 0; i < job->num_unpins; i++) {
		struct host1x_job_unpin_data *unpin = &job->unpins[i];
		struct device *dev = unpin->dev ?: host->dev;
		struct sg_table *sgt = unpin->sgt;

		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) &&
		    unpin->size && host->domain) {
			iommu_unmap(host->domain, job->addr_phys[i],
				    unpin->size);
			free_iova(&host->iova,
				  iova_pfn(&host->iova, job->addr_phys[i]));
		}

		if (unpin->dev && sgt)
			dma_unmap_sg(unpin->dev, sgt->sgl, sgt->nents,
				     unpin->dir);

		host1x_bo_unpin(dev, unpin->bo, sgt);
		host1x_bo_put(unpin->bo);
	}

	job->num_unpins = 0;

	if (job->gather_copy_size)
		dma_free_wc(host->dev, job->gather_copy_size,
			    job->gather_copy_mapped, job->gather_copy);
}
EXPORT_SYMBOL(host1x_job_unpin);

/*
 * Debug routine used to dump job entries
 */
void host1x_job_dump(struct device *dev, struct host1x_job *job)
{
	dev_dbg(dev, "    SYNCPT_ID   %d\n", job->syncpt_id);
	dev_dbg(dev, "    SYNCPT_VAL  %d\n", job->syncpt_end);
	dev_dbg(dev, "    FIRST_GET   0x%x\n", job->first_get);
	dev_dbg(dev, "    TIMEOUT     %d\n", job->timeout);
	dev_dbg(dev, "    NUM_SLOTS   %d\n", job->num_slots);
	dev_dbg(dev, "    NUM_HANDLES %d\n", job->num_unpins);
}