v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Tegra host1x Job
  4 *
  5 * Copyright (c) 2010-2015, NVIDIA Corporation.
  6 */
  7
  8#include <linux/dma-mapping.h>
  9#include <linux/err.h>
 10#include <linux/host1x.h>
 11#include <linux/iommu.h>
 12#include <linux/kref.h>
 13#include <linux/module.h>
 14#include <linux/scatterlist.h>
 15#include <linux/slab.h>
 16#include <linux/vmalloc.h>
 17#include <trace/events/host1x.h>
 18
 19#include "channel.h"
 20#include "dev.h"
 21#include "job.h"
 22#include "syncpt.h"
 23
 24#define HOST1X_WAIT_SYNCPT_OFFSET 0x8
 25
 26struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
 27				    u32 num_cmdbufs, u32 num_relocs)
 28{
 29	struct host1x_job *job = NULL;
 30	unsigned int num_unpins = num_relocs;
 31	u64 total;
 32	void *mem;
 33
 34	if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
 35		num_unpins += num_cmdbufs;
 36
 37	/* Check that we're not going to overflow */
 38	total = sizeof(struct host1x_job) +
 39		(u64)num_relocs * sizeof(struct host1x_reloc) +
 40		(u64)num_unpins * sizeof(struct host1x_job_unpin_data) +
 41		(u64)num_cmdbufs * sizeof(struct host1x_job_gather) +
 42		(u64)num_unpins * sizeof(dma_addr_t) +
 43		(u64)num_unpins * sizeof(u32 *);
 44	if (total > ULONG_MAX)
 45		return NULL;
 46
 47	mem = job = kzalloc(total, GFP_KERNEL);
 48	if (!job)
 49		return NULL;
 50
 51	kref_init(&job->ref);
 52	job->channel = ch;
 53
 54	/* Redistribute memory to the structs */
 55	mem += sizeof(struct host1x_job);
 56	job->relocs = num_relocs ? mem : NULL;
 57	mem += num_relocs * sizeof(struct host1x_reloc);
 58	job->unpins = num_unpins ? mem : NULL;
 59	mem += num_unpins * sizeof(struct host1x_job_unpin_data);
 60	job->gathers = num_cmdbufs ? mem : NULL;
 61	mem += num_cmdbufs * sizeof(struct host1x_job_gather);
 62	job->addr_phys = num_unpins ? mem : NULL;
 63
 64	job->reloc_addr_phys = job->addr_phys;
 65	job->gather_addr_phys = &job->addr_phys[num_relocs];
 66
 67	return job;
 68}
 69EXPORT_SYMBOL(host1x_job_alloc);
 70
 71struct host1x_job *host1x_job_get(struct host1x_job *job)
 72{
 73	kref_get(&job->ref);
 74	return job;
 75}
 76EXPORT_SYMBOL(host1x_job_get);
 77
 78static void job_free(struct kref *ref)
 79{
 80	struct host1x_job *job = container_of(ref, struct host1x_job, ref);
 81
 82	kfree(job);
 83}
 84
 85void host1x_job_put(struct host1x_job *job)
 86{
 87	kref_put(&job->ref, job_free);
 88}
 89EXPORT_SYMBOL(host1x_job_put);
 90
 91void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
 92			   unsigned int words, unsigned int offset)
 93{
 94	struct host1x_job_gather *gather = &job->gathers[job->num_gathers];
 95
 96	gather->words = words;
 97	gather->bo = bo;
 98	gather->offset = offset;
 99
100	job->num_gathers++;
101}
102EXPORT_SYMBOL(host1x_job_add_gather);
103
104static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
105{
106	struct host1x_client *client = job->client;
107	struct device *dev = client->dev;
108	struct host1x_job_gather *g;
109	struct iommu_domain *domain;
110	unsigned int i;
111	int err;
112
113	domain = iommu_get_domain_for_dev(dev);
114	job->num_unpins = 0;
115
116	for (i = 0; i < job->num_relocs; i++) {
117		struct host1x_reloc *reloc = &job->relocs[i];
118		dma_addr_t phys_addr, *phys;
119		struct sg_table *sgt;
120
121		reloc->target.bo = host1x_bo_get(reloc->target.bo);
122		if (!reloc->target.bo) {
123			err = -EINVAL;
124			goto unpin;
125		}
126
127		/*
128		 * If the client device is not attached to an IOMMU, the
129		 * physical address of the buffer object can be used.
130		 *
131		 * Similarly, when an IOMMU domain is shared between all
132		 * host1x clients, the IOVA is already available, so no
133		 * need to map the buffer object again.
134		 *
135		 * XXX Note that this isn't always safe to do because it
136		 * relies on an assumption that no cache maintenance is
137		 * needed on the buffer objects.
138		 */
139		if (!domain || client->group)
140			phys = &phys_addr;
141		else
142			phys = NULL;
143
144		sgt = host1x_bo_pin(dev, reloc->target.bo, phys);
145		if (IS_ERR(sgt)) {
146			err = PTR_ERR(sgt);
147			goto unpin;
148		}
149
150		if (sgt) {
151			unsigned long mask = HOST1X_RELOC_READ |
152					     HOST1X_RELOC_WRITE;
153			enum dma_data_direction dir;
154
155			switch (reloc->flags & mask) {
156			case HOST1X_RELOC_READ:
157				dir = DMA_TO_DEVICE;
158				break;
159
160			case HOST1X_RELOC_WRITE:
161				dir = DMA_FROM_DEVICE;
162				break;
163
164			case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE:
165				dir = DMA_BIDIRECTIONAL;
166				break;
167
168			default:
169				err = -EINVAL;
170				goto unpin;
171			}
172
173			err = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
174			if (!err) {
175				err = -ENOMEM;
176				goto unpin;
177			}
178
179			job->unpins[job->num_unpins].dev = dev;
180			job->unpins[job->num_unpins].dir = dir;
181			phys_addr = sg_dma_address(sgt->sgl);
182		}
183
184		job->addr_phys[job->num_unpins] = phys_addr;
185		job->unpins[job->num_unpins].bo = reloc->target.bo;
186		job->unpins[job->num_unpins].sgt = sgt;
187		job->num_unpins++;
188	}
189
190	/*
191	 * We will copy the gather buffers' contents later, so there is no
192	 * need to hold and pin them.
193	 */
194	if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
195		return 0;
196
197	for (i = 0; i < job->num_gathers; i++) {
198		size_t gather_size = 0;
199		struct scatterlist *sg;
200		struct sg_table *sgt;
201		dma_addr_t phys_addr;
202		unsigned long shift;
203		struct iova *alloc;
204		dma_addr_t *phys;
205		unsigned int j;
206
207		g = &job->gathers[i];
208		g->bo = host1x_bo_get(g->bo);
209		if (!g->bo) {
210			err = -EINVAL;
211			goto unpin;
212		}
213
214		/*
215		 * If the host1x is not attached to an IOMMU, there is no need
216		 * to map the buffer object for the host1x, since the physical
217		 * address can simply be used.
218		 */
219		if (!iommu_get_domain_for_dev(host->dev))
220			phys = &phys_addr;
221		else
222			phys = NULL;
223
224		sgt = host1x_bo_pin(host->dev, g->bo, phys);
225		if (IS_ERR(sgt)) {
226			err = PTR_ERR(sgt);
227			goto put;
228		}
229
230		if (host->domain) {
231			for_each_sg(sgt->sgl, sg, sgt->nents, j)
232				gather_size += sg->length;
233			gather_size = iova_align(&host->iova, gather_size);
234
235			shift = iova_shift(&host->iova);
236			alloc = alloc_iova(&host->iova, gather_size >> shift,
237					   host->iova_end >> shift, true);
238			if (!alloc) {
239				err = -ENOMEM;
240				goto put;
241			}
242
243			err = iommu_map_sg(host->domain,
244					iova_dma_addr(&host->iova, alloc),
245					sgt->sgl, sgt->nents, IOMMU_READ);
246			if (err == 0) {
247				__free_iova(&host->iova, alloc);
248				err = -EINVAL;
249				goto put;
250			}
251
252			job->unpins[job->num_unpins].size = gather_size;
253			phys_addr = iova_dma_addr(&host->iova, alloc);
254		} else if (sgt) {
255			err = dma_map_sg(host->dev, sgt->sgl, sgt->nents,
256					 DMA_TO_DEVICE);
257			if (!err) {
258				err = -ENOMEM;
259				goto put;
260			}
261
262			job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
263			job->unpins[job->num_unpins].dev = host->dev;
264			phys_addr = sg_dma_address(sgt->sgl);
265		}
266
267		job->addr_phys[job->num_unpins] = phys_addr;
268		job->gather_addr_phys[i] = phys_addr;
269
270		job->unpins[job->num_unpins].bo = g->bo;
271		job->unpins[job->num_unpins].sgt = sgt;
272		job->num_unpins++;
273	}
274
275	return 0;
276
277put:
278	host1x_bo_put(g->bo);
279unpin:
280	host1x_job_unpin(job);
281	return err;
282}
283
284static int do_relocs(struct host1x_job *job, struct host1x_job_gather *g)
285{
286	void *cmdbuf_addr = NULL;
287	struct host1x_bo *cmdbuf = g->bo;
288	unsigned int i;
289
290	/* pin & patch the relocs for one gather */
291	for (i = 0; i < job->num_relocs; i++) {
292		struct host1x_reloc *reloc = &job->relocs[i];
293		u32 reloc_addr = (job->reloc_addr_phys[i] +
294				  reloc->target.offset) >> reloc->shift;
295		u32 *target;
296
297		/* skip all other gathers */
298		if (cmdbuf != reloc->cmdbuf.bo)
299			continue;
300
301		if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
302			target = (u32 *)job->gather_copy_mapped +
303					reloc->cmdbuf.offset / sizeof(u32) +
304						g->offset / sizeof(u32);
305			goto patch_reloc;
306		}
307
308		if (!cmdbuf_addr) {
309			cmdbuf_addr = host1x_bo_mmap(cmdbuf);
310
311			if (unlikely(!cmdbuf_addr)) {
312				pr_err("Could not map cmdbuf for relocation\n");
313				return -ENOMEM;
314			}
315		}
316
317		target = cmdbuf_addr + reloc->cmdbuf.offset;
318patch_reloc:
319		*target = reloc_addr;
320	}
321
322	if (cmdbuf_addr)
323		host1x_bo_munmap(cmdbuf, cmdbuf_addr);
324
325	return 0;
326}
327
328static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
329			unsigned int offset)
330{
331	offset *= sizeof(u32);
332
333	if (reloc->cmdbuf.bo != cmdbuf || reloc->cmdbuf.offset != offset)
334		return false;
335
336	/* relocation shift value validation isn't implemented yet */
337	if (reloc->shift)
338		return false;
339
340	return true;
341}
342
343struct host1x_firewall {
344	struct host1x_job *job;
345	struct device *dev;
346
347	unsigned int num_relocs;
348	struct host1x_reloc *reloc;
349
350	struct host1x_bo *cmdbuf;
351	unsigned int offset;
352
353	u32 words;
354	u32 class;
355	u32 reg;
356	u32 mask;
357	u32 count;
358};
359
360static int check_register(struct host1x_firewall *fw, unsigned long offset)
361{
362	if (!fw->job->is_addr_reg)
363		return 0;
364
365	if (fw->job->is_addr_reg(fw->dev, fw->class, offset)) {
366		if (!fw->num_relocs)
367			return -EINVAL;
368
369		if (!check_reloc(fw->reloc, fw->cmdbuf, fw->offset))
370			return -EINVAL;
371
372		fw->num_relocs--;
373		fw->reloc++;
374	}
375
376	return 0;
377}
378
379static int check_class(struct host1x_firewall *fw, u32 class)
380{
381	if (!fw->job->is_valid_class) {
382		if (fw->class != class)
383			return -EINVAL;
384	} else {
385		if (!fw->job->is_valid_class(fw->class))
386			return -EINVAL;
387	}
388
389	return 0;
390}
391
392static int check_mask(struct host1x_firewall *fw)
393{
394	u32 mask = fw->mask;
395	u32 reg = fw->reg;
396	int ret;
397
398	while (mask) {
399		if (fw->words == 0)
400			return -EINVAL;
401
402		if (mask & 1) {
403			ret = check_register(fw, reg);
404			if (ret < 0)
405				return ret;
406
407			fw->words--;
408			fw->offset++;
409		}
410		mask >>= 1;
411		reg++;
412	}
413
414	return 0;
415}
416
417static int check_incr(struct host1x_firewall *fw)
418{
419	u32 count = fw->count;
420	u32 reg = fw->reg;
421	int ret;
422
423	while (count) {
424		if (fw->words == 0)
425			return -EINVAL;
426
427		ret = check_register(fw, reg);
428		if (ret < 0)
429			return ret;
430
431		reg++;
432		fw->words--;
433		fw->offset++;
434		count--;
435	}
436
437	return 0;
438}
439
440static int check_nonincr(struct host1x_firewall *fw)
441{
442	u32 count = fw->count;
443	int ret;
444
445	while (count) {
446		if (fw->words == 0)
447			return -EINVAL;
448
449		ret = check_register(fw, fw->reg);
450		if (ret < 0)
451			return ret;
452
453		fw->words--;
454		fw->offset++;
455		count--;
456	}
457
458	return 0;
459}
460
461static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
462{
463	u32 *cmdbuf_base = (u32 *)fw->job->gather_copy_mapped +
464		(g->offset / sizeof(u32));
465	u32 job_class = fw->class;
466	int err = 0;
467
468	fw->words = g->words;
469	fw->cmdbuf = g->bo;
470	fw->offset = 0;
471
472	while (fw->words && !err) {
473		u32 word = cmdbuf_base[fw->offset];
474		u32 opcode = (word & 0xf0000000) >> 28;
475
476		fw->mask = 0;
477		fw->reg = 0;
478		fw->count = 0;
479		fw->words--;
480		fw->offset++;
481
482		switch (opcode) {
483		case 0:
484			fw->class = word >> 6 & 0x3ff;
485			fw->mask = word & 0x3f;
486			fw->reg = word >> 16 & 0xfff;
487			err = check_class(fw, job_class);
488			if (!err)
489				err = check_mask(fw);
490			if (err)
491				goto out;
492			break;
493		case 1:
494			fw->reg = word >> 16 & 0xfff;
495			fw->count = word & 0xffff;
496			err = check_incr(fw);
497			if (err)
498				goto out;
499			break;
500
501		case 2:
502			fw->reg = word >> 16 & 0xfff;
503			fw->count = word & 0xffff;
504			err = check_nonincr(fw);
505			if (err)
506				goto out;
507			break;
508
509		case 3:
510			fw->mask = word & 0xffff;
511			fw->reg = word >> 16 & 0xfff;
512			err = check_mask(fw);
513			if (err)
514				goto out;
515			break;
516		case 4:
517		case 14:
518			break;
519		default:
520			err = -EINVAL;
521			break;
522		}
523	}
524
525out:
526	return err;
527}
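
A worked example of the decode loop above, using an illustrative word rather than one from a real command stream: 0x10500003 has opcode (word >> 28) == 1, so fw->reg becomes 0x050 and fw->count becomes 3, and check_incr() validates registers 0x50 through 0x52 while consuming three payload words. A class change is only accepted through opcode 0, where check_class() either compares the new class against the job's class or defers to the client's is_valid_class() callback.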
528
529static inline int copy_gathers(struct device *host, struct host1x_job *job,
530			       struct device *dev)
531{
532	struct host1x_firewall fw;
533	size_t size = 0;
534	size_t offset = 0;
535	unsigned int i;
536
537	fw.job = job;
538	fw.dev = dev;
539	fw.reloc = job->relocs;
540	fw.num_relocs = job->num_relocs;
541	fw.class = job->class;
542
543	for (i = 0; i < job->num_gathers; i++) {
544		struct host1x_job_gather *g = &job->gathers[i];
545
546		size += g->words * sizeof(u32);
547	}
548
549	/*
550	 * Try a non-blocking allocation from a higher-priority pool first,
551	 * as waiting for the allocation here is a major performance hit.
552	 */
553	job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy,
554					       GFP_NOWAIT);
555
556	/* the higher-priority allocation failed, fall back to a blocking one */
557	if (!job->gather_copy_mapped)
558		job->gather_copy_mapped = dma_alloc_wc(host, size,
559						       &job->gather_copy,
560						       GFP_KERNEL);
561	if (!job->gather_copy_mapped)
562		return -ENOMEM;
563
564	job->gather_copy_size = size;
565
566	for (i = 0; i < job->num_gathers; i++) {
567		struct host1x_job_gather *g = &job->gathers[i];
568		void *gather;
569
570		/* Copy the gather */
571		gather = host1x_bo_mmap(g->bo);
572		memcpy(job->gather_copy_mapped + offset, gather + g->offset,
573		       g->words * sizeof(u32));
574		host1x_bo_munmap(g->bo, gather);
575
576		/* Store the location in the buffer */
577		g->base = job->gather_copy;
578		g->offset = offset;
579
580		/* Validate the job */
581		if (validate(&fw, g))
582			return -EINVAL;
583
584		offset += g->words * sizeof(u32);
585	}
586
587	/* No relocs should remain at this point */
588	if (fw.num_relocs)
589		return -EINVAL;
590
591	return 0;
592}
593
594int host1x_job_pin(struct host1x_job *job, struct device *dev)
595{
596	int err;
597	unsigned int i, j;
598	struct host1x *host = dev_get_drvdata(dev->parent);
599
600	/* pin memory */
601	err = pin_job(host, job);
602	if (err)
603		goto out;
604
605	if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
606		err = copy_gathers(host->dev, job, dev);
607		if (err)
608			goto out;
609	}
610
611	/* patch gathers */
612	for (i = 0; i < job->num_gathers; i++) {
613		struct host1x_job_gather *g = &job->gathers[i];
614
615		/* process each gather buffer only once */
616		if (g->handled)
617			continue;
618
619		/* copy_gathers() sets the gather's base if the firewall is enabled */
620		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
621			g->base = job->gather_addr_phys[i];
622
623		for (j = i + 1; j < job->num_gathers; j++) {
624			if (job->gathers[j].bo == g->bo) {
625				job->gathers[j].handled = true;
626				job->gathers[j].base = g->base;
627			}
628		}
629
630		err = do_relocs(job, g);
631		if (err)
632			break;
633	}
634
635out:
636	if (err)
637		host1x_job_unpin(job);
638	wmb();
639
640	return err;
641}
642EXPORT_SYMBOL(host1x_job_pin);
643
644void host1x_job_unpin(struct host1x_job *job)
645{
646	struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
647	unsigned int i;
648
649	for (i = 0; i < job->num_unpins; i++) {
650		struct host1x_job_unpin_data *unpin = &job->unpins[i];
651		struct device *dev = unpin->dev ?: host->dev;
652		struct sg_table *sgt = unpin->sgt;
653
654		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) &&
655		    unpin->size && host->domain) {
656			iommu_unmap(host->domain, job->addr_phys[i],
657				    unpin->size);
658			free_iova(&host->iova,
659				iova_pfn(&host->iova, job->addr_phys[i]));
660		}
661
662		if (unpin->dev && sgt)
663			dma_unmap_sg(unpin->dev, sgt->sgl, sgt->nents,
664				     unpin->dir);
665
666		host1x_bo_unpin(dev, unpin->bo, sgt);
667		host1x_bo_put(unpin->bo);
668	}
669
670	job->num_unpins = 0;
671
672	if (job->gather_copy_size)
673		dma_free_wc(host->dev, job->gather_copy_size,
674			    job->gather_copy_mapped, job->gather_copy);
675}
676EXPORT_SYMBOL(host1x_job_unpin);
677
678/*
679 * Debug routine used to dump job entries
680 */
681void host1x_job_dump(struct device *dev, struct host1x_job *job)
682{
683	dev_dbg(dev, "    SYNCPT_ID   %d\n", job->syncpt_id);
684	dev_dbg(dev, "    SYNCPT_VAL  %d\n", job->syncpt_end);
685	dev_dbg(dev, "    FIRST_GET   0x%x\n", job->first_get);
686	dev_dbg(dev, "    TIMEOUT     %d\n", job->timeout);
687	dev_dbg(dev, "    NUM_SLOTS   %d\n", job->num_slots);
688	dev_dbg(dev, "    NUM_HANDLES %d\n", job->num_unpins);
689}
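
The exported entry points above combine into a simple submit flow: size the job for its command buffers and relocations, attach the gathers, pin everything, hand the job to the hardware, then drop the reference. The sketch below is a minimal illustration under stated assumptions, not driver code: example_submit() and its single-gather setup are invented, error handling is condensed, and a real driver would also pick a syncpoint and a timeout before submitting (for example via host1x_channel_submit()).

#include <linux/errno.h>
#include <linux/host1x.h>

/* Hypothetical helper showing the intended call order of the job API. */
static int example_submit(struct host1x_channel *ch,
			  struct host1x_client *client,
			  struct host1x_bo *bo, unsigned int words)
{
	struct host1x_job *job;
	int err;

	/* room for one command buffer and no relocations */
	job = host1x_job_alloc(ch, 1, 0);
	if (!job)
		return -ENOMEM;

	job->client = client;		/* pin_job() needs client->dev */
	host1x_job_add_gather(job, bo, words, 0);

	/* pins the BOs, maps them for DMA and patches relocations */
	err = host1x_job_pin(job, client->dev);
	if (err)
		goto put;

	/* ... submit to the channel and wait for completion here ... */

	host1x_job_unpin(job);
put:
	host1x_job_put(job);
	return err;
}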
v5.14.15
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Tegra host1x Job
  4 *
  5 * Copyright (c) 2010-2015, NVIDIA Corporation.
  6 */
  7
  8#include <linux/dma-mapping.h>
  9#include <linux/err.h>
 10#include <linux/host1x.h>
 11#include <linux/iommu.h>
 12#include <linux/kref.h>
 13#include <linux/module.h>
 14#include <linux/scatterlist.h>
 15#include <linux/slab.h>
 16#include <linux/vmalloc.h>
 17#include <trace/events/host1x.h>
 18
 19#include "channel.h"
 20#include "dev.h"
 21#include "job.h"
 22#include "syncpt.h"
 23
 24#define HOST1X_WAIT_SYNCPT_OFFSET 0x8
 25
 26struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
 27				    u32 num_cmdbufs, u32 num_relocs)
 28{
 29	struct host1x_job *job = NULL;
 30	unsigned int num_unpins = num_relocs;
 31	u64 total;
 32	void *mem;
 33
 34	if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
 35		num_unpins += num_cmdbufs;
 36
 37	/* Check that we're not going to overflow */
 38	total = sizeof(struct host1x_job) +
 39		(u64)num_relocs * sizeof(struct host1x_reloc) +
 40		(u64)num_unpins * sizeof(struct host1x_job_unpin_data) +
 41		(u64)num_cmdbufs * sizeof(struct host1x_job_gather) +
 42		(u64)num_unpins * sizeof(dma_addr_t) +
 43		(u64)num_unpins * sizeof(u32 *);
 44	if (total > ULONG_MAX)
 45		return NULL;
 46
 47	mem = job = kzalloc(total, GFP_KERNEL);
 48	if (!job)
 49		return NULL;
 50
 51	kref_init(&job->ref);
 52	job->channel = ch;
 53
 54	/* Redistribute memory to the structs */
 55	mem += sizeof(struct host1x_job);
 56	job->relocs = num_relocs ? mem : NULL;
 57	mem += num_relocs * sizeof(struct host1x_reloc);
 58	job->unpins = num_unpins ? mem : NULL;
 59	mem += num_unpins * sizeof(struct host1x_job_unpin_data);
 60	job->gathers = num_cmdbufs ? mem : NULL;
 61	mem += num_cmdbufs * sizeof(struct host1x_job_gather);
 62	job->addr_phys = num_unpins ? mem : NULL;
 63
 64	job->reloc_addr_phys = job->addr_phys;
 65	job->gather_addr_phys = &job->addr_phys[num_relocs];
 66
 67	return job;
 68}
 69EXPORT_SYMBOL(host1x_job_alloc);
 70
 71struct host1x_job *host1x_job_get(struct host1x_job *job)
 72{
 73	kref_get(&job->ref);
 74	return job;
 75}
 76EXPORT_SYMBOL(host1x_job_get);
 77
 78static void job_free(struct kref *ref)
 79{
 80	struct host1x_job *job = container_of(ref, struct host1x_job, ref);
 81
 82	if (job->syncpt)
 83		host1x_syncpt_put(job->syncpt);
 84
 85	kfree(job);
 86}
 87
 88void host1x_job_put(struct host1x_job *job)
 89{
 90	kref_put(&job->ref, job_free);
 91}
 92EXPORT_SYMBOL(host1x_job_put);
 93
 94void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
 95			   unsigned int words, unsigned int offset)
 96{
 97	struct host1x_job_gather *gather = &job->gathers[job->num_gathers];
 98
 99	gather->words = words;
100	gather->bo = bo;
101	gather->offset = offset;
102
103	job->num_gathers++;
104}
105EXPORT_SYMBOL(host1x_job_add_gather);
106
107static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
108{
109	struct host1x_client *client = job->client;
110	struct device *dev = client->dev;
111	struct host1x_job_gather *g;
112	struct iommu_domain *domain;
113	unsigned int i;
114	int err;
115
116	domain = iommu_get_domain_for_dev(dev);
117	job->num_unpins = 0;
118
119	for (i = 0; i < job->num_relocs; i++) {
120		struct host1x_reloc *reloc = &job->relocs[i];
121		dma_addr_t phys_addr, *phys;
122		struct sg_table *sgt;
123
124		reloc->target.bo = host1x_bo_get(reloc->target.bo);
125		if (!reloc->target.bo) {
126			err = -EINVAL;
127			goto unpin;
128		}
129
130		/*
131		 * If the client device is not attached to an IOMMU, the
132		 * physical address of the buffer object can be used.
133		 *
134		 * Similarly, when an IOMMU domain is shared between all
135		 * host1x clients, the IOVA is already available, so no
136		 * need to map the buffer object again.
137		 *
138		 * XXX Note that this isn't always safe to do because it
139		 * relies on an assumption that no cache maintenance is
140		 * needed on the buffer objects.
141		 */
142		if (!domain || client->group)
143			phys = &phys_addr;
144		else
145			phys = NULL;
146
147		sgt = host1x_bo_pin(dev, reloc->target.bo, phys);
148		if (IS_ERR(sgt)) {
149			err = PTR_ERR(sgt);
150			goto unpin;
151		}
152
153		if (sgt) {
154			unsigned long mask = HOST1X_RELOC_READ |
155					     HOST1X_RELOC_WRITE;
156			enum dma_data_direction dir;
157
158			switch (reloc->flags & mask) {
159			case HOST1X_RELOC_READ:
160				dir = DMA_TO_DEVICE;
161				break;
162
163			case HOST1X_RELOC_WRITE:
164				dir = DMA_FROM_DEVICE;
165				break;
166
167			case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE:
168				dir = DMA_BIDIRECTIONAL;
169				break;
170
171			default:
172				err = -EINVAL;
173				goto unpin;
174			}
175
176			err = dma_map_sgtable(dev, sgt, dir, 0);
177			if (err)
178				goto unpin;
179
180			job->unpins[job->num_unpins].dev = dev;
181			job->unpins[job->num_unpins].dir = dir;
182			phys_addr = sg_dma_address(sgt->sgl);
183		}
184
185		job->addr_phys[job->num_unpins] = phys_addr;
186		job->unpins[job->num_unpins].bo = reloc->target.bo;
187		job->unpins[job->num_unpins].sgt = sgt;
188		job->num_unpins++;
189	}
190
191	/*
192	 * We will copy the gather buffers' contents later, so there is no
193	 * need to hold and pin them.
194	 */
195	if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
196		return 0;
197
198	for (i = 0; i < job->num_gathers; i++) {
199		size_t gather_size = 0;
200		struct scatterlist *sg;
201		struct sg_table *sgt;
202		dma_addr_t phys_addr;
203		unsigned long shift;
204		struct iova *alloc;
205		dma_addr_t *phys;
206		unsigned int j;
207
208		g = &job->gathers[i];
209		g->bo = host1x_bo_get(g->bo);
210		if (!g->bo) {
211			err = -EINVAL;
212			goto unpin;
213		}
214
215		/*
216		 * If the host1x is not attached to an IOMMU, there is no need
217		 * to map the buffer object for the host1x, since the physical
218		 * address can simply be used.
219		 */
220		if (!iommu_get_domain_for_dev(host->dev))
221			phys = &phys_addr;
222		else
223			phys = NULL;
224
225		sgt = host1x_bo_pin(host->dev, g->bo, phys);
226		if (IS_ERR(sgt)) {
227			err = PTR_ERR(sgt);
228			goto put;
229		}
230
231		if (host->domain) {
232			for_each_sgtable_sg(sgt, sg, j)
233				gather_size += sg->length;
234			gather_size = iova_align(&host->iova, gather_size);
235
236			shift = iova_shift(&host->iova);
237			alloc = alloc_iova(&host->iova, gather_size >> shift,
238					   host->iova_end >> shift, true);
239			if (!alloc) {
240				err = -ENOMEM;
241				goto put;
242			}
243
244			err = iommu_map_sgtable(host->domain,
245					iova_dma_addr(&host->iova, alloc),
246					sgt, IOMMU_READ);
247			if (err == 0) {
248				__free_iova(&host->iova, alloc);
249				err = -EINVAL;
250				goto put;
251			}
252
253			job->unpins[job->num_unpins].size = gather_size;
254			phys_addr = iova_dma_addr(&host->iova, alloc);
255		} else if (sgt) {
256			err = dma_map_sgtable(host->dev, sgt, DMA_TO_DEVICE, 0);
257			if (err)
258				goto put;
259
260			job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
261			job->unpins[job->num_unpins].dev = host->dev;
262			phys_addr = sg_dma_address(sgt->sgl);
263		}
264
265		job->addr_phys[job->num_unpins] = phys_addr;
266		job->gather_addr_phys[i] = phys_addr;
267
268		job->unpins[job->num_unpins].bo = g->bo;
269		job->unpins[job->num_unpins].sgt = sgt;
270		job->num_unpins++;
271	}
272
273	return 0;
274
275put:
276	host1x_bo_put(g->bo);
277unpin:
278	host1x_job_unpin(job);
279	return err;
280}
281
282static int do_relocs(struct host1x_job *job, struct host1x_job_gather *g)
283{
284	void *cmdbuf_addr = NULL;
285	struct host1x_bo *cmdbuf = g->bo;
286	unsigned int i;
287
288	/* pin & patch the relocs for one gather */
289	for (i = 0; i < job->num_relocs; i++) {
290		struct host1x_reloc *reloc = &job->relocs[i];
291		u32 reloc_addr = (job->reloc_addr_phys[i] +
292				  reloc->target.offset) >> reloc->shift;
293		u32 *target;
294
295		/* skip all other gathers */
296		if (cmdbuf != reloc->cmdbuf.bo)
297			continue;
298
299		if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
300			target = (u32 *)job->gather_copy_mapped +
301					reloc->cmdbuf.offset / sizeof(u32) +
302						g->offset / sizeof(u32);
303			goto patch_reloc;
304		}
305
306		if (!cmdbuf_addr) {
307			cmdbuf_addr = host1x_bo_mmap(cmdbuf);
308
309			if (unlikely(!cmdbuf_addr)) {
310				pr_err("Could not map cmdbuf for relocation\n");
311				return -ENOMEM;
312			}
313		}
314
315		target = cmdbuf_addr + reloc->cmdbuf.offset;
316patch_reloc:
317		*target = reloc_addr;
318	}
319
320	if (cmdbuf_addr)
321		host1x_bo_munmap(cmdbuf, cmdbuf_addr);
322
323	return 0;
324}
325
326static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
327			unsigned int offset)
328{
329	offset *= sizeof(u32);
330
331	if (reloc->cmdbuf.bo != cmdbuf || reloc->cmdbuf.offset != offset)
332		return false;
333
334	/* relocation shift value validation isn't implemented yet */
335	if (reloc->shift)
336		return false;
337
338	return true;
339}
340
341struct host1x_firewall {
342	struct host1x_job *job;
343	struct device *dev;
344
345	unsigned int num_relocs;
346	struct host1x_reloc *reloc;
347
348	struct host1x_bo *cmdbuf;
349	unsigned int offset;
350
351	u32 words;
352	u32 class;
353	u32 reg;
354	u32 mask;
355	u32 count;
356};
357
358static int check_register(struct host1x_firewall *fw, unsigned long offset)
359{
360	if (!fw->job->is_addr_reg)
361		return 0;
362
363	if (fw->job->is_addr_reg(fw->dev, fw->class, offset)) {
364		if (!fw->num_relocs)
365			return -EINVAL;
366
367		if (!check_reloc(fw->reloc, fw->cmdbuf, fw->offset))
368			return -EINVAL;
369
370		fw->num_relocs--;
371		fw->reloc++;
372	}
373
374	return 0;
375}
376
377static int check_class(struct host1x_firewall *fw, u32 class)
378{
379	if (!fw->job->is_valid_class) {
380		if (fw->class != class)
381			return -EINVAL;
382	} else {
383		if (!fw->job->is_valid_class(fw->class))
384			return -EINVAL;
385	}
386
387	return 0;
388}
389
390static int check_mask(struct host1x_firewall *fw)
391{
392	u32 mask = fw->mask;
393	u32 reg = fw->reg;
394	int ret;
395
396	while (mask) {
397		if (fw->words == 0)
398			return -EINVAL;
399
400		if (mask & 1) {
401			ret = check_register(fw, reg);
402			if (ret < 0)
403				return ret;
404
405			fw->words--;
406			fw->offset++;
407		}
408		mask >>= 1;
409		reg++;
410	}
411
412	return 0;
413}
414
415static int check_incr(struct host1x_firewall *fw)
416{
417	u32 count = fw->count;
418	u32 reg = fw->reg;
419	int ret;
420
421	while (count) {
422		if (fw->words == 0)
423			return -EINVAL;
424
425		ret = check_register(fw, reg);
426		if (ret < 0)
427			return ret;
428
429		reg++;
430		fw->words--;
431		fw->offset++;
432		count--;
433	}
434
435	return 0;
436}
437
438static int check_nonincr(struct host1x_firewall *fw)
439{
440	u32 count = fw->count;
441	int ret;
442
443	while (count) {
444		if (fw->words == 0)
445			return -EINVAL;
446
447		ret = check_register(fw, fw->reg);
448		if (ret < 0)
449			return ret;
450
451		fw->words--;
452		fw->offset++;
453		count--;
454	}
455
456	return 0;
457}
458
459static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
460{
461	u32 *cmdbuf_base = (u32 *)fw->job->gather_copy_mapped +
462		(g->offset / sizeof(u32));
463	u32 job_class = fw->class;
464	int err = 0;
465
466	fw->words = g->words;
467	fw->cmdbuf = g->bo;
468	fw->offset = 0;
469
470	while (fw->words && !err) {
471		u32 word = cmdbuf_base[fw->offset];
472		u32 opcode = (word & 0xf0000000) >> 28;
473
474		fw->mask = 0;
475		fw->reg = 0;
476		fw->count = 0;
477		fw->words--;
478		fw->offset++;
479
480		switch (opcode) {
481		case 0:
482			fw->class = word >> 6 & 0x3ff;
483			fw->mask = word & 0x3f;
484			fw->reg = word >> 16 & 0xfff;
485			err = check_class(fw, job_class);
486			if (!err)
487				err = check_mask(fw);
488			if (err)
489				goto out;
490			break;
491		case 1:
492			fw->reg = word >> 16 & 0xfff;
493			fw->count = word & 0xffff;
494			err = check_incr(fw);
495			if (err)
496				goto out;
497			break;
498
499		case 2:
500			fw->reg = word >> 16 & 0xfff;
501			fw->count = word & 0xffff;
502			err = check_nonincr(fw);
503			if (err)
504				goto out;
505			break;
506
507		case 3:
508			fw->mask = word & 0xffff;
509			fw->reg = word >> 16 & 0xfff;
510			err = check_mask(fw);
511			if (err)
512				goto out;
513			break;
514		case 4:
515		case 14:
516			break;
517		default:
518			err = -EINVAL;
519			break;
520		}
521	}
522
523out:
524	return err;
525}
526
527static inline int copy_gathers(struct device *host, struct host1x_job *job,
528			       struct device *dev)
529{
530	struct host1x_firewall fw;
531	size_t size = 0;
532	size_t offset = 0;
533	unsigned int i;
534
535	fw.job = job;
536	fw.dev = dev;
537	fw.reloc = job->relocs;
538	fw.num_relocs = job->num_relocs;
539	fw.class = job->class;
540
541	for (i = 0; i < job->num_gathers; i++) {
542		struct host1x_job_gather *g = &job->gathers[i];
543
544		size += g->words * sizeof(u32);
545	}
546
547	/*
548	 * Try a non-blocking allocation from a higher-priority pool first,
549	 * as waiting for the allocation here is a major performance hit.
550	 */
551	job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy,
552					       GFP_NOWAIT);
553
554	/* the higher-priority allocation failed, fall back to a blocking one */
555	if (!job->gather_copy_mapped)
556		job->gather_copy_mapped = dma_alloc_wc(host, size,
557						       &job->gather_copy,
558						       GFP_KERNEL);
559	if (!job->gather_copy_mapped)
560		return -ENOMEM;
561
562	job->gather_copy_size = size;
563
564	for (i = 0; i < job->num_gathers; i++) {
565		struct host1x_job_gather *g = &job->gathers[i];
566		void *gather;
567
568		/* Copy the gather */
569		gather = host1x_bo_mmap(g->bo);
570		memcpy(job->gather_copy_mapped + offset, gather + g->offset,
571		       g->words * sizeof(u32));
572		host1x_bo_munmap(g->bo, gather);
573
574		/* Store the location in the buffer */
575		g->base = job->gather_copy;
576		g->offset = offset;
577
578		/* Validate the job */
579		if (validate(&fw, g))
580			return -EINVAL;
581
582		offset += g->words * sizeof(u32);
583	}
584
585	/* No relocs should remain at this point */
586	if (fw.num_relocs)
587		return -EINVAL;
588
589	return 0;
590}
591
592int host1x_job_pin(struct host1x_job *job, struct device *dev)
593{
594	int err;
595	unsigned int i, j;
596	struct host1x *host = dev_get_drvdata(dev->parent);
597
598	/* pin memory */
599	err = pin_job(host, job);
600	if (err)
601		goto out;
602
603	if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
604		err = copy_gathers(host->dev, job, dev);
605		if (err)
606			goto out;
607	}
608
609	/* patch gathers */
610	for (i = 0; i < job->num_gathers; i++) {
611		struct host1x_job_gather *g = &job->gathers[i];
612
613		/* process each gather buffer only once */
614		if (g->handled)
615			continue;
616
617		/* copy_gathers() sets the gather's base if the firewall is enabled */
618		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
619			g->base = job->gather_addr_phys[i];
620
621		for (j = i + 1; j < job->num_gathers; j++) {
622			if (job->gathers[j].bo == g->bo) {
623				job->gathers[j].handled = true;
624				job->gathers[j].base = g->base;
625			}
626		}
627
628		err = do_relocs(job, g);
629		if (err)
630			break;
631	}
632
633out:
634	if (err)
635		host1x_job_unpin(job);
636	wmb();
637
638	return err;
639}
640EXPORT_SYMBOL(host1x_job_pin);
641
642void host1x_job_unpin(struct host1x_job *job)
643{
644	struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
645	unsigned int i;
646
647	for (i = 0; i < job->num_unpins; i++) {
648		struct host1x_job_unpin_data *unpin = &job->unpins[i];
649		struct device *dev = unpin->dev ?: host->dev;
650		struct sg_table *sgt = unpin->sgt;
651
652		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) &&
653		    unpin->size && host->domain) {
654			iommu_unmap(host->domain, job->addr_phys[i],
655				    unpin->size);
656			free_iova(&host->iova,
657				iova_pfn(&host->iova, job->addr_phys[i]));
658		}
659
660		if (unpin->dev && sgt)
661			dma_unmap_sgtable(unpin->dev, sgt, unpin->dir, 0);
662
663		host1x_bo_unpin(dev, unpin->bo, sgt);
664		host1x_bo_put(unpin->bo);
665	}
666
667	job->num_unpins = 0;
668
669	if (job->gather_copy_size)
670		dma_free_wc(host->dev, job->gather_copy_size,
671			    job->gather_copy_mapped, job->gather_copy);
672}
673EXPORT_SYMBOL(host1x_job_unpin);
674
675/*
676 * Debug routine used to dump job entries
677 */
678void host1x_job_dump(struct device *dev, struct host1x_job *job)
679{
680	dev_dbg(dev, "    SYNCPT_ID   %d\n", job->syncpt->id);
681	dev_dbg(dev, "    SYNCPT_VAL  %d\n", job->syncpt_end);
682	dev_dbg(dev, "    FIRST_GET   0x%x\n", job->first_get);
683	dev_dbg(dev, "    TIMEOUT     %d\n", job->timeout);
684	dev_dbg(dev, "    NUM_SLOTS   %d\n", job->num_slots);
685	dev_dbg(dev, "    NUM_HANDLES %d\n", job->num_unpins);
686}
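
Between v5.9 and v5.14.15 the logic is nearly unchanged: job_free() now drops the job's syncpoint reference with host1x_syncpt_put(), host1x_job_dump() reads job->syncpt->id instead of a cached syncpt_id, and the scatterlist code moved to the sg_table helpers (dma_map_sgtable(), dma_unmap_sgtable(), for_each_sgtable_sg(), iommu_map_sgtable()). The practical difference is the error convention, sketched below with a hypothetical helper (map_one() is not part of the driver); note that iommu_map_sgtable() still returns the number of bytes mapped, with 0 indicating failure, so its manual zero check survives in both versions.

#include <linux/dma-mapping.h>

/* Hypothetical helper contrasting the two DMA mapping conventions. */
static int map_one(struct device *dev, struct sg_table *sgt,
		   enum dma_data_direction dir)
{
	int err;

	/*
	 * v5.9 style: dma_map_sg() returns the number of mapped segments
	 * and 0 on failure, so failure is translated by hand:
	 *
	 *	err = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	 *	if (!err)
	 *		return -ENOMEM;
	 */

	/*
	 * v5.14.15 style: dma_map_sgtable() returns 0 on success or a
	 * negative errno that can be propagated directly, which is why
	 * the explicit -ENOMEM fallback disappears from pin_job():
	 */
	err = dma_map_sgtable(dev, sgt, dir, 0);
	if (err)
		return err;

	/* the caller later releases the mapping with dma_unmap_sgtable() */
	return 0;
}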