v5.9: drivers/gpu/host1x/job.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Job
 *
 * Copyright (c) 2010-2015, NVIDIA Corporation.
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <trace/events/host1x.h>

#include "channel.h"
#include "dev.h"
#include "job.h"
#include "syncpt.h"

#define HOST1X_WAIT_SYNCPT_OFFSET 0x8

struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
				    u32 num_cmdbufs, u32 num_relocs)
{
	struct host1x_job *job = NULL;
	unsigned int num_unpins = num_relocs;
	u64 total;
	void *mem;

	if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
		num_unpins += num_cmdbufs;

	/* Check that we're not going to overflow */
	total = sizeof(struct host1x_job) +
		(u64)num_relocs * sizeof(struct host1x_reloc) +
		(u64)num_unpins * sizeof(struct host1x_job_unpin_data) +
		(u64)num_cmdbufs * sizeof(struct host1x_job_gather) +
		(u64)num_unpins * sizeof(dma_addr_t) +
		(u64)num_unpins * sizeof(u32 *);
	if (total > ULONG_MAX)
		return NULL;

	mem = job = kzalloc(total, GFP_KERNEL);
	if (!job)
		return NULL;

	kref_init(&job->ref);
	job->channel = ch;

	/* Redistribute memory to the structs */
	mem += sizeof(struct host1x_job);
	job->relocs = num_relocs ? mem : NULL;
	mem += num_relocs * sizeof(struct host1x_reloc);
	job->unpins = num_unpins ? mem : NULL;
	mem += num_unpins * sizeof(struct host1x_job_unpin_data);
	job->gathers = num_cmdbufs ? mem : NULL;
	mem += num_cmdbufs * sizeof(struct host1x_job_gather);
	job->addr_phys = num_unpins ? mem : NULL;

	job->reloc_addr_phys = job->addr_phys;
	job->gather_addr_phys = &job->addr_phys[num_relocs];

	return job;
}
EXPORT_SYMBOL(host1x_job_alloc);

struct host1x_job *host1x_job_get(struct host1x_job *job)
{
	kref_get(&job->ref);
	return job;
}
EXPORT_SYMBOL(host1x_job_get);

static void job_free(struct kref *ref)
{
	struct host1x_job *job = container_of(ref, struct host1x_job, ref);

	kfree(job);
}

void host1x_job_put(struct host1x_job *job)
{
	kref_put(&job->ref, job_free);
}
EXPORT_SYMBOL(host1x_job_put);

void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
			   unsigned int words, unsigned int offset)
{
	struct host1x_job_gather *gather = &job->gathers[job->num_gathers];

	gather->words = words;
	gather->bo = bo;
	gather->offset = offset;

	job->num_gathers++;
}
EXPORT_SYMBOL(host1x_job_add_gather);

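/*
 * Resolve and DMA-map every buffer the job references: relocation targets
 * are pinned and mapped for the client device, and, unless the firewall
 * will copy them anyway, gather buffers are pinned and mapped for host1x
 * itself. Each mapping is recorded in job->unpins so host1x_job_unpin()
 * can undo it on failure or when the job completes.
 */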
static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
{
	struct host1x_client *client = job->client;
	struct device *dev = client->dev;
	struct host1x_job_gather *g;
	struct iommu_domain *domain;
	unsigned int i;
	int err;

	domain = iommu_get_domain_for_dev(dev);
	job->num_unpins = 0;

	for (i = 0; i < job->num_relocs; i++) {
		struct host1x_reloc *reloc = &job->relocs[i];
		dma_addr_t phys_addr, *phys;
		struct sg_table *sgt;

		reloc->target.bo = host1x_bo_get(reloc->target.bo);
		if (!reloc->target.bo) {
			err = -EINVAL;
			goto unpin;
		}

		/*
		 * If the client device is not attached to an IOMMU, the
		 * physical address of the buffer object can be used.
		 *
		 * Similarly, when an IOMMU domain is shared between all
		 * host1x clients, the IOVA is already available, so no
		 * need to map the buffer object again.
		 *
		 * XXX Note that this isn't always safe to do because it
		 * relies on an assumption that no cache maintenance is
		 * needed on the buffer objects.
		 */
		if (!domain || client->group)
			phys = &phys_addr;
		else
			phys = NULL;

		sgt = host1x_bo_pin(dev, reloc->target.bo, phys);
		if (IS_ERR(sgt)) {
			err = PTR_ERR(sgt);
			goto unpin;
		}

		if (sgt) {
			unsigned long mask = HOST1X_RELOC_READ |
					     HOST1X_RELOC_WRITE;
			enum dma_data_direction dir;

			switch (reloc->flags & mask) {
			case HOST1X_RELOC_READ:
				dir = DMA_TO_DEVICE;
				break;

			case HOST1X_RELOC_WRITE:
				dir = DMA_FROM_DEVICE;
				break;

			case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE:
				dir = DMA_BIDIRECTIONAL;
				break;

			default:
				err = -EINVAL;
				goto unpin;
			}

			err = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
			if (!err) {
				err = -ENOMEM;
				goto unpin;
			}

			job->unpins[job->num_unpins].dev = dev;
			job->unpins[job->num_unpins].dir = dir;
			phys_addr = sg_dma_address(sgt->sgl);
		}

		job->addr_phys[job->num_unpins] = phys_addr;
		job->unpins[job->num_unpins].bo = reloc->target.bo;
		job->unpins[job->num_unpins].sgt = sgt;
		job->num_unpins++;
	}

	/*
	 * We will copy the gathers' buffer contents later, so there is no
	 * need to hold and pin them.
	 */
	if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
		return 0;

	for (i = 0; i < job->num_gathers; i++) {
		size_t gather_size = 0;
		struct scatterlist *sg;
		struct sg_table *sgt;
		dma_addr_t phys_addr;
		unsigned long shift;
		struct iova *alloc;
		dma_addr_t *phys;
		unsigned int j;

		g = &job->gathers[i];
		g->bo = host1x_bo_get(g->bo);
		if (!g->bo) {
			err = -EINVAL;
			goto unpin;
		}

		/*
		 * If the host1x is not attached to an IOMMU, there is no need
		 * to map the buffer object for the host1x, since the physical
		 * address can simply be used.
		 */
		if (!iommu_get_domain_for_dev(host->dev))
			phys = &phys_addr;
		else
			phys = NULL;

		sgt = host1x_bo_pin(host->dev, g->bo, phys);
		if (IS_ERR(sgt)) {
			err = PTR_ERR(sgt);
			goto put;
		}

		if (host->domain) {
			for_each_sg(sgt->sgl, sg, sgt->nents, j)
				gather_size += sg->length;
			gather_size = iova_align(&host->iova, gather_size);

			shift = iova_shift(&host->iova);
			alloc = alloc_iova(&host->iova, gather_size >> shift,
					   host->iova_end >> shift, true);
			if (!alloc) {
				err = -ENOMEM;
				goto put;
			}

			err = iommu_map_sg(host->domain,
					iova_dma_addr(&host->iova, alloc),
					sgt->sgl, sgt->nents, IOMMU_READ);
			if (err == 0) {
				__free_iova(&host->iova, alloc);
				err = -EINVAL;
				goto put;
			}

			job->unpins[job->num_unpins].size = gather_size;
			phys_addr = iova_dma_addr(&host->iova, alloc);
		} else if (sgt) {
			err = dma_map_sg(host->dev, sgt->sgl, sgt->nents,
					 DMA_TO_DEVICE);
			if (!err) {
				err = -ENOMEM;
				goto put;
			}

			job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
			job->unpins[job->num_unpins].dev = host->dev;
			phys_addr = sg_dma_address(sgt->sgl);
		}

		job->addr_phys[job->num_unpins] = phys_addr;
		job->gather_addr_phys[i] = phys_addr;

		job->unpins[job->num_unpins].bo = g->bo;
		job->unpins[job->num_unpins].sgt = sgt;
		job->num_unpins++;
	}

	return 0;

put:
	host1x_bo_put(g->bo);
unpin:
	host1x_job_unpin(job);
	return err;
}

static int do_relocs(struct host1x_job *job, struct host1x_job_gather *g)
{
	void *cmdbuf_addr = NULL;
	struct host1x_bo *cmdbuf = g->bo;
	unsigned int i;

	/* pin & patch the relocs for one gather */
	for (i = 0; i < job->num_relocs; i++) {
		struct host1x_reloc *reloc = &job->relocs[i];
		u32 reloc_addr = (job->reloc_addr_phys[i] +
				  reloc->target.offset) >> reloc->shift;
		u32 *target;

		/* skip all other gathers */
		if (cmdbuf != reloc->cmdbuf.bo)
			continue;

		if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
			target = (u32 *)job->gather_copy_mapped +
					reloc->cmdbuf.offset / sizeof(u32) +
						g->offset / sizeof(u32);
			goto patch_reloc;
		}

		if (!cmdbuf_addr) {
			cmdbuf_addr = host1x_bo_mmap(cmdbuf);

			if (unlikely(!cmdbuf_addr)) {
				pr_err("Could not map cmdbuf for relocation\n");
				return -ENOMEM;
			}
		}

		target = cmdbuf_addr + reloc->cmdbuf.offset;
patch_reloc:
		*target = reloc_addr;
	}

	if (cmdbuf_addr)
		host1x_bo_munmap(cmdbuf, cmdbuf_addr);

	return 0;
}

static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
			unsigned int offset)
{
	offset *= sizeof(u32);

	if (reloc->cmdbuf.bo != cmdbuf || reloc->cmdbuf.offset != offset)
		return false;

	/* relocation shift value validation isn't implemented yet */
	if (reloc->shift)
		return false;

	return true;
}

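/*
 * Walking state for the command stream firewall: validate() scans a copied
 * gather word by word, tracking the current class and register window and
 * consuming the job's relocations whenever an address register is written.
 */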
struct host1x_firewall {
	struct host1x_job *job;
	struct device *dev;

	unsigned int num_relocs;
	struct host1x_reloc *reloc;

	struct host1x_bo *cmdbuf;
	unsigned int offset;

	u32 words;
	u32 class;
	u32 reg;
	u32 mask;
	u32 count;
};

static int check_register(struct host1x_firewall *fw, unsigned long offset)
{
	if (!fw->job->is_addr_reg)
		return 0;

	if (fw->job->is_addr_reg(fw->dev, fw->class, offset)) {
		if (!fw->num_relocs)
			return -EINVAL;

		if (!check_reloc(fw->reloc, fw->cmdbuf, fw->offset))
			return -EINVAL;

		fw->num_relocs--;
		fw->reloc++;
	}

	return 0;
}

static int check_class(struct host1x_firewall *fw, u32 class)
{
	if (!fw->job->is_valid_class) {
		if (fw->class != class)
			return -EINVAL;
	} else {
		if (!fw->job->is_valid_class(fw->class))
			return -EINVAL;
	}

	return 0;
}

static int check_mask(struct host1x_firewall *fw)
{
	u32 mask = fw->mask;
	u32 reg = fw->reg;
	int ret;

	while (mask) {
		if (fw->words == 0)
			return -EINVAL;

		if (mask & 1) {
			ret = check_register(fw, reg);
			if (ret < 0)
				return ret;

			fw->words--;
			fw->offset++;
		}
		mask >>= 1;
		reg++;
	}

	return 0;
}

static int check_incr(struct host1x_firewall *fw)
{
	u32 count = fw->count;
	u32 reg = fw->reg;
	int ret;

	while (count) {
		if (fw->words == 0)
			return -EINVAL;

		ret = check_register(fw, reg);
		if (ret < 0)
			return ret;

		reg++;
		fw->words--;
		fw->offset++;
		count--;
	}

	return 0;
}

static int check_nonincr(struct host1x_firewall *fw)
{
	u32 count = fw->count;
	int ret;

	while (count) {
		if (fw->words == 0)
			return -EINVAL;

		ret = check_register(fw, fw->reg);
		if (ret < 0)
			return ret;

		fw->words--;
		fw->offset++;
		count--;
	}

	return 0;
}

static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
{
	u32 *cmdbuf_base = (u32 *)fw->job->gather_copy_mapped +
		(g->offset / sizeof(u32));
	u32 job_class = fw->class;
	int err = 0;

	fw->words = g->words;
	fw->cmdbuf = g->bo;
	fw->offset = 0;

	while (fw->words && !err) {
		u32 word = cmdbuf_base[fw->offset];
		u32 opcode = (word & 0xf0000000) >> 28;

		fw->mask = 0;
		fw->reg = 0;
		fw->count = 0;
		fw->words--;
		fw->offset++;

		switch (opcode) {
		case 0:
			fw->class = word >> 6 & 0x3ff;
			fw->mask = word & 0x3f;
			fw->reg = word >> 16 & 0xfff;
			err = check_class(fw, job_class);
			if (!err)
				err = check_mask(fw);
			if (err)
				goto out;
			break;
		case 1:
			fw->reg = word >> 16 & 0xfff;
			fw->count = word & 0xffff;
			err = check_incr(fw);
			if (err)
				goto out;
			break;

		case 2:
			fw->reg = word >> 16 & 0xfff;
			fw->count = word & 0xffff;
			err = check_nonincr(fw);
			if (err)
				goto out;
			break;

		case 3:
			fw->mask = word & 0xffff;
			fw->reg = word >> 16 & 0xfff;
			err = check_mask(fw);
			if (err)
				goto out;
			break;
		case 4:
		case 14:
			break;
		default:
			err = -EINVAL;
			break;
		}
	}

out:
	return err;
}

static inline int copy_gathers(struct device *host, struct host1x_job *job,
			       struct device *dev)
{
	struct host1x_firewall fw;
	size_t size = 0;
	size_t offset = 0;
	unsigned int i;

	fw.job = job;
	fw.dev = dev;
	fw.reloc = job->relocs;
	fw.num_relocs = job->num_relocs;
	fw.class = job->class;

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];

		size += g->words * sizeof(u32);
	}

	/*
	 * Try a non-blocking allocation from the higher-priority pool first,
	 * as waiting for the allocation here is a major performance hit.
	 */
	job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy,
					       GFP_NOWAIT);

	/* the higher-priority allocation failed, try the generic blocking one */
	if (!job->gather_copy_mapped)
		job->gather_copy_mapped = dma_alloc_wc(host, size,
						       &job->gather_copy,
						       GFP_KERNEL);
	if (!job->gather_copy_mapped)
		return -ENOMEM;

	job->gather_copy_size = size;

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];
		void *gather;

		/* Copy the gather */
		gather = host1x_bo_mmap(g->bo);
		memcpy(job->gather_copy_mapped + offset, gather + g->offset,
		       g->words * sizeof(u32));
		host1x_bo_munmap(g->bo, gather);

		/* Store the location in the buffer */
		g->base = job->gather_copy;
		g->offset = offset;

		/* Validate the job */
		if (validate(&fw, g))
			return -EINVAL;

		offset += g->words * sizeof(u32);
	}

	/* No relocs should remain at this point */
	if (fw.num_relocs)
		return -EINVAL;

	return 0;
}

int host1x_job_pin(struct host1x_job *job, struct device *dev)
{
	int err;
	unsigned int i, j;
	struct host1x *host = dev_get_drvdata(dev->parent);

	/* pin memory */
	err = pin_job(host, job);
	if (err)
		goto out;

	if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
		err = copy_gathers(host->dev, job, dev);
		if (err)
			goto out;
	}

	/* patch gathers */
	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];

		/* process each gather mem only once */
		if (g->handled)
			continue;

		/* copy_gathers() sets gathers base if firewall is enabled */
		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
			g->base = job->gather_addr_phys[i];

		for (j = i + 1; j < job->num_gathers; j++) {
			if (job->gathers[j].bo == g->bo) {
				job->gathers[j].handled = true;
				job->gathers[j].base = g->base;
			}
		}

		err = do_relocs(job, g);
		if (err)
			break;
	}

out:
	if (err)
		host1x_job_unpin(job);
	wmb();

	return err;
}
EXPORT_SYMBOL(host1x_job_pin);

void host1x_job_unpin(struct host1x_job *job)
{
	struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
	unsigned int i;

	for (i = 0; i < job->num_unpins; i++) {
		struct host1x_job_unpin_data *unpin = &job->unpins[i];
		struct device *dev = unpin->dev ?: host->dev;
		struct sg_table *sgt = unpin->sgt;

		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) &&
		    unpin->size && host->domain) {
			iommu_unmap(host->domain, job->addr_phys[i],
				    unpin->size);
			free_iova(&host->iova,
				iova_pfn(&host->iova, job->addr_phys[i]));
		}

		if (unpin->dev && sgt)
			dma_unmap_sg(unpin->dev, sgt->sgl, sgt->nents,
				     unpin->dir);

		host1x_bo_unpin(dev, unpin->bo, sgt);
		host1x_bo_put(unpin->bo);
	}

	job->num_unpins = 0;

	if (job->gather_copy_size)
		dma_free_wc(host->dev, job->gather_copy_size,
			    job->gather_copy_mapped, job->gather_copy);
}
EXPORT_SYMBOL(host1x_job_unpin);

/*
 * Debug routine used to dump job entries
 */
void host1x_job_dump(struct device *dev, struct host1x_job *job)
{
	dev_dbg(dev, "    SYNCPT_ID   %d\n", job->syncpt_id);
	dev_dbg(dev, "    SYNCPT_VAL  %d\n", job->syncpt_end);
	dev_dbg(dev, "    FIRST_GET   0x%x\n", job->first_get);
	dev_dbg(dev, "    TIMEOUT     %d\n", job->timeout);
	dev_dbg(dev, "    NUM_SLOTS   %d\n", job->num_slots);
	dev_dbg(dev, "    NUM_HANDLES %d\n", job->num_unpins);
}
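For orientation, here is a minimal sketch of how a client driver of this era drives the API above, modelled on the Tegra DRM submit path. Everything except the host1x_job_*() calls, the host1x_syncpt_id() helper and the job fields is a hypothetical stand-in, not code from this file:

/*
 * Hypothetical v5.9-era caller: "client", "channel", "cmdbuf" and "syncpt"
 * are assumed to come from the client driver's own setup code.
 */
static int example_submit(struct host1x_client *client,
			  struct host1x_channel *channel,
			  struct host1x_bo *cmdbuf, unsigned int num_words,
			  struct host1x_syncpt *syncpt)
{
	struct host1x_job *job;
	int err;

	/* room for one gather and no relocations */
	job = host1x_job_alloc(channel, 1, 0);
	if (!job)
		return -ENOMEM;

	job->client = client;
	job->class = client->class;
	job->syncpt_id = host1x_syncpt_id(syncpt);
	job->syncpt_incrs = 1;

	host1x_job_add_gather(job, cmdbuf, num_words, 0);

	/* pin_job(), copy_gathers() and do_relocs() all run in here */
	err = host1x_job_pin(job, client->dev);
	if (err)
		goto put;

	err = host1x_job_submit(job);
	if (err)
		host1x_job_unpin(job);
put:
	host1x_job_put(job);
	return err;
}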
v6.13.7: drivers/gpu/host1x/job.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Job
 *
 * Copyright (c) 2010-2015, NVIDIA Corporation.
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <trace/events/host1x.h>

#include "channel.h"
#include "dev.h"
#include "job.h"
#include "syncpt.h"

#define HOST1X_WAIT_SYNCPT_OFFSET 0x8

struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
				    u32 num_cmdbufs, u32 num_relocs,
				    bool skip_firewall)
{
	struct host1x_job *job = NULL;
	unsigned int num_unpins = num_relocs;
	bool enable_firewall;
	u64 total;
	void *mem;

	enable_firewall = IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && !skip_firewall;

	if (!enable_firewall)
		num_unpins += num_cmdbufs;

	/* Check that we're not going to overflow */
	total = sizeof(struct host1x_job) +
		(u64)num_relocs * sizeof(struct host1x_reloc) +
		(u64)num_unpins * sizeof(struct host1x_job_unpin_data) +
		(u64)num_cmdbufs * sizeof(struct host1x_job_cmd) +
		(u64)num_unpins * sizeof(dma_addr_t) +
		(u64)num_unpins * sizeof(u32 *);
	if (total > ULONG_MAX)
		return NULL;

	mem = job = kzalloc(total, GFP_KERNEL);
	if (!job)
		return NULL;

	job->enable_firewall = enable_firewall;

	kref_init(&job->ref);
	job->channel = ch;

	/* Redistribute memory to the structs */
	mem += sizeof(struct host1x_job);
	job->relocs = num_relocs ? mem : NULL;
	mem += num_relocs * sizeof(struct host1x_reloc);
	job->unpins = num_unpins ? mem : NULL;
	mem += num_unpins * sizeof(struct host1x_job_unpin_data);
	job->cmds = num_cmdbufs ? mem : NULL;
	mem += num_cmdbufs * sizeof(struct host1x_job_cmd);
	job->addr_phys = num_unpins ? mem : NULL;

	job->reloc_addr_phys = job->addr_phys;
	job->gather_addr_phys = &job->addr_phys[num_relocs];

	return job;
}
EXPORT_SYMBOL(host1x_job_alloc);

struct host1x_job *host1x_job_get(struct host1x_job *job)
{
	kref_get(&job->ref);
	return job;
}
EXPORT_SYMBOL(host1x_job_get);

static void job_free(struct kref *ref)
{
	struct host1x_job *job = container_of(ref, struct host1x_job, ref);

	if (job->release)
		job->release(job);

	if (job->fence) {
		/*
		 * remove_callback is atomic w.r.t. fence signaling, so
		 * after the call returns, we know that the callback is not
		 * in execution, and the fence can be safely freed.
		 */
		dma_fence_remove_callback(job->fence, &job->fence_cb);
		dma_fence_put(job->fence);
	}

	if (job->syncpt)
		host1x_syncpt_put(job->syncpt);

	kfree(job);
}

void host1x_job_put(struct host1x_job *job)
{
	kref_put(&job->ref, job_free);
}
EXPORT_SYMBOL(host1x_job_put);

void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
			   unsigned int words, unsigned int offset)
{
	struct host1x_job_gather *gather = &job->cmds[job->num_cmds].gather;

	gather->words = words;
	gather->bo = bo;
	gather->offset = offset;

	job->num_cmds++;
}
EXPORT_SYMBOL(host1x_job_add_gather);

void host1x_job_add_wait(struct host1x_job *job, u32 id, u32 thresh,
			 bool relative, u32 next_class)
{
	struct host1x_job_cmd *cmd = &job->cmds[job->num_cmds];

	cmd->is_wait = true;
	cmd->wait.id = id;
	cmd->wait.threshold = thresh;
	cmd->wait.next_class = next_class;
	cmd->wait.relative = relative;

	job->num_cmds++;
}
EXPORT_SYMBOL(host1x_job_add_wait);

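/*
 * Resolve and map every buffer the job references: relocation targets are
 * pinned for the client device with a DMA direction derived from the reloc
 * flags, and, unless the firewall will copy them anyway, gather buffers are
 * pinned read-only for host1x itself. Each mapping is recorded in
 * job->unpins so host1x_job_unpin() can undo it.
 */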
static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
{
	unsigned long mask = HOST1X_RELOC_READ | HOST1X_RELOC_WRITE;
	struct host1x_client *client = job->client;
	struct device *dev = client->dev;
	struct host1x_job_gather *g;
	unsigned int i;
	int err;

	job->num_unpins = 0;

	for (i = 0; i < job->num_relocs; i++) {
		struct host1x_reloc *reloc = &job->relocs[i];
		enum dma_data_direction direction;
		struct host1x_bo_mapping *map;
		struct host1x_bo *bo;

		reloc->target.bo = host1x_bo_get(reloc->target.bo);
		if (!reloc->target.bo) {
			err = -EINVAL;
			goto unpin;
		}

		bo = reloc->target.bo;

		switch (reloc->flags & mask) {
		case HOST1X_RELOC_READ:
			direction = DMA_TO_DEVICE;
			break;

		case HOST1X_RELOC_WRITE:
			direction = DMA_FROM_DEVICE;
			break;

		case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE:
			direction = DMA_BIDIRECTIONAL;
			break;

		default:
			err = -EINVAL;
			goto unpin;
		}

		map = host1x_bo_pin(dev, bo, direction, NULL);
		if (IS_ERR(map)) {
			err = PTR_ERR(map);
			goto unpin;
		}

		/*
		 * host1x clients are generally not able to do scatter-gather themselves, so fail
		 * if the buffer is discontiguous and we fail to map its SG table to a single
		 * contiguous chunk of I/O virtual memory.
		 */
		if (map->chunks > 1) {
			err = -EINVAL;
			goto unpin;
		}

		job->addr_phys[job->num_unpins] = map->phys;
		job->unpins[job->num_unpins].map = map;
		job->num_unpins++;
	}

	/*
	 * We will copy the gathers' buffer contents later, so there is no
	 * need to hold and pin them.
	 */
	if (job->enable_firewall)
		return 0;

	for (i = 0; i < job->num_cmds; i++) {
		struct host1x_bo_mapping *map;
		size_t gather_size = 0;
		struct scatterlist *sg;
		unsigned long shift;
		struct iova *alloc;
		unsigned int j;

		if (job->cmds[i].is_wait)
			continue;

		g = &job->cmds[i].gather;

		g->bo = host1x_bo_get(g->bo);
		if (!g->bo) {
			err = -EINVAL;
			goto unpin;
		}

		map = host1x_bo_pin(host->dev, g->bo, DMA_TO_DEVICE, NULL);
		if (IS_ERR(map)) {
			err = PTR_ERR(map);
			goto unpin;
		}

		if (host->domain) {
			for_each_sgtable_sg(map->sgt, sg, j)
				gather_size += sg->length;

			gather_size = iova_align(&host->iova, gather_size);

			shift = iova_shift(&host->iova);
			alloc = alloc_iova(&host->iova, gather_size >> shift,
					   host->iova_end >> shift, true);
			if (!alloc) {
				err = -ENOMEM;
				goto put;
			}

			err = iommu_map_sgtable(host->domain, iova_dma_addr(&host->iova, alloc),
						map->sgt, IOMMU_READ);
			if (err == 0) {
				__free_iova(&host->iova, alloc);
				err = -EINVAL;
				goto put;
			}

			map->phys = iova_dma_addr(&host->iova, alloc);
			map->size = gather_size;
		}

		job->addr_phys[job->num_unpins] = map->phys;
		job->unpins[job->num_unpins].map = map;
		job->num_unpins++;

		job->gather_addr_phys[i] = map->phys;
	}

	return 0;

put:
	host1x_bo_put(g->bo);
unpin:
	host1x_job_unpin(job);
	return err;
}

static int do_relocs(struct host1x_job *job, struct host1x_job_gather *g)
{
	void *cmdbuf_addr = NULL;
	struct host1x_bo *cmdbuf = g->bo;
	unsigned int i;

	/* pin & patch the relocs for one gather */
	for (i = 0; i < job->num_relocs; i++) {
		struct host1x_reloc *reloc = &job->relocs[i];
		u32 reloc_addr = (job->reloc_addr_phys[i] +
				  reloc->target.offset) >> reloc->shift;
		u32 *target;

		/* skip all other gathers */
		if (cmdbuf != reloc->cmdbuf.bo)
			continue;

		if (job->enable_firewall) {
			target = (u32 *)job->gather_copy_mapped +
					reloc->cmdbuf.offset / sizeof(u32) +
						g->offset / sizeof(u32);
			goto patch_reloc;
		}

		if (!cmdbuf_addr) {
			cmdbuf_addr = host1x_bo_mmap(cmdbuf);

			if (unlikely(!cmdbuf_addr)) {
				pr_err("Could not map cmdbuf for relocation\n");
				return -ENOMEM;
			}
		}

		target = cmdbuf_addr + reloc->cmdbuf.offset;
patch_reloc:
		*target = reloc_addr;
	}

	if (cmdbuf_addr)
		host1x_bo_munmap(cmdbuf, cmdbuf_addr);

	return 0;
}

static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
			unsigned int offset)
{
	offset *= sizeof(u32);

	if (reloc->cmdbuf.bo != cmdbuf || reloc->cmdbuf.offset != offset)
		return false;

	/* relocation shift value validation isn't implemented yet */
	if (reloc->shift)
		return false;

	return true;
}

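/*
 * Walking state for the command stream firewall: validate() scans a copied
 * gather word by word, tracking the current class and register window and
 * consuming the job's relocations whenever an address register is written.
 */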
struct host1x_firewall {
	struct host1x_job *job;
	struct device *dev;

	unsigned int num_relocs;
	struct host1x_reloc *reloc;

	struct host1x_bo *cmdbuf;
	unsigned int offset;

	u32 words;
	u32 class;
	u32 reg;
	u32 mask;
	u32 count;
};

static int check_register(struct host1x_firewall *fw, unsigned long offset)
{
	if (!fw->job->is_addr_reg)
		return 0;

	if (fw->job->is_addr_reg(fw->dev, fw->class, offset)) {
		if (!fw->num_relocs)
			return -EINVAL;

		if (!check_reloc(fw->reloc, fw->cmdbuf, fw->offset))
			return -EINVAL;

		fw->num_relocs--;
		fw->reloc++;
	}

	return 0;
}

static int check_class(struct host1x_firewall *fw, u32 class)
{
	if (!fw->job->is_valid_class) {
		if (fw->class != class)
			return -EINVAL;
	} else {
		if (!fw->job->is_valid_class(fw->class))
			return -EINVAL;
	}

	return 0;
}

static int check_mask(struct host1x_firewall *fw)
{
	u32 mask = fw->mask;
	u32 reg = fw->reg;
	int ret;

	while (mask) {
		if (fw->words == 0)
			return -EINVAL;

		if (mask & 1) {
			ret = check_register(fw, reg);
			if (ret < 0)
				return ret;

			fw->words--;
			fw->offset++;
		}
		mask >>= 1;
		reg++;
	}

	return 0;
}

static int check_incr(struct host1x_firewall *fw)
{
	u32 count = fw->count;
	u32 reg = fw->reg;
	int ret;

	while (count) {
		if (fw->words == 0)
			return -EINVAL;

		ret = check_register(fw, reg);
		if (ret < 0)
			return ret;

		reg++;
		fw->words--;
		fw->offset++;
		count--;
	}

	return 0;
}

static int check_nonincr(struct host1x_firewall *fw)
{
	u32 count = fw->count;
	int ret;

	while (count) {
		if (fw->words == 0)
			return -EINVAL;

		ret = check_register(fw, fw->reg);
		if (ret < 0)
			return ret;

		fw->words--;
		fw->offset++;
		count--;
	}

	return 0;
}

static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
{
	u32 *cmdbuf_base = (u32 *)fw->job->gather_copy_mapped +
		(g->offset / sizeof(u32));
	u32 job_class = fw->class;
	int err = 0;

	fw->words = g->words;
	fw->cmdbuf = g->bo;
	fw->offset = 0;

	while (fw->words && !err) {
		u32 word = cmdbuf_base[fw->offset];
		u32 opcode = (word & 0xf0000000) >> 28;

		fw->mask = 0;
		fw->reg = 0;
		fw->count = 0;
		fw->words--;
		fw->offset++;

		switch (opcode) {
		case 0:
			fw->class = word >> 6 & 0x3ff;
			fw->mask = word & 0x3f;
			fw->reg = word >> 16 & 0xfff;
			err = check_class(fw, job_class);
			if (!err)
				err = check_mask(fw);
			if (err)
				goto out;
			break;
		case 1:
			fw->reg = word >> 16 & 0xfff;
			fw->count = word & 0xffff;
			err = check_incr(fw);
			if (err)
				goto out;
			break;

		case 2:
			fw->reg = word >> 16 & 0xfff;
			fw->count = word & 0xffff;
			err = check_nonincr(fw);
			if (err)
				goto out;
			break;

		case 3:
			fw->mask = word & 0xffff;
			fw->reg = word >> 16 & 0xfff;
			err = check_mask(fw);
			if (err)
				goto out;
			break;
		case 4:
		case 14:
			break;
		default:
			err = -EINVAL;
			break;
		}
	}

out:
	return err;
}

static inline int copy_gathers(struct device *host, struct host1x_job *job,
			       struct device *dev)
{
	struct host1x_firewall fw;
	size_t size = 0;
	size_t offset = 0;
	unsigned int i;

	fw.job = job;
	fw.dev = dev;
	fw.reloc = job->relocs;
	fw.num_relocs = job->num_relocs;
	fw.class = job->class;

	for (i = 0; i < job->num_cmds; i++) {
		struct host1x_job_gather *g;

		if (job->cmds[i].is_wait)
			continue;

		g = &job->cmds[i].gather;

		size += g->words * sizeof(u32);
	}

	/*
	 * Try a non-blocking allocation from the higher-priority pool first,
	 * as waiting for the allocation here is a major performance hit.
	 */
	job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy,
					       GFP_NOWAIT);

	/* the higher-priority allocation failed, try the generic blocking one */
	if (!job->gather_copy_mapped)
		job->gather_copy_mapped = dma_alloc_wc(host, size,
						       &job->gather_copy,
						       GFP_KERNEL);
	if (!job->gather_copy_mapped)
		return -ENOMEM;

	job->gather_copy_size = size;

	for (i = 0; i < job->num_cmds; i++) {
		struct host1x_job_gather *g;
		void *gather;

		if (job->cmds[i].is_wait)
			continue;
		g = &job->cmds[i].gather;

		/* Copy the gather */
		gather = host1x_bo_mmap(g->bo);
		memcpy(job->gather_copy_mapped + offset, gather + g->offset,
		       g->words * sizeof(u32));
		host1x_bo_munmap(g->bo, gather);

		/* Store the location in the buffer */
		g->base = job->gather_copy;
		g->offset = offset;

		/* Validate the job */
		if (validate(&fw, g))
			return -EINVAL;

		offset += g->words * sizeof(u32);
	}

	/* No relocs should remain at this point */
	if (fw.num_relocs)
		return -EINVAL;

	return 0;
}

int host1x_job_pin(struct host1x_job *job, struct device *dev)
{
	int err;
	unsigned int i, j;
	struct host1x *host = dev_get_drvdata(dev->parent);

	/* pin memory */
	err = pin_job(host, job);
	if (err)
		goto out;

	if (job->enable_firewall) {
		err = copy_gathers(host->dev, job, dev);
		if (err)
			goto out;
	}

	/* patch gathers */
	for (i = 0; i < job->num_cmds; i++) {
		struct host1x_job_gather *g;

		if (job->cmds[i].is_wait)
			continue;
		g = &job->cmds[i].gather;

		/* process each gather mem only once */
		if (g->handled)
			continue;

		/* copy_gathers() sets gathers base if firewall is enabled */
		if (!job->enable_firewall)
			g->base = job->gather_addr_phys[i];

		for (j = i + 1; j < job->num_cmds; j++) {
			if (!job->cmds[j].is_wait &&
			    job->cmds[j].gather.bo == g->bo) {
				job->cmds[j].gather.handled = true;
				job->cmds[j].gather.base = g->base;
			}
		}

		err = do_relocs(job, g);
		if (err)
			break;
	}

out:
	if (err)
		host1x_job_unpin(job);
	wmb();

	return err;
}
EXPORT_SYMBOL(host1x_job_pin);

void host1x_job_unpin(struct host1x_job *job)
{
	struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
	unsigned int i;

	for (i = 0; i < job->num_unpins; i++) {
		struct host1x_bo_mapping *map = job->unpins[i].map;
		struct host1x_bo *bo = map->bo;

		if (!job->enable_firewall && map->size && host->domain) {
			iommu_unmap(host->domain, job->addr_phys[i], map->size);
			free_iova(&host->iova, iova_pfn(&host->iova, job->addr_phys[i]));
		}

		host1x_bo_unpin(map);
		host1x_bo_put(bo);
	}

	job->num_unpins = 0;

	if (job->gather_copy_size)
		dma_free_wc(host->dev, job->gather_copy_size,
			    job->gather_copy_mapped, job->gather_copy);
}
EXPORT_SYMBOL(host1x_job_unpin);

/*
 * Debug routine used to dump job entries
 */
void host1x_job_dump(struct device *dev, struct host1x_job *job)
{
	dev_dbg(dev, "    SYNCPT_ID   %d\n", job->syncpt->id);
	dev_dbg(dev, "    SYNCPT_VAL  %d\n", job->syncpt_end);
	dev_dbg(dev, "    FIRST_GET   0x%x\n", job->first_get);
	dev_dbg(dev, "    TIMEOUT     %d\n", job->timeout);
	dev_dbg(dev, "    NUM_SLOTS   %d\n", job->num_slots);
	dev_dbg(dev, "    NUM_HANDLES %d\n", job->num_unpins);
}
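The corresponding sketch against the v6.13.7 API, to highlight what changed: host1x_job_alloc() takes a skip_firewall flag, the job now carries a refcounted struct host1x_syncpt pointer (released in job_free()) instead of a raw syncpt_id, and host1x_job_add_wait() lets a syncpoint wait be queued between gathers. As above, all names outside the host1x_job_*() and host1x_syncpt_get() calls are hypothetical:

/*
 * Hypothetical v6.13.7-era caller: "channel", "cmdbuf", "syncpt" and the
 * wait parameters are assumed to come from the client driver's own code.
 */
static int example_submit(struct host1x_client *client,
			  struct host1x_channel *channel,
			  struct host1x_bo *cmdbuf, unsigned int num_words,
			  struct host1x_syncpt *syncpt,
			  u32 wait_id, u32 wait_thresh)
{
	struct host1x_job *job;
	int err;

	/* two commands (a wait plus a gather), no relocations, firewall on */
	job = host1x_job_alloc(channel, 2, 0, false);
	if (!job)
		return -ENOMEM;

	job->client = client;
	job->class = client->class;
	job->syncpt = host1x_syncpt_get(syncpt);
	job->syncpt_incrs = 1;

	/* stall the channel until the prerequisite syncpoint value, then run */
	host1x_job_add_wait(job, wait_id, wait_thresh, false, job->class);
	host1x_job_add_gather(job, cmdbuf, num_words, 0);

	err = host1x_job_pin(job, client->dev);
	if (err)
		goto put;

	err = host1x_job_submit(job);
	if (err)
		host1x_job_unpin(job);
put:
	host1x_job_put(job);
	return err;
}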