v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Tegra host1x Job
  4 *
  5 * Copyright (c) 2010-2015, NVIDIA Corporation.
  6 */
  7
  8#include <linux/dma-mapping.h>
  9#include <linux/err.h>
 10#include <linux/host1x.h>
 11#include <linux/iommu.h>
 12#include <linux/kref.h>
 13#include <linux/module.h>
 14#include <linux/scatterlist.h>
 15#include <linux/slab.h>
 16#include <linux/vmalloc.h>
 17#include <trace/events/host1x.h>
 18
 19#include "channel.h"
 20#include "dev.h"
 21#include "job.h"
 22#include "syncpt.h"
 23
 24#define HOST1X_WAIT_SYNCPT_OFFSET 0x8
 25
 26struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
 27				    u32 num_cmdbufs, u32 num_relocs,
 28				    bool skip_firewall)
 29{
 30	struct host1x_job *job = NULL;
 31	unsigned int num_unpins = num_relocs;
 32	bool enable_firewall;
 33	u64 total;
 34	void *mem;
 35
 36	enable_firewall = IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && !skip_firewall;
 37
 38	if (!enable_firewall)
 39		num_unpins += num_cmdbufs;
 40
 41	/* Check that we're not going to overflow */
 42	total = sizeof(struct host1x_job) +
 43		(u64)num_relocs * sizeof(struct host1x_reloc) +
 44		(u64)num_unpins * sizeof(struct host1x_job_unpin_data) +
 45		(u64)num_cmdbufs * sizeof(struct host1x_job_cmd) +
 46		(u64)num_unpins * sizeof(dma_addr_t) +
 47		(u64)num_unpins * sizeof(u32 *);
 48	if (total > ULONG_MAX)
 49		return NULL;
 50
 51	mem = job = kzalloc(total, GFP_KERNEL);
 52	if (!job)
 53		return NULL;
 54
 55	job->enable_firewall = enable_firewall;
 56
 57	kref_init(&job->ref);
 58	job->channel = ch;
 59
 60	/* Redistribute memory to the structs */
 61	mem += sizeof(struct host1x_job);
 62	job->relocs = num_relocs ? mem : NULL;
 63	mem += num_relocs * sizeof(struct host1x_reloc);
 64	job->unpins = num_unpins ? mem : NULL;
 65	mem += num_unpins * sizeof(struct host1x_job_unpin_data);
 66	job->cmds = num_cmdbufs ? mem : NULL;
 67	mem += num_cmdbufs * sizeof(struct host1x_job_cmd);
 68	job->addr_phys = num_unpins ? mem : NULL;
 69
 70	job->reloc_addr_phys = job->addr_phys;
 71	job->gather_addr_phys = &job->addr_phys[num_relocs];
 72
 73	return job;
 74}
 75EXPORT_SYMBOL(host1x_job_alloc);
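
/*
 * Illustrative sketch (not part of the driver): allocating a job with room
 * for two command buffers and four relocations. "ch" is a hypothetical
 * channel obtained elsewhere, e.g. via host1x_channel_request(). The counts
 * passed here size the trailing arrays laid out above and feed the 64-bit
 * overflow check that guards the kzalloc() size.
 */
static struct host1x_job *example_alloc_job(struct host1x_channel *ch)
{
	struct host1x_job *job;

	/* skip_firewall = false: honour CONFIG_TEGRA_HOST1X_FIREWALL if set */
	job = host1x_job_alloc(ch, 2, 4, false);
	if (!job)
		return NULL;	/* allocation failed or the size overflowed */

	return job;
}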
 76
 77struct host1x_job *host1x_job_get(struct host1x_job *job)
 78{
 79	kref_get(&job->ref);
 80	return job;
 81}
 82EXPORT_SYMBOL(host1x_job_get);
 83
 84static void job_free(struct kref *ref)
 85{
 86	struct host1x_job *job = container_of(ref, struct host1x_job, ref);
 87
 88	if (job->release)
 89		job->release(job);
 90
 91	if (job->fence) {
 92		/*
 93		 * remove_callback is atomic w.r.t. fence signaling, so
 94		 * after the call returns, we know that the callback is not
 95		 * in execution, and the fence can be safely freed.
 96		 */
 97		dma_fence_remove_callback(job->fence, &job->fence_cb);
 98		dma_fence_put(job->fence);
 99	}
100
101	if (job->syncpt)
102		host1x_syncpt_put(job->syncpt);
103
104	kfree(job);
105}
106
107void host1x_job_put(struct host1x_job *job)
108{
109	kref_put(&job->ref, job_free);
110}
111EXPORT_SYMBOL(host1x_job_put);
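
/*
 * Illustrative sketch: jobs are reference counted. A caller that hands a
 * job to another context takes an extra reference with host1x_job_get()
 * and drops it with host1x_job_put(); job_free() above runs only when the
 * last reference goes away, releasing the fence and syncpoint references.
 */
static void example_share_job(struct host1x_job *job)
{
	struct host1x_job *ref = host1x_job_get(job);	/* refcount: 2 */

	/* ... hand "ref" over to a completion handler or workqueue ... */

	host1x_job_put(ref);	/* refcount back to 1; the owner still holds one */
}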
112
113void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
114			   unsigned int words, unsigned int offset)
115{
116	struct host1x_job_gather *gather = &job->cmds[job->num_cmds].gather;
117
118	gather->words = words;
119	gather->bo = bo;
120	gather->offset = offset;
121
122	job->num_cmds++;
123}
124EXPORT_SYMBOL(host1x_job_add_gather);
125
126void host1x_job_add_wait(struct host1x_job *job, u32 id, u32 thresh,
127			 bool relative, u32 next_class)
128{
129	struct host1x_job_cmd *cmd = &job->cmds[job->num_cmds];
130
131	cmd->is_wait = true;
132	cmd->wait.id = id;
133	cmd->wait.threshold = thresh;
134	cmd->wait.next_class = next_class;
135	cmd->wait.relative = relative;
136
137	job->num_cmds++;
138}
139EXPORT_SYMBOL(host1x_job_add_wait);
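
/*
 * Illustrative sketch: populating a job's command stream. Gathers and waits
 * are appended in submission order into the cmds[] array sized by
 * host1x_job_alloc(), so the caller must not add more commands than the
 * num_cmdbufs it reserved. "bo", "syncpt_id" and "threshold" are
 * hypothetical values owned by the caller.
 */
static void example_build_cmds(struct host1x_job *job, struct host1x_bo *bo,
			       u32 syncpt_id, u32 threshold)
{
	/* 256 words of opcodes, starting at offset 0 of the buffer object */
	host1x_job_add_gather(job, bo, 256, 0);

	/* stall the channel until the syncpoint reaches the threshold */
	host1x_job_add_wait(job, syncpt_id, threshold, false, job->class);
}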
140
141static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
142{
143	unsigned long mask = HOST1X_RELOC_READ | HOST1X_RELOC_WRITE;
144	struct host1x_client *client = job->client;
145	struct device *dev = client->dev;
146	struct host1x_job_gather *g;
147	unsigned int i;
148	int err;
149
150	job->num_unpins = 0;
151
152	for (i = 0; i < job->num_relocs; i++) {
153		struct host1x_reloc *reloc = &job->relocs[i];
154		enum dma_data_direction direction;
155		struct host1x_bo_mapping *map;
156		struct host1x_bo *bo;
157
158		reloc->target.bo = host1x_bo_get(reloc->target.bo);
159		if (!reloc->target.bo) {
160			err = -EINVAL;
161			goto unpin;
162		}
163
164		bo = reloc->target.bo;
165
166		switch (reloc->flags & mask) {
167		case HOST1X_RELOC_READ:
168			direction = DMA_TO_DEVICE;
169			break;
170
171		case HOST1X_RELOC_WRITE:
172			direction = DMA_FROM_DEVICE;
173			break;
174
175		case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE:
176			direction = DMA_BIDIRECTIONAL;
177			break;
178
179		default:
180			err = -EINVAL;
181			goto unpin;
182		}
183
184		map = host1x_bo_pin(dev, bo, direction, NULL);
185		if (IS_ERR(map)) {
186			err = PTR_ERR(map);
187			goto unpin;
188		}
189
190		/*
191		 * host1x clients are generally not able to do scatter-gather themselves, so fail
192		 * if the buffer is discontiguous and we fail to map its SG table to a single
193		 * contiguous chunk of I/O virtual memory.
194		 */
195		if (map->chunks > 1) {
196			err = -EINVAL;
197			goto unpin;
198		}
199
200		job->addr_phys[job->num_unpins] = map->phys;
201		job->unpins[job->num_unpins].map = map;
202		job->num_unpins++;
203	}
204
205	/*
206	 * We will copy the contents of the gather BOs later, so there is
207	 * no need to hold and pin them.
208	 */
209	if (job->enable_firewall)
210		return 0;
211
212	for (i = 0; i < job->num_cmds; i++) {
213		struct host1x_bo_mapping *map;
214		size_t gather_size = 0;
215		struct scatterlist *sg;
216		unsigned long shift;
217		struct iova *alloc;
218		unsigned int j;
219
220		if (job->cmds[i].is_wait)
221			continue;
222
223		g = &job->cmds[i].gather;
224
225		g->bo = host1x_bo_get(g->bo);
226		if (!g->bo) {
227			err = -EINVAL;
228			goto unpin;
229		}
230
231		map = host1x_bo_pin(host->dev, g->bo, DMA_TO_DEVICE, NULL);
232		if (IS_ERR(map)) {
233			err = PTR_ERR(map);
234			goto unpin;
235		}
236
237		if (host->domain) {
238			for_each_sgtable_sg(map->sgt, sg, j)
239				gather_size += sg->length;
240
241			gather_size = iova_align(&host->iova, gather_size);
242
243			shift = iova_shift(&host->iova);
244			alloc = alloc_iova(&host->iova, gather_size >> shift,
245					   host->iova_end >> shift, true);
246			if (!alloc) {
247				err = -ENOMEM;
248				goto put;
249			}
250
251			err = iommu_map_sgtable(host->domain, iova_dma_addr(&host->iova, alloc),
252						map->sgt, IOMMU_READ);
253			if (err == 0) {
254				__free_iova(&host->iova, alloc);
255				err = -EINVAL;
256				goto put;
257			}
258
259			map->phys = iova_dma_addr(&host->iova, alloc);
260			map->size = gather_size;
261		}
262
263		job->addr_phys[job->num_unpins] = map->phys;
264		job->unpins[job->num_unpins].map = map;
265		job->num_unpins++;
266
267		job->gather_addr_phys[i] = map->phys;
268	}
269
270	return 0;
271
272put:
273	host1x_bo_put(g->bo);
274unpin:
275	host1x_job_unpin(job);
276	return err;
277}
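
/*
 * Illustrative sketch of the gather-mapping pattern in pin_job() above:
 * carve a range out of the host1x IOVA domain, map the pinned SG table
 * into it read-only, and release the range again if nothing was mapped.
 * All names come from the function above; error reporting is simplified
 * to returning 0 as an invalid address.
 */
static dma_addr_t example_map_gather(struct host1x *host, struct sg_table *sgt,
				     size_t size)
{
	unsigned long shift = iova_shift(&host->iova);
	struct iova *alloc;

	size = iova_align(&host->iova, size);

	alloc = alloc_iova(&host->iova, size >> shift,
			   host->iova_end >> shift, true);
	if (!alloc)
		return 0;

	/* zero bytes mapped (or an error) means failure, as checked above */
	if (iommu_map_sgtable(host->domain, iova_dma_addr(&host->iova, alloc),
			      sgt, IOMMU_READ) <= 0) {
		__free_iova(&host->iova, alloc);
		return 0;
	}

	return iova_dma_addr(&host->iova, alloc);
}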
278
279static int do_relocs(struct host1x_job *job, struct host1x_job_gather *g)
280{
281	void *cmdbuf_addr = NULL;
282	struct host1x_bo *cmdbuf = g->bo;
283	unsigned int i;
284
285	/* pin & patch the relocs for one gather */
286	for (i = 0; i < job->num_relocs; i++) {
287		struct host1x_reloc *reloc = &job->relocs[i];
288		u32 reloc_addr = (job->reloc_addr_phys[i] +
289				  reloc->target.offset) >> reloc->shift;
290		u32 *target;
291
292		/* skip all other gathers */
293		if (cmdbuf != reloc->cmdbuf.bo)
294			continue;
295
296		if (job->enable_firewall) {
297			target = (u32 *)job->gather_copy_mapped +
298					reloc->cmdbuf.offset / sizeof(u32) +
299						g->offset / sizeof(u32);
300			goto patch_reloc;
301		}
302
303		if (!cmdbuf_addr) {
304			cmdbuf_addr = host1x_bo_mmap(cmdbuf);
305
306			if (unlikely(!cmdbuf_addr)) {
307				pr_err("Could not map cmdbuf for relocation\n");
308				return -ENOMEM;
309			}
310		}
311
312		target = cmdbuf_addr + reloc->cmdbuf.offset;
313patch_reloc:
314		*target = reloc_addr;
315	}
316
317	if (cmdbuf_addr)
318		host1x_bo_munmap(cmdbuf, cmdbuf_addr);
319
320	return 0;
321}
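
/*
 * Illustrative sketch of the patch arithmetic in do_relocs(): the word
 * written into the command buffer is the pinned DMA address of the target
 * plus the offset into it, shifted right by the relocation's shift. The
 * values in the comment are hypothetical.
 */
static void example_patch(u32 *target, dma_addr_t pinned, u32 offset, u32 shift)
{
	/* e.g. pinned = 0x80001000, offset = 0x100, shift = 8 -> 0x800011 */
	*target = (u32)((pinned + offset) >> shift);
}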
322
323static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
324			unsigned int offset)
325{
326	offset *= sizeof(u32);
327
328	if (reloc->cmdbuf.bo != cmdbuf || reloc->cmdbuf.offset != offset)
329		return false;
330
331	/* relocation shift value validation isn't implemented yet */
332	if (reloc->shift)
333		return false;
334
335	return true;
336}
337
338struct host1x_firewall {
339	struct host1x_job *job;
340	struct device *dev;
341
342	unsigned int num_relocs;
343	struct host1x_reloc *reloc;
344
345	struct host1x_bo *cmdbuf;
346	unsigned int offset;
347
348	u32 words;
349	u32 class;
350	u32 reg;
351	u32 mask;
352	u32 count;
353};
354
355static int check_register(struct host1x_firewall *fw, unsigned long offset)
356{
357	if (!fw->job->is_addr_reg)
358		return 0;
359
360	if (fw->job->is_addr_reg(fw->dev, fw->class, offset)) {
361		if (!fw->num_relocs)
362			return -EINVAL;
363
364		if (!check_reloc(fw->reloc, fw->cmdbuf, fw->offset))
365			return -EINVAL;
366
367		fw->num_relocs--;
368		fw->reloc++;
369	}
370
371	return 0;
372}
373
374static int check_class(struct host1x_firewall *fw, u32 class)
375{
376	if (!fw->job->is_valid_class) {
377		if (fw->class != class)
378			return -EINVAL;
379	} else {
380		if (!fw->job->is_valid_class(fw->class))
381			return -EINVAL;
382	}
383
384	return 0;
385}
386
387static int check_mask(struct host1x_firewall *fw)
388{
389	u32 mask = fw->mask;
390	u32 reg = fw->reg;
391	int ret;
392
393	while (mask) {
394		if (fw->words == 0)
395			return -EINVAL;
396
397		if (mask & 1) {
398			ret = check_register(fw, reg);
399			if (ret < 0)
400				return ret;
401
402			fw->words--;
403			fw->offset++;
404		}
405		mask >>= 1;
406		reg++;
407	}
408
409	return 0;
410}
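
/*
 * Illustrative sketch: how check_mask() walks a write mask. Each set bit
 * selects one register offset relative to fw->reg and consumes one payload
 * word. With reg = 0x10 and mask = 0b101, registers 0x10 and 0x12 are
 * checked and two words are consumed.
 */
static void example_mask_walk(u32 reg, u32 mask)
{
	while (mask) {
		if (mask & 1)
			pr_info("payload word targets register %#x\n", reg);
		mask >>= 1;
		reg++;
	}
}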
411
412static int check_incr(struct host1x_firewall *fw)
413{
414	u32 count = fw->count;
415	u32 reg = fw->reg;
416	int ret;
417
418	while (count) {
419		if (fw->words == 0)
420			return -EINVAL;
421
422		ret = check_register(fw, reg);
423		if (ret < 0)
424			return ret;
425
426		reg++;
427		fw->words--;
428		fw->offset++;
429		count--;
430	}
431
432	return 0;
433}
434
435static int check_nonincr(struct host1x_firewall *fw)
436{
437	u32 count = fw->count;
438	int ret;
439
440	while (count) {
441		if (fw->words == 0)
442			return -EINVAL;
443
444		ret = check_register(fw, fw->reg);
445		if (ret < 0)
446			return ret;
447
448		fw->words--;
449		fw->offset++;
450		count--;
451	}
452
453	return 0;
454}
455
456static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
457{
458	u32 *cmdbuf_base = (u32 *)fw->job->gather_copy_mapped +
459		(g->offset / sizeof(u32));
460	u32 job_class = fw->class;
461	int err = 0;
462
463	fw->words = g->words;
464	fw->cmdbuf = g->bo;
465	fw->offset = 0;
466
467	while (fw->words && !err) {
468		u32 word = cmdbuf_base[fw->offset];
469		u32 opcode = (word & 0xf0000000) >> 28;
470
471		fw->mask = 0;
472		fw->reg = 0;
473		fw->count = 0;
474		fw->words--;
475		fw->offset++;
476
477		switch (opcode) {
478		case 0:
479			fw->class = word >> 6 & 0x3ff;
480			fw->mask = word & 0x3f;
481			fw->reg = word >> 16 & 0xfff;
482			err = check_class(fw, job_class);
483			if (!err)
484				err = check_mask(fw);
485			if (err)
486				goto out;
487			break;
488		case 1:
489			fw->reg = word >> 16 & 0xfff;
490			fw->count = word & 0xffff;
491			err = check_incr(fw);
492			if (err)
493				goto out;
494			break;
495
496		case 2:
497			fw->reg = word >> 16 & 0xfff;
498			fw->count = word & 0xffff;
499			err = check_nonincr(fw);
500			if (err)
501				goto out;
502			break;
503
504		case 3:
505			fw->mask = word & 0xffff;
506			fw->reg = word >> 16 & 0xfff;
507			err = check_mask(fw);
508			if (err)
509				goto out;
510			break;
511		case 4:
512		case 14:
513			break;
514		default:
515			err = -EINVAL;
516			break;
517		}
518	}
519
520out:
521	return err;
522}
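
/*
 * Illustrative sketch: the command-word layout that validate() decodes.
 * Every word carries a 4-bit opcode in bits 31:28; the remaining fields
 * depend on the opcode, matching the extraction in the switch above. The
 * opcode names in the comments follow the helper names (class, incr,
 * nonincr, mask).
 */
static void example_decode(u32 word)
{
	switch (word >> 28) {
	case 0:	/* set class */
		pr_info("class=%#x reg=%#x mask=%#x\n",
			word >> 6 & 0x3ff, word >> 16 & 0xfff, word & 0x3f);
		break;
	case 1:	/* incrementing register write */
	case 2:	/* non-incrementing register write */
		pr_info("reg=%#x count=%u\n", word >> 16 & 0xfff, word & 0xffff);
		break;
	case 3:	/* masked register write */
		pr_info("reg=%#x mask=%#x\n", word >> 16 & 0xfff, word & 0xffff);
		break;
	}
}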
523
524static inline int copy_gathers(struct device *host, struct host1x_job *job,
525			       struct device *dev)
526{
527	struct host1x_firewall fw;
528	size_t size = 0;
529	size_t offset = 0;
530	unsigned int i;
531
532	fw.job = job;
533	fw.dev = dev;
534	fw.reloc = job->relocs;
535	fw.num_relocs = job->num_relocs;
536	fw.class = job->class;
537
538	for (i = 0; i < job->num_cmds; i++) {
539		struct host1x_job_gather *g;
540
541		if (job->cmds[i].is_wait)
542			continue;
543
544		g = &job->cmds[i].gather;
545
546		size += g->words * sizeof(u32);
547	}
548
549	/*
550	 * Try a non-blocking allocation from the higher-priority pools first,
551	 * since waiting for the allocation here is a major performance hit.
552	 */
553	job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy,
554					       GFP_NOWAIT);
555
556	/* the higher-priority allocation failed; try a generic blocking one */
557	if (!job->gather_copy_mapped)
558		job->gather_copy_mapped = dma_alloc_wc(host, size,
559						       &job->gather_copy,
560						       GFP_KERNEL);
561	if (!job->gather_copy_mapped)
562		return -ENOMEM;
563
564	job->gather_copy_size = size;
565
566	for (i = 0; i < job->num_cmds; i++) {
567		struct host1x_job_gather *g;
568		void *gather;
569
570		if (job->cmds[i].is_wait)
571			continue;
572		g = &job->cmds[i].gather;
573
574		/* Copy the gather */
575		gather = host1x_bo_mmap(g->bo);
576		memcpy(job->gather_copy_mapped + offset, gather + g->offset,
577		       g->words * sizeof(u32));
578		host1x_bo_munmap(g->bo, gather);
579
580		/* Store the location in the buffer */
581		g->base = job->gather_copy;
582		g->offset = offset;
583
584		/* Validate the job */
585		if (validate(&fw, g))
586			return -EINVAL;
587
588		offset += g->words * sizeof(u32);
589	}
590
591	/* No relocs should remain at this point */
592	if (fw.num_relocs)
593		return -EINVAL;
594
595	return 0;
596}
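
/*
 * Illustrative sketch of the two-step allocation used above: try an
 * atomic, non-blocking allocation first and only fall back to a blocking
 * GFP_KERNEL attempt if it fails. "dev", "size" and "dma" are hypothetical
 * caller-owned values.
 */
static void *example_alloc_wc(struct device *dev, size_t size, dma_addr_t *dma)
{
	void *vaddr = dma_alloc_wc(dev, size, dma, GFP_NOWAIT);

	if (!vaddr)	/* the fast path failed; this attempt may sleep */
		vaddr = dma_alloc_wc(dev, size, dma, GFP_KERNEL);

	return vaddr;
}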
597
598int host1x_job_pin(struct host1x_job *job, struct device *dev)
599{
600	int err;
601	unsigned int i, j;
602	struct host1x *host = dev_get_drvdata(dev->parent);
603
604	/* pin memory */
605	err = pin_job(host, job);
606	if (err)
607		goto out;
608
609	if (job->enable_firewall) {
610		err = copy_gathers(host->dev, job, dev);
611		if (err)
612			goto out;
613	}
614
615	/* patch gathers */
616	for (i = 0; i < job->num_cmds; i++) {
617		struct host1x_job_gather *g;
618
619		if (job->cmds[i].is_wait)
620			continue;
621		g = &job->cmds[i].gather;
622
623		/* process each gather BO only once */
624		if (g->handled)
625			continue;
626
627		/* copy_gathers() sets the gather base if the firewall is enabled */
628		if (!job->enable_firewall)
629			g->base = job->gather_addr_phys[i];
630
631		for (j = i + 1; j < job->num_cmds; j++) {
632			if (!job->cmds[j].is_wait &&
633			    job->cmds[j].gather.bo == g->bo) {
634				job->cmds[j].gather.handled = true;
635				job->cmds[j].gather.base = g->base;
636			}
637		}
638
639		err = do_relocs(job, g);
640		if (err)
641			break;
642	}
643
644out:
645	if (err)
646		host1x_job_unpin(job);
647	wmb();
648
649	return err;
650}
651EXPORT_SYMBOL(host1x_job_pin);
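
/*
 * Illustrative sketch of a typical job lifecycle built from the exported
 * helpers in this file; error handling is abbreviated. "client", "channel"
 * and "bo" are hypothetical caller-owned objects, and host1x_job_submit()
 * is the submission entry point provided by the channel code.
 */
static int example_submit(struct host1x_client *client,
			  struct host1x_channel *channel,
			  struct host1x_bo *bo)
{
	struct host1x_job *job;
	int err;

	job = host1x_job_alloc(channel, 1, 0, false);
	if (!job)
		return -ENOMEM;

	job->client = client;	/* pin_job() maps relocations for client->dev */

	host1x_job_add_gather(job, bo, 128, 0);
	/* a syncpoint and increment count would also be assigned here */

	err = host1x_job_pin(job, client->dev);
	if (err)
		goto put;	/* host1x_job_pin() already unpinned on failure */

	err = host1x_job_submit(job);
	if (err)
		host1x_job_unpin(job);
put:
	host1x_job_put(job);
	return err;
}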
652
653void host1x_job_unpin(struct host1x_job *job)
654{
655	struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
656	unsigned int i;
657
658	for (i = 0; i < job->num_unpins; i++) {
659		struct host1x_bo_mapping *map = job->unpins[i].map;
660		struct host1x_bo *bo = map->bo;
661
662		if (!job->enable_firewall && map->size && host->domain) {
663			iommu_unmap(host->domain, job->addr_phys[i], map->size);
664			free_iova(&host->iova, iova_pfn(&host->iova, job->addr_phys[i]));
665		}
666
667		host1x_bo_unpin(map);
668		host1x_bo_put(bo);
669	}
670
671	job->num_unpins = 0;
672
673	if (job->gather_copy_size)
674		dma_free_wc(host->dev, job->gather_copy_size,
675			    job->gather_copy_mapped, job->gather_copy);
676}
677EXPORT_SYMBOL(host1x_job_unpin);
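
/*
 * Illustrative sketch: pairing rules for the helpers above. A failed
 * host1x_job_pin() has already run host1x_job_unpin() on its error path,
 * so callers must not unpin again; after a successful pin, exactly one
 * host1x_job_unpin() is owed once the hardware has consumed the job.
 */
static void example_finish(struct host1x_job *job, int pin_err)
{
	if (!pin_err)	/* pin failures are unpinned internally */
		host1x_job_unpin(job);

	host1x_job_put(job);	/* drop the caller's reference */
}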
678
679/*
680 * Debug routine used to dump job entries
681 */
682void host1x_job_dump(struct device *dev, struct host1x_job *job)
683{
684	dev_dbg(dev, "    SYNCPT_ID   %d\n", job->syncpt->id);
685	dev_dbg(dev, "    SYNCPT_VAL  %d\n", job->syncpt_end);
686	dev_dbg(dev, "    FIRST_GET   0x%x\n", job->first_get);
687	dev_dbg(dev, "    TIMEOUT     %d\n", job->timeout);
688	dev_dbg(dev, "    NUM_SLOTS   %d\n", job->num_slots);
689	dev_dbg(dev, "    NUM_HANDLES %d\n", job->num_unpins);
690}
v5.14.15
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Tegra host1x Job
  4 *
  5 * Copyright (c) 2010-2015, NVIDIA Corporation.
  6 */
  7
  8#include <linux/dma-mapping.h>
  9#include <linux/err.h>
 10#include <linux/host1x.h>
 11#include <linux/iommu.h>
 12#include <linux/kref.h>
 13#include <linux/module.h>
 14#include <linux/scatterlist.h>
 15#include <linux/slab.h>
 16#include <linux/vmalloc.h>
 17#include <trace/events/host1x.h>
 18
 19#include "channel.h"
 20#include "dev.h"
 21#include "job.h"
 22#include "syncpt.h"
 23
 24#define HOST1X_WAIT_SYNCPT_OFFSET 0x8
 25
 26struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
 27				    u32 num_cmdbufs, u32 num_relocs)
 28{
 29	struct host1x_job *job = NULL;
 30	unsigned int num_unpins = num_relocs;
 31	u64 total;
 32	void *mem;
 33
 34	if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
 35		num_unpins += num_cmdbufs;
 36
 37	/* Check that we're not going to overflow */
 38	total = sizeof(struct host1x_job) +
 39		(u64)num_relocs * sizeof(struct host1x_reloc) +
 40		(u64)num_unpins * sizeof(struct host1x_job_unpin_data) +
 41		(u64)num_cmdbufs * sizeof(struct host1x_job_gather) +
 42		(u64)num_unpins * sizeof(dma_addr_t) +
 43		(u64)num_unpins * sizeof(u32 *);
 44	if (total > ULONG_MAX)
 45		return NULL;
 46
 47	mem = job = kzalloc(total, GFP_KERNEL);
 48	if (!job)
 49		return NULL;
 50
 51	kref_init(&job->ref);
 52	job->channel = ch;
 53
 54	/* Redistribute memory to the structs */
 55	mem += sizeof(struct host1x_job);
 56	job->relocs = num_relocs ? mem : NULL;
 57	mem += num_relocs * sizeof(struct host1x_reloc);
 58	job->unpins = num_unpins ? mem : NULL;
 59	mem += num_unpins * sizeof(struct host1x_job_unpin_data);
 60	job->gathers = num_cmdbufs ? mem : NULL;
 61	mem += num_cmdbufs * sizeof(struct host1x_job_gather);
 62	job->addr_phys = num_unpins ? mem : NULL;
 63
 64	job->reloc_addr_phys = job->addr_phys;
 65	job->gather_addr_phys = &job->addr_phys[num_relocs];
 66
 67	return job;
 68}
 69EXPORT_SYMBOL(host1x_job_alloc);
 70
 71struct host1x_job *host1x_job_get(struct host1x_job *job)
 72{
 73	kref_get(&job->ref);
 74	return job;
 75}
 76EXPORT_SYMBOL(host1x_job_get);
 77
 78static void job_free(struct kref *ref)
 79{
 80	struct host1x_job *job = container_of(ref, struct host1x_job, ref);
 81
 82	if (job->syncpt)
 83		host1x_syncpt_put(job->syncpt);
 84
 85	kfree(job);
 86}
 87
 88void host1x_job_put(struct host1x_job *job)
 89{
 90	kref_put(&job->ref, job_free);
 91}
 92EXPORT_SYMBOL(host1x_job_put);
 93
 94void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
 95			   unsigned int words, unsigned int offset)
 96{
 97	struct host1x_job_gather *gather = &job->gathers[job->num_gathers];
 98
 99	gather->words = words;
100	gather->bo = bo;
101	gather->offset = offset;
102
103	job->num_gathers++;
104}
105EXPORT_SYMBOL(host1x_job_add_gather);
106
107static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
108{
109	struct host1x_client *client = job->client;
110	struct device *dev = client->dev;
111	struct host1x_job_gather *g;
112	struct iommu_domain *domain;
113	unsigned int i;
114	int err;
115
116	domain = iommu_get_domain_for_dev(dev);
117	job->num_unpins = 0;
118
119	for (i = 0; i < job->num_relocs; i++) {
120		struct host1x_reloc *reloc = &job->relocs[i];
121		dma_addr_t phys_addr, *phys;
122		struct sg_table *sgt;
123
124		reloc->target.bo = host1x_bo_get(reloc->target.bo);
125		if (!reloc->target.bo) {
126			err = -EINVAL;
127			goto unpin;
128		}
129
130		/*
131		 * If the client device is not attached to an IOMMU, the
132		 * physical address of the buffer object can be used.
133		 *
134		 * Similarly, when an IOMMU domain is shared between all
135		 * host1x clients, the IOVA is already available, so no
136		 * need to map the buffer object again.
137		 *
138		 * XXX Note that this isn't always safe to do because it
139		 * relies on an assumption that no cache maintenance is
140		 * needed on the buffer objects.
141		 */
142		if (!domain || client->group)
143			phys = &phys_addr;
144		else
145			phys = NULL;
146
147		sgt = host1x_bo_pin(dev, reloc->target.bo, phys);
148		if (IS_ERR(sgt)) {
149			err = PTR_ERR(sgt);
150			goto unpin;
151		}
152
153		if (sgt) {
154			unsigned long mask = HOST1X_RELOC_READ |
155					     HOST1X_RELOC_WRITE;
156			enum dma_data_direction dir;
157
158			switch (reloc->flags & mask) {
159			case HOST1X_RELOC_READ:
160				dir = DMA_TO_DEVICE;
161				break;
162
163			case HOST1X_RELOC_WRITE:
164				dir = DMA_FROM_DEVICE;
165				break;
166
167			case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE:
168				dir = DMA_BIDIRECTIONAL;
169				break;
170
171			default:
172				err = -EINVAL;
173				goto unpin;
174			}
175
176			err = dma_map_sgtable(dev, sgt, dir, 0);
177			if (err)
178				goto unpin;
179
180			job->unpins[job->num_unpins].dev = dev;
181			job->unpins[job->num_unpins].dir = dir;
182			phys_addr = sg_dma_address(sgt->sgl);
183		}
184
185		job->addr_phys[job->num_unpins] = phys_addr;
186		job->unpins[job->num_unpins].bo = reloc->target.bo;
187		job->unpins[job->num_unpins].sgt = sgt;
188		job->num_unpins++;
189	}
190
191	/*
192	 * We will copy the contents of the gather BOs later, so there is
193	 * no need to hold and pin them.
194	 */
195	if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
196		return 0;
197
198	for (i = 0; i < job->num_gathers; i++) {
199		size_t gather_size = 0;
200		struct scatterlist *sg;
201		struct sg_table *sgt;
202		dma_addr_t phys_addr;
203		unsigned long shift;
204		struct iova *alloc;
205		dma_addr_t *phys;
206		unsigned int j;
207
208		g = &job->gathers[i];
209		g->bo = host1x_bo_get(g->bo);
210		if (!g->bo) {
211			err = -EINVAL;
212			goto unpin;
213		}
214
215		/*
216		 * If the host1x is not attached to an IOMMU, there is no need
217		 * to map the buffer object for the host1x, since the physical
218		 * address can simply be used.
219		 */
220		if (!iommu_get_domain_for_dev(host->dev))
221			phys = &phys_addr;
222		else
223			phys = NULL;
224
225		sgt = host1x_bo_pin(host->dev, g->bo, phys);
226		if (IS_ERR(sgt)) {
227			err = PTR_ERR(sgt);
228			goto put;
229		}
230
231		if (host->domain) {
232			for_each_sgtable_sg(sgt, sg, j)
233				gather_size += sg->length;
234			gather_size = iova_align(&host->iova, gather_size);
235
236			shift = iova_shift(&host->iova);
237			alloc = alloc_iova(&host->iova, gather_size >> shift,
238					   host->iova_end >> shift, true);
239			if (!alloc) {
240				err = -ENOMEM;
241				goto put;
242			}
243
244			err = iommu_map_sgtable(host->domain,
245					iova_dma_addr(&host->iova, alloc),
246					sgt, IOMMU_READ);
247			if (err == 0) {
248				__free_iova(&host->iova, alloc);
249				err = -EINVAL;
250				goto put;
251			}
252
253			job->unpins[job->num_unpins].size = gather_size;
254			phys_addr = iova_dma_addr(&host->iova, alloc);
255		} else if (sgt) {
256			err = dma_map_sgtable(host->dev, sgt, DMA_TO_DEVICE, 0);
257			if (err)
258				goto put;
259
260			job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
261			job->unpins[job->num_unpins].dev = host->dev;
262			phys_addr = sg_dma_address(sgt->sgl);
263		}
264
265		job->addr_phys[job->num_unpins] = phys_addr;
266		job->gather_addr_phys[i] = phys_addr;
267
268		job->unpins[job->num_unpins].bo = g->bo;
269		job->unpins[job->num_unpins].sgt = sgt;
270		job->num_unpins++;
271	}
272
273	return 0;
274
275put:
276	host1x_bo_put(g->bo);
277unpin:
278	host1x_job_unpin(job);
279	return err;
280}
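
/*
 * Illustrative sketch of the streaming-DMA pattern this older version
 * uses above: the SG table returned by host1x_bo_pin() is mapped with
 * dma_map_sgtable() and must later be unmapped with the same device and
 * direction, as host1x_job_unpin() does below. "dev", "sgt" and "dma" are
 * hypothetical caller-owned values.
 */
static int example_map(struct device *dev, struct sg_table *sgt,
		       dma_addr_t *dma)
{
	int err = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);

	if (err)
		return err;

	*dma = sg_dma_address(sgt->sgl);	/* first mapped chunk */

	/* ... later: dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0); */
	return 0;
}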
281
282static int do_relocs(struct host1x_job *job, struct host1x_job_gather *g)
283{
284	void *cmdbuf_addr = NULL;
285	struct host1x_bo *cmdbuf = g->bo;
286	unsigned int i;
287
288	/* pin & patch the relocs for one gather */
289	for (i = 0; i < job->num_relocs; i++) {
290		struct host1x_reloc *reloc = &job->relocs[i];
291		u32 reloc_addr = (job->reloc_addr_phys[i] +
292				  reloc->target.offset) >> reloc->shift;
293		u32 *target;
294
295		/* skip all other gathers */
296		if (cmdbuf != reloc->cmdbuf.bo)
297			continue;
298
299		if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
300			target = (u32 *)job->gather_copy_mapped +
301					reloc->cmdbuf.offset / sizeof(u32) +
302						g->offset / sizeof(u32);
303			goto patch_reloc;
304		}
305
306		if (!cmdbuf_addr) {
307			cmdbuf_addr = host1x_bo_mmap(cmdbuf);
308
309			if (unlikely(!cmdbuf_addr)) {
310				pr_err("Could not map cmdbuf for relocation\n");
311				return -ENOMEM;
312			}
313		}
314
315		target = cmdbuf_addr + reloc->cmdbuf.offset;
316patch_reloc:
317		*target = reloc_addr;
318	}
319
320	if (cmdbuf_addr)
321		host1x_bo_munmap(cmdbuf, cmdbuf_addr);
322
323	return 0;
324}
325
326static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
327			unsigned int offset)
328{
329	offset *= sizeof(u32);
330
331	if (reloc->cmdbuf.bo != cmdbuf || reloc->cmdbuf.offset != offset)
332		return false;
333
334	/* relocation shift value validation isn't implemented yet */
335	if (reloc->shift)
336		return false;
337
338	return true;
339}
340
341struct host1x_firewall {
342	struct host1x_job *job;
343	struct device *dev;
344
345	unsigned int num_relocs;
346	struct host1x_reloc *reloc;
347
348	struct host1x_bo *cmdbuf;
349	unsigned int offset;
350
351	u32 words;
352	u32 class;
353	u32 reg;
354	u32 mask;
355	u32 count;
356};
357
358static int check_register(struct host1x_firewall *fw, unsigned long offset)
359{
360	if (!fw->job->is_addr_reg)
361		return 0;
362
363	if (fw->job->is_addr_reg(fw->dev, fw->class, offset)) {
364		if (!fw->num_relocs)
365			return -EINVAL;
366
367		if (!check_reloc(fw->reloc, fw->cmdbuf, fw->offset))
368			return -EINVAL;
369
370		fw->num_relocs--;
371		fw->reloc++;
372	}
373
374	return 0;
375}
376
377static int check_class(struct host1x_firewall *fw, u32 class)
378{
379	if (!fw->job->is_valid_class) {
380		if (fw->class != class)
381			return -EINVAL;
382	} else {
383		if (!fw->job->is_valid_class(fw->class))
384			return -EINVAL;
385	}
386
387	return 0;
388}
389
390static int check_mask(struct host1x_firewall *fw)
391{
392	u32 mask = fw->mask;
393	u32 reg = fw->reg;
394	int ret;
395
396	while (mask) {
397		if (fw->words == 0)
398			return -EINVAL;
399
400		if (mask & 1) {
401			ret = check_register(fw, reg);
402			if (ret < 0)
403				return ret;
404
405			fw->words--;
406			fw->offset++;
407		}
408		mask >>= 1;
409		reg++;
410	}
411
412	return 0;
413}
414
415static int check_incr(struct host1x_firewall *fw)
416{
417	u32 count = fw->count;
418	u32 reg = fw->reg;
419	int ret;
420
421	while (count) {
422		if (fw->words == 0)
423			return -EINVAL;
424
425		ret = check_register(fw, reg);
426		if (ret < 0)
427			return ret;
428
429		reg++;
430		fw->words--;
431		fw->offset++;
432		count--;
433	}
434
435	return 0;
436}
437
438static int check_nonincr(struct host1x_firewall *fw)
439{
440	u32 count = fw->count;
441	int ret;
442
443	while (count) {
444		if (fw->words == 0)
445			return -EINVAL;
446
447		ret = check_register(fw, fw->reg);
448		if (ret < 0)
449			return ret;
450
451		fw->words--;
452		fw->offset++;
453		count--;
454	}
455
456	return 0;
457}
458
459static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
460{
461	u32 *cmdbuf_base = (u32 *)fw->job->gather_copy_mapped +
462		(g->offset / sizeof(u32));
463	u32 job_class = fw->class;
464	int err = 0;
465
466	fw->words = g->words;
467	fw->cmdbuf = g->bo;
468	fw->offset = 0;
469
470	while (fw->words && !err) {
471		u32 word = cmdbuf_base[fw->offset];
472		u32 opcode = (word & 0xf0000000) >> 28;
473
474		fw->mask = 0;
475		fw->reg = 0;
476		fw->count = 0;
477		fw->words--;
478		fw->offset++;
479
480		switch (opcode) {
481		case 0:
482			fw->class = word >> 6 & 0x3ff;
483			fw->mask = word & 0x3f;
484			fw->reg = word >> 16 & 0xfff;
485			err = check_class(fw, job_class);
486			if (!err)
487				err = check_mask(fw);
488			if (err)
489				goto out;
490			break;
491		case 1:
492			fw->reg = word >> 16 & 0xfff;
493			fw->count = word & 0xffff;
494			err = check_incr(fw);
495			if (err)
496				goto out;
497			break;
498
499		case 2:
500			fw->reg = word >> 16 & 0xfff;
501			fw->count = word & 0xffff;
502			err = check_nonincr(fw);
503			if (err)
504				goto out;
505			break;
506
507		case 3:
508			fw->mask = word & 0xffff;
509			fw->reg = word >> 16 & 0xfff;
510			err = check_mask(fw);
511			if (err)
512				goto out;
513			break;
514		case 4:
515		case 14:
516			break;
517		default:
518			err = -EINVAL;
519			break;
520		}
521	}
522
523out:
524	return err;
525}
526
527static inline int copy_gathers(struct device *host, struct host1x_job *job,
528			       struct device *dev)
529{
530	struct host1x_firewall fw;
531	size_t size = 0;
532	size_t offset = 0;
533	unsigned int i;
534
535	fw.job = job;
536	fw.dev = dev;
537	fw.reloc = job->relocs;
538	fw.num_relocs = job->num_relocs;
539	fw.class = job->class;
540
541	for (i = 0; i < job->num_gathers; i++) {
542		struct host1x_job_gather *g = &job->gathers[i];
543
544		size += g->words * sizeof(u32);
545	}
546
547	/*
548	 * Try a non-blocking allocation from the higher-priority pools first,
549	 * since waiting for the allocation here is a major performance hit.
550	 */
551	job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy,
552					       GFP_NOWAIT);
553
554	/* the higher-priority allocation failed; try a generic blocking one */
555	if (!job->gather_copy_mapped)
556		job->gather_copy_mapped = dma_alloc_wc(host, size,
557						       &job->gather_copy,
558						       GFP_KERNEL);
559	if (!job->gather_copy_mapped)
560		return -ENOMEM;
561
562	job->gather_copy_size = size;
563
564	for (i = 0; i < job->num_gathers; i++) {
565		struct host1x_job_gather *g = &job->gathers[i];
566		void *gather;
567
568		/* Copy the gather */
569		gather = host1x_bo_mmap(g->bo);
570		memcpy(job->gather_copy_mapped + offset, gather + g->offset,
571		       g->words * sizeof(u32));
572		host1x_bo_munmap(g->bo, gather);
573
574		/* Store the location in the buffer */
575		g->base = job->gather_copy;
576		g->offset = offset;
577
578		/* Validate the job */
579		if (validate(&fw, g))
580			return -EINVAL;
581
582		offset += g->words * sizeof(u32);
583	}
584
585	/* No relocs should remain at this point */
586	if (fw.num_relocs)
587		return -EINVAL;
588
589	return 0;
590}
591
592int host1x_job_pin(struct host1x_job *job, struct device *dev)
593{
594	int err;
595	unsigned int i, j;
596	struct host1x *host = dev_get_drvdata(dev->parent);
597
598	/* pin memory */
599	err = pin_job(host, job);
600	if (err)
601		goto out;
602
603	if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
604		err = copy_gathers(host->dev, job, dev);
605		if (err)
606			goto out;
607	}
608
609	/* patch gathers */
610	for (i = 0; i < job->num_gathers; i++) {
611		struct host1x_job_gather *g = &job->gathers[i];
612
613		/* process each gather BO only once */
614		if (g->handled)
615			continue;
616
617		/* copy_gathers() sets the gather base if the firewall is enabled */
618		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
619			g->base = job->gather_addr_phys[i];
620
621		for (j = i + 1; j < job->num_gathers; j++) {
622			if (job->gathers[j].bo == g->bo) {
623				job->gathers[j].handled = true;
624				job->gathers[j].base = g->base;
625			}
626		}
627
628		err = do_relocs(job, g);
629		if (err)
630			break;
631	}
632
633out:
634	if (err)
635		host1x_job_unpin(job);
636	wmb();
637
638	return err;
639}
640EXPORT_SYMBOL(host1x_job_pin);
641
642void host1x_job_unpin(struct host1x_job *job)
643{
644	struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
645	unsigned int i;
646
647	for (i = 0; i < job->num_unpins; i++) {
648		struct host1x_job_unpin_data *unpin = &job->unpins[i];
649		struct device *dev = unpin->dev ?: host->dev;
650		struct sg_table *sgt = unpin->sgt;
651
652		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) &&
653		    unpin->size && host->domain) {
654			iommu_unmap(host->domain, job->addr_phys[i],
655				    unpin->size);
656			free_iova(&host->iova,
657				iova_pfn(&host->iova, job->addr_phys[i]));
658		}
659
660		if (unpin->dev && sgt)
661			dma_unmap_sgtable(unpin->dev, sgt, unpin->dir, 0);
662
663		host1x_bo_unpin(dev, unpin->bo, sgt);
664		host1x_bo_put(unpin->bo);
665	}
666
667	job->num_unpins = 0;
668
669	if (job->gather_copy_size)
670		dma_free_wc(host->dev, job->gather_copy_size,
671			    job->gather_copy_mapped, job->gather_copy);
672}
673EXPORT_SYMBOL(host1x_job_unpin);
674
675/*
676 * Debug routine used to dump job entries
677 */
678void host1x_job_dump(struct device *dev, struct host1x_job *job)
679{
680	dev_dbg(dev, "    SYNCPT_ID   %d\n", job->syncpt->id);
681	dev_dbg(dev, "    SYNCPT_VAL  %d\n", job->syncpt_end);
682	dev_dbg(dev, "    FIRST_GET   0x%x\n", job->first_get);
683	dev_dbg(dev, "    TIMEOUT     %d\n", job->timeout);
684	dev_dbg(dev, "    NUM_SLOTS   %d\n", job->num_slots);
685	dev_dbg(dev, "    NUM_HANDLES %d\n", job->num_unpins);
686}