// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
// Copyright (c) 2018, Linaro Limited

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/dma-resv.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of.h>
#include <linux/sort.h>
#include <linux/of_platform.h>
#include <linux/rpmsg.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/qcom_scm.h>
#include <uapi/misc/fastrpc.h>
#include <linux/of_reserved_mem.h>

#define ADSP_DOMAIN_ID (0)
#define MDSP_DOMAIN_ID (1)
#define SDSP_DOMAIN_ID (2)
#define CDSP_DOMAIN_ID (3)
#define FASTRPC_DEV_MAX		4 /* adsp, mdsp, sdsp, cdsp */
#define FASTRPC_MAX_SESSIONS	14
#define FASTRPC_MAX_VMIDS	16
#define FASTRPC_ALIGN		128
#define FASTRPC_MAX_FDLIST	16
#define FASTRPC_MAX_CRCLIST	64
#define FASTRPC_PHYS(p)		((p) & 0xffffffff)
#define FASTRPC_CTX_MAX		(256)
#define FASTRPC_INIT_HANDLE	1
#define FASTRPC_DSP_UTILITIES_HANDLE	2
#define FASTRPC_CTXID_MASK	(0xFF0)
#define INIT_FILELEN_MAX	(2 * 1024 * 1024)
#define INIT_FILE_NAMELEN_MAX	(128)
#define FASTRPC_DEVICE_NAME	"fastrpc"

/* Add memory to static PD pool, protection through XPU */
#define ADSP_MMAP_HEAP_ADDR		4
/* Map static DMA buffer on DSP User PD */
#define ADSP_MMAP_DMA_BUFFER		6
/* Add memory to static PD pool, protection through hypervisor */
#define ADSP_MMAP_REMOTE_HEAP_ADDR	8
/* Add memory to userPD pool, for user heap */
#define ADSP_MMAP_ADD_PAGES		0x1000
/* Add memory to userPD pool, for LLC heap */
#define ADSP_MMAP_ADD_PAGES_LLC		0x3000

#define DSP_UNSUPPORTED_API (0x80000414)
/* Maximum number of DSP attributes supported */
#define FASTRPC_MAX_DSP_ATTRIBUTES (256)
#define FASTRPC_MAX_DSP_ATTRIBUTES_LEN (sizeof(u32) * FASTRPC_MAX_DSP_ATTRIBUTES)

/* Retrieves the number of input buffers from the scalars parameter */
#define REMOTE_SCALARS_INBUFS(sc)	(((sc) >> 16) & 0x0ff)

/* Retrieves the number of output buffers from the scalars parameter */
#define REMOTE_SCALARS_OUTBUFS(sc)	(((sc) >> 8) & 0x0ff)

/* Retrieves the number of input handles from the scalars parameter */
#define REMOTE_SCALARS_INHANDLES(sc)	(((sc) >> 4) & 0x0f)

/* Retrieves the number of output handles from the scalars parameter */
#define REMOTE_SCALARS_OUTHANDLES(sc)	((sc) & 0x0f)

#define REMOTE_SCALARS_LENGTH(sc)	(REMOTE_SCALARS_INBUFS(sc) +	\
					 REMOTE_SCALARS_OUTBUFS(sc) +	\
					 REMOTE_SCALARS_INHANDLES(sc) +	\
					 REMOTE_SCALARS_OUTHANDLES(sc))
#define FASTRPC_BUILD_SCALARS(attr, method, in, out, oin, oout)	\
					((((attr) & 0x07) << 29) |	\
					(((method) & 0x1f) << 24) |	\
					(((in) & 0xff) << 16) |		\
					(((out) & 0xff) << 8) |		\
					(((oin) & 0x0f) << 4) |		\
					((oout) & 0x0f))

#define FASTRPC_SCALARS(method, in, out) \
		FASTRPC_BUILD_SCALARS(0, method, in, out, 0, 0)

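/*
 * Example: FASTRPC_SCALARS(4, 2, 1) packs method 4 with two input
 * buffers and one output buffer into 0x04020100, and
 * REMOTE_SCALARS_LENGTH() of that value recovers 2 + 1 + 0 + 0 = 3
 * remote arguments.
 */
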
#define FASTRPC_CREATE_PROCESS_NARGS	6
#define FASTRPC_CREATE_STATIC_PROCESS_NARGS	3
/* Remote Method id table */
#define FASTRPC_RMID_INIT_ATTACH	0
#define FASTRPC_RMID_INIT_RELEASE	1
#define FASTRPC_RMID_INIT_MMAP		4
#define FASTRPC_RMID_INIT_MUNMAP	5
#define FASTRPC_RMID_INIT_CREATE	6
#define FASTRPC_RMID_INIT_CREATE_ATTR	7
#define FASTRPC_RMID_INIT_CREATE_STATIC	8
#define FASTRPC_RMID_INIT_MEM_MAP	10
#define FASTRPC_RMID_INIT_MEM_UNMAP	11

/* Protection Domain (PD) ids */
#define ROOT_PD		(0)
#define USER_PD		(1)
#define SENSORS_PD	(2)

#define miscdev_to_fdevice(d) container_of(d, struct fastrpc_device, miscdev)

static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
						"sdsp", "cdsp"};

struct fastrpc_phy_page {
	u64 addr;		/* physical address */
	u64 size;		/* size of contiguous region */
};

struct fastrpc_invoke_buf {
	u32 num;		/* number of contiguous regions */
	u32 pgidx;		/* index to start of contiguous region */
};

struct fastrpc_remote_dmahandle {
	s32 fd;			/* dma handle fd */
	u32 offset;		/* dma handle offset */
	u32 len;		/* dma handle length */
};

struct fastrpc_remote_buf {
	u64 pv;			/* buffer pointer */
	u64 len;		/* length of buffer */
};

union fastrpc_remote_arg {
	struct fastrpc_remote_buf buf;
	struct fastrpc_remote_dmahandle dma;
};

struct fastrpc_mmap_rsp_msg {
	u64 vaddr;
};

struct fastrpc_mmap_req_msg {
	s32 pgid;
	u32 flags;
	u64 vaddr;
	s32 num;
};

struct fastrpc_mem_map_req_msg {
	s32 pgid;
	s32 fd;
	s32 offset;
	u32 flags;
	u64 vaddrin;
	s32 num;
	s32 data_len;
};

struct fastrpc_munmap_req_msg {
	s32 pgid;
	u64 vaddr;
	u64 size;
};

struct fastrpc_mem_unmap_req_msg {
	s32 pgid;
	s32 fd;
	u64 vaddrin;
	u64 len;
};

struct fastrpc_msg {
	int pid;		/* process group id */
	int tid;		/* thread id */
	u64 ctx;		/* invoke caller context */
	u32 handle;		/* handle to invoke */
	u32 sc;			/* scalars structure describing the data */
	u64 addr;		/* physical address */
	u64 size;		/* size of contiguous region */
};

struct fastrpc_invoke_rsp {
	u64 ctx;		/* invoke caller context */
	int retval;		/* invoke return value */
};

struct fastrpc_buf_overlap {
	u64 start;
	u64 end;
	int raix;
	u64 mstart;
	u64 mend;
	u64 offset;
};

struct fastrpc_buf {
	struct fastrpc_user *fl;
	struct dma_buf *dmabuf;
	struct device *dev;
	void *virt;
	u64 phys;
	u64 size;
	/* Lock for dma buf attachments */
	struct mutex lock;
	struct list_head attachments;
	/* mmap support */
	struct list_head node; /* list of user requested mmaps */
	uintptr_t raddr;
};

struct fastrpc_dma_buf_attachment {
	struct device *dev;
	struct sg_table sgt;
	struct list_head node;
};

struct fastrpc_map {
	struct list_head node;
	struct fastrpc_user *fl;
	int fd;
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	u64 phys;
	u64 size;
	void *va;
	u64 len;
	u64 raddr;
	u32 attr;
	struct kref refcount;
};

struct fastrpc_invoke_ctx {
	int nscalars;
	int nbufs;
	int retval;
	int pid;
	int tgid;
	u32 sc;
	u32 *crc;
	u64 ctxid;
	u64 msg_sz;
	struct kref refcount;
	struct list_head node; /* list of ctxs */
	struct completion work;
	struct work_struct put_work;
	struct fastrpc_msg msg;
	struct fastrpc_user *fl;
	union fastrpc_remote_arg *rpra;
	struct fastrpc_map **maps;
	struct fastrpc_buf *buf;
	struct fastrpc_invoke_args *args;
	struct fastrpc_buf_overlap *olaps;
	struct fastrpc_channel_ctx *cctx;
};

struct fastrpc_session_ctx {
	struct device *dev;
	int sid;
	bool used;
	bool valid;
};

struct fastrpc_channel_ctx {
	int domain_id;
	int sesscount;
	int vmcount;
	u32 perms;
	struct qcom_scm_vmperm vmperms[FASTRPC_MAX_VMIDS];
	struct rpmsg_device *rpdev;
	struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS];
	spinlock_t lock;
	struct idr ctx_idr;
	struct list_head users;
	struct kref refcount;
	/* Flag if dsp attributes are cached */
	bool valid_attributes;
	u32 dsp_attributes[FASTRPC_MAX_DSP_ATTRIBUTES];
	struct fastrpc_device *secure_fdevice;
	struct fastrpc_device *fdevice;
	struct fastrpc_buf *remote_heap;
	struct list_head invoke_interrupted_mmaps;
	bool secure;
	bool unsigned_support;
	u64 dma_mask;
};

struct fastrpc_device {
	struct fastrpc_channel_ctx *cctx;
	struct miscdevice miscdev;
	bool secure;
};

struct fastrpc_user {
	struct list_head user;
	struct list_head maps;
	struct list_head pending;
	struct list_head mmaps;

	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_session_ctx *sctx;
	struct fastrpc_buf *init_mem;

	int tgid;
	int pd;
	bool is_secure_dev;
	/* Lock for lists */
	spinlock_t lock;
	/* lock for allocations */
	struct mutex mutex;
};

static void fastrpc_free_map(struct kref *ref)
{
	struct fastrpc_map *map;

	map = container_of(ref, struct fastrpc_map, refcount);

	if (map->table) {
		if (map->attr & FASTRPC_ATTR_SECUREMAP) {
			struct qcom_scm_vmperm perm;
			int err = 0;

			perm.vmid = QCOM_SCM_VMID_HLOS;
			perm.perm = QCOM_SCM_PERM_RWX;
			err = qcom_scm_assign_mem(map->phys, map->size,
						  &map->fl->cctx->perms, &perm, 1);
			if (err) {
				dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
					map->phys, map->size, err);
				return;
			}
		}
		dma_buf_unmap_attachment_unlocked(map->attach, map->table,
						  DMA_BIDIRECTIONAL);
		dma_buf_detach(map->buf, map->attach);
		dma_buf_put(map->buf);
	}

	if (map->fl) {
		spin_lock(&map->fl->lock);
		list_del(&map->node);
		spin_unlock(&map->fl->lock);
		map->fl = NULL;
	}

	kfree(map);
}

static void fastrpc_map_put(struct fastrpc_map *map)
{
	if (map)
		kref_put(&map->refcount, fastrpc_free_map);
}

static int fastrpc_map_get(struct fastrpc_map *map)
{
	if (!map)
		return -ENOENT;

	return kref_get_unless_zero(&map->refcount) ? 0 : -ENOENT;
}

static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd,
			      struct fastrpc_map **ppmap, bool take_ref)
{
	struct fastrpc_session_ctx *sess = fl->sctx;
	struct fastrpc_map *map = NULL;
	int ret = -ENOENT;

	spin_lock(&fl->lock);
	list_for_each_entry(map, &fl->maps, node) {
		if (map->fd != fd)
			continue;

		if (take_ref) {
			ret = fastrpc_map_get(map);
			if (ret) {
				dev_dbg(sess->dev, "%s: Failed to get map fd=%d ret=%d\n",
					__func__, fd, ret);
				break;
			}
		}

		*ppmap = map;
		ret = 0;
		break;
	}
	spin_unlock(&fl->lock);

	return ret;
}

static void fastrpc_buf_free(struct fastrpc_buf *buf)
{
	dma_free_coherent(buf->dev, buf->size, buf->virt,
			  FASTRPC_PHYS(buf->phys));
	kfree(buf);
}

static int __fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
			       u64 size, struct fastrpc_buf **obuf)
{
	struct fastrpc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	INIT_LIST_HEAD(&buf->attachments);
	INIT_LIST_HEAD(&buf->node);
	mutex_init(&buf->lock);

	buf->fl = fl;
	buf->virt = NULL;
	buf->phys = 0;
	buf->size = size;
	buf->dev = dev;
	buf->raddr = 0;

	buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys,
				       GFP_KERNEL);
	if (!buf->virt) {
		mutex_destroy(&buf->lock);
		kfree(buf);
		return -ENOMEM;
	}

	*obuf = buf;

	return 0;
}

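/*
 * The session id (SMMU context bank) is carried in the upper 32 bits
 * of buf->phys; FASTRPC_PHYS() strips it again wherever the plain
 * 32-bit physical address is needed.
 */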
static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
			     u64 size, struct fastrpc_buf **obuf)
{
	int ret;
	struct fastrpc_buf *buf;

	ret = __fastrpc_buf_alloc(fl, dev, size, obuf);
	if (ret)
		return ret;

	buf = *obuf;

	if (fl->sctx && fl->sctx->sid)
		buf->phys += ((u64)fl->sctx->sid << 32);

	return 0;
}

static int fastrpc_remote_heap_alloc(struct fastrpc_user *fl, struct device *dev,
				     u64 size, struct fastrpc_buf **obuf)
{
	struct device *rdev = &fl->cctx->rpdev->dev;

	return __fastrpc_buf_alloc(fl, rdev, size, obuf);
}

static void fastrpc_channel_ctx_free(struct kref *ref)
{
	struct fastrpc_channel_ctx *cctx;

	cctx = container_of(ref, struct fastrpc_channel_ctx, refcount);

	kfree(cctx);
}

static void fastrpc_channel_ctx_get(struct fastrpc_channel_ctx *cctx)
{
	kref_get(&cctx->refcount);
}

static void fastrpc_channel_ctx_put(struct fastrpc_channel_ctx *cctx)
{
	kref_put(&cctx->refcount, fastrpc_channel_ctx_free);
}

static void fastrpc_context_free(struct kref *ref)
{
	struct fastrpc_invoke_ctx *ctx;
	struct fastrpc_channel_ctx *cctx;
	unsigned long flags;
	int i;

	ctx = container_of(ref, struct fastrpc_invoke_ctx, refcount);
	cctx = ctx->cctx;

	for (i = 0; i < ctx->nbufs; i++)
		fastrpc_map_put(ctx->maps[i]);

	if (ctx->buf)
		fastrpc_buf_free(ctx->buf);

	spin_lock_irqsave(&cctx->lock, flags);
	idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4);
	spin_unlock_irqrestore(&cctx->lock, flags);

	kfree(ctx->maps);
	kfree(ctx->olaps);
	kfree(ctx);

	fastrpc_channel_ctx_put(cctx);
}

static void fastrpc_context_get(struct fastrpc_invoke_ctx *ctx)
{
	kref_get(&ctx->refcount);
}

static void fastrpc_context_put(struct fastrpc_invoke_ctx *ctx)
{
	kref_put(&ctx->refcount, fastrpc_context_free);
}

static void fastrpc_context_put_wq(struct work_struct *work)
{
	struct fastrpc_invoke_ctx *ctx =
			container_of(work, struct fastrpc_invoke_ctx, put_work);

	fastrpc_context_put(ctx);
}

#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
static int olaps_cmp(const void *a, const void *b)
{
	struct fastrpc_buf_overlap *pa = (struct fastrpc_buf_overlap *)a;
	struct fastrpc_buf_overlap *pb = (struct fastrpc_buf_overlap *)b;
	/* sort with lowest starting buffer first */
	int st = CMP(pa->start, pb->start);
	/* sort with highest ending buffer first */
	int ed = CMP(pb->end, pa->end);

	return st == 0 ? ed : st;
}

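/*
 * Determine, for each argument buffer, the sub-range that is not
 * already covered by a lower-starting buffer. Overlapping user buffers
 * are copied into the payload only once: a buffer fully contained in
 * an earlier one ends up with mstart == mend == 0 and contributes no
 * extra payload bytes.
 */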
static void fastrpc_get_buff_overlaps(struct fastrpc_invoke_ctx *ctx)
{
	u64 max_end = 0;
	int i;

	for (i = 0; i < ctx->nbufs; ++i) {
		ctx->olaps[i].start = ctx->args[i].ptr;
		ctx->olaps[i].end = ctx->olaps[i].start + ctx->args[i].length;
		ctx->olaps[i].raix = i;
	}

	sort(ctx->olaps, ctx->nbufs, sizeof(*ctx->olaps), olaps_cmp, NULL);

	for (i = 0; i < ctx->nbufs; ++i) {
		/* Falling inside previous range */
		if (ctx->olaps[i].start < max_end) {
			ctx->olaps[i].mstart = max_end;
			ctx->olaps[i].mend = ctx->olaps[i].end;
			ctx->olaps[i].offset = max_end - ctx->olaps[i].start;

			if (ctx->olaps[i].end > max_end) {
				max_end = ctx->olaps[i].end;
			} else {
				ctx->olaps[i].mend = 0;
				ctx->olaps[i].mstart = 0;
			}

		} else {
			ctx->olaps[i].mend = ctx->olaps[i].end;
			ctx->olaps[i].mstart = ctx->olaps[i].start;
			ctx->olaps[i].offset = 0;
			max_end = ctx->olaps[i].end;
		}
	}
}

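/*
 * Context ids pack the idr slot into bits [11:4] (hence
 * FASTRPC_CTXID_MASK of 0xFF0 for up to FASTRPC_CTX_MAX contexts),
 * leaving the low nibble free for the destination PD that
 * fastrpc_invoke_send() ORs into msg->ctx.
 */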
static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
			struct fastrpc_user *user, u32 kernel, u32 sc,
			struct fastrpc_invoke_args *args)
{
	struct fastrpc_channel_ctx *cctx = user->cctx;
	struct fastrpc_invoke_ctx *ctx = NULL;
	unsigned long flags;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ctx->node);
	ctx->fl = user;
	ctx->nscalars = REMOTE_SCALARS_LENGTH(sc);
	ctx->nbufs = REMOTE_SCALARS_INBUFS(sc) +
		     REMOTE_SCALARS_OUTBUFS(sc);

	if (ctx->nscalars) {
		ctx->maps = kcalloc(ctx->nscalars,
				    sizeof(*ctx->maps), GFP_KERNEL);
		if (!ctx->maps) {
			kfree(ctx);
			return ERR_PTR(-ENOMEM);
		}
		ctx->olaps = kcalloc(ctx->nscalars,
				     sizeof(*ctx->olaps), GFP_KERNEL);
		if (!ctx->olaps) {
			kfree(ctx->maps);
			kfree(ctx);
			return ERR_PTR(-ENOMEM);
		}
		ctx->args = args;
		fastrpc_get_buff_overlaps(ctx);
	}

	/* Released in fastrpc_context_put() */
	fastrpc_channel_ctx_get(cctx);

	ctx->sc = sc;
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = user->tgid;
	ctx->cctx = cctx;
	init_completion(&ctx->work);
	INIT_WORK(&ctx->put_work, fastrpc_context_put_wq);

	spin_lock(&user->lock);
	list_add_tail(&ctx->node, &user->pending);
	spin_unlock(&user->lock);

	spin_lock_irqsave(&cctx->lock, flags);
	ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1,
			       FASTRPC_CTX_MAX, GFP_ATOMIC);
	if (ret < 0) {
		spin_unlock_irqrestore(&cctx->lock, flags);
		goto err_idr;
	}
	ctx->ctxid = ret << 4;
	spin_unlock_irqrestore(&cctx->lock, flags);

	kref_init(&ctx->refcount);

	return ctx;
err_idr:
	spin_lock(&user->lock);
	list_del(&ctx->node);
	spin_unlock(&user->lock);
	fastrpc_channel_ctx_put(cctx);
	kfree(ctx->maps);
	kfree(ctx->olaps);
	kfree(ctx);

	return ERR_PTR(ret);
}

static struct sg_table *
fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
		    enum dma_data_direction dir)
{
	struct fastrpc_dma_buf_attachment *a = attachment->priv;
	struct sg_table *table;
	int ret;

	table = &a->sgt;

	ret = dma_map_sgtable(attachment->dev, table, dir, 0);
	if (ret)
		table = ERR_PTR(ret);
	return table;
}

static void fastrpc_unmap_dma_buf(struct dma_buf_attachment *attach,
				  struct sg_table *table,
				  enum dma_data_direction dir)
{
	dma_unmap_sgtable(attach->dev, table, dir, 0);
}

static void fastrpc_release(struct dma_buf *dmabuf)
{
	struct fastrpc_buf *buffer = dmabuf->priv;

	fastrpc_buf_free(buffer);
}

static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *attachment)
{
	struct fastrpc_dma_buf_attachment *a;
	struct fastrpc_buf *buffer = dmabuf->priv;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt,
			      FASTRPC_PHYS(buffer->phys), buffer->size);
	if (ret < 0) {
		dev_err(buffer->dev, "failed to get scatterlist from DMA API\n");
		kfree(a);
		return -EINVAL;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->node);
	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->node, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void fastrpc_dma_buf_detach(struct dma_buf *dmabuf,
				   struct dma_buf_attachment *attachment)
{
	struct fastrpc_dma_buf_attachment *a = attachment->priv;
	struct fastrpc_buf *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->node);
	mutex_unlock(&buffer->lock);
	sg_free_table(&a->sgt);
	kfree(a);
}

static int fastrpc_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct fastrpc_buf *buf = dmabuf->priv;

	iosys_map_set_vaddr(map, buf->virt);

	return 0;
}

static int fastrpc_mmap(struct dma_buf *dmabuf,
			struct vm_area_struct *vma)
{
	struct fastrpc_buf *buf = dmabuf->priv;
	size_t size = vma->vm_end - vma->vm_start;

	dma_resv_assert_held(dmabuf->resv);

	return dma_mmap_coherent(buf->dev, vma, buf->virt,
				 FASTRPC_PHYS(buf->phys), size);
}

static const struct dma_buf_ops fastrpc_dma_buf_ops = {
	.attach = fastrpc_dma_buf_attach,
	.detach = fastrpc_dma_buf_detach,
	.map_dma_buf = fastrpc_map_dma_buf,
	.unmap_dma_buf = fastrpc_unmap_dma_buf,
	.mmap = fastrpc_mmap,
	.vmap = fastrpc_vmap,
	.release = fastrpc_release,
};

static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
			      u64 len, u32 attr, struct fastrpc_map **ppmap)
{
	struct fastrpc_session_ctx *sess = fl->sctx;
	struct fastrpc_map *map = NULL;
	int err = 0;

	if (!fastrpc_map_lookup(fl, fd, ppmap, true))
		return 0;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	INIT_LIST_HEAD(&map->node);
	kref_init(&map->refcount);

	map->fl = fl;
	map->fd = fd;
	map->buf = dma_buf_get(fd);
	if (IS_ERR(map->buf)) {
		err = PTR_ERR(map->buf);
		goto get_err;
	}

	map->attach = dma_buf_attach(map->buf, sess->dev);
	if (IS_ERR(map->attach)) {
		dev_err(sess->dev, "Failed to attach dmabuf\n");
		err = PTR_ERR(map->attach);
		goto attach_err;
	}

	map->table = dma_buf_map_attachment_unlocked(map->attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(map->table)) {
		err = PTR_ERR(map->table);
		goto map_err;
	}

	map->phys = sg_dma_address(map->table->sgl);
	map->phys += ((u64)fl->sctx->sid << 32);
	map->size = len;
	map->va = sg_virt(map->table->sgl);
	map->len = len;

	if (attr & FASTRPC_ATTR_SECUREMAP) {
		/*
		 * If subsystem VMIDs are defined in DTSI, then do
		 * hyp_assign from HLOS to those VM(s)
		 */
		map->attr = attr;
		err = qcom_scm_assign_mem(map->phys, (u64)map->size, &fl->cctx->perms,
					  fl->cctx->vmperms, fl->cctx->vmcount);
		if (err) {
			dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d",
				map->phys, map->size, err);
			goto map_err;
		}
	}
	spin_lock(&fl->lock);
	list_add_tail(&map->node, &fl->maps);
	spin_unlock(&fl->lock);
	*ppmap = map;

	return 0;

map_err:
	dma_buf_detach(map->buf, map->attach);
attach_err:
	dma_buf_put(map->buf);
get_err:
	fastrpc_map_put(map);

	return err;
}

/*
 * Fastrpc payload buffer with metadata looks like:
 *
 * >>>>>> START of METADATA <<<<<<<<<
 * +---------------------------------+
 * |           Arguments             |
 * | type:(union fastrpc_remote_arg) |
 * |             (0 - N)             |
 * +---------------------------------+
 * |       Invoke Buffer list        |
 * | type:(struct fastrpc_invoke_buf)|
 * |             (0 - N)             |
 * +---------------------------------+
 * |         Page info list          |
 * | type:(struct fastrpc_phy_page)  |
 * |             (0 - N)             |
 * +---------------------------------+
 * |         Optional info           |
 * |(can be specific to SoC/Firmware)|
 * +---------------------------------+
 * >>>>>>>  END of METADATA <<<<<<<<<
 * +---------------------------------+
 * |         Inline ARGS             |
 * |             (0 - N)             |
 * +---------------------------------+
 */

static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
{
	int size = 0;

	size = (sizeof(struct fastrpc_remote_buf) +
		sizeof(struct fastrpc_invoke_buf) +
		sizeof(struct fastrpc_phy_page)) * ctx->nscalars +
		sizeof(u64) * FASTRPC_MAX_FDLIST +
		sizeof(u32) * FASTRPC_MAX_CRCLIST;

	return size;
}

868{
869 u64 size = 0;
870 int oix;
871
872 size = ALIGN(metalen, FASTRPC_ALIGN);
873 for (oix = 0; oix < ctx->nbufs; oix++) {
874 int i = ctx->olaps[oix].raix;
875
876 if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {
877
878 if (ctx->olaps[oix].offset == 0)
879 size = ALIGN(size, FASTRPC_ALIGN);
880
881 size += (ctx->olaps[oix].mend - ctx->olaps[oix].mstart);
882 }
883 }
884
885 return size;
886}
887
888static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx)
889{
890 struct device *dev = ctx->fl->sctx->dev;
891 int i, err;
892
893 for (i = 0; i < ctx->nscalars; ++i) {
894
895 if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1 ||
896 ctx->args[i].length == 0)
897 continue;
898
899 err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
900 ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]);
901 if (err) {
902 dev_err(dev, "Error Creating map %d\n", err);
903 return -EINVAL;
904 }
905
906 }
907 return 0;
908}
909
910static struct fastrpc_invoke_buf *fastrpc_invoke_buf_start(union fastrpc_remote_arg *pra, int len)
911{
912 return (struct fastrpc_invoke_buf *)(&pra[len]);
913}
914
915static struct fastrpc_phy_page *fastrpc_phy_page_start(struct fastrpc_invoke_buf *buf, int len)
916{
917 return (struct fastrpc_phy_page *)(&buf[len]);
918}
919
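/*
 * Marshal the invocation: allocate one DMA-coherent packet, lay out
 * the remote-arg/invoke-buf/page metadata at its head, then either
 * point page entries at pre-mapped dma-buf arguments or copy inline
 * input buffers into the packet tail.
 */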
static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
{
	struct device *dev = ctx->fl->sctx->dev;
	union fastrpc_remote_arg *rpra;
	struct fastrpc_invoke_buf *list;
	struct fastrpc_phy_page *pages;
	int inbufs, i, oix, err = 0;
	u64 len, rlen, pkt_size;
	u64 pg_start, pg_end;
	uintptr_t args;
	int metalen;

	inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	metalen = fastrpc_get_meta_size(ctx);
	pkt_size = fastrpc_get_payload_size(ctx, metalen);

	err = fastrpc_create_maps(ctx);
	if (err)
		return err;

	ctx->msg_sz = pkt_size;

	err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
	if (err)
		return err;

	rpra = ctx->buf->virt;
	list = fastrpc_invoke_buf_start(rpra, ctx->nscalars);
	pages = fastrpc_phy_page_start(list, ctx->nscalars);
	args = (uintptr_t)ctx->buf->virt + metalen;
	rlen = pkt_size - metalen;
	ctx->rpra = rpra;

	for (oix = 0; oix < ctx->nbufs; ++oix) {
		int mlen;

		i = ctx->olaps[oix].raix;
		len = ctx->args[i].length;

		rpra[i].buf.pv = 0;
		rpra[i].buf.len = len;
		list[i].num = len ? 1 : 0;
		list[i].pgidx = i;

		if (!len)
			continue;

		if (ctx->maps[i]) {
			struct vm_area_struct *vma = NULL;

			rpra[i].buf.pv = (u64) ctx->args[i].ptr;
			pages[i].addr = ctx->maps[i]->phys;

			mmap_read_lock(current->mm);
			vma = find_vma(current->mm, ctx->args[i].ptr);
			if (vma)
				pages[i].addr += ctx->args[i].ptr -
						 vma->vm_start;
			mmap_read_unlock(current->mm);

			pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT;
			pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >>
				  PAGE_SHIFT;
			pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
		} else {
			if (ctx->olaps[oix].offset == 0) {
				rlen -= ALIGN(args, FASTRPC_ALIGN) - args;
				args = ALIGN(args, FASTRPC_ALIGN);
			}

			mlen = ctx->olaps[oix].mend - ctx->olaps[oix].mstart;

			if (rlen < mlen)
				goto bail;

			rpra[i].buf.pv = args - ctx->olaps[oix].offset;
			pages[i].addr = ctx->buf->phys -
					ctx->olaps[oix].offset +
					(pkt_size - rlen);
			pages[i].addr = pages[i].addr & PAGE_MASK;

			pg_start = (args & PAGE_MASK) >> PAGE_SHIFT;
			pg_end = ((args + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
			pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
			args = args + mlen;
			rlen -= mlen;
		}

		if (i < inbufs && !ctx->maps[i]) {
			void *dst = (void *)(uintptr_t)rpra[i].buf.pv;
			void *src = (void *)(uintptr_t)ctx->args[i].ptr;

			if (!kernel) {
				if (copy_from_user(dst, (void __user *)src,
						   len)) {
					err = -EFAULT;
					goto bail;
				}
			} else {
				memcpy(dst, src, len);
			}
		}
	}

	for (i = ctx->nbufs; i < ctx->nscalars; ++i) {
		list[i].num = ctx->args[i].length ? 1 : 0;
		list[i].pgidx = i;
		if (ctx->maps[i]) {
			pages[i].addr = ctx->maps[i]->phys;
			pages[i].size = ctx->maps[i]->size;
		}
		rpra[i].dma.fd = ctx->args[i].fd;
		rpra[i].dma.len = ctx->args[i].length;
		rpra[i].dma.offset = (u64) ctx->args[i].ptr;
	}

bail:
	if (err)
		dev_err(dev, "Error: get invoke args failed:%d\n", err);

	return err;
}

static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
			    u32 kernel)
{
	union fastrpc_remote_arg *rpra = ctx->rpra;
	struct fastrpc_user *fl = ctx->fl;
	struct fastrpc_map *mmap = NULL;
	struct fastrpc_invoke_buf *list;
	struct fastrpc_phy_page *pages;
	u64 *fdlist;
	int i, inbufs, outbufs, handles;

	inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
	handles = REMOTE_SCALARS_INHANDLES(ctx->sc) + REMOTE_SCALARS_OUTHANDLES(ctx->sc);
	list = fastrpc_invoke_buf_start(rpra, ctx->nscalars);
	pages = fastrpc_phy_page_start(list, ctx->nscalars);
	fdlist = (u64 *)(pages + inbufs + outbufs + handles);

	for (i = inbufs; i < ctx->nbufs; ++i) {
		if (!ctx->maps[i]) {
			void *src = (void *)(uintptr_t)rpra[i].buf.pv;
			void *dst = (void *)(uintptr_t)ctx->args[i].ptr;
			u64 len = rpra[i].buf.len;

			if (!kernel) {
				if (copy_to_user((void __user *)dst, src, len))
					return -EFAULT;
			} else {
				memcpy(dst, src, len);
			}
		}
	}

	for (i = 0; i < FASTRPC_MAX_FDLIST; i++) {
		if (!fdlist[i])
			break;
		if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap, false))
			fastrpc_map_put(mmap);
	}

	return 0;
}

static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
			       struct fastrpc_invoke_ctx *ctx,
			       u32 kernel, uint32_t handle)
{
	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_user *fl = ctx->fl;
	struct fastrpc_msg *msg = &ctx->msg;
	int ret;

	cctx = fl->cctx;
	msg->pid = fl->tgid;
	msg->tid = current->pid;

	if (kernel)
		msg->pid = 0;

	msg->ctx = ctx->ctxid | fl->pd;
	msg->handle = handle;
	msg->sc = ctx->sc;
	msg->addr = ctx->buf ? ctx->buf->phys : 0;
	msg->size = roundup(ctx->msg_sz, PAGE_SIZE);
	fastrpc_context_get(ctx);

	ret = rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg));

	if (ret)
		fastrpc_context_put(ctx);

	return ret;
}

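/*
 * Core invoke path: marshal the arguments, post the message to the DSP
 * over rpmsg, then wait for completion (interruptibly for user calls,
 * with a 10 s timeout for kernel-initiated ones) before unmarshalling
 * the output buffers.
 */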
static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
				   u32 handle, u32 sc,
				   struct fastrpc_invoke_args *args)
{
	struct fastrpc_invoke_ctx *ctx = NULL;
	struct fastrpc_buf *buf, *b;
	int err = 0;

	if (!fl->sctx)
		return -EINVAL;

	if (!fl->cctx->rpdev)
		return -EPIPE;

	if (handle == FASTRPC_INIT_HANDLE && !kernel) {
		dev_warn_ratelimited(fl->sctx->dev, "user app trying to send a kernel RPC message (%d)\n", handle);
		return -EPERM;
	}

	ctx = fastrpc_context_alloc(fl, kernel, sc, args);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx->nscalars) {
		err = fastrpc_get_args(kernel, ctx);
		if (err)
			goto bail;
	}

	/* make sure that all CPU memory writes are seen by DSP */
	dma_wmb();
	/* Send invoke buffer to remote dsp */
	err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
	if (err)
		goto bail;

	if (kernel) {
		if (!wait_for_completion_timeout(&ctx->work, 10 * HZ))
			err = -ETIMEDOUT;
	} else {
		err = wait_for_completion_interruptible(&ctx->work);
	}

	if (err)
		goto bail;

	/* Check the response from remote dsp */
	err = ctx->retval;
	if (err)
		goto bail;

	if (ctx->nscalars) {
		/* make sure that all memory writes by DSP are seen by CPU */
		dma_rmb();
		/* populate all the output buffers with results */
		err = fastrpc_put_args(ctx, kernel);
		if (err)
			goto bail;
	}

bail:
	if (err != -ERESTARTSYS && err != -ETIMEDOUT) {
		/* We are done with this compute context */
		spin_lock(&fl->lock);
		list_del(&ctx->node);
		spin_unlock(&fl->lock);
		fastrpc_context_put(ctx);
	}

	if (err == -ERESTARTSYS) {
		list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
			list_del(&buf->node);
			list_add_tail(&buf->node, &fl->cctx->invoke_interrupted_mmaps);
		}
	}

	if (err)
		dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err);

	return err;
}

static bool is_session_rejected(struct fastrpc_user *fl, bool unsigned_pd_request)
{
	/* Check if the device node is non-secure and the channel is secure */
	if (!fl->is_secure_dev && fl->cctx->secure) {
		/*
		 * Allow untrusted applications to offload only to unsigned
		 * PDs when the channel is configured as secure, and block
		 * untrusted apps on channels that do not support unsigned
		 * PD offload.
		 */
		if (!fl->cctx->unsigned_support || !unsigned_pd_request) {
			dev_err(&fl->cctx->rpdev->dev, "Error: Untrusted application trying to offload to signed PD");
			return true;
		}
	}

	return false;
}

static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
					      char __user *argp)
{
	struct fastrpc_init_create_static init;
	struct fastrpc_invoke_args *args;
	struct fastrpc_phy_page pages[1];
	char *name;
	int err;
	struct {
		int pgid;
		u32 namelen;
		u32 pageslen;
	} inbuf;
	u32 sc;

	args = kcalloc(FASTRPC_CREATE_STATIC_PROCESS_NARGS, sizeof(*args), GFP_KERNEL);
	if (!args)
		return -ENOMEM;

	if (copy_from_user(&init, argp, sizeof(init))) {
		err = -EFAULT;
		goto err;
	}

	if (init.namelen > INIT_FILE_NAMELEN_MAX) {
		err = -EINVAL;
		goto err;
	}

	name = kzalloc(init.namelen, GFP_KERNEL);
	if (!name) {
		err = -ENOMEM;
		goto err;
	}

	if (copy_from_user(name, (void __user *)(uintptr_t)init.name, init.namelen)) {
		err = -EFAULT;
		goto err_name;
	}

	if (!fl->cctx->remote_heap) {
		err = fastrpc_remote_heap_alloc(fl, fl->sctx->dev, init.memlen,
						&fl->cctx->remote_heap);
		if (err)
			goto err_name;

		/* Map if we have any heap VMIDs associated with this ADSP Static Process. */
		if (fl->cctx->vmcount) {
			err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys,
						  (u64)fl->cctx->remote_heap->size,
						  &fl->cctx->perms,
						  fl->cctx->vmperms, fl->cctx->vmcount);
			if (err) {
				dev_err(fl->sctx->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d",
					fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err);
				goto err_map;
			}
		}
	}

	inbuf.pgid = fl->tgid;
	inbuf.namelen = init.namelen;
	inbuf.pageslen = 0;
	fl->pd = USER_PD;

	args[0].ptr = (u64)(uintptr_t)&inbuf;
	args[0].length = sizeof(inbuf);
	args[0].fd = -1;

	args[1].ptr = (u64)(uintptr_t)name;
	args[1].length = inbuf.namelen;
	args[1].fd = -1;

	pages[0].addr = fl->cctx->remote_heap->phys;
	pages[0].size = fl->cctx->remote_heap->size;

	args[2].ptr = (u64)(uintptr_t) pages;
	args[2].length = sizeof(*pages);
	args[2].fd = -1;

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_STATIC, 3, 0);

	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				      sc, args);
	if (err)
		goto err_invoke;

	kfree(args);

	return 0;
err_invoke:
	if (fl->cctx->vmcount) {
		struct qcom_scm_vmperm perm;

		perm.vmid = QCOM_SCM_VMID_HLOS;
		perm.perm = QCOM_SCM_PERM_RWX;
		err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys,
					  (u64)fl->cctx->remote_heap->size,
					  &fl->cctx->perms, &perm, 1);
		if (err)
			dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
				fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err);
	}
err_map:
	fastrpc_buf_free(fl->cctx->remote_heap);
	fl->cctx->remote_heap = NULL;
err_name:
	kfree(name);
err:
	kfree(args);

	return err;
}

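/*
 * Spawn a dynamic user PD on the DSP: the shell ELF is handed over via
 * its dma-buf fd together with a scratch buffer (init_mem) of at least
 * INIT_FILELEN_MAX, and the create call carries the
 * name/file/pages/attr/siglen arguments counted by
 * FASTRPC_CREATE_PROCESS_NARGS.
 */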
static int fastrpc_init_create_process(struct fastrpc_user *fl,
				       char __user *argp)
{
	struct fastrpc_init_create init;
	struct fastrpc_invoke_args *args;
	struct fastrpc_phy_page pages[1];
	struct fastrpc_map *map = NULL;
	struct fastrpc_buf *imem = NULL;
	int memlen;
	int err;
	struct {
		int pgid;
		u32 namelen;
		u32 filelen;
		u32 pageslen;
		u32 attrs;
		u32 siglen;
	} inbuf;
	u32 sc;
	bool unsigned_module = false;

	args = kcalloc(FASTRPC_CREATE_PROCESS_NARGS, sizeof(*args), GFP_KERNEL);
	if (!args)
		return -ENOMEM;

	if (copy_from_user(&init, argp, sizeof(init))) {
		err = -EFAULT;
		goto err;
	}

	if (init.attrs & FASTRPC_MODE_UNSIGNED_MODULE)
		unsigned_module = true;

	if (is_session_rejected(fl, unsigned_module)) {
		err = -ECONNREFUSED;
		goto err;
	}

	if (init.filelen > INIT_FILELEN_MAX) {
		err = -EINVAL;
		goto err;
	}

	inbuf.pgid = fl->tgid;
	inbuf.namelen = strlen(current->comm) + 1;
	inbuf.filelen = init.filelen;
	inbuf.pageslen = 1;
	inbuf.attrs = init.attrs;
	inbuf.siglen = init.siglen;
	fl->pd = USER_PD;

	if (init.filelen && init.filefd) {
		err = fastrpc_map_create(fl, init.filefd, init.filelen, 0, &map);
		if (err)
			goto err;
	}

	memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4),
		       1024 * 1024);
	err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
				&imem);
	if (err)
		goto err_alloc;

	fl->init_mem = imem;
	args[0].ptr = (u64)(uintptr_t)&inbuf;
	args[0].length = sizeof(inbuf);
	args[0].fd = -1;

	args[1].ptr = (u64)(uintptr_t)current->comm;
	args[1].length = inbuf.namelen;
	args[1].fd = -1;

	args[2].ptr = (u64) init.file;
	args[2].length = inbuf.filelen;
	args[2].fd = init.filefd;

	pages[0].addr = imem->phys;
	pages[0].size = imem->size;

	args[3].ptr = (u64)(uintptr_t) pages;
	args[3].length = 1 * sizeof(*pages);
	args[3].fd = -1;

	args[4].ptr = (u64)(uintptr_t)&inbuf.attrs;
	args[4].length = sizeof(inbuf.attrs);
	args[4].fd = -1;

	args[5].ptr = (u64)(uintptr_t)&inbuf.siglen;
	args[5].length = sizeof(inbuf.siglen);
	args[5].fd = -1;

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
	if (init.attrs)
		sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 6, 0);

	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				      sc, args);
	if (err)
		goto err_invoke;

	kfree(args);

	return 0;

err_invoke:
	fl->init_mem = NULL;
	fastrpc_buf_free(imem);
err_alloc:
	fastrpc_map_put(map);
err:
	kfree(args);

	return err;
}

static struct fastrpc_session_ctx *fastrpc_session_alloc(
					struct fastrpc_channel_ctx *cctx)
{
	struct fastrpc_session_ctx *session = NULL;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cctx->lock, flags);
	for (i = 0; i < cctx->sesscount; i++) {
		if (!cctx->session[i].used && cctx->session[i].valid) {
			cctx->session[i].used = true;
			session = &cctx->session[i];
			break;
		}
	}
	spin_unlock_irqrestore(&cctx->lock, flags);

	return session;
}

static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
				 struct fastrpc_session_ctx *session)
{
	unsigned long flags;

	spin_lock_irqsave(&cctx->lock, flags);
	session->used = false;
	spin_unlock_irqrestore(&cctx->lock, flags);
}

static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
{
	struct fastrpc_invoke_args args[1];
	int tgid = 0;
	u32 sc;

	tgid = fl->tgid;
	args[0].ptr = (u64)(uintptr_t) &tgid;
	args[0].length = sizeof(tgid);
	args[0].fd = -1;
	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0);

	return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				       sc, &args[0]);
}

static int fastrpc_device_release(struct inode *inode, struct file *file)
{
	struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
	struct fastrpc_channel_ctx *cctx = fl->cctx;
	struct fastrpc_invoke_ctx *ctx, *n;
	struct fastrpc_map *map, *m;
	struct fastrpc_buf *buf, *b;
	unsigned long flags;

	fastrpc_release_current_dsp_process(fl);

	spin_lock_irqsave(&cctx->lock, flags);
	list_del(&fl->user);
	spin_unlock_irqrestore(&cctx->lock, flags);

	if (fl->init_mem)
		fastrpc_buf_free(fl->init_mem);

	list_for_each_entry_safe(ctx, n, &fl->pending, node) {
		list_del(&ctx->node);
		fastrpc_context_put(ctx);
	}

	list_for_each_entry_safe(map, m, &fl->maps, node)
		fastrpc_map_put(map);

	list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
		list_del(&buf->node);
		fastrpc_buf_free(buf);
	}

	fastrpc_session_free(cctx, fl->sctx);
	fastrpc_channel_ctx_put(cctx);

	mutex_destroy(&fl->mutex);
	kfree(fl);
	file->private_data = NULL;

	return 0;
}

static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_device *fdevice;
	struct fastrpc_user *fl = NULL;
	unsigned long flags;

	fdevice = miscdev_to_fdevice(filp->private_data);
	cctx = fdevice->cctx;

	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
	if (!fl)
		return -ENOMEM;

	/* Released in fastrpc_device_release() */
	fastrpc_channel_ctx_get(cctx);

	filp->private_data = fl;
	spin_lock_init(&fl->lock);
	mutex_init(&fl->mutex);
	INIT_LIST_HEAD(&fl->pending);
	INIT_LIST_HEAD(&fl->maps);
	INIT_LIST_HEAD(&fl->mmaps);
	INIT_LIST_HEAD(&fl->user);
	fl->tgid = current->tgid;
	fl->cctx = cctx;
	fl->is_secure_dev = fdevice->secure;

	fl->sctx = fastrpc_session_alloc(cctx);
	if (!fl->sctx) {
		dev_err(&cctx->rpdev->dev, "No session available\n");
		mutex_destroy(&fl->mutex);
		kfree(fl);

		return -EBUSY;
	}

	spin_lock_irqsave(&cctx->lock, flags);
	list_add_tail(&fl->user, &cctx->users);
	spin_unlock_irqrestore(&cctx->lock, flags);

	return 0;
}

static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_alloc_dma_buf bp;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct fastrpc_buf *buf = NULL;
	int err;

	if (copy_from_user(&bp, argp, sizeof(bp)))
		return -EFAULT;

	err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf);
	if (err)
		return err;
	exp_info.ops = &fastrpc_dma_buf_ops;
	exp_info.size = bp.size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buf;
	buf->dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(buf->dmabuf)) {
		err = PTR_ERR(buf->dmabuf);
		fastrpc_buf_free(buf);
		return err;
	}

	bp.fd = dma_buf_fd(buf->dmabuf, O_ACCMODE);
	if (bp.fd < 0) {
		dma_buf_put(buf->dmabuf);
		return -EINVAL;
	}

	if (copy_to_user(argp, &bp, sizeof(bp))) {
		/*
		 * The usercopy failed, but we can't do much about it, as
		 * dma_buf_fd() already called fd_install() and made the
		 * file descriptor accessible for the current process. It
		 * might already be closed and dmabuf no longer valid when
		 * we reach this point. Therefore "leak" the fd and rely on
		 * the process exit path to do any required cleanup.
		 */
		return -EFAULT;
	}

	return 0;
}

static int fastrpc_init_attach(struct fastrpc_user *fl, int pd)
{
	struct fastrpc_invoke_args args[1];
	int tgid = fl->tgid;
	u32 sc;

	args[0].ptr = (u64)(uintptr_t) &tgid;
	args[0].length = sizeof(tgid);
	args[0].fd = -1;
	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0);
	fl->pd = pd;

	return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				       sc, &args[0]);
}

static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_invoke_args *args = NULL;
	struct fastrpc_invoke inv;
	u32 nscalars;
	int err;

	if (copy_from_user(&inv, argp, sizeof(inv)))
		return -EFAULT;

	/* nscalars is truncated here to max supported value */
	nscalars = REMOTE_SCALARS_LENGTH(inv.sc);
	if (nscalars) {
		args = kcalloc(nscalars, sizeof(*args), GFP_KERNEL);
		if (!args)
			return -ENOMEM;

		if (copy_from_user(args, (void __user *)(uintptr_t)inv.args,
				   nscalars * sizeof(*args))) {
			kfree(args);
			return -EFAULT;
		}
	}

	err = fastrpc_internal_invoke(fl, false, inv.handle, inv.sc, args);
	kfree(args);

	return err;
}

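/*
 * Capability query: remote method 0 on FASTRPC_DSP_UTILITIES_HANDLE
 * takes the attribute-array length as its single input and fills the
 * array as its single output; slot 0 is reserved for attributes
 * filled in by userspace.
 */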
static int fastrpc_get_info_from_dsp(struct fastrpc_user *fl, uint32_t *dsp_attr_buf,
				     uint32_t dsp_attr_buf_len)
{
	struct fastrpc_invoke_args args[2] = { 0 };

	/* Capability filled in userspace */
	dsp_attr_buf[0] = 0;

	args[0].ptr = (u64)(uintptr_t)&dsp_attr_buf_len;
	args[0].length = sizeof(dsp_attr_buf_len);
	args[0].fd = -1;
	args[1].ptr = (u64)(uintptr_t)&dsp_attr_buf[1];
	args[1].length = dsp_attr_buf_len;
	args[1].fd = -1;
	fl->pd = USER_PD;

	return fastrpc_internal_invoke(fl, true, FASTRPC_DSP_UTILITIES_HANDLE,
				       FASTRPC_SCALARS(0, 1, 1), args);
}

static int fastrpc_get_info_from_kernel(struct fastrpc_ioctl_capability *cap,
					struct fastrpc_user *fl)
{
	struct fastrpc_channel_ctx *cctx = fl->cctx;
	uint32_t attribute_id = cap->attribute_id;
	uint32_t *dsp_attributes;
	unsigned long flags;
	uint32_t domain = cap->domain;
	int err;

	spin_lock_irqsave(&cctx->lock, flags);
	/* check if we have already queried the dsp for attributes */
	if (cctx->valid_attributes) {
		spin_unlock_irqrestore(&cctx->lock, flags);
		goto done;
	}
	spin_unlock_irqrestore(&cctx->lock, flags);

	dsp_attributes = kzalloc(FASTRPC_MAX_DSP_ATTRIBUTES_LEN, GFP_KERNEL);
	if (!dsp_attributes)
		return -ENOMEM;

	err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES_LEN);
	if (err == DSP_UNSUPPORTED_API) {
		dev_info(&cctx->rpdev->dev,
			 "Warning: DSP capabilities not supported on domain: %d\n", domain);
		kfree(dsp_attributes);
		return -EOPNOTSUPP;
	} else if (err) {
		dev_err(&cctx->rpdev->dev, "Error: dsp information is incorrect err: %d\n", err);
		kfree(dsp_attributes);
		return err;
	}

	spin_lock_irqsave(&cctx->lock, flags);
	memcpy(cctx->dsp_attributes, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES_LEN);
	cctx->valid_attributes = true;
	spin_unlock_irqrestore(&cctx->lock, flags);
	kfree(dsp_attributes);
done:
	cap->capability = cctx->dsp_attributes[attribute_id];
	return 0;
}

static int fastrpc_get_dsp_info(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_ioctl_capability cap = {0};
	int err = 0;

	if (copy_from_user(&cap, argp, sizeof(cap)))
		return -EFAULT;

	cap.capability = 0;
	if (cap.domain >= FASTRPC_DEV_MAX) {
		dev_err(&fl->cctx->rpdev->dev, "Error: Invalid domain id:%d\n",
			cap.domain);
		return -ECHRNG;
	}

	/* FastRPC capabilities are not supported on the modem domain */
	if (cap.domain == MDSP_DOMAIN_ID) {
		dev_err(&fl->cctx->rpdev->dev, "Error: modem not supported\n");
		return -ECHRNG;
	}

	if (cap.attribute_id >= FASTRPC_MAX_DSP_ATTRIBUTES) {
		dev_err(&fl->cctx->rpdev->dev, "Error: invalid attribute: %d\n",
			cap.attribute_id);
		return -EOVERFLOW;
	}

	err = fastrpc_get_info_from_kernel(&cap, fl);
	if (err)
		return err;

	if (copy_to_user(argp, &cap.capability, sizeof(cap.capability)))
		return -EFAULT;

	return 0;
}

static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, struct fastrpc_buf *buf)
{
	struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
	struct fastrpc_munmap_req_msg req_msg;
	struct device *dev = fl->sctx->dev;
	int err;
	u32 sc;

	req_msg.pgid = fl->tgid;
	req_msg.size = buf->size;
	req_msg.vaddr = buf->raddr;

	args[0].ptr = (u64) (uintptr_t) &req_msg;
	args[0].length = sizeof(req_msg);

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MUNMAP, 1, 0);
	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
				      &args[0]);
	if (!err) {
		dev_dbg(dev, "unmmap\tpt 0x%09lx OK\n", buf->raddr);
		spin_lock(&fl->lock);
		list_del(&buf->node);
		spin_unlock(&fl->lock);
		fastrpc_buf_free(buf);
	} else {
		dev_err(dev, "unmmap\tpt 0x%09lx ERROR\n", buf->raddr);
	}

	return err;
}

static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_buf *buf = NULL, *iter, *b;
	struct fastrpc_req_munmap req;
	struct device *dev = fl->sctx->dev;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	spin_lock(&fl->lock);
	list_for_each_entry_safe(iter, b, &fl->mmaps, node) {
		if ((iter->raddr == req.vaddrout) && (iter->size == req.size)) {
			buf = iter;
			break;
		}
	}
	spin_unlock(&fl->lock);

	if (!buf) {
		dev_err(dev, "mmap\t\tpt 0x%09llx [len 0x%08llx] not in list\n",
			req.vaddrout, req.size);
		return -EINVAL;
	}

	return fastrpc_req_munmap_impl(fl, buf);
}

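/*
 * DSP-side mmap: the kernel allocates the backing buffer, then asks
 * the remote PD to map it via FASTRPC_RMID_INIT_MMAP (two inputs:
 * request header and page list; one output: the remote address the
 * DSP picked, kept in buf->raddr for the matching munmap).
 */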
static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_invoke_args args[3] = { [0 ... 2] = { 0 } };
	struct fastrpc_buf *buf = NULL;
	struct fastrpc_mmap_req_msg req_msg;
	struct fastrpc_mmap_rsp_msg rsp_msg;
	struct fastrpc_phy_page pages;
	struct fastrpc_req_mmap req;
	struct device *dev = fl->sctx->dev;
	int err;
	u32 sc;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	if (req.flags != ADSP_MMAP_ADD_PAGES && req.flags != ADSP_MMAP_REMOTE_HEAP_ADDR) {
		dev_err(dev, "flag not supported 0x%x\n", req.flags);

		return -EINVAL;
	}

	if (req.vaddrin) {
		dev_err(dev, "adding user allocated pages is not supported\n");
		return -EINVAL;
	}

	err = fastrpc_buf_alloc(fl, fl->sctx->dev, req.size, &buf);
	if (err) {
		dev_err(dev, "failed to allocate buffer\n");
		return err;
	}

	req_msg.pgid = fl->tgid;
	req_msg.flags = req.flags;
	req_msg.vaddr = req.vaddrin;
	req_msg.num = sizeof(pages);

	args[0].ptr = (u64) (uintptr_t) &req_msg;
	args[0].length = sizeof(req_msg);

	pages.addr = buf->phys;
	pages.size = buf->size;

	args[1].ptr = (u64) (uintptr_t) &pages;
	args[1].length = sizeof(pages);

	args[2].ptr = (u64) (uintptr_t) &rsp_msg;
	args[2].length = sizeof(rsp_msg);

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MMAP, 2, 1);
	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
				      &args[0]);
	if (err) {
		dev_err(dev, "mmap error (len 0x%08llx)\n", buf->size);
		goto err_invoke;
	}

	/* update the buffer to be able to deallocate the memory on the DSP */
	buf->raddr = (uintptr_t) rsp_msg.vaddr;

	/* let the client know the address to use */
	req.vaddrout = rsp_msg.vaddr;

	/* Add memory to static PD pool, protection through hypervisor */
	if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) {
		err = qcom_scm_assign_mem(buf->phys, (u64)buf->size,
					  &fl->cctx->perms,
					  fl->cctx->vmperms, fl->cctx->vmcount);
		if (err) {
			dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
				buf->phys, buf->size, err);
			goto err_assign;
		}
	}

	spin_lock(&fl->lock);
	list_add_tail(&buf->node, &fl->mmaps);
	spin_unlock(&fl->lock);

	if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
		err = -EFAULT;
		goto err_assign;
	}

	dev_dbg(dev, "mmap\t\tpt 0x%09lx OK [len 0x%08llx]\n",
		buf->raddr, buf->size);

	return 0;

err_assign:
	/* the remote unmap also frees the buffer on success */
	fastrpc_req_munmap_impl(fl, buf);

	return err;
err_invoke:
	fastrpc_buf_free(buf);

	return err;
}

static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_mem_unmap *req)
{
	struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
	struct fastrpc_map *map = NULL, *iter, *m;
	struct fastrpc_mem_unmap_req_msg req_msg = { 0 };
	int err = 0;
	u32 sc;
	struct device *dev = fl->sctx->dev;

	spin_lock(&fl->lock);
	list_for_each_entry_safe(iter, m, &fl->maps, node) {
		if ((req->fd < 0 || iter->fd == req->fd) && (iter->raddr == req->vaddr)) {
			map = iter;
			break;
		}
	}
	spin_unlock(&fl->lock);

	if (!map) {
		dev_err(dev, "map not in list\n");
		return -EINVAL;
	}

	req_msg.pgid = fl->tgid;
	req_msg.len = map->len;
	req_msg.vaddrin = map->raddr;
	req_msg.fd = map->fd;

	args[0].ptr = (u64) (uintptr_t) &req_msg;
	args[0].length = sizeof(req_msg);

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_UNMAP, 1, 0);
	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
				      &args[0]);
	if (err)
		dev_err(dev, "unmmap\tpt fd = %d, 0x%09llx error\n",
			map->fd, map->raddr);
	fastrpc_map_put(map);

	return err;
}

static int fastrpc_req_mem_unmap(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_mem_unmap req;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	return fastrpc_req_mem_unmap_impl(fl, &req);
}

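/*
 * FASTRPC_RMID_INIT_MEM_MAP takes three inputs (request header, page
 * list and an optional data_len payload, empty here) and returns the
 * remote address; if the result cannot be copied back to userspace
 * the mapping is rolled back with a matching MEM_UNMAP call.
 */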
1983static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
1984{
1985 struct fastrpc_invoke_args args[4] = { [0 ... 3] = { 0 } };
1986 struct fastrpc_mem_map_req_msg req_msg = { 0 };
1987 struct fastrpc_mmap_rsp_msg rsp_msg = { 0 };
1988 struct fastrpc_mem_unmap req_unmap = { 0 };
1989 struct fastrpc_phy_page pages = { 0 };
1990 struct fastrpc_mem_map req;
1991 struct device *dev = fl->sctx->dev;
1992 struct fastrpc_map *map = NULL;
1993 int err;
1994 u32 sc;
1995
1996 if (copy_from_user(&req, argp, sizeof(req)))
1997 return -EFAULT;
1998
1999 /* create SMMU mapping */
2000 err = fastrpc_map_create(fl, req.fd, req.length, 0, &map);
2001 if (err) {
2002 dev_err(dev, "failed to map buffer, fd = %d\n", req.fd);
2003 return err;
2004 }
2005
2006 req_msg.pgid = fl->tgid;
2007 req_msg.fd = req.fd;
2008 req_msg.offset = req.offset;
2009 req_msg.vaddrin = req.vaddrin;
2010 map->va = (void *) (uintptr_t) req.vaddrin;
2011 req_msg.flags = req.flags;
2012 req_msg.num = sizeof(pages);
2013 req_msg.data_len = 0;
2014
2015 args[0].ptr = (u64) (uintptr_t) &req_msg;
2016 args[0].length = sizeof(req_msg);
2017
2018 pages.addr = map->phys;
2019 pages.size = map->size;
2020
2021 args[1].ptr = (u64) (uintptr_t) &pages;
2022 args[1].length = sizeof(pages);
2023
2024 args[2].ptr = (u64) (uintptr_t) &pages;
2025 args[2].length = 0;
2026
2027 args[3].ptr = (u64) (uintptr_t) &rsp_msg;
2028 args[3].length = sizeof(rsp_msg);
2029
2030 sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_MAP, 3, 1);
2031 err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, &args[0]);
2032 if (err) {
2033 dev_err(dev, "mem mmap error, fd %d, vaddr %llx, size %lld\n",
2034 req.fd, req.vaddrin, map->size);
2035 goto err_invoke;
2036 }
2037
2038 /* update the buffer to be able to deallocate the memory on the DSP */
2039 map->raddr = rsp_msg.vaddr;
2040
2041 /* let the client know the address to use */
2042 req.vaddrout = rsp_msg.vaddr;
2043
2044 if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
2045 /* unmap the memory and release the buffer */
2046 req_unmap.vaddr = (uintptr_t) rsp_msg.vaddr;
2047 req_unmap.length = map->size;
2048 fastrpc_req_mem_unmap_impl(fl, &req_unmap);
2049 return -EFAULT;
2050 }
2051
2052 return 0;
2053
2054err_invoke:
2055 fastrpc_map_put(map);
2056
2057 return err;
2058}
2059
2060static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
2061 unsigned long arg)
2062{
2063 struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
2064 char __user *argp = (char __user *)arg;
2065 int err;
2066
2067 switch (cmd) {
2068 case FASTRPC_IOCTL_INVOKE:
2069 err = fastrpc_invoke(fl, argp);
2070 break;
2071 case FASTRPC_IOCTL_INIT_ATTACH:
2072 err = fastrpc_init_attach(fl, ROOT_PD);
2073 break;
2074 case FASTRPC_IOCTL_INIT_ATTACH_SNS:
2075 err = fastrpc_init_attach(fl, SENSORS_PD);
2076 break;
2077 case FASTRPC_IOCTL_INIT_CREATE_STATIC:
2078 err = fastrpc_init_create_static_process(fl, argp);
2079 break;
2080 case FASTRPC_IOCTL_INIT_CREATE:
2081 err = fastrpc_init_create_process(fl, argp);
2082 break;
2083 case FASTRPC_IOCTL_ALLOC_DMA_BUFF:
2084 err = fastrpc_dmabuf_alloc(fl, argp);
2085 break;
2086 case FASTRPC_IOCTL_MMAP:
2087 err = fastrpc_req_mmap(fl, argp);
2088 break;
2089 case FASTRPC_IOCTL_MUNMAP:
2090 err = fastrpc_req_munmap(fl, argp);
2091 break;
2092 case FASTRPC_IOCTL_MEM_MAP:
2093 err = fastrpc_req_mem_map(fl, argp);
2094 break;
2095 case FASTRPC_IOCTL_MEM_UNMAP:
2096 err = fastrpc_req_mem_unmap(fl, argp);
2097 break;
2098 case FASTRPC_IOCTL_GET_DSP_INFO:
2099 err = fastrpc_get_dsp_info(fl, argp);
2100 break;
2101 default:
2102 err = -ENOTTY;
2103 break;
2104 }
2105
2106 return err;
2107}
2108
static const struct file_operations fastrpc_fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = fastrpc_device_ioctl,
};

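/*
 * Probe one SMMU context bank ("qcom,fastrpc-compute-cb") and register
 * it as a session with the parent channel.  The optional
 * "qcom,nsessions" property duplicates the session so that several
 * process domains can share one context bank; the session ID comes
 * from the "reg" property.
 */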
static int fastrpc_cb_probe(struct platform_device *pdev)
{
	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_session_ctx *sess;
	struct device *dev = &pdev->dev;
	int i, sessions = 0;
	unsigned long flags;
	int rc;

	cctx = dev_get_drvdata(dev->parent);
	if (!cctx)
		return -EINVAL;

	of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);

	spin_lock_irqsave(&cctx->lock, flags);
	if (cctx->sesscount >= FASTRPC_MAX_SESSIONS) {
		dev_err(&pdev->dev, "too many sessions\n");
		spin_unlock_irqrestore(&cctx->lock, flags);
		return -ENOSPC;
	}
	sess = &cctx->session[cctx->sesscount++];
	sess->used = false;
	sess->valid = true;
	sess->dev = dev;
	dev_set_drvdata(dev, sess);

	if (of_property_read_u32(dev->of_node, "reg", &sess->sid))
		dev_info(dev, "FastRPC Session ID not specified in DT\n");

	if (sessions > 0) {
		struct fastrpc_session_ctx *dup_sess;

		for (i = 1; i < sessions; i++) {
			if (cctx->sesscount >= FASTRPC_MAX_SESSIONS)
				break;
			dup_sess = &cctx->session[cctx->sesscount++];
			memcpy(dup_sess, sess, sizeof(*dup_sess));
		}
	}
	spin_unlock_irqrestore(&cctx->lock, flags);
	rc = dma_set_mask(dev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(dev, "32-bit DMA enable failed\n");
		return rc;
	}

	return 0;
}

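/*
 * Tear down the session(s) created by fastrpc_cb_probe(): every
 * session whose SID matches this context bank (including duplicates
 * created via "qcom,nsessions") is invalidated and returned to the
 * pool.
 */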
static int fastrpc_cb_remove(struct platform_device *pdev)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent);
	struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cctx->lock, flags);
	for (i = 0; i < FASTRPC_MAX_SESSIONS; i++) {
		if (cctx->session[i].sid == sess->sid) {
			cctx->session[i].valid = false;
			cctx->sesscount--;
		}
	}
	spin_unlock_irqrestore(&cctx->lock, flags);

	return 0;
}

2184
2185static const struct of_device_id fastrpc_match_table[] = {
2186 { .compatible = "qcom,fastrpc-compute-cb", },
2187 {}
2188};
2189
2190static struct platform_driver fastrpc_cb_driver = {
2191 .probe = fastrpc_cb_probe,
2192 .remove = fastrpc_cb_remove,
2193 .driver = {
2194 .name = "qcom,fastrpc-cb",
2195 .of_match_table = fastrpc_match_table,
2196 .suppress_bind_attrs = true,
2197 },
2198};
2199
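/*
 * Register one misc character device for a channel.  The node is named
 * "fastrpc-<domain>" or "fastrpc-<domain>-secure", e.g.
 * /dev/fastrpc-cdsp-secure for the secured CDSP node.
 */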
static int fastrpc_device_register(struct device *dev, struct fastrpc_channel_ctx *cctx,
				   bool is_secured, const char *domain)
{
	struct fastrpc_device *fdev;
	int err;

	fdev = devm_kzalloc(dev, sizeof(*fdev), GFP_KERNEL);
	if (!fdev)
		return -ENOMEM;

	fdev->secure = is_secured;
	fdev->cctx = cctx;
	fdev->miscdev.minor = MISC_DYNAMIC_MINOR;
	fdev->miscdev.fops = &fastrpc_fops;
	fdev->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "fastrpc-%s%s",
					    domain, is_secured ? "-secure" : "");
	err = misc_register(&fdev->miscdev);
	if (!err) {
		if (is_secured)
			cctx->secure_fdevice = fdev;
		else
			cctx->fdevice = fdev;
	}

	return err;
}

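/*
 * Channel probe: one rpmsg device per remote DSP.  The "label" DT
 * property selects the domain (adsp/mdsp/sdsp/cdsp).  An illustrative
 * (not authoritative) shape of the relevant DT fragment:
 *
 *	fastrpc {
 *		compatible = "qcom,fastrpc";
 *		label = "cdsp";
 *		qcom,non-secure-domain;
 *		...
 *	};
 *
 * When "qcom,vmids" is present, the SCM interface must be available so
 * memory ownership can be reassigned, hence the -EPROBE_DEFER below.
 */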
static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
{
	struct device *rdev = &rpdev->dev;
	struct fastrpc_channel_ctx *data;
	int i, err, domain_id = -1, vmcount;
	const char *domain;
	bool secure_dsp;
	unsigned int vmids[FASTRPC_MAX_VMIDS];

	err = of_property_read_string(rdev->of_node, "label", &domain);
	if (err) {
		dev_info(rdev, "FastRPC Domain not specified in DT\n");
		return err;
	}

	for (i = 0; i <= CDSP_DOMAIN_ID; i++) {
		if (!strcmp(domains[i], domain)) {
			domain_id = i;
			break;
		}
	}

	if (domain_id < 0) {
		dev_info(rdev, "FastRPC Invalid Domain ID %d\n", domain_id);
		return -EINVAL;
	}

	if (of_reserved_mem_device_init_by_idx(rdev, rdev->of_node, 0))
		dev_info(rdev, "no reserved DMA memory for FASTRPC\n");

	vmcount = of_property_read_variable_u32_array(rdev->of_node,
						"qcom,vmids", &vmids[0], 0, FASTRPC_MAX_VMIDS);
	if (vmcount < 0)
		vmcount = 0;
	else if (!qcom_scm_is_available())
		return -EPROBE_DEFER;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	if (vmcount) {
		data->vmcount = vmcount;
		data->perms = BIT(QCOM_SCM_VMID_HLOS);
		for (i = 0; i < data->vmcount; i++) {
			data->vmperms[i].vmid = vmids[i];
			data->vmperms[i].perm = QCOM_SCM_PERM_RWX;
		}
	}

	secure_dsp = !(of_property_read_bool(rdev->of_node, "qcom,non-secure-domain"));
	data->secure = secure_dsp;

	switch (domain_id) {
	case ADSP_DOMAIN_ID:
	case MDSP_DOMAIN_ID:
	case SDSP_DOMAIN_ID:
		/* Unsigned PD offloading is only supported on CDSP */
		data->unsigned_support = false;
		err = fastrpc_device_register(rdev, data, secure_dsp, domains[domain_id]);
		if (err)
			goto fdev_error;
		break;
	case CDSP_DOMAIN_ID:
		data->unsigned_support = true;
		/* Create both device nodes so that we can allow both Signed and Unsigned PD */
		err = fastrpc_device_register(rdev, data, true, domains[domain_id]);
		if (err)
			goto fdev_error;

		err = fastrpc_device_register(rdev, data, false, domains[domain_id]);
		if (err)
			goto fdev_error;
		break;
	default:
		err = -EINVAL;
		goto fdev_error;
	}

	kref_init(&data->refcount);

	dev_set_drvdata(&rpdev->dev, data);
	rdev->dma_mask = &data->dma_mask;
	dma_set_mask_and_coherent(rdev, DMA_BIT_MASK(32));
	INIT_LIST_HEAD(&data->users);
	INIT_LIST_HEAD(&data->invoke_interrupted_mmaps);
	spin_lock_init(&data->lock);
	idr_init(&data->ctx_idr);
	data->domain_id = domain_id;
	data->rpdev = rpdev;

	return of_platform_populate(rdev->of_node, NULL, NULL, rdev);
fdev_error:
	kfree(data);
	return err;
}

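/*
 * Complete every pending invocation for a user so that callers blocked
 * in an invoke can return when the channel goes away.
 */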
static void fastrpc_notify_users(struct fastrpc_user *user)
{
	struct fastrpc_invoke_ctx *ctx;

	spin_lock(&user->lock);
	list_for_each_entry(ctx, &user->pending, node)
		complete(&ctx->work);
	spin_unlock(&user->lock);
}

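/*
 * Channel teardown: wake all waiters, remove the device nodes, drop
 * mmaps left behind by interrupted invocations and release the channel
 * reference taken at probe time.
 */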
static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
	struct fastrpc_buf *buf, *b;
	struct fastrpc_user *user;
	unsigned long flags;

	spin_lock_irqsave(&cctx->lock, flags);
	list_for_each_entry(user, &cctx->users, user)
		fastrpc_notify_users(user);
	spin_unlock_irqrestore(&cctx->lock, flags);

	if (cctx->fdevice)
		misc_deregister(&cctx->fdevice->miscdev);

	if (cctx->secure_fdevice)
		misc_deregister(&cctx->secure_fdevice->miscdev);

	list_for_each_entry_safe(buf, b, &cctx->invoke_interrupted_mmaps, node)
		list_del(&buf->node);

	if (cctx->remote_heap)
		fastrpc_buf_free(cctx->remote_heap);

	of_platform_depopulate(&rpdev->dev);

	cctx->rpdev = NULL;
	fastrpc_channel_ctx_put(cctx);
}

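/*
 * rpmsg receive path for invocation responses.  The context ID is
 * recovered from rsp->ctx by undoing the packing applied when the
 * context was allocated (IDR id shifted left by 4, selected via
 * FASTRPC_CTXID_MASK), then the matching waiter is completed.
 */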
static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
				  int len, void *priv, u32 addr)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
	struct fastrpc_invoke_rsp *rsp = data;
	struct fastrpc_invoke_ctx *ctx;
	unsigned long flags;
	unsigned long ctxid;

	if (len < sizeof(*rsp))
		return -EINVAL;

	ctxid = ((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);

	spin_lock_irqsave(&cctx->lock, flags);
	ctx = idr_find(&cctx->ctx_idr, ctxid);
	spin_unlock_irqrestore(&cctx->lock, flags);

	if (!ctx) {
		dev_err(&rpdev->dev, "No context ID matches response\n");
		return -ENOENT;
	}

	ctx->retval = rsp->retval;
	complete(&ctx->work);

	/*
	 * The DMA buffer associated with the context cannot be freed in
	 * interrupt context so schedule it through a worker thread to
	 * avoid a kernel BUG.
	 */
	schedule_work(&ctx->put_work);

	return 0;
}

static const struct of_device_id fastrpc_rpmsg_of_match[] = {
	{ .compatible = "qcom,fastrpc" },
	{ },
};
MODULE_DEVICE_TABLE(of, fastrpc_rpmsg_of_match);

static struct rpmsg_driver fastrpc_driver = {
	.probe = fastrpc_rpmsg_probe,
	.remove = fastrpc_rpmsg_remove,
	.callback = fastrpc_rpmsg_callback,
	.drv = {
		.name = "qcom,fastrpc",
		.of_match_table = fastrpc_rpmsg_of_match,
	},
};

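/*
 * Module init: register the context-bank platform driver first, then
 * the rpmsg channel driver whose probe populates the compute-cb child
 * nodes; the platform driver is unregistered again if rpmsg
 * registration fails.
 */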
static int fastrpc_init(void)
{
	int ret;

	ret = platform_driver_register(&fastrpc_cb_driver);
	if (ret < 0) {
		pr_err("fastrpc: failed to register cb driver\n");
		return ret;
	}

	ret = register_rpmsg_driver(&fastrpc_driver);
	if (ret < 0) {
		pr_err("fastrpc: failed to register rpmsg driver\n");
		platform_driver_unregister(&fastrpc_cb_driver);
		return ret;
	}

	return 0;
}
module_init(fastrpc_init);

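/* Module exit: unregister both drivers registered in fastrpc_init(). */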
static void fastrpc_exit(void)
{
	platform_driver_unregister(&fastrpc_cb_driver);
	unregister_rpmsg_driver(&fastrpc_driver);
}
module_exit(fastrpc_exit);

MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(DMA_BUF);