/*
 * cmm.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * The Communication (Shared) Memory Management (CMM) module provides
 * shared memory management services for DSP/BIOS Bridge data streaming
 * and messaging.
 *
 * Multiple shared memory segments can be registered with CMM.
 * Each registered SM segment is represented by a SM "allocator" that
 * describes a block of physically contiguous shared memory used for
 * future allocations by CMM.
 *
 * Memory is coalesced back to the appropriate heap when a buffer is
 * freed.
 *
 * Notes:
 *   Va: Virtual address.
 *   Pa: Physical or kernel system address.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>        /* struct mutex */
#include <linux/sched.h>        /* current->tgid */
#include <linux/slab.h>         /* kzalloc()/kfree() */
#include <linux/device.h>       /* dev_dbg() */
#include <linux/io.h>           /* __raw_writel() */

/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>

/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
#include <dspbridge/proc.h>

/* ----------------------------------- This */
#include <dspbridge/cmm.h>

/* ----------------------------------- Defines, Data Structures, Typedefs */
#define NEXT_PA(pnode)   (pnode->pa + pnode->size)

/* Other bus/platform translations */
#define DSPPA2GPPPA(base, x, y)  ((x)+(y))
#define GPPPA2DSPPA(base, x, y)  ((x)-(y))

/*
 * Allocators define a block of contiguous memory used for future allocations.
 *
 * sma - shared memory allocator.
 * vma - virtual memory allocator (not used).
 */
struct cmm_allocator {          /* sma */
        unsigned int shm_base;  /* Start of physical SM block */
        u32 sm_size;            /* Size of SM block in bytes */
        unsigned int vm_base;   /* Start of VM block. (Dev driver
                                 * context for 'sma') */
        u32 dsp_phys_addr_offset;       /* DSP PA to GPP PA offset for this
                                         * SM space */
        s8 c_factor;            /* DSPPa to GPPPa Conversion Factor */
        unsigned int dsp_base;  /* DSP virt base byte address */
        u32 dsp_size;           /* DSP seg size in bytes */
        struct cmm_object *cmm_mgr;     /* back ref to parent mgr */
        /* node list of available memory */
        struct list_head free_list;
        /* node list of memory in use */
        struct list_head in_use_list;
};

struct cmm_xlator {             /* Pa<->Va translator object */
        /* CMM object this translator is associated with */
        struct cmm_object *cmm_mgr;
        /*
         * Client process virtual base address that corresponds to phys SM
         * base address for translator's seg_id.
         * Only 1 segment ID currently supported.
         */
        unsigned int virt_base; /* virtual base address */
        u32 virt_size;          /* size of virt space in bytes */
        u32 seg_id;             /* Segment Id */
};
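/*
 * Illustrative sketch (editor's note, not driver code): the translation
 * macros above reduce to plain offset arithmetic. With a hypothetical
 * allocator whose dsp_phys_addr_offset is 0x1000 and whose c_factor is 1:
 *
 *      u32 gpp_pa = 0x87000000;
 *      u32 dsp_pa = GPPPA2DSPPA(0, gpp_pa, 0x1000 * 1);  yields 0x86FFF000
 *      u32 back   = DSPPA2GPPPA(0, dsp_pa, 0x1000 * 1);  yields 0x87000000
 *
 * The 'base' argument is unused by both macros; cmm_xlator_translate()
 * below passes the segment base for documentation purposes only.
 */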
/* CMM Mgr */
struct cmm_object {
        /*
         * The CMM lock serializes multi-threaded access to the memory
         * manager.
         */
        struct mutex cmm_lock;  /* Lock to access cmm mgr */
        struct list_head node_free_list;        /* Free list of memory nodes */
        u32 min_block_size;     /* Min SM block; default 16 bytes */
        u32 page_size;          /* Memory Page size (1k/4k) */
        /* GPP SM segment ptrs */
        struct cmm_allocator *pa_gppsm_seg_tab[CMM_MAXGPPSEGS];
};

/* Default CMM Mgr attributes */
static struct cmm_mgrattrs cmm_dfltmgrattrs = {
        /* min_block_size, min block size(bytes) allocated by cmm mgr */
        16
};

/* Default allocation attributes */
static struct cmm_attrs cmm_dfltalctattrs = {
        1               /* seg_id, default segment Id for allocator */
};

/* Address translator default attrs */
static struct cmm_xlatorattrs cmm_dfltxlatorattrs = {
        /* seg_id, does not have to match cmm_dfltalctattrs seg_id */
        1,
        0,                      /* dsp_bufs */
        0,                      /* dsp_buf_size */
        NULL,                   /* vm_base */
        0,                      /* vm_size */
};

/* SM node representing a block of memory. */
struct cmm_mnode {
        struct list_head link;  /* must be 1st element */
        u32 pa;                 /* Phys addr */
        u32 va;                 /* Virtual address in device process context */
        u32 size;               /* SM block size in bytes */
        u32 client_proc;        /* Process that allocated this mem block */
};

/* ----------------------------------- Function Prototypes */
static void add_to_free_list(struct cmm_allocator *allocator,
                             struct cmm_mnode *pnode);
static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
                                           u32 ul_seg_id);
static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
                                        u32 usize);
static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
                                  u32 dw_va, u32 ul_size);
/* get available slot for new allocator */
static s32 get_slot(struct cmm_object *cmm_mgr_obj);
static void un_register_gppsm_seg(struct cmm_allocator *psma);

/*
 * ======== cmm_calloc_buf ========
 * Purpose:
 *      Allocate a SM buffer, zero contents, and return the physical address
 *      and optional driver context virtual address (pp_buf_va).
 *
 *      The freelist is sorted in increasing size order. Get the first
 *      block that satisfies the request and put the remainder back on
 *      the freelist if it is large enough. The kept block is placed on
 *      the inUseList.
 */
void *cmm_calloc_buf(struct cmm_object *hcmm_mgr, u32 usize,
                     struct cmm_attrs *pattrs, void **pp_buf_va)
{
        struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
        void *buf_pa = NULL;
        struct cmm_mnode *pnode = NULL;
        struct cmm_mnode *new_node = NULL;
        struct cmm_allocator *allocator = NULL;
        u32 delta_size;
        u8 *pbyte = NULL;
        s32 cnt;

        if (pattrs == NULL)
                pattrs = &cmm_dfltalctattrs;

        if (pp_buf_va != NULL)
                *pp_buf_va = NULL;

        if (cmm_mgr_obj && (usize != 0)) {
                /*
                 * Take the lock up front so the unlock below is balanced
                 * even when seg_id does not select a SM segment.
                 */
                mutex_lock(&cmm_mgr_obj->cmm_lock);
                if (pattrs->seg_id > 0) {
                        /* SegId > 0 is SM */
                        /* get the allocator object for this segment id */
                        allocator = get_allocator(cmm_mgr_obj, pattrs->seg_id);
                        /* keep block size a multiple of min_block_size */
                        usize = ((usize - 1) &
                                 ~(cmm_mgr_obj->min_block_size - 1))
                                + cmm_mgr_obj->min_block_size;
                        pnode = get_free_block(allocator, usize);
                }
                if (pnode) {
                        delta_size = (pnode->size - usize);
                        if (delta_size >= cmm_mgr_obj->min_block_size) {
                                /* create a new block with the leftovers and
                                 * add to freelist */
                                new_node = get_node(cmm_mgr_obj,
                                                    pnode->pa + usize,
                                                    pnode->va + usize,
                                                    (u32) delta_size);
                                /* leftovers go free */
                                add_to_free_list(allocator, new_node);
                                /* adjust our node's size */
                                pnode->size = usize;
                        }
                        /* Tag node with client process requesting allocation
                         * We'll need to free up a process's alloc'd SM if the
                         * client process goes away.
                         */
                        /* Return TGID instead of process handle */
                        pnode->client_proc = current->tgid;

                        /* put our node on InUse list */
                        list_add_tail(&pnode->link, &allocator->in_use_list);
                        buf_pa = (void *)pnode->pa;     /* physical address */
                        /* clear mem */
                        pbyte = (u8 *) pnode->va;
                        for (cnt = 0; cnt < (s32) usize; cnt++, pbyte++)
                                *pbyte = 0;

                        if (pp_buf_va != NULL) {
                                /* Virtual address */
                                *pp_buf_va = (void *)pnode->va;
                        }
                }
                mutex_unlock(&cmm_mgr_obj->cmm_lock);
        }
        return buf_pa;
}
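/*
 * Illustrative sketch (editor's note, not driver code): a typical caller
 * obtains the manager handle and allocates a zeroed SM block. 'hprocessor'
 * is assumed to be a valid processor handle.
 *
 *      struct cmm_object *hcmm_mgr;
 *      void *va, *pa;
 *
 *      if (!cmm_get_handle(hprocessor, &hcmm_mgr)) {
 *              pa = cmm_calloc_buf(hcmm_mgr, 64, NULL, &va);
 *              if (pa)
 *                      cmm_free_buf(hcmm_mgr, pa, 1);
 *      }
 *
 * A NULL attrs pointer selects cmm_dfltalctattrs (segment 1); the request
 * is rounded up to a multiple of min_block_size before the free-list search.
 */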
/*
 * ======== cmm_create ========
 * Purpose:
 *      Create a communication memory manager object.
 */
int cmm_create(struct cmm_object **ph_cmm_mgr,
               struct dev_object *hdev_obj,
               const struct cmm_mgrattrs *mgr_attrts)
{
        struct cmm_object *cmm_obj = NULL;
        int status = 0;

        *ph_cmm_mgr = NULL;
        /* create, zero, and tag a cmm mgr object */
        cmm_obj = kzalloc(sizeof(struct cmm_object), GFP_KERNEL);
        if (!cmm_obj)
                return -ENOMEM;

        if (mgr_attrts == NULL)
                mgr_attrts = &cmm_dfltmgrattrs; /* set defaults */

        /* save away smallest block allocation for this cmm mgr */
        cmm_obj->min_block_size = mgr_attrts->min_block_size;
        cmm_obj->page_size = PAGE_SIZE;

        /* create node free list */
        INIT_LIST_HEAD(&cmm_obj->node_free_list);
        mutex_init(&cmm_obj->cmm_lock);
        *ph_cmm_mgr = cmm_obj;

        return status;
}

/*
 * ======== cmm_destroy ========
 * Purpose:
 *      Release the communication memory manager resources.
 */
int cmm_destroy(struct cmm_object *hcmm_mgr, bool force)
{
        struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
        struct cmm_info temp_info;
        int status = 0;
        s32 slot_seg;
        struct cmm_mnode *node, *tmp;

        if (!hcmm_mgr) {
                status = -EFAULT;
                return status;
        }
        /* If not force then fail if outstanding allocations exist.
         * cmm_get_info() takes cmm_lock itself, so query it before we
         * acquire the lock here to avoid self-deadlock. */
        if (!force) {
                /* Check for outstanding memory allocations */
                status = cmm_get_info(hcmm_mgr, &temp_info);
                if (!status) {
                        if (temp_info.total_in_use_cnt > 0) {
                                /* outstanding allocations */
                                status = -EPERM;
                        }
                }
        }
        mutex_lock(&cmm_mgr_obj->cmm_lock);
        if (!status) {
                /* UnRegister SM allocator */
                for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
                        if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] != NULL) {
                                un_register_gppsm_seg
                                    (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg]);
                                /* Set slot to NULL for future reuse */
                                cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = NULL;
                        }
                }
        }
        list_for_each_entry_safe(node, tmp, &cmm_mgr_obj->node_free_list,
                                 link) {
                list_del(&node->link);
                kfree(node);
        }
        mutex_unlock(&cmm_mgr_obj->cmm_lock);
        if (!status) {
                /* delete the lock & cmm mgr object */
                mutex_destroy(&cmm_mgr_obj->cmm_lock);
                kfree(cmm_mgr_obj);
        }
        return status;
}

/*
 * ======== cmm_free_buf ========
 * Purpose:
 *      Free the given buffer.
 */
int cmm_free_buf(struct cmm_object *hcmm_mgr, void *buf_pa, u32 ul_seg_id)
{
        struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
        int status = -EFAULT;
        struct cmm_mnode *curr, *tmp;
        struct cmm_allocator *allocator;
        struct cmm_attrs *pattrs;

        if (ul_seg_id == 0) {
                pattrs = &cmm_dfltalctattrs;
                ul_seg_id = pattrs->seg_id;
        }
        if (!hcmm_mgr || !(ul_seg_id > 0)) {
                status = -EFAULT;
                return status;
        }

        allocator = get_allocator(cmm_mgr_obj, ul_seg_id);
        if (!allocator)
                return status;

        mutex_lock(&cmm_mgr_obj->cmm_lock);
        list_for_each_entry_safe(curr, tmp, &allocator->in_use_list, link) {
                if (curr->pa == (u32) buf_pa) {
                        list_del(&curr->link);
                        add_to_free_list(allocator, curr);
                        status = 0;
                        break;
                }
        }
        mutex_unlock(&cmm_mgr_obj->cmm_lock);

        return status;
}
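/*
 * Illustrative sketch (editor's note, not driver code): manager lifecycle.
 * 'hdev_obj' and the segment parameters (gpp_pa, size, dsp_base, dsp_size,
 * gpp_va) are assumed to come from the platform setup code.
 *
 *      struct cmm_object *hcmm_mgr;
 *      u32 seg_id;
 *
 *      if (!cmm_create(&hcmm_mgr, hdev_obj, NULL)) {
 *              cmm_register_gppsm_seg(hcmm_mgr, gpp_pa, size, 0, 1,
 *                                     dsp_base, dsp_size, &seg_id, gpp_va);
 *              ...
 *              cmm_destroy(hcmm_mgr, false);
 *      }
 */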
/*
 * ======== cmm_get_handle ========
 * Purpose:
 *      Return the communication memory manager object for this device.
 *      This is typically called from the client process.
 */
int cmm_get_handle(void *hprocessor, struct cmm_object **ph_cmm_mgr)
{
        int status = 0;
        struct dev_object *hdev_obj;

        if (hprocessor != NULL)
                status = proc_get_dev_object(hprocessor, &hdev_obj);
        else
                hdev_obj = dev_get_first();     /* default */

        if (!status)
                status = dev_get_cmm_mgr(hdev_obj, ph_cmm_mgr);

        return status;
}

/*
 * ======== cmm_get_info ========
 * Purpose:
 *      Return the current memory utilization information.
 */
int cmm_get_info(struct cmm_object *hcmm_mgr, struct cmm_info *cmm_info_obj)
{
        struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
        u32 ul_seg;
        int status = 0;
        struct cmm_allocator *altr;
        struct cmm_mnode *curr;

        if (!hcmm_mgr) {
                status = -EFAULT;
                return status;
        }
        mutex_lock(&cmm_mgr_obj->cmm_lock);
        cmm_info_obj->num_gppsm_segs = 0;       /* # of SM segments */
        /* Total # of outstanding alloc */
        cmm_info_obj->total_in_use_cnt = 0;
        /* min block size */
        cmm_info_obj->min_block_size = cmm_mgr_obj->min_block_size;
        /* check SM memory segments */
        for (ul_seg = 1; ul_seg <= CMM_MAXGPPSEGS; ul_seg++) {
                /* get the allocator object for this segment id */
                altr = get_allocator(cmm_mgr_obj, ul_seg);
                if (!altr)
                        continue;
                cmm_info_obj->num_gppsm_segs++;
                cmm_info_obj->seg_info[ul_seg - 1].seg_base_pa =
                    altr->shm_base - altr->dsp_size;
                cmm_info_obj->seg_info[ul_seg - 1].total_seg_size =
                    altr->dsp_size + altr->sm_size;
                cmm_info_obj->seg_info[ul_seg - 1].gpp_base_pa =
                    altr->shm_base;
                cmm_info_obj->seg_info[ul_seg - 1].gpp_size = altr->sm_size;
                cmm_info_obj->seg_info[ul_seg - 1].dsp_base_va =
                    altr->dsp_base;
                cmm_info_obj->seg_info[ul_seg - 1].dsp_size = altr->dsp_size;
                cmm_info_obj->seg_info[ul_seg - 1].seg_base_va =
                    altr->vm_base - altr->dsp_size;
                cmm_info_obj->seg_info[ul_seg - 1].in_use_cnt = 0;

                list_for_each_entry(curr, &altr->in_use_list, link) {
                        cmm_info_obj->total_in_use_cnt++;
                        cmm_info_obj->seg_info[ul_seg - 1].in_use_cnt++;
                }
        }
        mutex_unlock(&cmm_mgr_obj->cmm_lock);
        return status;
}
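/*
 * Illustrative sketch (editor's note, not driver code): checking for
 * outstanding allocations before tearing the manager down.
 *
 *      struct cmm_info info;
 *
 *      if (!cmm_get_info(hcmm_mgr, &info) && info.total_in_use_cnt == 0)
 *              cmm_destroy(hcmm_mgr, false);
 */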
/*
 * ======== cmm_register_gppsm_seg ========
 * Purpose:
 *      Register a block of SM with the CMM to be used for later GPP SM
 *      allocations.
 */
int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr,
                           u32 dw_gpp_base_pa, u32 ul_size,
                           u32 dsp_addr_offset, s8 c_factor,
                           u32 dw_dsp_base, u32 ul_dsp_size,
                           u32 *sgmt_id, u32 gpp_base_va)
{
        struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
        struct cmm_allocator *psma = NULL;
        int status = 0;
        struct cmm_mnode *new_node;
        s32 slot_seg;

        dev_dbg(bridge, "%s: dw_gpp_base_pa %x ul_size %x dsp_addr_offset %x dw_dsp_base %x ul_dsp_size %x gpp_base_va %x\n",
                __func__, dw_gpp_base_pa, ul_size, dsp_addr_offset,
                dw_dsp_base, ul_dsp_size, gpp_base_va);

        if (!hcmm_mgr)
                return -EFAULT;

        /* make sure we have room for another allocator */
        mutex_lock(&cmm_mgr_obj->cmm_lock);

        slot_seg = get_slot(cmm_mgr_obj);
        if (slot_seg < 0) {
                status = -EPERM;
                goto func_end;
        }

        /* Check if input ul_size is big enough to alloc at least one block */
        if (ul_size < cmm_mgr_obj->min_block_size) {
                status = -EINVAL;
                goto func_end;
        }

        /* create, zero, and tag an SM allocator object */
        psma = kzalloc(sizeof(struct cmm_allocator), GFP_KERNEL);
        if (!psma) {
                status = -ENOMEM;
                goto func_end;
        }

        psma->cmm_mgr = hcmm_mgr;       /* ref to parent */
        psma->shm_base = dw_gpp_base_pa;        /* SM Base phys */
        psma->sm_size = ul_size;        /* SM segment size in bytes */
        psma->vm_base = gpp_base_va;
        psma->dsp_phys_addr_offset = dsp_addr_offset;
        psma->c_factor = c_factor;
        psma->dsp_base = dw_dsp_base;
        psma->dsp_size = ul_dsp_size;
        if (psma->vm_base == 0) {
                status = -EPERM;
                goto func_end;
        }
        /* return the actual segment identifier */
        *sgmt_id = (u32) slot_seg + 1;

        INIT_LIST_HEAD(&psma->free_list);
        INIT_LIST_HEAD(&psma->in_use_list);

        /* Get a mem node for this hunk-o-memory */
        new_node = get_node(cmm_mgr_obj, dw_gpp_base_pa,
                            psma->vm_base, ul_size);
        /* Place node on the SM allocator's free list */
        if (new_node) {
                list_add_tail(&new_node->link, &psma->free_list);
        } else {
                status = -ENOMEM;
                goto func_end;
        }
        /* make entry */
        cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = psma;

func_end:
        /* Cleanup allocator */
        if (status && psma)
                un_register_gppsm_seg(psma);
        mutex_unlock(&cmm_mgr_obj->cmm_lock);

        return status;
}

/*
 * ======== cmm_un_register_gppsm_seg ========
 * Purpose:
 *      UnRegister GPP SM segments with the CMM.
 */
int cmm_un_register_gppsm_seg(struct cmm_object *hcmm_mgr, u32 ul_seg_id)
{
        struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
        int status = 0;
        struct cmm_allocator *psma;
        u32 ul_id = ul_seg_id;

        if (!hcmm_mgr)
                return -EFAULT;

        if (ul_seg_id == CMM_ALLSEGMENTS)
                ul_id = 1;

        if ((ul_id <= 0) || (ul_id > CMM_MAXGPPSEGS))
                return -EINVAL;

        /*
         * FIXME: CMM_MAXGPPSEGS == 1. why use a while cycle? Seems to me like
         * the ul_seg_id is not needed here. It must be always 1.
         */
        while (ul_id <= CMM_MAXGPPSEGS) {
                mutex_lock(&cmm_mgr_obj->cmm_lock);
                /* slot = seg_id-1 */
                psma = cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1];
                if (psma != NULL) {
                        un_register_gppsm_seg(psma);
                        /* Set alctr ptr to NULL for future reuse */
                        cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1] = NULL;
                } else if (ul_seg_id != CMM_ALLSEGMENTS) {
                        status = -EPERM;
                }
                mutex_unlock(&cmm_mgr_obj->cmm_lock);
                if (ul_seg_id != CMM_ALLSEGMENTS)
                        break;
                ul_id++;
        }       /* end while */

        return status;
}
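/*
 * Illustrative sketch (editor's note, not driver code): dropping every
 * registered segment at once rather than by explicit segment id.
 *
 *      cmm_un_register_gppsm_seg(hcmm_mgr, CMM_ALLSEGMENTS);
 */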
/*
 * ======== un_register_gppsm_seg ========
 * Purpose:
 *      UnRegister the SM allocator by freeing all its resources and
 *      nulling cmm mgr table entry.
 * Note:
 *      This routine is always called within cmm lock crit sect.
 */
static void un_register_gppsm_seg(struct cmm_allocator *psma)
{
        struct cmm_mnode *curr, *tmp;

        /* free nodes on free list */
        list_for_each_entry_safe(curr, tmp, &psma->free_list, link) {
                list_del(&curr->link);
                kfree(curr);
        }

        /* free nodes on InUse list */
        list_for_each_entry_safe(curr, tmp, &psma->in_use_list, link) {
                list_del(&curr->link);
                kfree(curr);
        }

        if ((void *)psma->vm_base != NULL)
                MEM_UNMAP_LINEAR_ADDRESS((void *)psma->vm_base);

        /* Free allocator itself */
        kfree(psma);
}

/*
 * ======== get_slot ========
 * Purpose:
 *      An available slot # is returned. Returns negative on failure.
 */
static s32 get_slot(struct cmm_object *cmm_mgr_obj)
{
        s32 slot_seg = -1;      /* neg on failure */

        /* get first available slot in cmm mgr SMSegTab[] */
        for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
                if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] == NULL)
                        break;
        }
        if (slot_seg == CMM_MAXGPPSEGS)
                slot_seg = -1;  /* failed */

        return slot_seg;
}

/*
 * ======== get_node ========
 * Purpose:
 *      Get a memory node from freelist or create a new one.
 */
static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
                                  u32 dw_va, u32 ul_size)
{
        struct cmm_mnode *pnode;

        /* Check cmm mgr's node freelist */
        if (list_empty(&cmm_mgr_obj->node_free_list)) {
                pnode = kzalloc(sizeof(struct cmm_mnode), GFP_KERNEL);
                if (!pnode)
                        return NULL;
        } else {
                /* surely a valid element */
                pnode = list_first_entry(&cmm_mgr_obj->node_free_list,
                                         struct cmm_mnode, link);
                list_del_init(&pnode->link);
        }

        pnode->pa = dw_pa;
        pnode->va = dw_va;
        pnode->size = ul_size;

        return pnode;
}

/*
 * ======== delete_node ========
 * Purpose:
 *      Put a memory node on the cmm nodelist for later use.
 *      Doesn't actually delete the node. Heap thrashing friendly.
 */
static void delete_node(struct cmm_object *cmm_mgr_obj,
                        struct cmm_mnode *pnode)
{
        list_add_tail(&pnode->link, &cmm_mgr_obj->node_free_list);
}

/*
 * ======== get_free_block ========
 * Purpose:
 *      Scan the free block list and return the first block that satisfies
 *      the size.
 */
static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
                                        u32 usize)
{
        struct cmm_mnode *node, *tmp;

        if (!allocator)
                return NULL;

        list_for_each_entry_safe(node, tmp, &allocator->free_list, link) {
                if (usize <= node->size) {
                        list_del(&node->link);
                        return node;
                }
        }

        return NULL;
}

/*
 * ======== add_to_free_list ========
 * Purpose:
 *      Coalesce node into the freelist in ascending size order.
 */
static void add_to_free_list(struct cmm_allocator *allocator,
                             struct cmm_mnode *node)
{
        struct cmm_mnode *curr;

        if (!node) {
                pr_err("%s: failed - node is NULL\n", __func__);
                return;
        }

        /* first try to merge the node with a physically adjacent block */
        list_for_each_entry(curr, &allocator->free_list, link) {
                if (NEXT_PA(curr) == node->pa) {
                        curr->size += node->size;
                        delete_node(allocator->cmm_mgr, node);
                        return;
                }
                if (curr->pa == NEXT_PA(node)) {
                        curr->pa = node->pa;
                        curr->va = node->va;
                        curr->size += node->size;
                        delete_node(allocator->cmm_mgr, node);
                        return;
                }
        }

        /* otherwise insert it, keeping the list sorted by ascending size */
        list_for_each_entry(curr, &allocator->free_list, link) {
                if (curr->size >= node->size) {
                        list_add_tail(&node->link, &curr->link);
                        return;
                }
        }
        list_add_tail(&node->link, &allocator->free_list);
}

/*
 * ======== get_allocator ========
 * Purpose:
 *      Return the allocator for the given SM Segid.
 *      SegIds: 1,2,3..max.
 */
static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
                                           u32 ul_seg_id)
{
        return cmm_mgr_obj->pa_gppsm_seg_tab[ul_seg_id - 1];
}
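/*
 * Illustrative worked example (editor's note, not driver code): with a
 * free block at pa 0x1000 of size 0x100, freeing an in-use block at
 * pa 0x1100 of size 0x80 satisfies NEXT_PA(free block) == 0x1100, so
 * add_to_free_list() merges the two into one free block at 0x1000 of
 * size 0x180; no new list entry is created, and the spare node is parked
 * on node_free_list via delete_node().
 */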
* A "translator" object is created by a node/stream for each SM seg used. */ /* * ======== cmm_xlator_create ======== * Purpose: * Create an address translator object. */ int cmm_xlator_create(struct cmm_xlatorobject **xlator, struct cmm_object *hcmm_mgr, struct cmm_xlatorattrs *xlator_attrs) { struct cmm_xlator *xlator_object = NULL; int status = 0; *xlator = NULL; if (xlator_attrs == NULL) xlator_attrs = &cmm_dfltxlatorattrs; /* set defaults */ xlator_object = kzalloc(sizeof(struct cmm_xlator), GFP_KERNEL); if (xlator_object != NULL) { xlator_object->cmm_mgr = hcmm_mgr; /* ref back to CMM */ /* SM seg_id */ xlator_object->seg_id = xlator_attrs->seg_id; } else { status = -ENOMEM; } if (!status) *xlator = (struct cmm_xlatorobject *)xlator_object; return status; } /* * ======== cmm_xlator_alloc_buf ======== */ void *cmm_xlator_alloc_buf(struct cmm_xlatorobject *xlator, void *va_buf, u32 pa_size) { struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator; void *pbuf = NULL; void *tmp_va_buff; struct cmm_attrs attrs; if (xlator_obj) { attrs.seg_id = xlator_obj->seg_id; __raw_writel(0, va_buf); /* Alloc SM */ pbuf = cmm_calloc_buf(xlator_obj->cmm_mgr, pa_size, &attrs, NULL); if (pbuf) { /* convert to translator(node/strm) process Virtual * address */ tmp_va_buff = cmm_xlator_translate(xlator, pbuf, CMM_PA2VA); __raw_writel((u32)tmp_va_buff, va_buf); } } return pbuf; } /* * ======== cmm_xlator_free_buf ======== * Purpose: * Free the given SM buffer and descriptor. * Does not free virtual memory. */ int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator, void *buf_va) { struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator; int status = -EPERM; void *buf_pa = NULL; if (xlator_obj) { /* convert Va to Pa so we can free it. */ buf_pa = cmm_xlator_translate(xlator, buf_va, CMM_VA2PA); if (buf_pa) { status = cmm_free_buf(xlator_obj->cmm_mgr, buf_pa, xlator_obj->seg_id); if (status) { /* Uh oh, this shouldn't happen. Descriptor * gone! */ pr_err("%s, line %d: Assertion failed\n", __FILE__, __LINE__); } } } return status; } /* * ======== cmm_xlator_info ======== * Purpose: * Set/Get translator info. 
/*
 * ======== cmm_xlator_info ========
 * Purpose:
 *      Set/Get translator info.
 */
int cmm_xlator_info(struct cmm_xlatorobject *xlator, u8 **paddr,
                    u32 ul_size, u32 segm_id, bool set_info)
{
        struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
        int status = 0;

        if (xlator_obj) {
                if (set_info) {
                        /* set translators virtual address range */
                        xlator_obj->virt_base = (u32) *paddr;
                        xlator_obj->virt_size = ul_size;
                } else {
                        /* return virt base address */
                        *paddr = (u8 *) xlator_obj->virt_base;
                }
        } else {
                status = -EFAULT;
        }

        return status;
}

/*
 * ======== cmm_xlator_translate ========
 */
void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr,
                           enum cmm_xlatetype xtype)
{
        u32 dw_addr_xlate = 0;
        struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
        struct cmm_object *cmm_mgr_obj = NULL;
        struct cmm_allocator *allocator = NULL;
        u32 dw_offset = 0;

        if (!xlator_obj)
                goto loop_cont;

        cmm_mgr_obj = (struct cmm_object *)xlator_obj->cmm_mgr;
        /* get this translator's default SM allocator */
        allocator = cmm_mgr_obj->pa_gppsm_seg_tab[xlator_obj->seg_id - 1];
        if (!allocator)
                goto loop_cont;

        if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_VA2PA) ||
            (xtype == CMM_PA2VA)) {
                if (xtype == CMM_PA2VA) {
                        /* Gpp Va = Va Base + offset */
                        dw_offset = (u8 *) paddr -
                            (u8 *) (allocator->shm_base -
                                    allocator->dsp_size);
                        dw_addr_xlate = xlator_obj->virt_base + dw_offset;
                        /* Check if translated Va base is in range */
                        if ((dw_addr_xlate < xlator_obj->virt_base) ||
                            (dw_addr_xlate >=
                             (xlator_obj->virt_base +
                              xlator_obj->virt_size))) {
                                dw_addr_xlate = 0;      /* bad address */
                        }
                } else {
                        /* Gpp PA = Gpp Base + offset */
                        dw_offset =
                            (u8 *) paddr - (u8 *) xlator_obj->virt_base;
                        dw_addr_xlate =
                            allocator->shm_base - allocator->dsp_size +
                            dw_offset;
                }
        } else {
                dw_addr_xlate = (u32) paddr;
        }
        /* Now convert address to proper target physical address if needed */
        if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_PA2DSPPA)) {
                /* Got Gpp Pa now, convert to DSP Pa */
                dw_addr_xlate =
                    GPPPA2DSPPA((allocator->shm_base - allocator->dsp_size),
                                dw_addr_xlate,
                                allocator->dsp_phys_addr_offset *
                                allocator->c_factor);
        } else if (xtype == CMM_DSPPA2PA) {
                /* Got DSP Pa, convert to GPP Pa */
                dw_addr_xlate =
                    DSPPA2GPPPA(allocator->shm_base - allocator->dsp_size,
                                dw_addr_xlate,
                                allocator->dsp_phys_addr_offset *
                                allocator->c_factor);
        }
loop_cont:
        return (void *)dw_addr_xlate;
}
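/*
 * Illustrative worked example (editor's note, not driver code): for
 * CMM_PA2VA with a hypothetical segment whose shm_base is 0x87001000 and
 * dsp_size is 0x1000, and whose translator has virt_base 0x40000000,
 * translating paddr 0x87002000 gives:
 *
 *      dw_offset     = 0x87002000 - (0x87001000 - 0x1000) = 0x2000
 *      dw_addr_xlate = 0x40000000 + 0x2000 = 0x40002000
 *
 * which is returned as the client-process virtual address, provided it
 * falls inside [virt_base, virt_base + virt_size).
 */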