// SPDX-License-Identifier: GPL-2.0
/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006, 2008 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>

#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/tsb.h>
#include <asm/tlb.h>
#include <asm/oplib.h>

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries)
{
        vaddr >>= hash_shift;
        return vaddr & (nentries - 1);
}
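
/* Worked example (an illustration, not from the original source): with
 * hash_shift == PAGE_SHIFT (13) and a 512-entry TSB, vaddr 0x123456000
 * hashes to (0x123456000 >> 13) & 511 == 0x91a2b & 0x1ff == 0x2b.
 * nentries must be a power of two for the mask to work.
 */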

static inline int tag_compare(unsigned long tag, unsigned long vaddr)
{
        return (tag == (vaddr >> 22));
}
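
/* A TSB tag is the virtual address shifted down by 22 bits (VA bits
 * 63:22); the lower address bits are implied by the entry's position
 * in the table (see tsb_hash() above) and by the page offset.
 */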

static void flush_tsb_kernel_range_scan(unsigned long start, unsigned long end)
{
        unsigned long idx;

        for (idx = 0; idx < KERNEL_TSB_NENTRIES; idx++) {
                struct tsb *ent = &swapper_tsb[idx];
                unsigned long match = idx << 13;

                match |= (ent->tag << 22);
                if (match >= start && match < end)
                        ent->tag = (1UL << TSB_TAG_INVALID_BIT);
        }
}

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */
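
/* For large ranges it is cheaper to walk the whole kernel TSB once
 * than to probe it page by page: the scan variant above reconstructs
 * each entry's virtual address from its table index OR'ed with its
 * stored tag (VA bits 63:22) and invalidates anything inside
 * [start, end).  The 2 * KERNEL_TSB_NENTRIES cut-over below is roughly
 * the point at which the per-page loop would issue more probes than a
 * full scan touches entries.
 */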

void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long v;

        if ((end - start) >> PAGE_SHIFT >= 2 * KERNEL_TSB_NENTRIES)
                return flush_tsb_kernel_range_scan(start, end);

        for (v = start; v < end; v += PAGE_SIZE) {
                unsigned long hash = tsb_hash(v, PAGE_SHIFT,
                                              KERNEL_TSB_NENTRIES);
                struct tsb *ent = &swapper_tsb[hash];

                if (tag_compare(ent->tag, v))
                        ent->tag = (1UL << TSB_TAG_INVALID_BIT);
        }
}

static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v,
                                  unsigned long hash_shift,
                                  unsigned long nentries)
{
        unsigned long tag, ent, hash;

        v &= ~0x1UL;
        hash = tsb_hash(v, hash_shift, nentries);
        ent = tsb + (hash * sizeof(struct tsb));
        tag = (v >> 22UL);

        tsb_flush(ent, tag);
}
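
/* The low bit of the batched address is masked off above because the
 * TLB batching code uses it as a flag on the side (an
 * executable-mapping marker, by our reading of the batching code); it
 * is not part of the virtual address proper.  The tag passed to
 * tsb_flush() is VA bits 63:22, matching tag_compare() above.
 */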

static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
                            unsigned long tsb, unsigned long nentries)
{
        unsigned long i;

        for (i = 0; i < tb->tlb_nr; i++)
                __flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
}

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
static void __flush_huge_tsb_one_entry(unsigned long tsb, unsigned long v,
                                       unsigned long hash_shift,
                                       unsigned long nentries,
                                       unsigned int hugepage_shift)
{
        unsigned int hpage_entries;
        unsigned int i;

        hpage_entries = 1 << (hugepage_shift - hash_shift);
        for (i = 0; i < hpage_entries; i++)
                __flush_tsb_one_entry(tsb, v + (i << hash_shift), hash_shift,
                                      nentries);
}
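
/* Example (illustrative figures): flushing one 8MB hugepage mapping
 * out of the base TSB, i.e. hugepage_shift == 23 and hash_shift ==
 * PAGE_SHIFT == 13, touches 1 << (23 - 13) == 1024 consecutive 8K
 * slots; flushing it out of the huge TSB (hash_shift ==
 * REAL_HPAGE_SHIFT == 22) touches just two.
 */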

static void __flush_huge_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
                                 unsigned long tsb, unsigned long nentries,
                                 unsigned int hugepage_shift)
{
        unsigned long i;

        for (i = 0; i < tb->tlb_nr; i++)
                __flush_huge_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift,
                                           nentries, hugepage_shift);
}
#endif
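
/* On cheetah_plus and sun4v hypervisor systems the TSB is accessed via
 * its physical address, hence the __pa() conversions below; older
 * chips reach the TSB through a locked virtual mapping set up in
 * setup_tsb_params().
 */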

void flush_tsb_user(struct tlb_batch *tb)
{
        struct mm_struct *mm = tb->mm;
        unsigned long nentries, base, flags;

        spin_lock_irqsave(&mm->context.lock, flags);

        if (tb->hugepage_shift < REAL_HPAGE_SHIFT) {
                base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
                nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
                if (tlb_type == cheetah_plus || tlb_type == hypervisor)
                        base = __pa(base);
                if (tb->hugepage_shift == PAGE_SHIFT)
                        __flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
#if defined(CONFIG_HUGETLB_PAGE)
                else
                        __flush_huge_tsb_one(tb, PAGE_SHIFT, base, nentries,
                                             tb->hugepage_shift);
#endif
        }
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
        else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
                base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
                nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
                if (tlb_type == cheetah_plus || tlb_type == hypervisor)
                        base = __pa(base);
                __flush_huge_tsb_one(tb, REAL_HPAGE_SHIFT, base, nentries,
                                     tb->hugepage_shift);
        }
#endif
        spin_unlock_irqrestore(&mm->context.lock, flags);
}

void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
                         unsigned int hugepage_shift)
{
        unsigned long nentries, base, flags;

        spin_lock_irqsave(&mm->context.lock, flags);

        if (hugepage_shift < REAL_HPAGE_SHIFT) {
                base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
                nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
                if (tlb_type == cheetah_plus || tlb_type == hypervisor)
                        base = __pa(base);
                if (hugepage_shift == PAGE_SHIFT)
                        __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT,
                                              nentries);
#if defined(CONFIG_HUGETLB_PAGE)
                else
                        __flush_huge_tsb_one_entry(base, vaddr, PAGE_SHIFT,
                                                   nentries, hugepage_shift);
#endif
        }
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
        else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
                base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
                nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
                if (tlb_type == cheetah_plus || tlb_type == hypervisor)
                        base = __pa(base);
                __flush_huge_tsb_one_entry(base, vaddr, REAL_HPAGE_SHIFT,
                                           nentries, hugepage_shift);
        }
#endif
        spin_unlock_irqrestore(&mm->context.lock, flags);
}

#define HV_PGSZ_IDX_BASE        HV_PGSZ_IDX_8K
#define HV_PGSZ_MASK_BASE       HV_PGSZ_MASK_8K

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
#define HV_PGSZ_IDX_HUGE        HV_PGSZ_IDX_4MB
#define HV_PGSZ_MASK_HUGE       HV_PGSZ_MASK_4MB
#endif

static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes)
{
        unsigned long tsb_reg, base, tsb_paddr;
        unsigned long page_sz, tte;

        mm->context.tsb_block[tsb_idx].tsb_nentries =
                tsb_bytes / sizeof(struct tsb);

        switch (tsb_idx) {
        case MM_TSB_BASE:
                base = TSBMAP_8K_BASE;
                break;
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
        case MM_TSB_HUGE:
                base = TSBMAP_4M_BASE;
                break;
#endif
        default:
                BUG();
        }

        tte = pgprot_val(PAGE_KERNEL_LOCKED);
        tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
        BUG_ON(tsb_paddr & (tsb_bytes - 1UL));

        /* Use the smallest page size that can map the whole TSB
         * in one TLB entry.
         */
        switch (tsb_bytes) {
        case 8192 << 0:
                tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
                base += (tsb_paddr & 8192);
#endif
                page_sz = 8192;
                break;

        case 8192 << 1:
                tsb_reg = 0x1UL;
                page_sz = 64 * 1024;
                break;

        case 8192 << 2:
                tsb_reg = 0x2UL;
                page_sz = 64 * 1024;
                break;

        case 8192 << 3:
                tsb_reg = 0x3UL;
                page_sz = 64 * 1024;
                break;

        case 8192 << 4:
                tsb_reg = 0x4UL;
                page_sz = 512 * 1024;
                break;

        case 8192 << 5:
                tsb_reg = 0x5UL;
                page_sz = 512 * 1024;
                break;

        case 8192 << 6:
                tsb_reg = 0x6UL;
                page_sz = 512 * 1024;
                break;

        case 8192 << 7:
                tsb_reg = 0x7UL;
                page_sz = 4 * 1024 * 1024;
                break;

        default:
                printk(KERN_ERR "TSB[%s:%d]: Impossible TSB size %lu, killing process.\n",
                       current->comm, current->pid, tsb_bytes);
                BUG();
        }
        tte |= pte_sz_bits(page_sz);

        if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
                /* Physical mapping, no locked TLB entry for TSB. */
                tsb_reg |= tsb_paddr;

                mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
                mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0;
                mm->context.tsb_block[tsb_idx].tsb_map_pte = 0;
        } else {
                tsb_reg |= base;
                tsb_reg |= (tsb_paddr & (page_sz - 1UL));
                tte |= (tsb_paddr & ~(page_sz - 1UL));

                mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
                mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base;
                mm->context.tsb_block[tsb_idx].tsb_map_pte = tte;
        }

        /* Set up the Hypervisor TSB descriptor. */
        if (tlb_type == hypervisor) {
                struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx];

                switch (tsb_idx) {
                case MM_TSB_BASE:
                        hp->pgsz_idx = HV_PGSZ_IDX_BASE;
                        break;
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
                case MM_TSB_HUGE:
                        hp->pgsz_idx = HV_PGSZ_IDX_HUGE;
                        break;
#endif
                default:
                        BUG();
                }
                hp->assoc = 1;
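                /* Each struct tsb entry is two 8-byte words (tag +
                 * TTE), so the hypervisor wants tsb_bytes / 16 TTE
                 * slots.
                 */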
                hp->num_ttes = tsb_bytes / 16;
                hp->ctx_idx = 0;
                switch (tsb_idx) {
                case MM_TSB_BASE:
                        hp->pgsz_mask = HV_PGSZ_MASK_BASE;
                        break;
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
                case MM_TSB_HUGE:
                        hp->pgsz_mask = HV_PGSZ_MASK_HUGE;
                        break;
#endif
                default:
                        BUG();
                }
                hp->tsb_base = tsb_paddr;
                hp->resv = 0;
        }
}
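
/* Worked example (an illustration): for a 32K TSB, tsb_bytes ==
 * 8192 << 2, so tsb_reg gets size field 0x2 and page_sz == 64K is the
 * smallest page that covers it.  On cheetah_plus/hypervisor the final
 * register value is just that size field OR'ed with the TSB's physical
 * address; on older chips it encodes the locked virtual mapping built
 * from `base' instead.
 */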

struct kmem_cache *pgtable_cache __read_mostly;

static struct kmem_cache *tsb_caches[8] __read_mostly;

static const char *tsb_cache_names[8] = {
        "tsb_8KB",
        "tsb_16KB",
        "tsb_32KB",
        "tsb_64KB",
        "tsb_128KB",
        "tsb_256KB",
        "tsb_512KB",
        "tsb_1MB",
};

void __init pgtable_cache_init(void)
{
        unsigned long i;

        pgtable_cache = kmem_cache_create("pgtable_cache",
                                          PAGE_SIZE, PAGE_SIZE,
                                          0,
                                          _clear_page);
        if (!pgtable_cache) {
                prom_printf("pgtable_cache_init(): Could not create!\n");
                prom_halt();
        }

        for (i = 0; i < ARRAY_SIZE(tsb_cache_names); i++) {
                unsigned long size = 8192 << i;
                const char *name = tsb_cache_names[i];

                tsb_caches[i] = kmem_cache_create(name,
                                                  size, size,
                                                  0, NULL);
                if (!tsb_caches[i]) {
                        prom_printf("Could not create %s cache\n", name);
                        prom_halt();
                }
        }
}

int sysctl_tsb_ratio = -2;

static unsigned long tsb_size_to_rss_limit(unsigned long new_size)
{
        unsigned long num_ents = (new_size / sizeof(struct tsb));

        if (sysctl_tsb_ratio < 0)
                return num_ents - (num_ents >> -sysctl_tsb_ratio);
        else
                return num_ents + (num_ents >> sysctl_tsb_ratio);
}
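
/* With the default sysctl_tsb_ratio of -2 this returns
 * num_ents - num_ents / 4, i.e. we let the RSS reach 3/4 of the TSB's
 * capacity before growing it (matching the comment below).
 */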

/* When the RSS of an address space exceeds tsb_rss_limit for a TSB,
 * do_sparc64_fault() invokes this routine to try to grow it.
 *
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * tsb_rss_limit for that TSB so the grow checks in do_sparc64_fault()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two.  The TSB must be aligned to its size, so e.g. a 512K TSB
 * must be 512K aligned.  It also must be physically contiguous, so we
 * cannot use vmalloc().
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
{
        unsigned long max_tsb_size = 1 * 1024 * 1024;
        unsigned long new_size, old_size, flags;
        struct tsb *old_tsb, *new_tsb;
        unsigned long new_cache_index, old_cache_index;
        unsigned long new_rss_limit;
        gfp_t gfp_flags;

        if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
                max_tsb_size = (PAGE_SIZE << MAX_ORDER);

        new_cache_index = 0;
        for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) {
                new_rss_limit = tsb_size_to_rss_limit(new_size);
                if (new_rss_limit > rss)
                        break;
                new_cache_index++;
        }
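
        /* Example (illustrative numbers): for rss == 1000 the loop
         * above tries 8K (512 entries, limit 384) and 16K (limit 768),
         * then settles on 32K (2048 entries, limit 1536), leaving
         * new_cache_index == 2.
         */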

        if (new_size == max_tsb_size)
                new_rss_limit = ~0UL;

retry_tsb_alloc:
        gfp_flags = GFP_KERNEL;
        if (new_size > (PAGE_SIZE * 2))
                gfp_flags |= __GFP_NOWARN | __GFP_NORETRY;

        new_tsb = kmem_cache_alloc_node(tsb_caches[new_cache_index],
                                        gfp_flags, numa_node_id());
        if (unlikely(!new_tsb)) {
                /* Not being able to fork due to a high-order TSB
                 * allocation failure is very bad behavior.  Just back
                 * down to a 0-order allocation and force no TSB
                 * growing for this address space.
                 */
                if (mm->context.tsb_block[tsb_index].tsb == NULL &&
                    new_cache_index > 0) {
                        new_cache_index = 0;
                        new_size = 8192;
                        new_rss_limit = ~0UL;
                        goto retry_tsb_alloc;
                }

                /* If we failed on a TSB grow, we are under serious
                 * memory pressure so don't try to grow any more.
                 */
                if (mm->context.tsb_block[tsb_index].tsb != NULL)
                        mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL;
                return;
        }

        /* Mark all tags as invalid. */
        tsb_init(new_tsb, new_size);

        /* OK, we are about to commit the changes.  If we are
         * growing an existing TSB the locking is very tricky,
         * so WATCH OUT!
         *
         * We have to hold mm->context.lock while committing to the
         * new TSB; this synchronizes us with processors in
         * flush_tsb_user() and switch_mm() for this address space.
         *
         * But even with that lock held, processors run asynchronously
         * accessing the old TSB via TLB miss handling.  This is OK
         * because those actions are just propagating state from the
         * Linux page tables into the TSB; page table mappings are not
         * being changed.  If a real fault occurs, the processor will
         * synchronize with us when it hits flush_tsb_user(); this is
         * also true for the case where vmscan is modifying the page
         * tables.  The only thing we need to be careful with is to
         * skip any locked TSB entries during copy_tsb().
         *
         * When we finish committing to the new TSB, we have to drop
         * the lock and ask all other cpus running this address space
         * to run tsb_context_switch() to see the new TSB table.
         */
        spin_lock_irqsave(&mm->context.lock, flags);

        old_tsb = mm->context.tsb_block[tsb_index].tsb;
        old_cache_index =
                (mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL);
        old_size = (mm->context.tsb_block[tsb_index].tsb_nentries *
                    sizeof(struct tsb));

        /* Handle multiple threads trying to grow the TSB at the same time.
         * One will get in here first, and bump the size and the RSS limit.
         * The others will get in here next and hit this check.
         */
        if (unlikely(old_tsb &&
                     (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
                spin_unlock_irqrestore(&mm->context.lock, flags);

                kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
                return;
        }

        mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit;

        if (old_tsb) {
                extern void copy_tsb(unsigned long old_tsb_base,
                                     unsigned long old_tsb_size,
                                     unsigned long new_tsb_base,
                                     unsigned long new_tsb_size,
                                     unsigned long page_size_shift);
                unsigned long old_tsb_base = (unsigned long) old_tsb;
                unsigned long new_tsb_base = (unsigned long) new_tsb;

                if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
                        old_tsb_base = __pa(old_tsb_base);
                        new_tsb_base = __pa(new_tsb_base);
                }
                copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size,
                         tsb_index == MM_TSB_BASE ?
                         PAGE_SHIFT : REAL_HPAGE_SHIFT);
        }

        mm->context.tsb_block[tsb_index].tsb = new_tsb;
        setup_tsb_params(mm, tsb_index, new_size);

        spin_unlock_irqrestore(&mm->context.lock, flags);

        /* If old_tsb is NULL, we're being invoked for the first time
         * from init_new_context().
         */
        if (old_tsb) {
                /* Reload it on the local cpu. */
                tsb_context_switch(mm);

                /* Now force other processors to do the same. */
                preempt_disable();
                smp_tsb_sync(mm);
                preempt_enable();

                /* Now it is safe to free the old tsb. */
                kmem_cache_free(tsb_caches[old_cache_index], old_tsb);
        }
}

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        unsigned long mm_rss = get_mm_rss(mm);
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
        unsigned long saved_hugetlb_pte_count;
        unsigned long saved_thp_pte_count;
#endif
        unsigned int i;

        spin_lock_init(&mm->context.lock);

        mm->context.sparc64_ctx_val = 0UL;

        mm->context.tag_store = NULL;
        spin_lock_init(&mm->context.tag_lock);

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
        /* We reset them to zero because the fork() page copying
         * will re-increment the counters as the parent PTEs are
         * copied into the child address space.
         */
        saved_hugetlb_pte_count = mm->context.hugetlb_pte_count;
        saved_thp_pte_count = mm->context.thp_pte_count;
        mm->context.hugetlb_pte_count = 0;
        mm->context.thp_pte_count = 0;

        mm_rss -= saved_thp_pte_count * (HPAGE_SIZE / PAGE_SIZE);
#endif

        /* copy_mm() copies over the parent's mm_struct before calling
         * us, so we need to zero out the TSB pointer or else tsb_grow()
         * will be confused and think there is an older TSB to free up.
         */
        for (i = 0; i < MM_NUM_TSBS; i++)
                mm->context.tsb_block[i].tsb = NULL;

        /* If this is fork, inherit the parent's TSB size.  We would
         * grow it to that size on the first page fault anyway.
         */
        tsb_grow(mm, MM_TSB_BASE, mm_rss);

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
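        /* Each 8MB hugepage is backed by two 4MB REAL_HPAGE TTEs, so
         * the huge TSB is sized for REAL_HPAGE_PER_HPAGE (== 2)
         * entries per counted hugepage.
         */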
        if (unlikely(saved_hugetlb_pte_count + saved_thp_pte_count))
                tsb_grow(mm, MM_TSB_HUGE,
                         (saved_hugetlb_pte_count + saved_thp_pte_count) *
                         REAL_HPAGE_PER_HPAGE);
#endif

        if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
                return -ENOMEM;

        return 0;
}

static void tsb_destroy_one(struct tsb_config *tp)
{
        unsigned long cache_index;

        if (!tp->tsb)
                return;
        cache_index = tp->tsb_reg_val & 0x7UL;
        kmem_cache_free(tsb_caches[cache_index], tp->tsb);
        tp->tsb = NULL;
        tp->tsb_reg_val = 0UL;
}

void destroy_context(struct mm_struct *mm)
{
        unsigned long flags, i;

        for (i = 0; i < MM_NUM_TSBS; i++)
                tsb_destroy_one(&mm->context.tsb_block[i]);

        spin_lock_irqsave(&ctx_alloc_lock, flags);

        if (CTX_VALID(mm->context)) {
                unsigned long nr = CTX_NRBITS(mm->context);

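                /* The context bitmap packs 64 contexts per word; clear
                 * this context's bit so the number can be reallocated.
                 */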
                mmu_context_bmap[nr >> 6] &= ~(1UL << (nr & 63));
        }

        spin_unlock_irqrestore(&ctx_alloc_lock, flags);

        /* If ADI tag storage was allocated for this task, free it */
        if (mm->context.tag_store) {
                tag_storage_desc_t *tag_desc;
                unsigned long max_desc;
                unsigned char *tags;

                tag_desc = mm->context.tag_store;
                max_desc = PAGE_SIZE / sizeof(tag_storage_desc_t);
                for (i = 0; i < max_desc; i++) {
                        tags = tag_desc->tags;
                        tag_desc->tags = NULL;
                        kfree(tags);
                        tag_desc++;
                }
                kfree(mm->context.tag_store);
                mm->context.tag_store = NULL;
        }
}