| | | |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2019-05-07 19:34:17 -0700 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-05-07 19:34:17 -0700 |
| commit | d3511f53bb2475f2a4e8460bee5a1ae6dea2a433 | |
| tree | 8141260e4fb705d8f94548150572433c685e6829 /arch/parisc/include/asm/pgtable.h | |
| parent | 02aff8db6438ce29371fd9cd54c57213f4bb4536 | |
| parent | 62217beb394e654bbd2bb87c533dadd2d8bf62c6 | |
Merge branch 'parisc-5.2-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux
Pull parisc updates from Helge Deller:
"Many great new features, fixes and optimizations, including:
- Convert page table updates to use per-pagetable spinlocks which
overall improves performance on SMP machines a lot, by Mikulas
Patocka
- Kernel debugger (KGDB) support, by Sven Schnelle
- KPROBES support, by Sven Schnelle
- Lots of TLB lock/flush improvements, by Dave Anglin
- Drop DISCONTIGMEM and switch to SPARSEMEM
- Added JUMP_LABEL, branch runtime-patching support
- Lots of other small speedups and cleanups, e.g. for QEMU, stack
randomization, avoidance of name clashes, documentation updates,
etc ..."
* 'parisc-5.2-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux: (28 commits)
parisc: Add static branch and JUMP_LABEL feature
parisc: Use PA_ASM_LEVEL in boot code
parisc: Rename LEVEL to PA_ASM_LEVEL to avoid name clash with DRBD code
parisc: Update huge TLB page support to use per-pagetable spinlock
parisc: Use per-pagetable spinlock
parisc: Allow live-patching of __meminit functions
parisc: Add memory barrier to asm pdc and sync instructions
parisc: Add memory clobber to TLB purges
parisc: Use ldcw instruction for SMP spinlock release barrier
parisc: Remove lock code to serialize TLB operations in pacache.S
parisc: Switch from DISCONTIGMEM to SPARSEMEM
parisc: enable wide mode early
parisc: update feature lists
parisc: Show n/a if product number not available
parisc: remove unused flags parameter in __patch_text()
doc: update kprobes supported architecture list
parisc: Implement kretprobes
parisc: remove kprobes.h from generic-y
parisc: Implement kprobes
parisc: add functions required by KPROBE_EVENTS
...
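On the static-branch entry at the top of the list: arch-level JUMP_LABEL support is what lets the kernel's generic static-key API work on parisc, turning a rarely-toggled condition into a runtime-patched nop or branch instead of a load-and-test on every call. A minimal sketch using that stock API follows; `do_expensive_tracing()` is a hypothetical helper, and none of this is parisc-specific code from the patch, which only supplies the architecture back end that does the patching.

```c
/* Stock static-key usage; with JUMP_LABEL the "if" below compiles to
 * a single patched nop/branch rather than a memory test. */
#include <linux/jump_label.h>

extern void do_expensive_tracing(void);	/* hypothetical helper */

static DEFINE_STATIC_KEY_FALSE(tracing_key);

void hot_path(void)
{
	/* Compiles to a nop until the key is flipped at runtime. */
	if (static_branch_unlikely(&tracing_key))
		do_expensive_tracing();
}

void tracing_on(void)
{
	static_branch_enable(&tracing_key);	/* live-patches the nop */
}
```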
Diffstat (limited to 'arch/parisc/include/asm/pgtable.h')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | arch/parisc/include/asm/pgtable.h | 69 |

1 file changed, 54 insertions(+), 15 deletions(-)
```diff
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index c7bb74e22436..a39b079e73f2 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -17,7 +17,7 @@
 #include <asm/processor.h>
 #include <asm/cache.h>
 
-extern spinlock_t pa_tlb_lock;
+static inline spinlock_t *pgd_spinlock(pgd_t *);
 
 /*
  * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
@@ -34,16 +34,46 @@ extern spinlock_t pa_tlb_lock;
  */
 #define kern_addr_valid(addr)	(1)
 
-/* Purge data and instruction TLB entries. Must be called holding
- * the pa_tlb_lock. The TLB purge instructions are slow on SMP
- * machines since the purge must be broadcast to all CPUs.
- */
+/* This is for the serialization of PxTLB broadcasts. At least on the N class
+ * systems, only one PxTLB inter processor broadcast can be active at any one
+ * time on the Merced bus.
+
+ * PTE updates are protected by locks in the PMD.
+ */
+extern spinlock_t pa_tlb_flush_lock;
+extern spinlock_t pa_swapper_pg_lock;
+#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
+extern int pa_serialize_tlb_flushes;
+#else
+#define pa_serialize_tlb_flushes	(0)
+#endif
+
+#define purge_tlb_start(flags)	do { \
+	if (pa_serialize_tlb_flushes)	\
+		spin_lock_irqsave(&pa_tlb_flush_lock, flags); \
+	else \
+		local_irq_save(flags);	\
+	} while (0)
+#define purge_tlb_end(flags)	do { \
+	if (pa_serialize_tlb_flushes)	\
+		spin_unlock_irqrestore(&pa_tlb_flush_lock, flags); \
+	else \
+		local_irq_restore(flags); \
+	} while (0)
+
+/* Purge data and instruction TLB entries. The TLB purge instructions
+ * are slow on SMP machines since the purge must be broadcast to all CPUs.
+ */
 static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 {
+	unsigned long flags;
+
+	purge_tlb_start(flags);
 	mtsp(mm->context, 1);
 	pdtlb(addr);
 	pitlb(addr);
+	purge_tlb_end(flags);
 }
 
 /* Certain architectures need to do special things when PTEs
@@ -59,11 +89,11 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 	do {							\
 		pte_t old_pte;					\
 		unsigned long flags;				\
-		spin_lock_irqsave(&pa_tlb_lock, flags);		\
+		spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);\
 		old_pte = *ptep;				\
 		set_pte(ptep, pteval);				\
 		purge_tlb_entries(mm, addr);			\
-		spin_unlock_irqrestore(&pa_tlb_lock, flags);	\
+		spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);\
 	} while (0)
 
 #endif /* !__ASSEMBLY__ */
@@ -88,10 +118,10 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 #if CONFIG_PGTABLE_LEVELS == 3
 #define PGD_ORDER	1 /* Number of pages per pgd */
 #define PMD_ORDER	1 /* Number of pages per pmd */
-#define PGD_ALLOC_ORDER	2 /* first pgd contains pmd */
+#define PGD_ALLOC_ORDER	(2 + 1) /* first pgd contains pmd */
 #else
 #define PGD_ORDER	1 /* Number of pages per pgd */
-#define PGD_ALLOC_ORDER	PGD_ORDER
+#define PGD_ALLOC_ORDER	(PGD_ORDER + 1)
 #endif
 
 /* Definitions for 3rd level (we use PLD here for Page Lower directory
@@ -459,6 +489,15 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
 #define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)		((pte_t) { (x).val })
 
+
+static inline spinlock_t *pgd_spinlock(pgd_t *pgd)
+{
+	if (unlikely(pgd == swapper_pg_dir))
+		return &pa_swapper_pg_lock;
+	return (spinlock_t *)((char *)pgd + (PAGE_SIZE << (PGD_ALLOC_ORDER - 1)));
+}
+
+
 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
 	pte_t pte;
@@ -467,15 +506,15 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
 	if (!pte_young(*ptep))
 		return 0;
 
-	spin_lock_irqsave(&pa_tlb_lock, flags);
+	spin_lock_irqsave(pgd_spinlock(vma->vm_mm->pgd), flags);
 	pte = *ptep;
 	if (!pte_young(pte)) {
-		spin_unlock_irqrestore(&pa_tlb_lock, flags);
+		spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags);
 		return 0;
 	}
 	set_pte(ptep, pte_mkold(pte));
 	purge_tlb_entries(vma->vm_mm, addr);
-	spin_unlock_irqrestore(&pa_tlb_lock, flags);
+	spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags);
 	return 1;
 }
 
@@ -485,11 +524,11 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 	pte_t old_pte;
 	unsigned long flags;
 
-	spin_lock_irqsave(&pa_tlb_lock, flags);
+	spin_lock_irqsave(pgd_spinlock(mm->pgd), flags);
 	old_pte = *ptep;
 	set_pte(ptep, __pte(0));
 	purge_tlb_entries(mm, addr);
-	spin_unlock_irqrestore(&pa_tlb_lock, flags);
+	spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags);
 
 	return old_pte;
 }
@@ -497,10 +536,10 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	unsigned long flags;
-	spin_lock_irqsave(&pa_tlb_lock, flags);
+	spin_lock_irqsave(pgd_spinlock(mm->pgd), flags);
 	set_pte(ptep, pte_wrprotect(*ptep));
 	purge_tlb_entries(mm, addr);
-	spin_unlock_irqrestore(&pa_tlb_lock, flags);
+	spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags);
 }
 
 #define pte_same(A,B)	(pte_val(A) == pte_val(B))
```
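Two details of this diff are easy to miss. First, `purge_tlb_start()`/`purge_tlb_end()` only take the global `pa_tlb_flush_lock` when `pa_serialize_tlb_flushes` is set (64-bit SMP, where only one PxTLB broadcast may be in flight on the Merced bus); in all other configurations they degrade to plain `local_irq_save()`/`local_irq_restore()`. Second, `pgd_spinlock()` works because `PGD_ALLOC_ORDER` is bumped by one: the pgd allocation is doubled, and the per-pagetable lock lives at a fixed offset inside the same allocation. With 4 KiB pages in the 3-level case, `PGD_ALLOC_ORDER` is 3, so the allocation is 8 pages (32 KiB) and the lock sits at `PAGE_SIZE << 2` = 16 KiB, immediately after the original directory pages. The statically allocated `swapper_pg_dir` has no such spare room, hence the dedicated `pa_swapper_pg_lock`. A user-space sketch of the same layout trick, with illustrative names and constants rather than kernel code:

```c
/*
 * Sketch of the pgd_spinlock() layout trick: over-allocate the page
 * directory by one order and derive the lock address from the pgd
 * pointer alone. Build: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdlib.h>

#define PAGE_SIZE	4096UL
#define PGD_ALLOC_ORDER	3	/* 2^3 pages: directory + lock space */

static inline pthread_spinlock_t *pgd_spinlock(void *pgd)
{
	/* Pure address arithmetic: no hash table or extra pointer chase. */
	return (pthread_spinlock_t *)
		((char *)pgd + (PAGE_SIZE << (PGD_ALLOC_ORDER - 1)));
}

static void *pgd_alloc(void)
{
	/* One allocation holds both the tables and their lock. */
	void *pgd = aligned_alloc(PAGE_SIZE, PAGE_SIZE << PGD_ALLOC_ORDER);

	if (pgd)
		pthread_spin_init(pgd_spinlock(pgd), PTHREAD_PROCESS_PRIVATE);
	return pgd;
}

int main(void)
{
	void *pgd = pgd_alloc();

	if (!pgd)
		return 1;
	pthread_spin_lock(pgd_spinlock(pgd));	/* lock found from pgd alone */
	pthread_spin_unlock(pgd_spinlock(pgd));
	free(pgd);
	return 0;
}
```

Keeping the lock in the same allocation as the table also means freeing the pgd tears both down at once, and the lock tends to be cache-hot whenever the table it protects is in use.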
