Diffstat (limited to 'arch/powerpc')
-rw-r--r--   arch/powerpc/include/asm/icswx.h             2
-rw-r--r--   arch/powerpc/kernel/exceptions-64s.S        16
-rw-r--r--   arch/powerpc/kernel/paca.c                   2
-rw-r--r--   arch/powerpc/mm/book3s64/hash_utils.c       25
-rw-r--r--   arch/powerpc/mm/book3s64/pkeys.c            12
-rw-r--r--   arch/powerpc/perf/core-book3s.c              6
-rw-r--r--   arch/powerpc/platforms/powernv/vas-fault.c   2
7 files changed, 54 insertions, 11 deletions
diff --git a/arch/powerpc/include/asm/icswx.h b/arch/powerpc/include/asm/icswx.h
index 965b1f39b2a5..b0c70a35fd0e 100644
--- a/arch/powerpc/include/asm/icswx.h
+++ b/arch/powerpc/include/asm/icswx.h
@@ -77,6 +77,8 @@ struct coprocessor_completion_block {
 #define CSB_CC_CHAIN		(37)
 #define CSB_CC_SEQUENCE		(38)
 #define CSB_CC_HW		(39)
+/* P9 DD2 NX Workbook 3.2 (Table 4-36): Address translation fault */
+#define CSB_CC_FAULT_ADDRESS	(250)
 
 #define CSB_SIZE		(0x10)
 #define CSB_ALIGN		CSB_SIZE
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index fa080694e581..446e54c3f71e 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -2551,7 +2551,7 @@ EXC_VIRT_NONE(0x5400, 0x100)
 INT_DEFINE_BEGIN(denorm_exception)
 	IVEC=0x1500
 	IHSRR=1
-	IBRANCH_COMMON=0
+	IBRANCH_TO_COMMON=0
 	IKVM_REAL=1
 INT_DEFINE_END(denorm_exception)
 
@@ -3072,10 +3072,18 @@ do_hash_page:
 	ori	r0,r0,DSISR_BAD_FAULT_64S@l
 	and.	r0,r5,r0		/* weird error? */
 	bne-	handle_page_fault	/* if not, try to insert a HPTE */
+
+	/*
+	 * If we are in an "NMI" (e.g., an interrupt when soft-disabled), then
+	 * don't call hash_page, just fail the fault. This is required to
+	 * prevent re-entrancy problems in the hash code, namely perf
+	 * interrupts hitting while something holds H_PAGE_BUSY, and taking a
+	 * hash fault. See the comment in hash_preload().
+	 */
 	ld	r11, PACA_THREAD_INFO(r13)
-	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
-	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
-	bne	77f			/* then don't call hash_page now */
+	lwz	r0,TI_PREEMPT(r11)
+	andis.	r0,r0,NMI_MASK@h
+	bne	77f
 
 	/*
 	 * r3 contains the trap number
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 2168372b792d..74da65aacbc9 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -87,7 +87,7 @@ static void *__init alloc_shared_lppaca(unsigned long size, unsigned long align,
 	 * This is very early in boot, so no harm done if the kernel crashes at
 	 * this point.
 	 */
-	BUG_ON(shared_lppaca_size >= shared_lppaca_total_size);
+	BUG_ON(shared_lppaca_size > shared_lppaca_total_size);
 
 	return ptr;
 }
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 468169e33c86..9b9f92ad0e7a 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1559,6 +1559,7 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
 	pgd_t *pgdir;
 	int rc, ssize, update_flags = 0;
 	unsigned long access = _PAGE_PRESENT | _PAGE_READ | (is_exec ? _PAGE_EXEC : 0);
+	unsigned long flags;
 
 	BUG_ON(get_region_id(ea) != USER_REGION_ID);
 
@@ -1592,6 +1593,28 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
 		return;
 #endif /* CONFIG_PPC_64K_PAGES */
 
+	/*
+	 * __hash_page_* must run with interrupts off, as it sets the
+	 * H_PAGE_BUSY bit. It's possible for perf interrupts to hit at any
+	 * time and may take a hash fault reading the user stack, see
+	 * read_user_stack_slow() in the powerpc/perf code.
+	 *
+	 * If that takes a hash fault on the same page as we lock here, it
+	 * will bail out when seeing H_PAGE_BUSY set, and retry the access
+	 * leading to an infinite loop.
+	 *
+	 * Disabling interrupts here does not prevent perf interrupts, but it
+	 * will prevent them taking hash faults (see the NMI test in
+	 * do_hash_page), then read_user_stack's copy_from_user_nofault will
+	 * fail and perf will fall back to read_user_stack_slow(), which
+	 * walks the Linux page tables.
+	 *
+	 * Interrupts must also be off for the duration of the
+	 * mm_is_thread_local test and update, to prevent preempt running the
+	 * mm on another CPU (XXX: this may be racy vs kthread_use_mm).
+	 */
+	local_irq_save(flags);
+
 	/* Is that local to this CPU ? */
 	if (mm_is_thread_local(mm))
 		update_flags |= HPTE_LOCAL_UPDATE;
@@ -1614,6 +1637,8 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
 				   mm_ctx_user_psize(&mm->context),
 				   mm_ctx_user_psize(&mm->context),
 				   pte_val(*ptep));
+
+	local_irq_restore(flags);
 }
 
 /*
diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c
index ca5fcb4bff32..d174106bab67 100644
--- a/arch/powerpc/mm/book3s64/pkeys.c
+++ b/arch/powerpc/mm/book3s64/pkeys.c
@@ -354,12 +354,14 @@ static bool pkey_access_permitted(int pkey, bool write, bool execute)
 	u64 amr;
 
 	pkey_shift = pkeyshift(pkey);
-	if (execute && !(read_iamr() & (IAMR_EX_BIT << pkey_shift)))
-		return true;
+	if (execute)
+		return !(read_iamr() & (IAMR_EX_BIT << pkey_shift));
+
+	amr = read_amr();
+	if (write)
+		return !(amr & (AMR_WR_BIT << pkey_shift));
 
-	amr = read_amr(); /* Delay reading amr until absolutely needed */
-	return ((!write && !(amr & (AMR_RD_BIT << pkey_shift))) ||
-		(write && !(amr & (AMR_WR_BIT << pkey_shift))));
+	return !(amr & (AMR_RD_BIT << pkey_shift));
 }
 
 bool arch_pte_access_permitted(u64 pte, bool write, bool execute)
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index cd6a742ac6ef..01d70280d287 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -2179,6 +2179,12 @@ static void __perf_event_interrupt(struct pt_regs *regs)
 
 	perf_read_regs(regs);
 
+	/*
+	 * If perf interrupts hit in a local_irq_disable (soft-masked) region,
+	 * we consider them as NMIs. This is required to prevent hash faults on
+	 * user addresses when reading callchains. See the NMI test in
+	 * do_hash_page.
+	 */
 	nmi = perf_intr_is_nmi(regs);
 	if (nmi)
 		nmi_enter();
diff --git a/arch/powerpc/platforms/powernv/vas-fault.c b/arch/powerpc/platforms/powernv/vas-fault.c
index 266a6ca5e15e..3d21fce254b7 100644
--- a/arch/powerpc/platforms/powernv/vas-fault.c
+++ b/arch/powerpc/platforms/powernv/vas-fault.c
@@ -79,7 +79,7 @@ static void update_csb(struct vas_window *window,
 	csb_addr = (void __user *)be64_to_cpu(crb->csb_addr);
 
 	memset(&csb, 0, sizeof(csb));
-	csb.cc = CSB_CC_TRANSLATION;
+	csb.cc = CSB_CC_FAULT_ADDRESS;
 	csb.ce = CSB_CE_TERMINATION;
 	csb.cs = 0;
 	csb.count = 0;
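For context on the pkeys.c hunk: the rewrite is not just a cleanup, it changes how execute requests are judged. The old code could fall through to the AMR read/write checks when the IAMR denied execute; the new code decides execute purely from the IAMR. The snippet below is a standalone user-space sketch, not kernel code: the AMR/IAMR values are plain parameters rather than SPR reads, and pkeyshift() plus the toy key layout are illustrative stand-ins, not the real register format. It only exists to show one input where the two forms disagree.

/* Sketch: old vs. new pkey_access_permitted() logic (illustrative layout). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IAMR_EX_BIT 0x1UL
#define AMR_RD_BIT  0x4UL
#define AMR_WR_BIT  0x2UL

/* Stand-in: give each key a disjoint 3-bit field; the real layout differs. */
static int pkeyshift(int pkey) { return pkey * 3; }

static bool old_check(uint64_t amr, uint64_t iamr, int pkey, bool write, bool execute)
{
	int pkey_shift = pkeyshift(pkey);

	/* Old form: only an *allowed* execute returns early ... */
	if (execute && !(iamr & (IAMR_EX_BIT << pkey_shift)))
		return true;

	/* ... a denied execute falls through to the AMR read/write bits. */
	return ((!write && !(amr & (AMR_RD_BIT << pkey_shift))) ||
		(write && !(amr & (AMR_WR_BIT << pkey_shift))));
}

static bool new_check(uint64_t amr, uint64_t iamr, int pkey, bool write, bool execute)
{
	int pkey_shift = pkeyshift(pkey);

	/* New form: execute is decided by the IAMR alone. */
	if (execute)
		return !(iamr & (IAMR_EX_BIT << pkey_shift));
	if (write)
		return !(amr & (AMR_WR_BIT << pkey_shift));
	return !(amr & (AMR_RD_BIT << pkey_shift));
}

int main(void)
{
	int pkey = 1;
	/* Key 1: execute denied in the IAMR, read/write allowed in the AMR. */
	uint64_t iamr = IAMR_EX_BIT << pkeyshift(pkey);
	uint64_t amr = 0;

	/* Old logic falls through to the AMR and reports execute as permitted;
	 * new logic denies it. Prints "old: 1  new: 0". */
	printf("old: %d  new: %d\n",
	       (int)old_check(amr, iamr, pkey, false, true),
	       (int)new_check(amr, iamr, pkey, false, true));
	return 0;
}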