Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--  arch/arm/include/asm/fiq.h    | 23
-rw-r--r--  arch/arm/include/asm/page.h   |  2
-rw-r--r--  arch/arm/include/asm/smp.h    |  7
-rw-r--r--  arch/arm/include/asm/tlb.h    | 53
-rw-r--r--  arch/arm/include/asm/unistd.h |  1
5 files changed, 61 insertions, 25 deletions
diff --git a/arch/arm/include/asm/fiq.h b/arch/arm/include/asm/fiq.h
index 2242ce22ec6..d493d0b742a 100644
--- a/arch/arm/include/asm/fiq.h
+++ b/arch/arm/include/asm/fiq.h
@@ -4,6 +4,13 @@
  * Support for FIQ on ARM architectures.
  * Written by Philip Blundell <philb@gnu.org>, 1998
  * Re-written by Russell King
+ *
+ * NOTE: The FIQ mode registers are not magically preserved across
+ * suspend/resume.
+ *
+ * Drivers which require these registers to be preserved across power
+ * management operations must implement appropriate suspend/resume handlers to
+ * save and restore them.
  */
 
 #ifndef __ASM_FIQ_H
@@ -29,9 +36,21 @@ struct fiq_handler {
 extern int claim_fiq(struct fiq_handler *f);
 extern void release_fiq(struct fiq_handler *f);
 extern void set_fiq_handler(void *start, unsigned int length);
-extern void set_fiq_regs(struct pt_regs *regs);
-extern void get_fiq_regs(struct pt_regs *regs);
 extern void enable_fiq(int fiq);
 extern void disable_fiq(int fiq);
 
+/* helpers defined in fiqasm.S: */
+extern void __set_fiq_regs(unsigned long const *regs);
+extern void __get_fiq_regs(unsigned long *regs);
+
+static inline void set_fiq_regs(struct pt_regs const *regs)
+{
+	__set_fiq_regs(&regs->ARM_r8);
+}
+
+static inline void get_fiq_regs(struct pt_regs *regs)
+{
+	__get_fiq_regs(&regs->ARM_r8);
+}
+
 #endif
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index f51a69595f6..ac75d084888 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -197,7 +197,7 @@ typedef unsigned long pgprot_t;
 
 typedef struct page *pgtable_t;
 
-#ifndef CONFIG_SPARSEMEM
+#ifdef CONFIG_HAVE_ARCH_PFN_VALID
 extern int pfn_valid(unsigned long);
 #endif
 
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index a87664f54f9..e42d96a45d3 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -20,12 +20,6 @@
 
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
-/*
- * at the moment, there's not a big penalty for changing CPUs
- * (the >big< penalty is running SMP in the first place)
- */
-#define PROC_CHANGE_PENALTY	15
-
 struct seq_file;
 
 /*
@@ -76,6 +70,7 @@ extern void platform_smp_prepare_cpus(unsigned int);
  */
 struct secondary_data {
 	unsigned long pgdir;
+	unsigned long swapper_pg_dir;
 	void *stack;
 };
 extern struct secondary_data secondary_data;
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 82dfe5d0c41..265f908c4a6 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -41,12 +41,12 @@
  */
 #if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
 #define tlb_fast_mode(tlb)	0
-#define FREE_PTE_NR		500
 #else
 #define tlb_fast_mode(tlb)	1
-#define FREE_PTE_NR		0
 #endif
 
+#define MMU_GATHER_BUNDLE	8
+
 /*
  * TLB handling.  This allows us to remove pages from the page
  * tables, and efficiently handle the TLB issues.
@@ -58,7 +58,9 @@ struct mmu_gather {
 	unsigned long		range_start;
 	unsigned long		range_end;
 	unsigned int		nr;
-	struct page		*pages[FREE_PTE_NR];
+	unsigned int		max;
+	struct page		**pages;
+	struct page		*local[MMU_GATHER_BUNDLE];
 };
 
 DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
@@ -97,26 +99,37 @@ static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
 	}
 }
 
+static inline void __tlb_alloc_page(struct mmu_gather *tlb)
+{
+	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
+
+	if (addr) {
+		tlb->pages = (void *)addr;
+		tlb->max = PAGE_SIZE / sizeof(struct page *);
+	}
+}
+
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
 	tlb_flush(tlb);
 	if (!tlb_fast_mode(tlb)) {
 		free_pages_and_swap_cache(tlb->pages, tlb->nr);
 		tlb->nr = 0;
+		if (tlb->pages == tlb->local)
+			__tlb_alloc_page(tlb);
 	}
 }
 
-static inline struct mmu_gather *
-tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
+static inline void
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
 {
-	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
-
 	tlb->mm = mm;
-	tlb->fullmm = full_mm_flush;
+	tlb->fullmm = fullmm;
 	tlb->vma = NULL;
+	tlb->max = ARRAY_SIZE(tlb->local);
+	tlb->pages = tlb->local;
 	tlb->nr = 0;
-
-	return tlb;
+	__tlb_alloc_page(tlb);
 }
 
 static inline void
@@ -127,7 +140,8 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
 
-	put_cpu_var(mmu_gathers);
+	if (tlb->pages != tlb->local)
+		free_pages((unsigned long)tlb->pages, 0);
 }
 
 /*
@@ -162,15 +176,22 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 		tlb_flush(tlb);
 }
 
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
 	if (tlb_fast_mode(tlb)) {
 		free_page_and_swap_cache(page);
-	} else {
-		tlb->pages[tlb->nr++] = page;
-		if (tlb->nr >= FREE_PTE_NR)
-			tlb_flush_mmu(tlb);
+		return 1; /* avoid calling tlb_flush_mmu */
 	}
+
+	tlb->pages[tlb->nr++] = page;
+	VM_BUG_ON(tlb->nr > tlb->max);
+	return tlb->max - tlb->nr;
+}
+
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	if (!__tlb_remove_page(tlb, page))
+		tlb_flush_mmu(tlb);
 }
 
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 87dbe3e2197..3de689aa6f6 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -400,6 +400,7 @@
 #define __NR_open_by_handle_at		(__NR_SYSCALL_BASE+371)
 #define __NR_clock_adjtime		(__NR_SYSCALL_BASE+372)
 #define __NR_syncfs			(__NR_SYSCALL_BASE+373)
+#define __NR_sendmmsg			(__NR_SYSCALL_BASE+374)
 
 /*
  * The following SWIs are ARM private.
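
The note added to fiq.h above says that the FIQ banked registers are not preserved across suspend/resume, so a driver that owns the FIQ must save and restore them itself. A minimal illustrative sketch of such a handler, using the get_fiq_regs()/set_fiq_regs() helpers defined by this diff, is shown here; the foo_* names and the dev_pm_ops wiring are hypothetical and not part of this change.

#include <linux/device.h>
#include <linux/pm.h>
#include <asm/fiq.h>

static struct pt_regs foo_fiq_regs;	/* snapshot of the FIQ-mode r8-r14 */

static int foo_suspend(struct device *dev)
{
	/* Save the FIQ banked registers before the core loses state. */
	get_fiq_regs(&foo_fiq_regs);
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* Restore the FIQ banked registers after power-up. */
	set_fiq_regs(&foo_fiq_regs);
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend = foo_suspend,
	.resume  = foo_resume,
};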
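
The tlb.h hunks move struct mmu_gather off the per-CPU mmu_gathers variable and onto the caller: tlb_gather_mmu() now initialises a caller-supplied gather, pages points either at a page allocated by __tlb_alloc_page() or at the small on-struct local[] bundle, and tlb_remove_page() only flushes once that batch fills. A rough sketch of the new calling sequence from the core-mm side follows; example_zap_one() is a made-up caller, only the tlb_* calls are the interface shown in this diff.

#include <linux/mm.h>
#include <asm/tlb.h>

/* Hypothetical caller: batch-free a single page mapped in @vma. */
static void example_zap_one(struct mm_struct *mm, struct vm_area_struct *vma,
			    struct page *page, unsigned long start,
			    unsigned long end)
{
	struct mmu_gather tlb;		/* caller-owned; no get_cpu_var() any more */

	tlb_gather_mmu(&tlb, mm, 0);	/* 0: not a full-mm teardown */
	tlb_start_vma(&tlb, vma);

	/*
	 * tlb_remove_page() queues the page in tlb.pages (or in the local[]
	 * bundle if the extra page could not be allocated) and calls
	 * tlb_flush_mmu() only when the batch is full.
	 */
	tlb_remove_page(&tlb, page);

	tlb_end_vma(&tlb, vma);
	tlb_finish_mmu(&tlb, start, end);	/* final flush; frees the batch page */
}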