From 79725df5786d2fa48f582b116ea1d74193cc96ca Mon Sep 17 00:00:00 2001
From: GuanXuetao
Date: Sat, 15 Jan 2011 18:15:01 +0800
Subject: unicore32 core architecture: processor and system headers

This patch includes processor and system headers. The system call
interface is here; we use the same syscall interface as the asm-generic
version.

Signed-off-by: Guan Xuetao
Reviewed-by: Arnd Bergmann
---
 arch/unicore32/mm/proc-macros.S | 145 ++++++++++++++++++++++++++++++++++++++++
 arch/unicore32/mm/proc-ucv2.S   | 134 +++++++++++++++++++++++++++++++++++++
 2 files changed, 279 insertions(+)
 create mode 100644 arch/unicore32/mm/proc-macros.S
 create mode 100644 arch/unicore32/mm/proc-ucv2.S
(limited to 'arch/unicore32/mm')

diff --git a/arch/unicore32/mm/proc-macros.S b/arch/unicore32/mm/proc-macros.S
new file mode 100644
index 00000000000..51560d68c89
--- /dev/null
+++ b/arch/unicore32/mm/proc-macros.S
@@ -0,0 +1,145 @@
+/*
+ * linux/arch/unicore32/mm/proc-macros.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * We need constants.h for:
+ *  VMA_VM_MM
+ *  VMA_VM_FLAGS
+ *  VM_EXEC
+ */
+#include <generated/asm-offsets.h>
+#include <asm/thread_info.h>
+#include <asm/memory.h>
+
+/*
+ * the cache line sizes of the I and D cache are the same
+ */
+#define CACHE_LINESIZE 32
+
+/*
+ * This is the maximum size of an area which will be invalidated
+ * using the single invalidate entry instructions. Anything larger
+ * than this, and we go for the whole cache.
+ *
+ * This value should be chosen such that we choose the cheapest
+ * alternative.
+ */
+#ifdef CONFIG_CPU_UCV2
+#define MAX_AREA_SIZE 0x800 /* 64 cache lines */
+#endif
+
+/*
+ * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
+ */
+ .macro vma_vm_mm, rd, rn
+ ldw \rd, [\rn+], #VMA_VM_MM
+ .endm
+
+/*
+ * vma_vm_flags - get vma->vm_flags
+ */
+ .macro vma_vm_flags, rd, rn
+ ldw \rd, [\rn+], #VMA_VM_FLAGS
+ .endm
+
+ .macro tsk_mm, rd, rn
+ ldw \rd, [\rn+], #TI_TASK
+ ldw \rd, [\rd+], #TSK_ACTIVE_MM
+ .endm
+
+/*
+ * act_mm - get current->active_mm
+ */
+ .macro act_mm, rd
+ andn \rd, sp, #8128
+ andn \rd, \rd, #63
+ ldw \rd, [\rd+], #TI_TASK
+ ldw \rd, [\rd+], #TSK_ACTIVE_MM
+ .endm
+
+/*
+ * mmid - get context id from mm pointer (mm->context.id)
+ */
+ .macro mmid, rd, rn
+ ldw \rd, [\rn+], #MM_CONTEXT_ID
+ .endm
+
+/*
+ * mask_asid - mask the ASID from the context ID
+ */
+ .macro asid, rd, rn
+ and \rd, \rn, #255
+ .endm
+
+ .macro crval, clear, mmuset, ucset
+ .word \clear
+ .word \mmuset
+ .endm
+
+#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
+/*
+ * va2pa va, pa, tbl, msk, off, err
+ * This macro is used to translate a virtual address to its physical address.
+ *
+ * va: virtual address
+ * pa: physical address, result is stored in this register
+ * tbl, msk, off: temp registers, will be destroyed
+ * err: jump to error label if the physical address does not exist
+ * NOTE: all regs must be different
+ */
+ .macro va2pa, va, pa, tbl, msk, off, err=990f
+ movc \pa, p0.c2, #0
+ mov \off, \va >> #22 @ off <- index of 1st page table
+ adr \tbl, 910f @ tbl <- table of 1st page table
+900: @ ---- handle 1, 2 page table
+ add \pa, \pa, #PAGE_OFFSET @ pa <- virt addr of page table
+ ldw \pa, [\pa+], \off << #2 @ pa <- the content of pt
+ cand.a \pa, #4 @ test exist bit
+ beq \err @ if not exist
+ and \off, \pa, #3 @ off <- the last 2 bits
+ add \tbl, \tbl, \off << #3 @ cmove table pointer
+ ldw \msk, [\tbl+], #0 @ get the mask
+ ldw pc, [\tbl+], #4
+930: @ ---- handle 2nd page table
+ and \pa, \pa, \msk @ pa <- phys addr of 2nd pt
+ mov \off, \va << #10
+ cntlo \tbl, \msk @ use tbl as temp reg
+ mov \off, \off >> \tbl
+ mov \off, \off >> #2 @ off <- index of 2nd pt
+ adr \tbl, 920f @ tbl <- table of 2nd pt
+ b 900b
+910: @ 1st level page table
+ .word 0xfffff000, 930b @ second level page table
+ .word 0xfffffc00, 930b @ second level large page table
+ .word 0x00000000, \err @ invalid
+ .word 0xffc00000, 980f @ super page
+
+920: @ 2nd level page table
+ .word 0xfffff000, 980f @ page
+ .word 0xffffc000, 980f @ middle page
+ .word 0xffff0000, 980f @ large page
+ .word 0x00000000, \err @ invalid
+980:
+ andn \tbl, \va, \msk
+ and \pa, \pa, \msk
+ or \pa, \pa, \tbl
+990:
+ .endm
+#endif
+
+ .macro dcacheline_flush, addr, t1, t2
+ mov \t1, \addr << #20
+ ldw \t2, =_stext @ _stext must ALIGN(4096)
+ add \t2, \t2, \t1 >> #20
+ ldw \t1, [\t2+], #0x0000
+ ldw \t1, [\t2+], #0x1000
+ ldw \t1, [\t2+], #0x2000
+ ldw \t1, [\t2+], #0x3000
+ .endm
diff --git a/arch/unicore32/mm/proc-ucv2.S b/arch/unicore32/mm/proc-ucv2.S
new file mode 100644
index 00000000000..9d296092e36
--- /dev/null
+++ b/arch/unicore32/mm/proc-ucv2.S
@@ -0,0 +1,134 @@
+/*
+ * linux/arch/unicore32/mm/proc-ucv2.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/hwcap.h>
+#include <asm/pgtable-hwdef.h>
+#include <asm/pgtable.h>
+
+#include "proc-macros.S"
+
+ENTRY(cpu_proc_fin)
+ stm.w (lr), [sp-]
+ mov ip, #PSR_R_BIT | PSR_I_BIT | PRIV_MODE
+ mov.a asr, ip
+ b.l __cpuc_flush_kern_all
+ ldm.w (pc), [sp]+
+
+/*
+ * cpu_reset(loc)
+ *
+ * Perform a soft reset of the system. Put the CPU into the
+ * same state as it would be if it had been reset, and branch
+ * to what would be the reset vector.
+ *
+ * - loc - location to jump to for soft reset
+ */
+ .align 5
+ENTRY(cpu_reset)
+ mov ip, #0
+ movc p0.c5, ip, #28 @ Cache invalidate all
+ nop8
+
+ movc p0.c6, ip, #6 @ TLB invalidate all
+ nop8
+
+ movc ip, p0.c1, #0 @ ctrl register
+ or ip, ip, #0x2000 @ vector base address
+ andn ip, ip, #0x000f @ ............idam
+ movc p0.c1, ip, #0 @ disable caches and mmu
+ nop
+ mov pc, r0 @ jump to loc
+ nop8
+
+/*
+ * cpu_do_idle()
+ *
+ * Idle the processor (e.g., wait for interrupt).
+ *
+ * IRQs are already disabled.
+ */ +ENTRY(cpu_do_idle) + mov r0, #0 @ PCI address + .rept 8 + ldw r1, [r0] + .endr + mov pc, lr + +ENTRY(cpu_dcache_clean_area) +#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE + csub.a r1, #MAX_AREA_SIZE + bsg 101f + mov r9, #PAGE_SZ + sub r9, r9, #1 @ PAGE_MASK +1: va2pa r0, r10, r11, r12, r13 @ r10 is PA + b 3f +2: cand.a r0, r9 + beq 1b +3: movc p0.c5, r10, #11 @ clean D entry + nop8 + add r0, r0, #CACHE_LINESIZE + add r10, r10, #CACHE_LINESIZE + sub.a r1, r1, #CACHE_LINESIZE + bua 2b + mov pc, lr +#endif +101: mov ip, #0 + movc p0.c5, ip, #10 @ Dcache clean all + nop8 + + mov pc, lr + +/* + * cpu_do_switch_mm(pgd_phys) + * + * Set the translation table base pointer to be pgd_phys + * + * - pgd_phys - physical address of new pgd + * + * It is assumed that: + * - we are not using split page tables + */ + .align 5 +ENTRY(cpu_do_switch_mm) + movc p0.c2, r0, #0 @ update page table ptr + nop8 + + movc p0.c6, ip, #6 @ TLB invalidate all + nop8 + + mov pc, lr + +/* + * cpu_set_pte(ptep, pte) + * + * Set a level 2 translation table entry. + * + * - ptep - pointer to level 2 translation table entry + * - pte - PTE value to store + */ + .align 5 +ENTRY(cpu_set_pte) + stw r1, [r0] +#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE + sub r2, r0, #PAGE_OFFSET + movc p0.c5, r2, #11 @ Dcache clean line + nop8 +#else + mov ip, #0 + movc p0.c5, ip, #10 @ Dcache clean all + nop8 + @dcacheline_flush r0, r2, ip +#endif + mov pc, lr + -- cgit v1.2.3
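The va2pa macro in the first hunk is the patch's two-level page-table walk: index the first-level table with va >> 22, test the exist bit, pick an address mask from the descriptor's low type bits, then repeat the lookup at the second level and combine the frame address with the page offset. As a reading aid only, here is a hypothetical C model of the common path (a present first-level descriptor pointing at a second-level table of 4 KB pages). The names va2pa_model, PRESENT, TBL_MASK and the toy mem[] array are assumptions made for this sketch, not part of the patch, and the real macro additionally dispatches on the type bits to handle large and super pages.

/*
 * Simplified, illustrative model of the va2pa walk above.
 * Not the hardware behaviour: only the 4 KB page path is modelled.
 */
#include <stdint.h>
#include <stdio.h>

#define PRESENT   0x4u          /* "exist" bit, tested with cand.a #4 in va2pa */
#define TBL_MASK  0xfffff000u   /* 4 KB-aligned table/frame address mask       */

static uint32_t mem[1 << 16];   /* toy word-addressable "physical memory"      */

static uint32_t rd(uint32_t pa)             { return mem[pa >> 2]; }
static void     wr(uint32_t pa, uint32_t v) { mem[pa >> 2] = v;    }

/* Two-level walk: fill *pa and return 0, or return -1 if a level is missing. */
static int va2pa_model(uint32_t pgd_pa, uint32_t va, uint32_t *pa)
{
	uint32_t pgd_ent = rd(pgd_pa + ((va >> 22) << 2));   /* 1st-level entry */
	if (!(pgd_ent & PRESENT))
		return -1;                                   /* "if not exist"  */

	uint32_t pte_tbl = pgd_ent & TBL_MASK;               /* 2nd-level table */
	uint32_t pte = rd(pte_tbl + (((va >> 12) & 0x3ff) << 2));
	if (!(pte & PRESENT))
		return -1;

	*pa = (pte & TBL_MASK) | (va & ~TBL_MASK);           /* frame | offset  */
	return 0;
}

int main(void)
{
	/* map virtual 0x00401234 to physical 0x00009234 through a toy table */
	uint32_t pgd = 0x1000, pte_tbl = 0x2000, va = 0x00401234, pa;

	wr(pgd + ((va >> 22) << 2), pte_tbl | PRESENT);
	wr(pte_tbl + (((va >> 12) & 0x3ff) << 2), 0x00009000u | PRESENT);

	if (va2pa_model(pgd, va, &pa) == 0)
		printf("pa = 0x%08x\n", (unsigned)pa);   /* pa = 0x00009234 */
	return 0;
}

Built with any C compiler, the demo prints pa = 0x00009234 for the virtual address mapped in main(), mirroring the mask-and-or combination performed at label 980 of the macro.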