author     Bjorn Helgaas <bjorn.helgaas@hp.com>  2006-05-05 17:19:50 -0600
committer  Tony Luck <tony.luck@intel.com>       2006-05-08 16:32:05 -0700
commit     32e62c636a728cb39c0b3bd191286f2ca65d4028
tree       656454a01e720819103c172daae15b5f2fd85d68 /include/asm-ia64
parent     6810b548b25114607e0814612d84125abccc0a4f
[IA64] rework memory attribute aliasing
This closes a couple of holes in our attribute-aliasing avoidance scheme:
- The current kernel fails mmaps of some /dev/mem MMIO regions because
they don't appear in the EFI memory map. This keeps X from working
on the Intel Tiger box.
- The current kernel allows UC mmap of the 0-1MB region of
/sys/.../legacy_mem even when the chipset doesn't support UC
access. This causes an MCA when starting X on HP rx7620 and rx8620
boxes in the default configuration.
There's more detail in the Documentation/ia64/aliasing.txt file this patch
adds, but the general idea is that if a region might be covered by
a granule-sized kernel identity mapping, any access via /dev/mem or
mmap must use the same attribute as the identity mapping.
Otherwise, we fall back to using an attribute that is supported
according to the EFI memory map, or to using UC if the EFI memory
map doesn't mention the region.
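To make that policy concrete, here is a minimal sketch of the decision in C.
It reuses kern_mem_attribute() (declared in the io.h hunk below) and the
pgprot_cacheable()/pgprot_noncached() macros from the pgtable.h hunk;
efi_mem_attribute() and the exact fallback order are assumptions drawn from
the description above, not a copy of the patch body.

/*
 * Illustrative sketch of the attribute-selection policy, not the
 * patch itself.  kern_mem_attribute() is declared by this patch;
 * efi_mem_attribute() and the EFI_MEMORY_* flags are assumed to
 * come from the ia64 EFI support code.
 */
pgprot_t
phys_mem_access_prot(struct file *file, unsigned long pfn,
		     unsigned long size, pgprot_t vma_prot)
{
	unsigned long phys_addr = pfn << PAGE_SHIFT;
	u64 attr;

	/*
	 * If the region could be covered by a granule-sized kernel
	 * identity mapping, we must use that mapping's attribute to
	 * avoid an attribute alias.
	 */
	attr = kern_mem_attribute(phys_addr, size);
	if (attr & EFI_MEMORY_WB)
		return pgprot_cacheable(vma_prot);
	else if (attr & EFI_MEMORY_UC)
		return pgprot_noncached(vma_prot);

	/*
	 * Otherwise fall back to an attribute the EFI memory map says
	 * the region supports, preferring cacheable (WB) access.
	 */
	if (efi_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
		return pgprot_cacheable(vma_prot);

	/* Region absent from the EFI memory map: default to UC. */
	return pgprot_noncached(vma_prot);
}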
Signed-off-by: Bjorn Helgaas <bjorn.helgaas@hp.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'include/asm-ia64')
 -rw-r--r--  include/asm-ia64/io.h       |  1
 -rw-r--r--  include/asm-ia64/pgtable.h  | 22
 2 files changed, 11 insertions, 12 deletions
diff --git a/include/asm-ia64/io.h b/include/asm-ia64/io.h
index c2e3742108b..781ee2c7e8c 100644
--- a/include/asm-ia64/io.h
+++ b/include/asm-ia64/io.h
@@ -88,6 +88,7 @@ phys_to_virt (unsigned long address)
 }
 
 #define ARCH_HAS_VALID_PHYS_ADDR_RANGE
+extern u64 kern_mem_attribute (unsigned long phys_addr, unsigned long size);
 extern int valid_phys_addr_range (unsigned long addr, size_t count); /* efi.c */
 extern int valid_mmap_phys_addr_range (unsigned long addr, size_t count);
 
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index c0f8144f234..90f3a232923 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -317,22 +317,20 @@ ia64_phys_addr_valid (unsigned long addr)
 #define pte_mkhuge(pte)	(__pte(pte_val(pte)))
 
 /*
- * Macro to a page protection value as "uncacheable".  Note that "protection" is really a
- * misnomer here as the protection value contains the memory attribute bits, dirty bits,
- * and various other bits as well.
+ * Make page protection values cacheable, uncacheable, or write-
+ * combining.  Note that "protection" is really a misnomer here as the
+ * protection value contains the memory attribute bits, dirty bits, and
+ * various other bits as well.
  */
+#define pgprot_cacheable(prot)		__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WB)
 #define pgprot_noncached(prot)		__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_UC)
-
-/*
- * Macro to make mark a page protection value as "write-combining".
- * Note that "protection" is really a misnomer here as the protection
- * value contains the memory attribute bits, dirty bits, and various
- * other bits as well.  Accesses through a write-combining translation
- * works bypasses the caches, but does allow for consecutive writes to
- * be combined into single (but larger) write transactions.
- */
 #define pgprot_writecombine(prot)	__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WC)
 
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+				     unsigned long size, pgprot_t vma_prot);
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+
 static inline unsigned long
 pgd_index (unsigned long address)
 {
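For reference, the pgprot_* macros above are typically applied in a driver's
mmap hook before remapping. The example below is a hedged sketch using the
standard remap_pfn_range() interface; the mydev_mmap function and the device
it maps are purely hypothetical.

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical driver mmap hook showing pgprot_noncached() in use. */
static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/*
	 * Clear the memory-attribute bits and select UC so the MMIO
	 * registers are never accessed through the cache.
	 */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       size, vma->vm_page_prot);
}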