Author:    Michael Neuling <mikey@neuling.org>    2007-12-06 17:24:48 +1100
Committer: Paul Mackerras <paulus@samba.org>      2007-12-11 13:45:56 +1100
commit 584f8b71a2e8abdaeb4b6f4fddaf542b61392453 (patch)
tree   c14f26334e3a3524046f0790f96564a4a4f22d92 /arch
parent 44ef339073f67d4abcc62ae52a5fbc069d7a4d29 (diff)
[POWERPC] Use SLB size from the device tree
Currently we hardwire the number of SLBs to 64, but PAPR says we should
use the ibm,slb-size property to obtain the number of SLB entries.  This
uses this property instead of assuming 64.  If no property is found, we
assume 64 entries as before.

This soft patches the SLB handler, so it shouldn't change performance at
all.

Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
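[Editor's note] The "soft patching" mentioned above works by rewriting the immediate
operand of the cmpldi instruction at the slb_compare_rr_to_size label once the real
SLB size is known. A minimal sketch of the idea, assuming the patch_slb_encoding()
helper in slb.c simply overwrites the low 16 bits of the instruction word and flushes
the icache (an illustrative reconstruction, not necessarily the exact kernel code):

    #include <asm/cacheflush.h>    /* flush_icache_range() */

    /*
     * Sketch: the cmpldi at slb_compare_rr_to_size is assembled with a 0
     * immediate; at boot its 16-bit immediate field is overwritten with
     * the SLB size obtained from the ibm,slb-size device-tree property.
     */
    static inline void patch_slb_encoding(unsigned int *insn_addr,
                                          unsigned int immed)
    {
        /* Replace the low 16-bit immediate field of the instruction word. */
        *insn_addr = (*insn_addr & 0xffff0000) | immed;

        /* Make sure the modified instruction is visible to instruction fetch. */
        flush_icache_range((unsigned long)insn_addr,
                           (unsigned long)insn_addr + 4);
    }

With mmu_slb_size filled in from ibm,slb-size during early boot, slb_initialize()
can then call patch_slb_encoding(slb_compare_rr_to_size, mmu_slb_size), as the
diff below shows.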
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/kernel/prom.c              15
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c          1
-rw-r--r--  arch/powerpc/mm/slb.c                    3
-rw-r--r--  arch/powerpc/mm/slb_low.S                5
-rw-r--r--  arch/powerpc/platforms/pasemi/setup.c    3
-rw-r--r--  arch/powerpc/xmon/xmon.c                 2
6 files changed, 25 insertions, 4 deletions
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index acc0d247d3c..6c2d8836f77 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -583,6 +583,20 @@ static void __init check_cpu_pa_features(unsigned long node)
ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
}
+#ifdef CONFIG_PPC64
+static void __init check_cpu_slb_size(unsigned long node)
+{
+ u32 *slb_size_ptr;
+
+ slb_size_ptr = of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
+ if (slb_size_ptr != NULL) {
+ mmu_slb_size = *slb_size_ptr;
+ }
+}
+#else
+#define check_cpu_slb_size(node) do { } while(0)
+#endif
+
static struct feature_property {
const char *name;
u32 min_value;
@@ -713,6 +727,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
check_cpu_feature_properties(node);
check_cpu_pa_features(node);
+ check_cpu_slb_size(node);
#ifdef CONFIG_PPC_PSERIES
if (nthreads > 1)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index f09730bf3a3..cbbd8b0bc8f 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -96,6 +96,7 @@ int mmu_vmalloc_psize = MMU_PAGE_4K;
int mmu_io_psize = MMU_PAGE_4K;
int mmu_kernel_ssize = MMU_SEGSIZE_256M;
int mmu_highuser_ssize = MMU_SEGSIZE_256M;
+u16 mmu_slb_size = 64;
#ifdef CONFIG_HUGETLB_PAGE
int mmu_huge_psize = MMU_PAGE_16M;
unsigned int HPAGE_SHIFT;
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 27922dff8b9..3cf0802cd2b 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -256,6 +256,7 @@ void slb_initialize(void)
static int slb_encoding_inited;
extern unsigned int *slb_miss_kernel_load_linear;
extern unsigned int *slb_miss_kernel_load_io;
+ extern unsigned int *slb_compare_rr_to_size;
/* Prepare our SLB miss handler based on our page size */
linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
@@ -269,6 +270,8 @@ void slb_initialize(void)
SLB_VSID_KERNEL | linear_llp);
patch_slb_encoding(slb_miss_kernel_load_io,
SLB_VSID_KERNEL | io_llp);
+ patch_slb_encoding(slb_compare_rr_to_size,
+ mmu_slb_size);
DBG("SLB: linear LLP = %04x\n", linear_llp);
DBG("SLB: io LLP = %04x\n", io_llp);
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 1328a81a84a..657f6b37e9d 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -227,8 +227,9 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
7: ld r10,PACASTABRR(r13)
addi r10,r10,1
- /* use a cpu feature mask if we ever change our slb size */
- cmpldi r10,SLB_NUM_ENTRIES
+ /* This gets soft patched on boot. */
+_GLOBAL(slb_compare_rr_to_size)
+ cmpldi r10,0
blt+ 4f
li r10,SLB_NUM_BOLTED
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index 5748194a667..6d7d068ceba 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -36,6 +36,7 @@
#include <asm/mpic.h>
#include <asm/smp.h>
#include <asm/time.h>
+#include <asm/mmu.h>
#include <pcmcia/ss.h>
#include <pcmcia/cistpl.h>
@@ -302,7 +303,7 @@ static int pas_machine_check_handler(struct pt_regs *regs)
int i;
printk(KERN_ERR "slb contents:\n");
- for (i = 0; i < SLB_NUM_ENTRIES; i++) {
+ for (i = 0; i < mmu_slb_size; i++) {
asm volatile("slbmfee %0,%1" : "=r" (e) : "r" (i));
asm volatile("slbmfev %0,%1" : "=r" (v) : "r" (i));
printk(KERN_ERR "%02d %016lx %016lx\n", i, e, v);
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 381d467cf55..c60d123e9f1 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2543,7 +2543,7 @@ static void dump_slb(void)
printf("SLB contents of cpu %x\n", smp_processor_id());
- for (i = 0; i < SLB_NUM_ENTRIES; i++) {
+ for (i = 0; i < mmu_slb_size; i++) {
asm volatile("slbmfee %0,%1" : "=r" (tmp) : "r" (i));
printf("%02d %016lx ", i, tmp);