path: root/arch/mips/include/asm/barrier.h
author	David S. Miller <davem@davemloft.net>	2019-02-15 12:38:38 -0800
committer	David S. Miller <davem@davemloft.net>	2019-02-15 12:38:38 -0800
commit	3313da8188cc346a205783c22c37e821b4b7016d (patch)
tree	5697cd985220def9bc2e35dfbb832dad04c2d051 /arch/mips/include/asm/barrier.h
parent	50f444aa50a4f3fab35a04f56d6bb83dc1e8c875 (diff)
parent	24f0a48743a256bdec1bcb80708bc309da4aa261 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
The netfilter conflicts were rather simple overlapping changes. However, the cls_tcindex.c stuff was a bit more complex.

On the 'net' side, Cong is fixing several races and memory leaks, whilst on the 'net-next' side Vlad is adding the rtnl-ness support.

What I've decided to do, in order to resolve this, is revert the conversion over to using a workqueue that Cong did, bringing us back to pure RCU. I did it this way because I believe that either Cong's races don't apply with how Vlad did things, or Cong will have to implement the race fix slightly differently.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/mips/include/asm/barrier.h')
-rw-r--r--	arch/mips/include/asm/barrier.h	| 36
1 file changed, 36 insertions(+), 0 deletions(-)
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index a5eb1bb199a7..b7f6ac5e513c 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -222,6 +222,42 @@
 #define __smp_mb__before_atomic()	__smp_mb__before_llsc()
 #define __smp_mb__after_atomic()	smp_llsc_mb()
+/*
+ * Some Loongson 3 CPUs have a bug wherein execution of a memory access (load,
+ * store or pref) in between an ll & sc can cause the sc instruction to
+ * erroneously succeed, breaking atomicity. Whilst it's unusual to write code
+ * containing such sequences, this bug bites harder than we might otherwise
+ * expect due to reordering & speculation:
+ *
+ * 1) A memory access appearing prior to the ll in program order may actually
+ * be executed after the ll - this is the reordering case.
+ *
+ * In order to avoid this we need to place a memory barrier (ie. a sync
+ * instruction) prior to every ll instruction, in between it & any earlier
+ * memory access instructions. Many of these cases are already covered by
+ * smp_mb__before_llsc() but for the remaining cases, typically ones in
+ * which multiple CPUs may operate on a memory location but ordering is not
+ * usually guaranteed, we use loongson_llsc_mb() below.
+ *
+ * This reordering case is fixed by 3A R2 CPUs, ie. 3A2000 models and later.
+ *
+ * 2) If a conditional branch exists between an ll & sc with a target outside
+ * of the ll-sc loop, for example an exit upon value mismatch in cmpxchg()
+ * or similar, then misprediction of the branch may allow speculative
+ * execution of memory accesses from outside of the ll-sc loop.
+ *
+ * In order to avoid this we need a memory barrier (ie. a sync instruction)
+ * at each affected branch target, for which we also use loongson_llsc_mb()
+ * defined below.
+ *
+ * This case affects all current Loongson 3 CPUs.
+ */
+#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS /* Loongson-3's LLSC workaround */
+#define loongson_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
+#else
+#define loongson_llsc_mb() do { } while (0)
+#endif
+
 #include <asm-generic/barrier.h>
 #endif /* __ASM_BARRIER_H */
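
To make case 1 in the new comment concrete, below is a minimal sketch (not part of this patch) of where loongson_llsc_mb() is intended to sit: before the ll of an ll/sc loop, between it and any earlier memory accesses. It assumes this header is included; the function name atomic_add_sketch, the plain volatile int counter, and the exact asm sequence are illustrative only, loosely modelled on the kernel's MIPS atomic helpers:

static inline void atomic_add_sketch(int i, volatile int *counter)
{
	int temp;

	loongson_llsc_mb();	/* case 1: order earlier accesses before the ll */
	__asm__ __volatile__(
	"	.set	push					\n"
	"	.set	mips3					\n"
	"1:	ll	%0, %1		# load-linked		\n"
	"	addu	%0, %2		# add in a register	\n"
	"	sc	%0, %1		# store-conditional	\n"
	"	beqz	%0, 1b		# retry if sc failed	\n"
	"	.set	pop					\n"
	: "=&r" (temp), "+m" (*counter)
	: "Ir" (i));
}

Without the barrier, a store preceding the call in program order could be reordered to execute between the ll and the sc on an affected CPU, letting the sc erroneously succeed.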
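
Case 2 is easiest to see in a cmpxchg()-style sequence, where the bne can branch out of the ll/sc loop. This too is a hedged sketch rather than the actual diff: the real kernel builds such loops with macros in asm/cmpxchg.h, cmpxchg_sketch is a hypothetical name, and __WEAK_LLSC_MB is the string form of the sync barrier defined earlier in this header:

static inline int cmpxchg_sketch(volatile int *p, int old, int new)
{
	int prev, tmp;

	loongson_llsc_mb();	/* case 1: barrier before the ll */
	__asm__ __volatile__(
	"	.set	push					\n"
	"	.set	mips3					\n"
	"1:	ll	%0, %2		# load current value	\n"
	"	bne	%0, %3, 2f	# mismatch: leave loop	\n"
	"	move	%1, %4					\n"
	"	sc	%1, %2					\n"
	"	beqz	%1, 1b		# retry if sc failed	\n"
	"2:	" __WEAK_LLSC_MB	/* case 2: barrier at the branch target */
	"	.set	pop					\n"
	: "=&r" (prev), "=&r" (tmp), "+m" (*p)
	: "r" (old), "r" (new)
	: "memory");
	return prev;
}

If the bne is mispredicted as taken, speculation resumes at label 2; the sync there stops memory accesses from beyond the loop being speculatively executed between the ll and the sc.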