-rw-r--r--  libc/Android.mk  60
-rw-r--r--  libc/arch-arm/bionic/arm_memcpy.S  123
-rw-r--r--  libc/arch-arm/bionic/atexit.S  69
-rw-r--r--  libc/arch-arm/bionic/crtbegin_dynamic.S  1
-rw-r--r--  libc/arch-arm/bionic/crtbegin_so.S  6
-rw-r--r--  libc/arch-arm/bionic/crtbegin_static.S  1
-rw-r--r--  libc/arch-arm/bionic/ffs.S  35
-rw-r--r--  libc/arch-arm/bionic/memcmp.S  81
-rw-r--r--  libc/arch-arm/bionic/memcpy.S  126
-rw-r--r--  libc/arch-arm/bionic/memmove.S  356
-rw-r--r--  libc/arch-arm/bionic/memset.S  87
-rw-r--r--  libc/arch-arm/bionic/strlen-armv7.S  111
-rw-r--r--  libc/arch-x86/bionic/__stack_chk_fail_local.S  48
-rw-r--r--  libc/arch-x86/bionic/atexit.S  66
-rw-r--r--  libc/arch-x86/bionic/atomics_x86.S  97
-rw-r--r--  libc/arch-x86/bionic/clone.S  5
-rw-r--r--  libc/arch-x86/bionic/crtbegin_dynamic.S  64
-rw-r--r--  libc/arch-x86/bionic/crtbegin_so.S  118
-rw-r--r--  libc/arch-x86/bionic/crtbegin_static.S  56
-rw-r--r--  libc/arch-x86/bionic/crtend.S  10
-rw-r--r--  libc/arch-x86/bionic/crtend_so.S  49
-rw-r--r--  libc/arch-x86/include/endian.h  12
-rw-r--r--  libc/arch-x86/include/machine/_types.h  7
-rw-r--r--  libc/arch-x86/include/sys/atomics.h  65
-rw-r--r--  libc/arch-x86/string/memcmp_wrapper.S  2
-rw-r--r--  libc/arch-x86/string/sse2-memset5-atom.S  13
-rw-r--r--  libc/arch-x86/string/sse2-strlen-atom.S  369
-rw-r--r--  libc/arch-x86/string/ssse3-memcmp3-new.S (renamed from libc/arch-x86/string/ssse3-memcmp3.S)  142
-rw-r--r--  libc/arch-x86/string/ssse3-memcpy5.S  58
-rw-r--r--  libc/arch-x86/string/ssse3-strcmp-latest.S (renamed from libc/arch-x86/string/ssse3-strcmp.S)  29
-rw-r--r--  libc/arch-x86/string/strcmp_wrapper.S  2
-rw-r--r--  libc/arch-x86/string/strlen_wrapper.S  40
-rw-r--r--  libc/arch-x86/string/strncmp_wrapper.S  2
-rw-r--r--  libc/bionic/cpuacct.c  24
-rw-r--r--  libc/bionic/dlmalloc.c  89
-rw-r--r--  libc/bionic/libc_init_common.h  2
-rw-r--r--  libc/bionic/libc_init_static.c  2
-rw-r--r--  libc/bionic/pthread.c  8
-rw-r--r--  libc/bionic/ptrace.c  131
-rw-r--r--  libc/bionic/sha1.c  186
-rw-r--r--  libc/include/errno.h  1
-rw-r--r--  libc/include/pthread.h  1
-rw-r--r--  libc/include/resolv.h  18
-rw-r--r--  libc/include/sha1.h  15
-rw-r--r--  libc/include/sys/_system_properties.h  3
-rw-r--r--  libc/kernel/common/linux/if_ether.h  1
-rw-r--r--  libc/kernel/common/linux/socket.h  4
-rw-r--r--  libc/kernel/common/linux/tty.h  2
-rw-r--r--  libc/kernel/common/linux/videodev2.h  3
-rw-r--r--  libc/netbsd/net/dnsproxyd_lock.h  15
-rw-r--r--  libc/netbsd/net/getaddrinfo.c  210
-rw-r--r--  libc/netbsd/net/getnameinfo.c  122
-rw-r--r--  libc/netbsd/resolv/res_cache.c  622
-rw-r--r--  libc/netbsd/resolv/res_init.c  6
-rw-r--r--  libc/netbsd/resolv/res_send.c  2
-rw-r--r--  libc/netbsd/resolv/res_state.c  99
-rw-r--r--  libc/private/__dso_handle.S  5
-rw-r--r--  libc/private/__dso_handle_so.S  38
-rw-r--r--  libc/private/resolv_cache.h  38
-rw-r--r--  libc/stdlib/atexit.c  2
-rw-r--r--  libc/string/memmove.c  6
-rwxr-xr-x  libc/tools/checksyscalls.py  14
-rw-r--r--  libc/tzcode/localtime.c  1
-rw-r--r--  libc/unistd/exec.c  6
-rw-r--r--  libc/unistd/open.c  2
-rw-r--r--  libc/unistd/openat.c  2
-rw-r--r--  libc/unistd/opendir.c  6
-rw-r--r--  libc/unistd/sigsetmask.c  2
-rw-r--r--  libc/zoneinfo/zoneinfo.dat  bin  503608 -> 484401 bytes
-rw-r--r--  libc/zoneinfo/zoneinfo.idx  bin  29536 -> 29900 bytes
-rw-r--r--  libc/zoneinfo/zoneinfo.version  2
-rw-r--r--  libm/i387/fenv.c  3
-rw-r--r--  libm/i387/fenv.h  240
-rw-r--r--  libm/include/i387/fenv.h  5
-rw-r--r--  libm/src/s_logb.c  4
-rw-r--r--  libm/src/s_remquo.c  217
-rw-r--r--  libm/src/s_remquof.c  153
-rw-r--r--  libthread_db/Android.mk  7
-rw-r--r--  libthread_db/include/sys/procfs.h  22
-rw-r--r--  libthread_db/include/thread_db.h  8
-rw-r--r--  libthread_db/libthread_db.c  22
-rw-r--r--  linker/Android.mk  1
-rw-r--r--  linker/arch/x86/begin.S  6
-rw-r--r--  linker/dlfcn.c  2
-rw-r--r--  linker/linker.c  9
85 files changed, 3586 insertions(+), 1113 deletions(-)
diff --git a/libc/Android.mk b/libc/Android.mk
index f555d2a5c..34e588dfa 100644
--- a/libc/Android.mk
+++ b/libc/Android.mk
@@ -352,15 +352,31 @@ libc_common_src_files += \
arch-arm/bionic/memset.S \
arch-arm/bionic/setjmp.S \
arch-arm/bionic/sigsetjmp.S \
- arch-arm/bionic/strlen.c.arm \
arch-arm/bionic/strcpy.S \
arch-arm/bionic/strcmp.S \
arch-arm/bionic/syscall.S \
- string/memmove.c.arm \
- string/bcopy.c \
string/strncmp.c \
unistd/socketcalls.c
+# Check if we want a neonized version of memmove instead of the
+# current ARM version
+ifeq ($(TARGET_USE_SCORPION_BIONIC_OPTIMIZATION),true)
+libc_common_src_files += \
+ arch-arm/bionic/memmove.S
+else # Non-Scorpion-based ARM
+libc_common_src_files += \
+ string/bcopy.c \
+ string/memmove.c.arm
+endif # !TARGET_USE_SCORPION_BIONIC_OPTIMIZATION
+
+ifeq ($(ARCH_ARM_HAVE_ARMV7A),true)
+libc_common_src_files += arch-arm/bionic/strlen-armv7.S
+else
+libc_common_src_files += arch-arm/bionic/strlen.c.arm
+endif
+
+
+
# These files need to be arm so that gdbserver
# can set breakpoints in them without messing
# up any thumb code.
@@ -401,7 +417,7 @@ libc_common_src_files += \
arch-x86/string/memset_wrapper.S \
arch-x86/string/strcmp_wrapper.S \
arch-x86/string/strncmp_wrapper.S \
- arch-x86/string/strlen.S \
+ arch-x86/string/strlen_wrapper.S \
bionic/pthread-rwlocks.c \
string/strcpy.c \
bionic/pthread-timers.c \
@@ -496,6 +512,14 @@ ifeq ($(TARGET_ARCH),arm)
ifeq ($(ARCH_ARM_HAVE_TLS_REGISTER),true)
libc_common_cflags += -DHAVE_ARM_TLS_REGISTER
endif
+ # Add in defines to activate SCORPION_NEON_OPTIMIZATION
+ ifeq ($(TARGET_USE_SCORPION_BIONIC_OPTIMIZATION),true)
+ libc_common_cflags += -DSCORPION_NEON_OPTIMIZATION
+ ifeq ($(TARGET_USE_SCORPION_PLD_SET),true)
+ libc_common_cflags += -DPLDOFFS=$(TARGET_SCORPION_BIONIC_PLDOFFS)
+ libc_common_cflags += -DPLDSIZE=$(TARGET_SCORPION_BIONIC_PLDSIZE)
+ endif
+ endif
ifeq ($(TARGET_HAVE_TEGRA_ERRATA_657451),true)
libc_common_cflags += -DHAVE_TEGRA_ERRATA_657451
endif
@@ -525,6 +549,10 @@ ifeq ($(BOARD_USE_NASTY_PTHREAD_CREATE_HACK),true)
libc_common_cflags += -DNASTY_PTHREAD_CREATE_HACK
endif
+ifeq ($(TARGET_ARCH),arm)
+libc_crt_target_cflags += -DCRT_LEGACY_WORKAROUND
+endif
+
# Define some common includes
# ========================================================
libc_common_c_includes := \
@@ -549,18 +577,24 @@ ifneq ($(filter arm x86,$(TARGET_ARCH)),)
# that will call __cxa_finalize(&__dso_handle) in order to ensure that
# static C++ destructors are properly called on dlclose().
#
+
+libc_crt_target_so_cflags := $(libc_crt_target_cflags)
+ifeq ($(TARGET_ARCH),x86)
+ # This flag must be added for x86 targets, but not for ARM
+ libc_crt_target_so_cflags += -fPIC
+endif
GEN := $(TARGET_OUT_STATIC_LIBRARIES)/crtbegin_so.o
$(GEN): $(LOCAL_PATH)/arch-$(TARGET_ARCH)/bionic/crtbegin_so.S
@mkdir -p $(dir $@)
- $(TARGET_CC) $(libc_crt_target_cflags) -o $@ -c $<
+ $(TARGET_CC) $(libc_crt_target_so_cflags) -o $@ -c $<
ALL_GENERATED_SOURCES += $(GEN)
GEN := $(TARGET_OUT_STATIC_LIBRARIES)/crtend_so.o
$(GEN): $(LOCAL_PATH)/arch-$(TARGET_ARCH)/bionic/crtend_so.S
@mkdir -p $(dir $@)
- $(TARGET_CC) $(libc_crt_target_cflags) -o $@ -c $<
+ $(TARGET_CC) $(libc_crt_target_so_cflags) -o $@ -c $<
ALL_GENERATED_SOURCES += $(GEN)
-endif # TARGET_ARCH == x86
+endif # TARGET_ARCH == x86 || TARGET_ARCH == arm
GEN := $(TARGET_OUT_STATIC_LIBRARIES)/crtbegin_static.o
@@ -596,6 +630,17 @@ include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(libc_common_src_files)
LOCAL_CFLAGS := $(libc_common_cflags)
+ifdef NEEDS_ARM_ERRATA_754319_754320
+asm_flags := \
+ --defsym NEEDS_ARM_ERRATA_754319_754320_ASM=1
+
+LOCAL_CFLAGS+= \
+ $(foreach f,$(asm_flags),-Wa,"$(f)")
+endif
+
+ifeq ($(TARGET_ARCH),arm)
+LOCAL_CFLAGS += -DCRT_LEGACY_WORKAROUND
+endif
LOCAL_C_INCLUDES := $(libc_common_c_includes)
LOCAL_MODULE := libc_common
LOCAL_SYSTEM_SHARED_LIBRARIES :=
@@ -712,6 +757,7 @@ LOCAL_MODULE:= libc_malloc_debug_leak
LOCAL_SHARED_LIBRARIES := libc
LOCAL_WHOLE_STATIC_LIBRARIES := libc_common
LOCAL_SYSTEM_SHARED_LIBRARIES :=
+LOCAL_ALLOW_UNDEFINED_SYMBOLS := true
# Don't prelink
LOCAL_PRELINK_MODULE := false
# Don't install on release build
diff --git a/libc/arch-arm/bionic/arm_memcpy.S b/libc/arch-arm/bionic/arm_memcpy.S
new file mode 100644
index 000000000..ae1cf1ad1
--- /dev/null
+++ b/libc/arch-arm/bionic/arm_memcpy.S
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2011 Texas Instruments
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+ .text
+ .fpu neon
+ .code 32
+ .align 4
+
+
+/* r0 - dest */
+/* r1 - src */
+/* r2 - length */
+ .global memcpy
+memcpy:
+ .fnstart
+#if defined TARGET_BOARD_PLATFORM == omap4
+#define CACHE_LINE_SIZE 32
+#else
+#define CACHE_LINE_SIZE 64
+#endif
+ CMP r2,#3
+ BLS _BMLIB_memcpy_lastbytes
+ ANDS r12,r0,#3
+ BEQ l1;
+ LDRB r3,[r1],#1
+ CMP r12,#2
+ ADD r2,r2,r12
+ LDRLSB r12, [r1], #1
+ STRB r3,[r0],#1
+ LDRCCB r3,[r1],#1
+ STRLSB r12,[r0],#1
+ SUB r2,r2,#4
+ STRCCB r3,[r0],#1
+l1:
+ ANDS r3,r1,#3
+ BEQ _BMLIB_aeabi_memcpy4
+l3:
+ SUBS r2,r2,#8
+ BCC l2
+ LDR r3,[r1],#4
+ LDR r12,[r1],#4
+ STR r3,[r0],#4
+ STR r12,[r0],#4
+ B l3
+l2:
+ ADDS r2,r2,#4
+ LDRPL r3,[r1],#4
+ STRPL r3,[r0],#4
+ MOV r0,r0
+_BMLIB_memcpy_lastbytes:
+ LSLS r2,r2,#31
+ LDRCSB r3,[r1],#1
+ LDRCSB r12,[r1],#1
+ LDRMIB r2,[r1],#1
+ STRCSB r3,[r0],#1
+ STRCSB r12,[r0],#1
+ STRMIB r2,[r0],#1
+ BX lr
+
+_BMLIB_aeabi_memcpy4:
+ PUSH {r4-r8,lr}
+ SUBS r2,r2,#0x20
+ BCC l4
+ DSB
+#ifndef NOPLD
+ PLD [r1, #0]
+ PLD [r1, #(CACHE_LINE_SIZE*1)]
+ PLD [r1, #(CACHE_LINE_SIZE*2)]
+ PLD [r1, #(CACHE_LINE_SIZE*3)]
+ PLD [r1, #(CACHE_LINE_SIZE*4)]
+#endif
+l5:
+#ifndef NOPLD
+ PLD [r1, #(CACHE_LINE_SIZE*5)]
+#endif
+ LDMCS r1!,{r3-r8,r12,lr}
+ STMCS r0!,{r3-r8,r12,lr}
+ SUBS r2,r2,#0x20
+ BCS l5
+l4:
+ LSLS r12,r2,#28
+ LDMCS r1!,{r3,r4,r12,lr}
+ STMCS r0!,{r3,r4,r12,lr}
+ LDMMI r1!,{r3,r4}
+ STMMI r0!,{r3,r4}
+ POP {r4-r8,lr}
+ LSLS r12,r2,#30
+ LDRCS r3,[r1],#4
+ STRCS r3,[r0],#4
+ BXEQ lr
+_BMLIB_memcpy_lastbytes_aligned:
+ LSLS r2,r2,#31
+ LDRCSH r3,[r1],#2
+ LDRMIB r2,[r1],#1
+ STRCSH r3,[r0],#2
+ STRMIB r2,[r0],#1
+ BX lr
+ .fnend
diff --git a/libc/arch-arm/bionic/atexit.S b/libc/arch-arm/bionic/atexit.S
new file mode 100644
index 000000000..aa1e18d61
--- /dev/null
+++ b/libc/arch-arm/bionic/atexit.S
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CRT_LEGACY_WORKAROUND
+ .arch armv5te
+ .fpu softvfp
+ .eabi_attribute 20, 1
+ .eabi_attribute 21, 1
+ .eabi_attribute 23, 3
+ .eabi_attribute 24, 1
+ .eabi_attribute 25, 1
+ .eabi_attribute 26, 2
+ .eabi_attribute 30, 4
+ .eabi_attribute 18, 4
+ .code 16
+ .section .text.atexit,"ax",%progbits
+ .align 2
+ .global atexit
+ .hidden atexit
+ .code 16
+ .thumb_func
+ .type atexit, %function
+atexit:
+ .fnstart
+.LFB0:
+ .save {r4, lr}
+ push {r4, lr}
+.LCFI0:
+ ldr r3, .L3
+ mov r1, #0
+ @ sp needed for prologue
+.LPIC0:
+ add r3, pc
+ ldr r2, [r3]
+ bl __cxa_atexit
+ pop {r4, pc}
+.L4:
+ .align 2
+.L3:
+ .word __dso_handle-(.LPIC0+4)
+.LFE0:
+ .fnend
+ .size atexit, .-atexit
+#endif
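
The hand-written Thumb atexit above does nothing more than forward to __cxa_atexit with this module's __dso_handle, so dlclose() can later run the handler through __cxa_finalize. A minimal C sketch of the same call (the function-pointer cast is implicit in the assembly):

extern void *__dso_handle;
extern int __cxa_atexit(void (*func)(void *), void *arg, void *dso);

/* Register func against this DSO's handle; r0 = func, r1 = NULL, r2 = __dso_handle
 * in the assembly version. */
int atexit(void (*func)(void))
{
    return __cxa_atexit((void (*)(void *))func, NULL, __dso_handle);
}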
diff --git a/libc/arch-arm/bionic/crtbegin_dynamic.S b/libc/arch-arm/bionic/crtbegin_dynamic.S
index d18e715f5..099908444 100644
--- a/libc/arch-arm/bionic/crtbegin_dynamic.S
+++ b/libc/arch-arm/bionic/crtbegin_dynamic.S
@@ -85,3 +85,4 @@ __CTOR_LIST__:
.long -1
#include "__dso_handle.S"
+#include "atexit.S"
diff --git a/libc/arch-arm/bionic/crtbegin_so.S b/libc/arch-arm/bionic/crtbegin_so.S
index bb6b3e2c3..9275b1e01 100644
--- a/libc/arch-arm/bionic/crtbegin_so.S
+++ b/libc/arch-arm/bionic/crtbegin_so.S
@@ -52,4 +52,10 @@ __FINI_ARRAY__:
.long -1
.long __on_dlclose
+#ifdef CRT_LEGACY_WORKAROUND
#include "__dso_handle.S"
+#else
+#include "__dso_handle_so.S"
+#endif
+
+#include "atexit.S"
diff --git a/libc/arch-arm/bionic/crtbegin_static.S b/libc/arch-arm/bionic/crtbegin_static.S
index 6f9cf25dd..13b05b272 100644
--- a/libc/arch-arm/bionic/crtbegin_static.S
+++ b/libc/arch-arm/bionic/crtbegin_static.S
@@ -86,3 +86,4 @@ __CTOR_LIST__:
#include "__dso_handle.S"
+#include "atexit.S"
diff --git a/libc/arch-arm/bionic/ffs.S b/libc/arch-arm/bionic/ffs.S
index f11141c97..052b46a53 100644
--- a/libc/arch-arm/bionic/ffs.S
+++ b/libc/arch-arm/bionic/ffs.S
@@ -36,47 +36,14 @@
* 6 bits as an index into the table. This algorithm should be a win
* over the checking each bit in turn as per the C compiled version.
*
- * under ARMv5 there's an instruction called CLZ (count leading Zero's) that
- * could be used
- *
- * This is the ffs algorithm devised by d.seal and posted to comp.sys.arm on
- * 16 Feb 1994.
+ * since ARMv5 there's an instruction called CLZ (count leading Zero's)
*/
ENTRY(ffs)
/* Standard trick to isolate bottom bit in r0 or 0 if r0 = 0 on entry */
rsb r1, r0, #0
ands r0, r0, r1
-#ifndef __ARM_ARCH_5__
- /*
- * now r0 has at most one set bit, call this X
- * if X = 0, all further instructions are skipped
- */
- adrne r2, .L_ffs_table
- orrne r0, r0, r0, lsl #4 /* r0 = X * 0x11 */
- orrne r0, r0, r0, lsl #6 /* r0 = X * 0x451 */
- rsbne r0, r0, r0, lsl #16 /* r0 = X * 0x0450fbaf */
-
- /* now lookup in table indexed on top 6 bits of r0 */
- ldrneb r0, [ r2, r0, lsr #26 ]
-
- bx lr
-
-.text;
-.type .L_ffs_table, _ASM_TYPE_OBJECT;
-.L_ffs_table:
-/* 0 1 2 3 4 5 6 7 */
- .byte 0, 1, 2, 13, 3, 7, 0, 14 /* 0- 7 */
- .byte 4, 0, 8, 0, 0, 0, 0, 15 /* 8-15 */
- .byte 11, 5, 0, 0, 9, 0, 0, 26 /* 16-23 */
- .byte 0, 0, 0, 0, 0, 22, 28, 16 /* 24-31 */
- .byte 32, 12, 6, 0, 0, 0, 0, 0 /* 32-39 */
- .byte 10, 0, 0, 25, 0, 0, 21, 27 /* 40-47 */
- .byte 31, 0, 0, 0, 0, 24, 0, 20 /* 48-55 */
- .byte 30, 0, 23, 19, 29, 18, 17, 0 /* 56-63 */
-#else
clzne r0, r0
rsbne r0, r0, #32
bx lr
-#endif
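
With the table-lookup fallback removed, the remaining ffs path is just the CLZ sequence: isolate the lowest set bit, then turn its leading-zero count into a 1-based index. A hedged C equivalent (helper name is illustrative):

/* ffs(x) == 0 for x == 0, otherwise 32 - clz(x & -x). */
int ffs_clz(int v)
{
    unsigned x = (unsigned)v;
    x &= -x;                            /* keep only the lowest set bit */
    return x ? 32 - __builtin_clz(x) : 0;
}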
diff --git a/libc/arch-arm/bionic/memcmp.S b/libc/arch-arm/bionic/memcmp.S
index 67dcddc1b..0fe26996c 100644
--- a/libc/arch-arm/bionic/memcmp.S
+++ b/libc/arch-arm/bionic/memcmp.S
@@ -43,36 +43,70 @@
* (2) The loads are scheduled in a way they won't stall
*/
+#if __ARM_ARCH__ >= 7
+#define __ARM_CORTEX
+
+#if defined(CORTEX_CACHE_LINE_32)
+#define CACHE_LINE_SIZE 32
+#else
+#define CACHE_LINE_SIZE 64
+#endif
+
+#endif /* __ARM_ARCH__ */
+
+
memcmp:
.fnstart
+
+#if defined(__ARM_CORTEX)
+ pld [r0, #(CACHE_LINE_SIZE * 0)]
+ pld [r0, #(CACHE_LINE_SIZE * 1)]
+#else
+
PLD (r0, #0)
PLD (r1, #0)
-
+#endif
/* take of the case where length is 0 or the buffers are the same */
cmp r0, r1
+#if !defined(__ARM_CORTEX)
cmpne r2, #0
+#endif
moveq r0, #0
bxeq lr
+#if defined(__ARM_CORTEX)
+ pld [r1, #(CACHE_LINE_SIZE * 0)]
+ pld [r1, #(CACHE_LINE_SIZE * 1)]
+
+ /* make sure we have at least 8+4 bytes, this simplify things below
+ * and avoid some overhead for small blocks
+ */
+ cmp r2, #(8+4)
+ bmi 10f
+#endif /* __ARM_CORTEX */
+
+
.save {r4, lr}
/* save registers */
stmfd sp!, {r4, lr}
-
+#if !defined(__ARM_CORTEX)
PLD (r0, #32)
PLD (r1, #32)
+#endif
/* since r0 hold the result, move the first source
* pointer somewhere else
*/
mov r4, r0
-
+
+#if !defined(__ARM_CORTEX)
/* make sure we have at least 8+4 bytes, this simplify things below
* and avoid some overhead for small blocks
*/
cmp r2, #(8+4)
bmi 8f
-
+#endif
/* align first pointer to word boundary
* offset = -src & 3
*/
@@ -109,8 +143,14 @@ memcmp:
subs r2, r2, #(32 + 4)
bmi 1f
-0: PLD (r4, #64)
+0:
+#if defined(__ARM_CORTEX)
+ pld [r4, #(CACHE_LINE_SIZE * 2)]
+ pld [r1, #(CACHE_LINE_SIZE * 2)]
+#else
+ PLD (r4, #64)
PLD (r1, #64)
+#endif
ldr r0, [r4], #4
ldr lr, [r1, #4]!
eors r0, r0, ip
@@ -176,6 +216,21 @@ memcmp:
9: /* restore registers and return */
ldmfd sp!, {r4, lr}
bx lr
+#if defined(__ARM_CORTEX)
+10: /* process less than 12 bytes */
+ cmp r2, #0
+ moveq r0, #0
+ bxeq lr
+ mov r3, r0
+11:
+ ldrb r0, [r3], #1
+ ldrb ip, [r1], #1
+ subs r0, ip
+ bxne lr
+ subs r2, r2, #1
+ bne 11b
+ bx lr
+#endif /* __ARM_CORTEX */
.fnend
@@ -198,8 +253,14 @@ memcmp:
bic r1, r1, #3
ldr lr, [r1], #4
-6: PLD (r1, #64)
+6:
+#if defined(__ARM_CORTEX)
+ pld [r1, #(CACHE_LINE_SIZE * 2)]
+ pld [r4, #(CACHE_LINE_SIZE * 2)]
+#else
+ PLD (r1, #64)
PLD (r4, #64)
+#endif
mov ip, lr, lsr #16
ldr lr, [r1], #4
ldr r0, [r4], #4
@@ -240,13 +301,13 @@ memcmp:
4: /*************** offset is 1 or 3 (less optimized) ***************/
- stmfd sp!, {r5, r6, r7}
+ stmfd sp!, {r5, r6, r7}
// r5 = rhs
// r6 = lhs
// r7 = scratch
- mov r5, r0, lsl #3 /* r5 = right shift */
+ mov r5, r0, lsl #3 /* r5 = right shift */
rsb r6, r5, #32 /* r6 = left shift */
/* align the unaligned pointer */
@@ -269,7 +330,7 @@ memcmp:
bhs 6b
sub r1, r1, r6, lsr #3
- ldmfd sp!, {r5, r6, r7}
+ ldmfd sp!, {r5, r6, r7}
/* are we done? */
adds r2, r2, #8
@@ -284,5 +345,5 @@ memcmp:
sub r1, r1, r6, lsr #3
sub r4, r4, #4
mov r2, #4
- ldmfd sp!, {r5, r6, r7}
+ ldmfd sp!, {r5, r6, r7}
b 8b
diff --git a/libc/arch-arm/bionic/memcpy.S b/libc/arch-arm/bionic/memcpy.S
index ba55996ec..f2a4e3328 100644
--- a/libc/arch-arm/bionic/memcpy.S
+++ b/libc/arch-arm/bionic/memcpy.S
@@ -2,6 +2,8 @@
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
+ * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -29,7 +31,114 @@
#include <machine/cpu-features.h>
#if defined(__ARM_NEON__)
-
+#if defined(SCORPION_NEON_OPTIMIZATION)
+ /*
+ * These can be overridden in:
+ * device/<vendor>/<board>/BoardConfig.mk
+ * by setting the following:
+ * TARGET_USE_SCORPION_BIONIC_OPTIMIZATION := true
+ * TARGET_USE_SCORPION_PLD_SET := true
+ * TARGET_SCORPION_BIONIC_PLDOFFS := <pldoffset>
+ * TARGET_SCORPION_BIONIC_PLDSIZE := <pldsize>
+ */
+#ifndef PLDOFFS
+#define PLDOFFS (6)
+#endif
+#ifndef PLDSIZE
+#define PLDSIZE (128) /* L2 cache line size */
+#endif
+ .code 32
+ .align 5
+ .globl memcpy
+ .func
+memcpy:
+ push {r0}
+ cmp r2, #4
+ blt .Lneon_lt4
+ cmp r2, #16
+ blt .Lneon_lt16
+ cmp r2, #32
+ blt .Lneon_16
+ cmp r2, #128
+ blt .Lneon_copy_32_a
+ /* Copy blocks of 128-bytes (word-aligned) at a time*/
+ /* Code below is optimized for PLDSIZE=128 only */
+ mov r12, r2, lsr #7
+ cmp r12, #PLDOFFS
+ ble .Lneon_copy_128_loop_nopld
+ sub r12, #PLDOFFS
+ pld [r1, #(PLDOFFS-1)*PLDSIZE]
+.Lneon_copy_128_loop_outer:
+ pld [r1, #(PLDOFFS*PLDSIZE)]
+ vld1.32 {q0, q1}, [r1]!
+ vld1.32 {q2, q3}, [r1]!
+ vld1.32 {q8, q9}, [r1]!
+ vld1.32 {q10, q11}, [r1]!
+ subs r12, r12, #1
+ vst1.32 {q0, q1}, [r0]!
+ vst1.32 {q2, q3}, [r0]!
+ vst1.32 {q8, q9}, [r0]!
+ vst1.32 {q10, q11}, [r0]!
+ bne .Lneon_copy_128_loop_outer
+ mov r12, #PLDOFFS
+.Lneon_copy_128_loop_nopld:
+ vld1.32 {q0, q1}, [r1]!
+ vld1.32 {q2, q3}, [r1]!
+ vld1.32 {q8, q9}, [r1]!
+ vld1.32 {q10, q11}, [r1]!
+ subs r12, r12, #1
+ vst1.32 {q0, q1}, [r0]!
+ vst1.32 {q2, q3}, [r0]!
+ vst1.32 {q8, q9}, [r0]!
+ vst1.32 {q10, q11}, [r0]!
+ bne .Lneon_copy_128_loop_nopld
+ ands r2, r2, #0x7f
+ beq .Lneon_exit
+ cmp r2, #32
+ blt .Lneon_16
+ nop
+ /* Copy blocks of 32-bytes (word aligned) at a time*/
+.Lneon_copy_32_a:
+ mov r12, r2, lsr #5
+.Lneon_copy_32_loop_a:
+ vld1.32 {q0,q1}, [r1]!
+ subs r12, r12, #1
+ vst1.32 {q0,q1}, [r0]!
+ bne .Lneon_copy_32_loop_a
+ ands r2, r2, #0x1f
+ beq .Lneon_exit
+.Lneon_16:
+ subs r2, r2, #16
+ blt .Lneon_lt16
+ vld1.32 {q8}, [r1]!
+ vst1.32 {q8}, [r0]!
+ beq .Lneon_exit
+.Lneon_lt16:
+ movs r12, r2, lsl #29
+ bcc .Lneon_skip8
+ ldr r3, [r1], #4
+ ldr r12, [r1], #4
+ str r3, [r0], #4
+ str r12, [r0], #4
+.Lneon_skip8:
+ bpl .Lneon_lt4
+ ldr r3, [r1], #4
+ str r3, [r0], #4
+.Lneon_lt4:
+ movs r2, r2, lsl #31
+ bcc .Lneon_lt2
+ ldrh r3, [r1], #2
+ strh r3, [r0], #2
+.Lneon_lt2:
+ bpl .Lneon_exit
+ ldrb r12, [r1]
+ strb r12, [r0]
+.Lneon_exit:
+ pop {r0}
+ bx lr
+ .endfunc
+ .end
+#else /* !SCORPION_NEON_OPTIMIZATION */
.text
.fpu neon
@@ -141,11 +250,14 @@ memcpy:
strcsb ip, [r0], #1
strcsb lr, [r0], #1
+.ifdef NEEDS_ARM_ERRATA_754319_754320_ASM
+ VMOV s0,s0 @ NOP for ARM Errata
+.endif
ldmfd sp!, {r0, lr}
bx lr
.fnend
-
+#endif /* !SCORPION_NEON_OPTIMIZATION */
#else /* __ARM_ARCH__ < 7 */
@@ -260,20 +372,30 @@ cached_aligned32:
*
*/
+#if __ARM_ARCH__ == 5
// Align the preload register to a cache-line because the cpu does
// "critical word first" (the first word requested is loaded first).
bic r12, r1, #0x1F
add r12, r12, #64
+#endif
1: ldmia r1!, { r4-r11 }
+
+#if __ARM_ARCH__ == 5
PLD (r12, #64)
+#else
+ PLD (r1, #64)
+#endif
subs r2, r2, #32
+#if __ARM_ARCH__ == 5
// NOTE: if r12 is more than 64 ahead of r1, the following ldrhi
// for ARM9 preload will not be safely guarded by the preceding subs.
// When it is safely guarded the only possibility to have SIGSEGV here
// is because the caller overstates the length.
ldrhi r3, [r12], #32 /* cheap ARM9 preload */
+#endif
+
stmia r0!, { r4-r11 }
bhs 1b
diff --git a/libc/arch-arm/bionic/memmove.S b/libc/arch-arm/bionic/memmove.S
new file mode 100644
index 000000000..123419584
--- /dev/null
+++ b/libc/arch-arm/bionic/memmove.S
@@ -0,0 +1,356 @@
+/***************************************************************************
+ Copyright (c) 2009-2011 Code Aurora Forum. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Code Aurora nor the names of its contributors may
+ be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+ ***************************************************************************/
+
+/***************************************************************************
+ * Neon memmove: Attempts to do a memmove with Neon registers if possible,
+ * Inputs:
+ * dest: The destination buffer
+ * src: The source buffer
+ * n: The size of the buffer to transfer
+ * Outputs:
+ *
+ ***************************************************************************/
+
+#include <machine/cpu-features.h>
+
+#if defined(SCORPION_NEON_OPTIMIZATION)
+ /*
+ * These can be overridden in:
+ * device/<vendor>/<board>/BoardConfig.mk
+ * by setting the following:
+ * TARGET_USE_SCORPION_BIONIC_OPTIMIZATION := true
+ * TARGET_USE_SCORPION_PLD_SET := true
+ * TARGET_SCORPION_BIONIC_PLDOFFS := <pldoffset>
+ * TARGET_SCORPION_BIONIC_PLDSIZE := <pldsize>
+ */
+#ifndef PLDOFFS
+#define PLDOFFS (6)
+#endif
+#ifndef PLDSIZE
+#define PLDSIZE (128) /* L2 cache line size */
+#endif
+
+ .code 32
+ .align 5
+ .global memmove
+ .type memmove, %function
+
+ .global bcopy
+ .type bcopy, %function
+
+bcopy:
+ mov r12, r0
+ mov r0, r1
+ mov r1, r12
+memmove:
+ push {r0}
+
+ /*
+ * The requirements for memmove state that the function should
+ * operate as if data were being copied from the source to a
+ * buffer, then to the destination. This is to allow a user
+ * to copy data from a source and target that overlap.
+ *
+ * We can't just do byte copies front-to-back automatically, since
+ * there's a good chance we may have an overlap (why else would someone
+ * intentionally use memmove then?).
+ *
+ * We'll break this into two parts. Front-to-back, or back-to-front
+ * copies.
+ */
+.Lneon_memmove_cmf:
+ cmp r0, r1
+ blt .Lneon_front_to_back_copy
+ bgt .Lneon_back_to_front_copy
+ b .Lneon_memmove_done
+
+ /* #############################################################
+ * Front to Back copy
+ */
+.Lneon_front_to_back_copy:
+ /*
+ * For small copies, just do a quick memcpy. We can do this for
+ * front-to-back copies, aligned or unaligned, since we're only
+ * doing 1 byte at a time...
+ */
+ cmp r2, #4
+ bgt .Lneon_f2b_gt4
+ cmp r2, #0
+.Lneon_f2b_smallcopy_loop:
+ beq .Lneon_memmove_done
+ ldrb r12, [r1], #1
+ subs r2, r2, #1
+ strb r12, [r0], #1
+ b .Lneon_f2b_smallcopy_loop
+.Lneon_f2b_gt4:
+ /* The window size is in r3. */
+ sub r3, r1, r0
+ /* #############################################################
+ * Front to Back copy
+ */
+ /*
+ * Note that we can't just route based on the size in r2. If that's
+ * larger than the overlap window in r3, we could potentially
+ * (and likely!) destroy data we're copying.
+ */
+ cmp r2, r3
+ movle r12, r2
+ movgt r12, r3
+ cmp r12, #256
+ bge .Lneon_f2b_copy_128
+ cmp r12, #64
+ bge .Lneon_f2b_copy_32
+ cmp r12, #16
+ bge .Lneon_f2b_copy_16
+ cmp r12, #8
+ bge .Lneon_f2b_copy_8
+ cmp r12, #4
+ bge .Lneon_f2b_copy_4
+ b .Lneon_f2b_copy_1
+ nop
+.Lneon_f2b_copy_128:
+ mov r12, r2, lsr #7
+ cmp r12, #PLDOFFS
+ ble .Lneon_f2b_copy_128_loop_nopld
+ sub r12, #PLDOFFS
+ pld [r1, #(PLDOFFS-1)*PLDSIZE]
+.Lneon_f2b_copy_128_loop_outer:
+ pld [r1, #(PLDOFFS*PLDSIZE)]
+ vld1.32 {q0,q1}, [r1]!
+ vld1.32 {q2,q3}, [r1]!
+ vld1.32 {q8,q9}, [r1]!
+ vld1.32 {q10,q11}, [r1]!
+ subs r12, r12, #1
+ vst1.32 {q0,q1}, [r0]!
+ vst1.32 {q2,q3}, [r0]!
+ vst1.32 {q8,q9}, [r0]!
+ vst1.32 {q10,q11}, [r0]!
+ bne .Lneon_f2b_copy_128_loop_outer
+ mov r12, #PLDOFFS
+.Lneon_f2b_copy_128_loop_nopld:
+ vld1.32 {q0,q1}, [r1]!
+ vld1.32 {q2,q3}, [r1]!
+ vld1.32 {q8,q9}, [r1]!
+ vld1.32 {q10,q11}, [r1]!
+ subs r12, r12, #1
+ vst1.32 {q0,q1}, [r0]!
+ vst1.32 {q2,q3}, [r0]!
+ vst1.32 {q8,q9}, [r0]!
+ vst1.32 {q10,q11}, [r0]!
+ bne .Lneon_f2b_copy_128_loop_nopld
+ ands r2, r2, #0x7f
+ beq .Lneon_memmove_done
+ cmp r2, #32
+ bge .Lneon_f2b_copy_32
+ b .Lneon_f2b_copy_finish
+.Lneon_f2b_copy_32:
+ mov r12, r2, lsr #5
+.Lneon_f2b_copy_32_loop:
+ vld1.32 {q0,q1}, [r1]!
+ subs r12, r12, #1
+ vst1.32 {q0,q1}, [r0]!
+ bne .Lneon_f2b_copy_32_loop
+ ands r2, r2, #0x1f
+ beq .Lneon_memmove_done
+.Lneon_f2b_copy_finish:
+.Lneon_f2b_copy_16:
+ movs r12, r2, lsr #4
+ beq .Lneon_f2b_copy_8
+.Lneon_f2b_copy_16_loop:
+ vld1.32 {q0}, [r1]!
+ subs r12, r12, #1
+ vst1.32 {q0}, [r0]!
+ bne .Lneon_f2b_copy_16_loop
+ ands r2, r2, #0xf
+ beq .Lneon_memmove_done
+.Lneon_f2b_copy_8:
+ movs r12, r2, lsr #3
+ beq .Lneon_f2b_copy_4
+.Lneon_f2b_copy_8_loop:
+ vld1.32 {d0}, [r1]!
+ subs r12, r12, #1
+ vst1.32 {d0}, [r0]!
+ bne .Lneon_f2b_copy_8_loop
+ ands r2, r2, #0x7
+ beq .Lneon_memmove_done
+.Lneon_f2b_copy_4:
+ movs r12, r2, lsr #2
+ beq .Lneon_f2b_copy_1
+.Lneon_f2b_copy_4_loop:
+ ldr r3, [r1], #4
+ subs r12, r12, #1
+ str r3, [r0], #4
+ bne .Lneon_f2b_copy_4_loop
+ ands r2, r2, #0x3
+ nop
+.Lneon_f2b_copy_1:
+ cmp r2, #0
+ beq .Lneon_memmove_done
+.Lneon_f2b_copy_1_loop:
+ ldrb r12, [r1], #1
+ subs r2, r2, #1
+ strb r12, [r0], #1
+ bne .Lneon_f2b_copy_1_loop
+.Lneon_f2b_finish:
+ b .Lneon_memmove_done
+
+ /* #############################################################
+ * Back to Front copy
+ */
+.Lneon_back_to_front_copy:
+ /*
+ * Here, we'll want to shift to the end of the buffers. This
+ * actually points us one past where we need to go, but since
+ * we'll pre-decrement throughout, this will be fine.
+ */
+ add r0, r0, r2
+ add r1, r1, r2
+ cmp r2, #4
+ bgt .Lneon_b2f_gt4
+ cmp r2, #0
+.Lneon_b2f_smallcopy_loop:
+ beq .Lneon_memmove_done
+ ldrb r12, [r1, #-1]!
+ subs r2, r2, #1
+ strb r12, [r0, #-1]!
+ b .Lneon_b2f_smallcopy_loop
+.Lneon_b2f_gt4:
+ /*
+ * The minimum of the overlap window size and the copy size
+ * is in r3.
+ */
+ sub r3, r0, r1
+ /*
+ * #############################################################
+ * Back to Front copy -
+ */
+ cmp r2, r3
+ movle r12, r2
+ movgt r12, r3
+ cmp r12, #256
+ bge .Lneon_b2f_copy_128
+ cmp r12, #64
+ bge .Lneon_b2f_copy_32
+ cmp r12, #8
+ bge .Lneon_b2f_copy_8
+ cmp r12, #4
+ bge .Lneon_b2f_copy_4
+ b .Lneon_b2f_copy_1
+ nop
+.Lneon_b2f_copy_128:
+ movs r12, r2, lsr #7
+ cmp r12, #PLDOFFS
+ ble .Lneon_b2f_copy_128_loop_nopld
+ sub r12, #PLDOFFS
+ pld [r1, #-(PLDOFFS-1)*PLDSIZE]
+.Lneon_b2f_copy_128_loop_outer:
+ pld [r1, #-(PLDOFFS*PLDSIZE)]
+ sub r1, r1, #128
+ sub r0, r0, #128
+ vld1.32 {q0, q1}, [r1]!
+ vld1.32 {q2, q3}, [r1]!
+ vld1.32 {q8, q9}, [r1]!
+ vld1.32 {q10, q11}, [r1]!
+ subs r12, r12, #1
+ vst1.32 {q0, q1}, [r0]!
+ vst1.32 {q2, q3}, [r0]!
+ vst1.32 {q8, q9}, [r0]!
+ vst1.32 {q10, q11}, [r0]!
+ sub r1, r1, #128
+ sub r0, r0, #128
+ bne .Lneon_b2f_copy_128_loop_outer
+ mov r12, #PLDOFFS
+.Lneon_b2f_copy_128_loop_nopld:
+ sub r1, r1, #128
+ sub r0, r0, #128
+ vld1.32 {q0, q1}, [r1]!
+ vld1.32 {q2, q3}, [r1]!
+ vld1.32 {q8, q9}, [r1]!
+ vld1.32 {q10, q11}, [r1]!
+ subs r12, r12, #1
+ vst1.32 {q0, q1}, [r0]!
+ vst1.32 {q2, q3}, [r0]!
+ vst1.32 {q8, q9}, [r0]!
+ vst1.32 {q10, q11}, [r0]!
+ sub r1, r1, #128
+ sub r0, r0, #128
+ bne .Lneon_b2f_copy_128_loop_nopld
+ ands r2, r2, #0x7f
+ beq .Lneon_memmove_done
+ cmp r2, #32
+ bge .Lneon_b2f_copy_32
+ b .Lneon_b2f_copy_finish
+.Lneon_b2f_copy_32:
+ mov r12, r2, lsr #5
+.Lneon_b2f_copy_32_loop:
+ sub r1, r1, #32
+ sub r0, r0, #32
+ vld1.32 {q0,q1}, [r1]
+ subs r12, r12, #1
+ vst1.32 {q0,q1}, [r0]
+ bne .Lneon_b2f_copy_32_loop
+ ands r2, r2, #0x1f
+ beq .Lneon_memmove_done
+.Lneon_b2f_copy_finish:
+.Lneon_b2f_copy_8:
+ movs r12, r2, lsr #0x3
+ beq .Lneon_b2f_copy_4
+.Lneon_b2f_copy_8_loop:
+ sub r1, r1, #8
+ sub r0, r0, #8
+ vld1.32 {d0}, [r1]
+ subs r12, r12, #1
+ vst1.32 {d0}, [r0]
+ bne .Lneon_b2f_copy_8_loop
+ ands r2, r2, #0x7
+ beq .Lneon_memmove_done
+.Lneon_b2f_copy_4:
+ movs r12, r2, lsr #0x2
+ beq .Lneon_b2f_copy_1
+.Lneon_b2f_copy_4_loop:
+ ldr r3, [r1, #-4]!
+ subs r12, r12, #1
+ str r3, [r0, #-4]!
+ bne .Lneon_b2f_copy_4_loop
+ ands r2, r2, #0x3
+ nop
+.Lneon_b2f_copy_1:
+ cmp r2, #0
+ beq .Lneon_memmove_done
+.Lneon_b2f_copy_1_loop:
+ ldrb r12, [r1, #-1]!
+ subs r2, r2, #1
+ strb r12, [r0, #-1]!
+ bne .Lneon_b2f_copy_1_loop
+
+.Lneon_memmove_done:
+ pop {r0}
+ bx lr
+
+ .end
+#endif /* SCORPION_NEON_OPTIMIZATION */
+
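
The comment block at the top of the new memmove describes the copy-direction decision. In plain C the same dispatch looks roughly like the byte-wise sketch below; the assembly additionally limits each inner-loop block size to the overlap window, min(n, |dest - src|), so that source data is never overwritten before it has been read.

#include <stddef.h>

void *memmove_sketch(void *dest, const void *src, size_t n)
{
    unsigned char *d = dest;
    const unsigned char *s = src;

    if (d < s) {
        /* front-to-back copy: safe when the destination starts below the source */
        while (n--)
            *d++ = *s++;
    } else if (d > s) {
        /* back-to-front copy: start at the end, pre-decrement throughout */
        d += n;
        s += n;
        while (n--)
            *--d = *--s;
    }
    return dest;
}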
diff --git a/libc/arch-arm/bionic/memset.S b/libc/arch-arm/bionic/memset.S
index 93abe15a2..69abd4bdf 100644
--- a/libc/arch-arm/bionic/memset.S
+++ b/libc/arch-arm/bionic/memset.S
@@ -2,6 +2,8 @@
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
+ * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -35,6 +37,90 @@
.align
+
+#if defined(SCORPION_NEON_OPTIMIZATION)
+ .code 32
+ .align 8
+ .global memset
+ .type memset, %function
+
+ .global bzero
+ .type bzero, %function
+
+bzero:
+ mov r2, r1
+ mov r1, #0
+memset:
+ push {r0}
+
+ cmp r2, #6
+ bgt .Lmemset_gt6
+ cmp r2, #0
+ beq .Lmemset_smallcopy_done
+.Lmemset_smallcopy_loop:
+ strb r1, [r0], #1
+ subs r2, r2, #1
+ bne .Lmemset_smallcopy_loop
+.Lmemset_smallcopy_done:
+ pop {r0}
+ bx lr
+
+.Lmemset_gt6:
+ vdup.8 q0, r1
+ vmov r1, s0
+
+ /*
+ * Decide where to route for the maximum copy sizes.
+ */
+ cmp r2, #4
+ blt .Lmemset_lt4
+ cmp r2, #16
+ blt .Lmemset_lt16
+ vmov q1, q0
+ cmp r2, #128
+ blt .Lmemset_32
+.Lmemset_128:
+ mov r12, r2, lsr #7
+.Lmemset_128_loop:
+ vst1.32 {q0, q1}, [r0]!
+ vst1.32 {q0, q1}, [r0]!
+ vst1.32 {q0, q1}, [r0]!
+ vst1.32 {q0, q1}, [r0]!
+ subs r12, r12, #1
+ bne .Lmemset_128_loop
+ ands r2, r2, #0x7f
+ beq .Lmemset_end
+.Lmemset_32:
+ movs r12, r2, lsr #5
+ beq .Lmemset_lt32
+.Lmemset_32_loop:
+ subs r12, r12, #1
+ vst1.32 {q0, q1}, [r0]!
+ bne .Lmemset_32_loop
+ ands r2, r2, #0x1f
+ beq .Lmemset_end
+.Lmemset_lt32:
+ cmp r2, #16
+ blt .Lmemset_lt16
+ vst1.64 {q0}, [r0]!
+ subs r2, r2, #16
+ beq .Lmemset_end
+.Lmemset_lt16:
+ movs r12, r2, lsl #29
+ strcs r1, [r0], #4
+ strcs r1, [r0], #4
+ strmi r1, [r0], #4
+.Lmemset_lt4:
+ movs r2, r2, lsl #31
+ strcsh r1, [r0], #2
+ strmib r1, [r0]
+.Lmemset_end:
+ pop {r0}
+ bx lr
+
+ .end
+#else /* !SCORPION_NEON_OPTIMIZATION */
+
/*
* Optimized memset() for ARM.
*
@@ -115,3 +201,4 @@ memset:
bx lr
.fnend
+#endif /* SCORPION_NEON_OPTIMIZATION */
diff --git a/libc/arch-arm/bionic/strlen-armv7.S b/libc/arch-arm/bionic/strlen-armv7.S
new file mode 100644
index 000000000..125e92fb8
--- /dev/null
+++ b/libc/arch-arm/bionic/strlen-armv7.S
@@ -0,0 +1,111 @@
+/* Copyright (c) 2010-2011, Linaro Limited
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Linaro Limited nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ Written by Dave Gilbert <david.gilbert@linaro.org>
+
+ This strlen routine is optimised on a Cortex-A9 and should work on
+ all ARMv7 processors. This routine is reasonably fast for short
+ strings, but is probably slower than a simple implementation if all
+ your strings are very short */
+
+@ 2011-02-08 david.gilbert@linaro.org
+@ Extracted from local git 6848613a
+
+
+@ this lets us check a flag in a 00/ff byte easily in either endianness
+#ifdef __ARMEB__
+#define CHARTSTMASK(c) 1<<(31-(c*8))
+#else
+#define CHARTSTMASK(c) 1<<(c*8)
+#endif
+
+@-----------------------------------------------------------------------------------------------------------------------------
+ .syntax unified
+ .arch armv7-a
+
+ .thumb_func
+ .align 2
+ .p2align 4,,15
+ .global strlen
+ .type strlen,%function
+strlen:
+ @ r0 = string
+ @ returns count of bytes in string not including terminator
+ mov r1, r0
+ push { r4,r6 }
+ mvns r6, #0 @ all F
+ movs r4, #0
+ tst r0, #7
+ beq 2f
+
+1:
+ ldrb r2, [r1], #1
+ tst r1, #7 @ Hit alignment yet?
+ cbz r2, 10f @ Exit if we found the 0
+ bne 1b
+
+ @ So we're now aligned
+2:
+ ldmia r1!,{r2,r3}
+ uadd8 r2, r2, r6 @ Parallel add 0xff - sets the GE bits for anything that wasn't 0
+ sel r2, r4, r6 @ bytes are 00 for none-00 bytes, or ff for 00 bytes - NOTE INVERSION
+ uadd8 r3, r3, r6 @ Parallel add 0xff - sets the GE bits for anything that wasn't 0
+ sel r3, r2, r6 @ bytes are 00 for none-00 bytes, or ff for 00 bytes - NOTE INVERSION
+ cmp r3, #0
+ beq 2b
+
+strlenendtmp:
+ @ One (or more) of the bytes we loaded was 0 - but which one?
+ @ r2 has the mask corresponding to the first loaded word
+ @ r3 has a combined mask of the two words - but if r2 was all-non 0
+ @ then it's just the 2nd words
+ cmp r2, #0
+ itte eq
+ moveq r2, r3 @ the end is in the 2nd word
+ subeq r1,r1,#3
+ subne r1,r1,#7
+
+ @ r1 currently points to the 2nd byte of the word containing the 0
+ tst r2, # CHARTSTMASK(0) @ 1st character
+ bne 10f
+ adds r1,r1,#1
+ tst r2, # CHARTSTMASK(1) @ 2nd character
+ ittt eq
+ addeq r1,r1,#1
+ tsteq r2, # (3<<15) @ 2nd & 3rd character
+ @ If not the 3rd must be the last one
+ addeq r1,r1,#1
+
+10:
+ @ r0 is still at the beginning, r1 is pointing 1 byte after the terminator
+ sub r0, r1, r0
+ subs r0, r0, #1
+ pop { r4, r6 }
+ bx lr
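
The Linaro strlen scans eight bytes per iteration, using UADD8/SEL to flag any zero byte in each loaded word. A rough C analogue of the same word-at-a-time idea, using the classic bit trick in place of the ARM-specific instructions (names are illustrative):

#include <stddef.h>
#include <stdint.h>

size_t strlen_sketch(const char *s)
{
    const char *p = s;

    /* Advance byte by byte until the pointer is word aligned. */
    while ((uintptr_t)p & 3) {
        if (*p == '\0')
            return (size_t)(p - s);
        p++;
    }

    /* (v - 0x01010101) & ~v & 0x80808080 is non-zero iff v contains a zero byte. */
    const uint32_t *w = (const uint32_t *)p;
    for (;;) {
        uint32_t v = *w;
        if ((v - 0x01010101u) & ~v & 0x80808080u)
            break;
        w++;
    }

    /* Locate the exact terminator inside the flagged word. */
    p = (const char *)w;
    while (*p != '\0')
        p++;
    return (size_t)(p - s);
}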
diff --git a/libc/arch-x86/bionic/__stack_chk_fail_local.S b/libc/arch-x86/bionic/__stack_chk_fail_local.S
new file mode 100644
index 000000000..59fe86ec3
--- /dev/null
+++ b/libc/arch-x86/bionic/__stack_chk_fail_local.S
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+ * Contributed by: Intel Corporation
+ */
+
+ .text
+ .p2align 4,,15
+ .globl __stack_chk_fail_local
+ .hidden __stack_chk_fail_local
+ .type __stack_chk_fail_local, @function
+
+__stack_chk_fail_local:
+#ifdef __PIC__
+ pushl %ebx
+ call __x86.get_pc_thunk.bx
+ addl $_GLOBAL_OFFSET_TABLE_, %ebx
+ call __stack_chk_fail@PLT
+#else /* PIC */
+ jmp __stack_chk_fail
+#endif /* not PIC */
+
+ .size __stack_chk_fail_local, .-__stack_chk_fail_local
diff --git a/libc/arch-x86/bionic/atexit.S b/libc/arch-x86/bionic/atexit.S
new file mode 100644
index 000000000..b28f40bb6
--- /dev/null
+++ b/libc/arch-x86/bionic/atexit.S
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+ .text
+ .p2align 4,,15
+ .globl atexit
+ .hidden atexit
+ .type atexit, @function
+atexit:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ call __x86.get_pc_thunk.bx
+ addl $_GLOBAL_OFFSET_TABLE_, %ebx
+ subl $20, %esp
+ movl $0, 4(%esp)
+ movl __dso_handle@GOTOFF(%ebx), %eax
+ movl %eax, 8(%esp)
+ movl 8(%ebp), %eax
+ movl %eax, (%esp)
+ call __cxa_atexit@PLT
+ addl $20, %esp
+ popl %ebx
+ popl %ebp
+ ret
+ .size atexit, .-atexit
+
+ .section .text.__x86.get_pc_thunk.bx,"axG",@progbits,__x86.get_pc_thunk.bx,comdat
+ .globl __x86.get_pc_thunk.bx
+ .hidden __x86.get_pc_thunk.bx
+ .type __x86.get_pc_thunk.bx, @function
+__x86.get_pc_thunk.bx:
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ movl (%esp), %ebx
+ ret
diff --git a/libc/arch-x86/bionic/atomics_x86.S b/libc/arch-x86/bionic/atomics_x86.S
index 666e1821c..e98a391ee 100644
--- a/libc/arch-x86/bionic/atomics_x86.S
+++ b/libc/arch-x86/bionic/atomics_x86.S
@@ -73,100 +73,3 @@ __futex_syscall4:
popl %esi
popl %ebx
ret
-
-/* int __atomic_cmpxchg(int old, int new, volatile int* addr) */
-
-.text
-.globl __atomic_cmpxchg
-.type __atomic_cmpxchg, @function
-.align 4
-__atomic_cmpxchg:
- mov 4(%esp), %eax /* old */
- mov 8(%esp), %ecx /* new */
- mov 12(%esp), %edx /* addr */
- lock cmpxchg %ecx, (%edx)
- jnz 1f
- xor %eax, %eax
- jmp 2f
-1:
- movl $1, %eax
-2:
- ret /* 0 == success, 1 == failure */
-
-
-/* int __atomic_swap(int new, volatile int* addr) */
-
-.text
-.globl __atomic_swap
-.type __atomic_swap, @function
-.align 4
-__atomic_swap:
- mov 4(%esp), %ecx /* new */
- mov 8(%esp), %edx /* addr */
- lock xchg %ecx, (%edx)
- mov %ecx, %eax
- ret
-
-
-/*
- * int __atomic_dec(volatile int* addr)
- *
- * My x86 asm is really rusty.. this is probably suboptimal
- */
-
-.text
-.globl __atomic_dec
-.type __atomic_dec, @function
-.align 4
-__atomic_dec:
- pushl %ebx
- pushl %esi
- movl 12(%esp), %ebx /* addr */
-
-1:
- movl (%ebx), %esi /* old = *addr */
- movl %esi, %edx
- subl $1, %edx /* new = old - 1 */
-
- pushl %ebx
- pushl %edx
- pushl %esi
- call __atomic_cmpxchg
- addl $12, %esp
- test %eax, %eax
- jnz 1b
-
- movl %esi, %eax /* return old */
- popl %esi
- popl %ebx
- ret
-
-
-.text
-/* int __atomic_inc(volatile int* addr) */
-.globl __atomic_inc
-.type __atomic_inc, @function
-.align 4
-__atomic_inc:
- pushl %ebx
- pushl %esi
- movl 12(%esp), %ebx /* addr */
-
-1:
- movl (%ebx), %esi /* old = *addr */
- movl %esi, %edx
- addl $1, %edx /* new = old + 1 */
-
- pushl %ebx
- pushl %edx
- pushl %esi
- call __atomic_cmpxchg
- addl $12, %esp
- test %eax, %eax
- jnz 1b
-
- movl %esi, %eax /* return old */
- popl %esi
- popl %ebx
- ret
-
diff --git a/libc/arch-x86/bionic/clone.S b/libc/arch-x86/bionic/clone.S
index 3b50cc3d3..b9b0957f6 100644
--- a/libc/arch-x86/bionic/clone.S
+++ b/libc/arch-x86/bionic/clone.S
@@ -20,8 +20,7 @@ __pthread_clone:
movl %eax, -12(%ecx)
movl 24(%esp), %eax
movl %eax, -8(%ecx)
- lea (%ecx), %eax
- movl %eax, -4(%ecx)
+ movl %ecx, -4(%ecx)
movl $__NR_clone, %eax
int $0x80
@@ -52,4 +51,4 @@ __pthread_clone:
/* XXX: TODO: Add __bionic_clone here
* See bionic/bionic_clone.c and arch-arm/bionic/clone.S
* for more details...
- */
\ No newline at end of file
+ */
diff --git a/libc/arch-x86/bionic/crtbegin_dynamic.S b/libc/arch-x86/bionic/crtbegin_dynamic.S
index 88e7e6a0a..9ba0d2f6c 100644
--- a/libc/arch-x86/bionic/crtbegin_dynamic.S
+++ b/libc/arch-x86/bionic/crtbegin_dynamic.S
@@ -66,14 +66,7 @@ _start:
1: .long __PREINIT_ARRAY__
.long __INIT_ARRAY__
.long __FINI_ARRAY__
- .long __CTOR_LIST__
-
-# the .ctors section contains a list of pointers to "constructor"
-# functions that need to be called in order during C library initialization,
-# just before the program is being run. This is a C++ requirement
-#
-# the last entry shall be 0, and is defined in crtend.S
-#
+
.section .preinit_array, "aw"
.globl __PREINIT_ARRAY__
__PREINIT_ARRAY__:
@@ -83,15 +76,62 @@ __PREINIT_ARRAY__:
.globl __INIT_ARRAY__
__INIT_ARRAY__:
.long -1
+ .long frame_dummy
.section .fini_array, "aw"
.globl __FINI_ARRAY__
__FINI_ARRAY__:
.long -1
+ .long __do_global_dtors_aux
- .section .ctors, "aw"
- .globl __CTOR_LIST__
-__CTOR_LIST__:
- .long -1
+ .section .eh_frame,"a",@progbits
+ .align 4
+ .type __EH_FRAME_BEGIN__, @object
+__EH_FRAME_BEGIN__:
+ .text
+ .p2align 4,,15
+ .type __do_global_dtors_aux, @function
+__do_global_dtors_aux:
+ pushl %ebp
+ movl %esp, %ebp
+ subl $24, %esp
+ cmpb $0, completed.4454
+ jne .L4
+ movl $__deregister_frame_info_bases, %eax
+ testl %eax, %eax
+ je .L3
+ movl $__EH_FRAME_BEGIN__, (%esp)
+ call __deregister_frame_info_bases
+.L3:
+ movb $1, completed.4454
+.L4:
+ leave
+ ret
+ .text
+ .p2align 4,,15
+ .type frame_dummy, @function
+frame_dummy:
+ pushl %ebp
+ movl $__register_frame_info_bases, %eax
+ movl %esp, %ebp
+ subl $24, %esp
+ testl %eax, %eax
+ je .L7
+ movl %ebx, 12(%esp)
+ movl $0, 8(%esp)
+ movl $object.4466, 4(%esp)
+ movl $__EH_FRAME_BEGIN__, (%esp)
+ call __register_frame_info_bases
+.L7:
+ leave
+ ret
+ .local completed.4454
+ .comm completed.4454,1,1
+ .local object.4466
+ .comm object.4466,24,4
+ .weak __register_frame_info_bases
+ .weak __deregister_frame_info_bases
#include "__dso_handle.S"
+#include "atexit.S"
+#include "__stack_chk_fail_local.S"
diff --git a/libc/arch-x86/bionic/crtbegin_so.S b/libc/arch-x86/bionic/crtbegin_so.S
index d879feff0..99662fe16 100644
--- a/libc/arch-x86/bionic/crtbegin_so.S
+++ b/libc/arch-x86/bionic/crtbegin_so.S
@@ -1,18 +1,30 @@
-# This function is to be called when the shared library
-# is unloaded through dlclose()
-_on_dlclose:
- lea __dso_handle, %eax
- call __cxa_finalize
- ret
-
-/* we put the _init() function here in case the user files for the shared
- * libs want to drop things into .init section.
- * We then will call our ctors from crtend_so.o */
-.section .init
-.align 4
-.type _init, @function
-.globl _init
-_init:
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
.section .init_array, "aw"
.align 4
@@ -20,6 +32,7 @@ _init:
.globl __INIT_ARRAY__
__INIT_ARRAY__:
.long -1
+ .long frame_dummy
.section .fini_array, "aw"
.align 4
@@ -27,13 +40,72 @@ __INIT_ARRAY__:
.globl __FINI_ARRAY__
__FINI_ARRAY__:
.long -1
- .long _on_dlclose
+ .long __do_global_dtors_aux
-.section .ctors, "aw"
-.align 4
-.type __CTOR_LIST__, @object
-.globl __CTOR_LIST__
-__CTOR_LIST__:
- .long -1
+ .section .eh_frame,"a",@progbits
+ .align 4
+ .type __EH_FRAME_BEGIN__, @object
+__EH_FRAME_BEGIN__:
+ .text
+ .p2align 4,,15
+ .type __do_global_dtors_aux, @function
+__do_global_dtors_aux:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ call __x86.get_pc_thunk.bx
+ addl $_GLOBAL_OFFSET_TABLE_, %ebx
+ subl $20, %esp
+ cmpb $0, completed.4454@GOTOFF(%ebx)
+ jne .L5
+ movl __dso_handle@GOTOFF(%ebx), %eax
+ movl %eax, (%esp)
+ call __cxa_finalize@PLT
+ movl __deregister_frame_info_bases@GOT(%ebx), %eax
+ testl %eax, %eax
+ je .L4
+ leal __EH_FRAME_BEGIN__@GOTOFF(%ebx), %eax
+ movl %eax, (%esp)
+ call __deregister_frame_info_bases@PLT
+.L4:
+ movb $1, completed.4454@GOTOFF(%ebx)
+.L5:
+ addl $20, %esp
+ popl %ebx
+ popl %ebp
+ ret
+ .text
+ .p2align 4,,15
+ .type frame_dummy, @function
+frame_dummy:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ call __x86.get_pc_thunk.bx
+ addl $_GLOBAL_OFFSET_TABLE_, %ebx
+ subl $20, %esp
+ movl __register_frame_info_bases@GOT(%ebx), %eax
+ testl %eax, %eax
+ je .L8
+ leal object.4469@GOTOFF(%ebx), %eax
+ movl %eax, 4(%esp)
+ leal __EH_FRAME_BEGIN__@GOTOFF(%ebx), %eax
+ movl %ebx, 12(%esp)
+ movl $0, 8(%esp)
+ movl %eax, (%esp)
+ call __register_frame_info_bases@PLT
+.L8:
+ addl $20, %esp
+ popl %ebx
+ popl %ebp
+ ret
+ .local completed.4454
+ .comm completed.4454,1,1
+ .local object.4469
+ .comm object.4469,24,4
+ .weak __register_frame_info_bases
+ .weak __deregister_frame_info_bases
-#include "__dso_handle.S"
+#include "__dso_handle_so.S"
+#include "atexit.S"
+#include "__stack_chk_fail_local.S"
diff --git a/libc/arch-x86/bionic/crtbegin_static.S b/libc/arch-x86/bionic/crtbegin_static.S
index 3f8446ec7..8e7033017 100644
--- a/libc/arch-x86/bionic/crtbegin_static.S
+++ b/libc/arch-x86/bionic/crtbegin_static.S
@@ -65,7 +65,6 @@ _start:
1: .long __PREINIT_ARRAY__
.long __INIT_ARRAY__
.long __FINI_ARRAY__
- .long __CTOR_LIST__
.section .preinit_array, "aw"
.globl __PREINIT_ARRAY__
@@ -76,15 +75,62 @@ __PREINIT_ARRAY__:
.globl __INIT_ARRAY__
__INIT_ARRAY__:
.long -1
+ .long frame_dummy
.section .fini_array, "aw"
.globl __FINI_ARRAY__
__FINI_ARRAY__:
.long -1
+ .long __do_global_dtors_aux
- .section .ctors, "aw"
- .globl __CTOR_LIST__
-__CTOR_LIST__:
- .long -1
+ .section .eh_frame,"a",@progbits
+ .align 4
+ .type __EH_FRAME_BEGIN__, @object
+__EH_FRAME_BEGIN__:
+ .text
+ .p2align 4,,15
+ .type __do_global_dtors_aux, @function
+__do_global_dtors_aux:
+ pushl %ebp
+ movl %esp, %ebp
+ subl $24, %esp
+ cmpb $0, completed.4454
+ jne .L4
+ movl $__deregister_frame_info_bases, %eax
+ testl %eax, %eax
+ je .L3
+ movl $__EH_FRAME_BEGIN__, (%esp)
+ call __deregister_frame_info_bases
+.L3:
+ movb $1, completed.4454
+.L4:
+ leave
+ ret
+ .text
+ .p2align 4,,15
+ .type frame_dummy, @function
+frame_dummy:
+ pushl %ebp
+ movl $__register_frame_info_bases, %eax
+ movl %esp, %ebp
+ subl $24, %esp
+ testl %eax, %eax
+ je .L7
+ movl %ebx, 12(%esp)
+ movl $0, 8(%esp)
+ movl $object.4466, 4(%esp)
+ movl $__EH_FRAME_BEGIN__, (%esp)
+ call __register_frame_info_bases
+.L7:
+ leave
+ ret
+ .local completed.4454
+ .comm completed.4454,1,1
+ .local object.4466
+ .comm object.4466,24,4
+ .weak __register_frame_info_bases
+ .weak __deregister_frame_info_bases
#include "__dso_handle.S"
+#include "atexit.S"
+#include "__stack_chk_fail_local.S"
diff --git a/libc/arch-x86/bionic/crtend.S b/libc/arch-x86/bionic/crtend.S
index 7f5fb6660..68447e7db 100644
--- a/libc/arch-x86/bionic/crtend.S
+++ b/libc/arch-x86/bionic/crtend.S
@@ -1,4 +1,3 @@
-
.section .preinit_array, "aw"
.long 0
@@ -8,6 +7,9 @@
.section .fini_array, "aw"
.long 0
- .section .ctors, "aw"
- .long 0
-
+ .section .eh_frame,"a",@progbits
+ .align 4
+ .type __FRAME_END__, @object
+ .size __FRAME_END__, 4
+__FRAME_END__:
+ .zero 4
diff --git a/libc/arch-x86/bionic/crtend_so.S b/libc/arch-x86/bionic/crtend_so.S
index 7fb228083..63e58b970 100644
--- a/libc/arch-x86/bionic/crtend_so.S
+++ b/libc/arch-x86/bionic/crtend_so.S
@@ -1,47 +1,12 @@
-.text
-.align 4
-.type __bionic_call_ctors, @function
-
-/*
- * The CTORS_LIST is marked by -1 (start) and 0 (end).
- * We mark the end of the .ctors section with the __CTOR_END__ section so
- * that we can just iterate backwards from it until we hit -1 and execute
- * all the function pointers. This seems to be the way to do it for SVR4
- * derived systems.
- */
-__bionic_call_ctors:
- pushl %esi
- mov $__CTOR_END__, %esi
-
-0:
- /* now grab the next function pointer and check if its -1. If not,
- * call it, otherwise we're done. We use %esi since it's callee saved.
- */
- subl $4, %esi
- mov (%esi), %eax
- cmp $0xffffffff, %eax
- je 1f
- call *%eax
- jmp 0b
-
-1:
- /* we're done */
- popl %esi
- ret
-
-.section .init
-.align 4
- call __bionic_call_ctors
- ret
-
-.section .ctors, "aw", @progbits
-.align 4
-.type __CTOR_END__, @object
-__CTOR_END__:
- .long 0
-
.section .init_array, "aw"
.long 0
.section .fini_array, "aw"
.long 0
+
+ .section .eh_frame,"a",@progbits
+ .align 4
+ .type __FRAME_END__, @object
+ .size __FRAME_END__, 4
+__FRAME_END__:
+ .zero 4
diff --git a/libc/arch-x86/include/endian.h b/libc/arch-x86/include/endian.h
index ad37919f3..4a705364e 100644
--- a/libc/arch-x86/include/endian.h
+++ b/libc/arch-x86/include/endian.h
@@ -31,14 +31,14 @@
#if defined(_KERNEL) && !defined(I386_CPU)
#define __swap32md(x) ({ \
- u_int32_t __swap32md_x = (x); \
+ uint32_t __swap32md_x = (x); \
\
__asm ("bswap %1" : "+r" (__swap32md_x)); \
__swap32md_x; \
})
#else
#define __swap32md(x) ({ \
- u_int32_t __swap32md_x = (x); \
+ uint32_t __swap32md_x = (x); \
\
__asm ("rorw $8, %w1; rorl $16, %1; rorw $8, %w1" : \
"+r" (__swap32md_x)); \
@@ -47,13 +47,13 @@
#endif /* _KERNEL && !I386_CPU */
#define __swap64md(x) ({ \
- u_int64_t __swap64md_x = (x); \
+ uint64_t __swap64md_x = (x); \
\
- (u_int64_t)__swap32md(__swap64md_x >> 32) | \
- (u_int64_t)__swap32md(__swap64md_x & 0xffffffff) << 32; \
+ (uint64_t)__swap32md(__swap64md_x >> 32) | \
+ (uint64_t)__swap32md(__swap64md_x & 0xffffffff) << 32; \
})
#define __swap16md(x) ({ \
- u_int16_t __swap16md_x = (x); \
+ uint16_t __swap16md_x = (x); \
\
__asm ("rorw $8, %w1" : "+r" (__swap16md_x)); \
__swap16md_x; \
diff --git a/libc/arch-x86/include/machine/_types.h b/libc/arch-x86/include/machine/_types.h
index be4f6e409..e9280a5da 100644
--- a/libc/arch-x86/include/machine/_types.h
+++ b/libc/arch-x86/include/machine/_types.h
@@ -36,8 +36,8 @@
#define _I386__TYPES_H_
/* the kernel defines size_t as unsigned int, but g++ wants it to be unsigned long */
-#ifndef _SIZE_T
-# define _SIZE_T
+#ifndef _SIZE_T_DEFINED_
+# define _SIZE_T_DEFINED_
# ifdef ANDROID
typedef unsigned int size_t;
# else
@@ -54,9 +54,6 @@ typedef long int ssize_t;
typedef long ptrdiff_t;
#endif
-#define _OFF_T_DEFINED_
-#define _SIZE_T_DEFINED_
-
#include <linux/types.h>
/* 7.18.1.1 Exact-width integer types */
diff --git a/libc/arch-x86/include/sys/atomics.h b/libc/arch-x86/include/sys/atomics.h
new file mode 100644
index 000000000..7aed3ae0b
--- /dev/null
+++ b/libc/arch-x86/include/sys/atomics.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#ifndef _SYS_ATOMICS_H
+#define _SYS_ATOMICS_H
+
+#include <sys/cdefs.h>
+#include <sys/time.h>
+
+__BEGIN_DECLS
+
+static inline __attribute__((always_inline)) int
+__atomic_cmpxchg(int old, int _new, volatile int *ptr)
+{
+ return !__sync_bool_compare_and_swap (ptr, old, _new);
+}
+
+static inline __attribute__((always_inline)) int
+__atomic_swap(int _new, volatile int *ptr)
+{
+ return __sync_lock_test_and_set(ptr, _new);
+}
+
+static inline __attribute__((always_inline)) int
+__atomic_dec(volatile int *ptr)
+{
+ return __sync_fetch_and_sub (ptr, 1);
+}
+
+static inline __attribute__((always_inline)) int
+__atomic_inc(volatile int *ptr)
+{
+ return __sync_fetch_and_add (ptr, 1);
+}
+
+int __futex_wait(volatile void *ftx, int val, const struct timespec *timeout);
+int __futex_wake(volatile void *ftx, int count);
+
+__END_DECLS
+
+#endif /* _SYS_ATOMICS_H */
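
Note on the new header: the wrappers are thin shims over GCC's __sync builtins, and __atomic_cmpxchg inverts the builtin's result, so it returns 0 on success. A minimal spinlock sketch built on these wrappers, assuming a bionic build where the <sys/atomics.h> above is on the include path (real callers would normally block on the __futex_wait/__futex_wake pair declared alongside rather than spin):

    #include <sys/atomics.h>

    static volatile int lock_word = 0;

    static void spin_lock(void) {
        /* __atomic_cmpxchg returns 0 when the 0 -> 1 swap succeeded */
        while (__atomic_cmpxchg(0, 1, &lock_word))
            ;  /* another thread holds the lock; retry */
    }

    static void spin_unlock(void) {
        /* __atomic_swap returns the previous value; ignored here */
        (void)__atomic_swap(0, &lock_word);
    }
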
diff --git a/libc/arch-x86/string/memcmp_wrapper.S b/libc/arch-x86/string/memcmp_wrapper.S
index 7e28c1e7b..fa0c67259 100644
--- a/libc/arch-x86/string/memcmp_wrapper.S
+++ b/libc/arch-x86/string/memcmp_wrapper.S
@@ -31,7 +31,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#if defined(USE_SSSE3)
# define MEMCMP memcmp
-# include "ssse3-memcmp3.S"
+# include "ssse3-memcmp3-new.S"
#else
diff --git a/libc/arch-x86/string/sse2-memset5-atom.S b/libc/arch-x86/string/sse2-memset5-atom.S
index 59a598c36..4b7f71bca 100644
--- a/libc/arch-x86/string/sse2-memset5-atom.S
+++ b/libc/arch-x86/string/sse2-memset5-atom.S
@@ -49,7 +49,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#endif
#ifndef cfi_restore
-# define cfi_restore(reg) .cfi_restore (reg)
+# define cfi_restore(reg) .cfi_restore reg
#endif
#ifndef cfi_adjust_cfa_offset
@@ -285,7 +285,6 @@ L(32bytesormore):
pxor %xmm0, %xmm0
#else
movd %eax, %xmm0
- punpcklbw %xmm0, %xmm0
pshufd $0, %xmm0, %xmm0
#endif
testl $0xf, %edx
@@ -329,14 +328,17 @@ L(128bytesormore):
#ifdef DATA_CACHE_SIZE
POP (%ebx)
+# define RESTORE_EBX_STATE CFI_PUSH (%ebx)
cmp $DATA_CACHE_SIZE, %ecx
#else
# ifdef SHARED
+# define RESTORE_EBX_STATE
call __i686.get_pc_thunk.bx
add $_GLOBAL_OFFSET_TABLE_, %ebx
cmp __x86_data_cache_size@GOTOFF(%ebx), %ecx
# else
POP (%ebx)
+# define RESTORE_EBX_STATE CFI_PUSH (%ebx)
cmp __x86_data_cache_size, %ecx
# endif
#endif
@@ -370,7 +372,7 @@ L(128bytesormore_normal):
jae L(128bytesormore_normal)
L(128bytesless_normal):
- lea 128(%ecx), %ecx
+ add $128, %ecx
BRANCH_TO_JMPTBL_ENTRY (L(table_16_128bytes))
ALIGN (4)
@@ -393,8 +395,13 @@ L(128bytes_L2_normal):
L(128bytesless_L2_normal):
BRANCH_TO_JMPTBL_ENTRY (L(table_16_128bytes))
+ RESTORE_EBX_STATE
L(128bytesormore_nt_start):
sub %ebx, %ecx
+ mov %ebx, %eax
+ and $0x7f, %eax
+ add %eax, %ecx
+ movd %xmm0, %eax
ALIGN (4)
L(128bytesormore_shared_cache_loop):
prefetcht0 0x3c0(%edx)
diff --git a/libc/arch-x86/string/sse2-strlen-atom.S b/libc/arch-x86/string/sse2-strlen-atom.S
new file mode 100644
index 000000000..891186822
--- /dev/null
+++ b/libc/arch-x86/string/sse2-strlen-atom.S
@@ -0,0 +1,369 @@
+#define STRLEN sse2_strlen_atom
+
+#ifndef L
+# define L(label) .L##label
+#endif
+
+#ifndef cfi_startproc
+# define cfi_startproc .cfi_startproc
+#endif
+
+#ifndef cfi_endproc
+# define cfi_endproc .cfi_endproc
+#endif
+
+#ifndef cfi_rel_offset
+# define cfi_rel_offset(reg, off) .cfi_rel_offset reg, off
+#endif
+
+#ifndef cfi_restore
+# define cfi_restore(reg) .cfi_restore reg
+#endif
+
+#ifndef cfi_adjust_cfa_offset
+# define cfi_adjust_cfa_offset(off) .cfi_adjust_cfa_offset off
+#endif
+
+#ifndef cfi_remember_state
+# define cfi_remember_state .cfi_remember_state
+#endif
+
+#ifndef cfi_restore_state
+# define cfi_restore_state .cfi_restore_state
+#endif
+
+#ifndef ENTRY
+# define ENTRY(name) \
+ .type name, @function; \
+ .globl name; \
+ .p2align 4; \
+name: \
+ cfi_startproc
+#endif
+
+#ifndef END
+# define END(name) \
+ cfi_endproc; \
+ .size name, .-name
+#endif
+
+#define CFI_PUSH(REG) \
+ cfi_adjust_cfa_offset (4); \
+ cfi_rel_offset (REG, 0)
+
+#define CFI_POP(REG) \
+ cfi_adjust_cfa_offset (-4); \
+ cfi_restore (REG)
+
+#define PUSH(REG) pushl REG; CFI_PUSH (REG)
+#define POP(REG) popl REG; CFI_POP (REG)
+#define PARMS 4
+#define STR PARMS
+#define ENTRANCE
+#define RETURN ret
+
+ .text
+ENTRY (STRLEN)
+ ENTRANCE
+ mov STR(%esp), %edx
+ xor %eax, %eax
+ cmpb $0, (%edx)
+ jz L(exit_tail0)
+ cmpb $0, 1(%edx)
+ jz L(exit_tail1)
+ cmpb $0, 2(%edx)
+ jz L(exit_tail2)
+ cmpb $0, 3(%edx)
+ jz L(exit_tail3)
+ cmpb $0, 4(%edx)
+ jz L(exit_tail4)
+ cmpb $0, 5(%edx)
+ jz L(exit_tail5)
+ cmpb $0, 6(%edx)
+ jz L(exit_tail6)
+ cmpb $0, 7(%edx)
+ jz L(exit_tail7)
+ cmpb $0, 8(%edx)
+ jz L(exit_tail8)
+ cmpb $0, 9(%edx)
+ jz L(exit_tail9)
+ cmpb $0, 10(%edx)
+ jz L(exit_tail10)
+ cmpb $0, 11(%edx)
+ jz L(exit_tail11)
+ cmpb $0, 12(%edx)
+ jz L(exit_tail12)
+ cmpb $0, 13(%edx)
+ jz L(exit_tail13)
+ cmpb $0, 14(%edx)
+ jz L(exit_tail14)
+ cmpb $0, 15(%edx)
+ jz L(exit_tail15)
+ pxor %xmm0, %xmm0
+ mov %edx, %eax
+ mov %edx, %ecx
+ and $-16, %eax
+ add $16, %ecx
+ add $16, %eax
+
+ pcmpeqb (%eax), %xmm0
+ pmovmskb %xmm0, %edx
+ pxor %xmm1, %xmm1
+ test %edx, %edx
+ lea 16(%eax), %eax
+ jnz L(exit)
+
+ pcmpeqb (%eax), %xmm1
+ pmovmskb %xmm1, %edx
+ pxor %xmm2, %xmm2
+ test %edx, %edx
+ lea 16(%eax), %eax
+ jnz L(exit)
+
+
+ pcmpeqb (%eax), %xmm2
+ pmovmskb %xmm2, %edx
+ pxor %xmm3, %xmm3
+ test %edx, %edx
+ lea 16(%eax), %eax
+ jnz L(exit)
+
+ pcmpeqb (%eax), %xmm3
+ pmovmskb %xmm3, %edx
+ test %edx, %edx
+ lea 16(%eax), %eax
+ jnz L(exit)
+
+ pcmpeqb (%eax), %xmm0
+ pmovmskb %xmm0, %edx
+ test %edx, %edx
+ lea 16(%eax), %eax
+ jnz L(exit)
+
+ pcmpeqb (%eax), %xmm1
+ pmovmskb %xmm1, %edx
+ test %edx, %edx
+ lea 16(%eax), %eax
+ jnz L(exit)
+
+ pcmpeqb (%eax), %xmm2
+ pmovmskb %xmm2, %edx
+ test %edx, %edx
+ lea 16(%eax), %eax
+ jnz L(exit)
+
+ pcmpeqb (%eax), %xmm3
+ pmovmskb %xmm3, %edx
+ test %edx, %edx
+ lea 16(%eax), %eax
+ jnz L(exit)
+
+ pcmpeqb (%eax), %xmm0
+ pmovmskb %xmm0, %edx
+ test %edx, %edx
+ lea 16(%eax), %eax
+ jnz L(exit)
+
+ pcmpeqb (%eax), %xmm1
+ pmovmskb %xmm1, %edx
+ test %edx, %edx
+ lea 16(%eax), %eax
+ jnz L(exit)
+
+ pcmpeqb (%eax), %xmm2
+ pmovmskb %xmm2, %edx
+ test %edx, %edx
+ lea 16(%eax), %eax
+ jnz L(exit)
+
+ pcmpeqb (%eax), %xmm3
+ pmovmskb %xmm3, %edx
+ test %edx, %edx
+ lea 16(%eax), %eax
+ jnz L(exit)
+
+ pcmpeqb (%eax), %xmm0
+ pmovmskb %xmm0, %edx
+ test %edx, %edx
+ lea 16(%eax), %eax
+ jnz L(exit)
+
+ pcmpeqb (%eax), %xmm1
+ pmovmskb %xmm1, %edx
+ test %edx, %edx
+ lea 16(%eax), %eax
+ jnz L(exit)
+
+ pcmpeqb (%eax), %xmm2
+ pmovmskb %xmm2, %edx
+ test %edx, %edx
+ lea 16(%eax), %eax
+ jnz L(exit)
+
+ pcmpeqb (%eax), %xmm3
+ pmovmskb %xmm3, %edx
+ test %edx, %edx
+ lea 16(%eax), %eax
+ jnz L(exit)
+
+ and $-0x40, %eax
+ PUSH (%esi)
+ PUSH (%edi)
+ PUSH (%ebx)
+ PUSH (%ebp)
+ xor %ebp, %ebp
+L(aligned_64):
+ pcmpeqb (%eax), %xmm0
+ pcmpeqb 16(%eax), %xmm1
+ pcmpeqb 32(%eax), %xmm2
+ pcmpeqb 48(%eax), %xmm3
+ pmovmskb %xmm0, %edx
+ pmovmskb %xmm1, %esi
+ pmovmskb %xmm2, %edi
+ pmovmskb %xmm3, %ebx
+ or %edx, %ebp
+ or %esi, %ebp
+ or %edi, %ebp
+ or %ebx, %ebp
+ lea 64(%eax), %eax
+ jz L(aligned_64)
+L(48leave):
+ test %edx, %edx
+ jnz L(aligned_64_exit_16)
+ test %esi, %esi
+ jnz L(aligned_64_exit_32)
+ test %edi, %edi
+ jnz L(aligned_64_exit_48)
+ mov %ebx, %edx
+ lea (%eax), %eax
+ jmp L(aligned_64_exit)
+L(aligned_64_exit_48):
+ lea -16(%eax), %eax
+ mov %edi, %edx
+ jmp L(aligned_64_exit)
+L(aligned_64_exit_32):
+ lea -32(%eax), %eax
+ mov %esi, %edx
+ jmp L(aligned_64_exit)
+L(aligned_64_exit_16):
+ lea -48(%eax), %eax
+L(aligned_64_exit):
+ POP (%ebp)
+ POP (%ebx)
+ POP (%edi)
+ POP (%esi)
+L(exit):
+ sub %ecx, %eax
+ test %dl, %dl
+ jz L(exit_high)
+ test $0x01, %dl
+ jnz L(exit_tail0)
+
+ test $0x02, %dl
+ jnz L(exit_tail1)
+
+ test $0x04, %dl
+ jnz L(exit_tail2)
+
+ test $0x08, %dl
+ jnz L(exit_tail3)
+
+ test $0x10, %dl
+ jnz L(exit_tail4)
+
+ test $0x20, %dl
+ jnz L(exit_tail5)
+
+ test $0x40, %dl
+ jnz L(exit_tail6)
+ add $7, %eax
+L(exit_tail0):
+ RETURN
+
+L(exit_high):
+ add $8, %eax
+ test $0x01, %dh
+ jnz L(exit_tail0)
+
+ test $0x02, %dh
+ jnz L(exit_tail1)
+
+ test $0x04, %dh
+ jnz L(exit_tail2)
+
+ test $0x08, %dh
+ jnz L(exit_tail3)
+
+ test $0x10, %dh
+ jnz L(exit_tail4)
+
+ test $0x20, %dh
+ jnz L(exit_tail5)
+
+ test $0x40, %dh
+ jnz L(exit_tail6)
+ add $7, %eax
+ RETURN
+
+ .p2align 4
+L(exit_tail1):
+ add $1, %eax
+ RETURN
+
+L(exit_tail2):
+ add $2, %eax
+ RETURN
+
+L(exit_tail3):
+ add $3, %eax
+ RETURN
+
+L(exit_tail4):
+ add $4, %eax
+ RETURN
+
+L(exit_tail5):
+ add $5, %eax
+ RETURN
+
+L(exit_tail6):
+ add $6, %eax
+ RETURN
+
+L(exit_tail7):
+ add $7, %eax
+ RETURN
+
+L(exit_tail8):
+ add $8, %eax
+ RETURN
+
+L(exit_tail9):
+ add $9, %eax
+ RETURN
+
+L(exit_tail10):
+ add $10, %eax
+ RETURN
+
+L(exit_tail11):
+ add $11, %eax
+ RETURN
+
+L(exit_tail12):
+ add $12, %eax
+ RETURN
+
+L(exit_tail13):
+ add $13, %eax
+ RETURN
+
+L(exit_tail14):
+ add $14, %eax
+ RETURN
+
+L(exit_tail15):
+ add $15, %eax
+ ret
+
+END (STRLEN)
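
For readers following the new assembly: the core idea is to compare 16 bytes at a time against zero with pcmpeqb, collapse the comparison into a bit mask with pmovmskb, and stop at the first nonzero mask. A rough C rendering with SSE2 intrinsics is sketched below; it is an illustration only and omits the byte-wise head checks and the unrolled 64-byte main loop of the real routine. The aligned loads are safe because a 16-byte aligned block never crosses a page boundary, the same property the assembly relies on.

    #include <emmintrin.h>   /* SSE2 intrinsics */
    #include <stddef.h>
    #include <stdint.h>

    static size_t sse2_strlen_sketch(const char *s) {
        const char *p = (const char *)((uintptr_t)s & ~(uintptr_t)15);
        const __m128i zero = _mm_setzero_si128();

        /* First aligned block: mask out bytes that precede s. */
        unsigned int mask = (unsigned int)_mm_movemask_epi8(
            _mm_cmpeq_epi8(_mm_load_si128((const __m128i *)p), zero));
        mask &= 0xFFFFu << (s - p);

        while (mask == 0) {
            p += 16;
            mask = (unsigned int)_mm_movemask_epi8(
                _mm_cmpeq_epi8(_mm_load_si128((const __m128i *)p), zero));
        }
        return (size_t)(p + __builtin_ctz(mask) - s);
    }
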
diff --git a/libc/arch-x86/string/ssse3-memcmp3.S b/libc/arch-x86/string/ssse3-memcmp3-new.S
index a7ce819ff..5ad879150 100644
--- a/libc/arch-x86/string/ssse3-memcmp3.S
+++ b/libc/arch-x86/string/ssse3-memcmp3-new.S
@@ -53,13 +53,21 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#endif
#ifndef cfi_restore
-# define cfi_restore(reg) .cfi_restore (reg)
+# define cfi_restore(reg) .cfi_restore reg
#endif
#ifndef cfi_adjust_cfa_offset
# define cfi_adjust_cfa_offset(off) .cfi_adjust_cfa_offset off
#endif
+#ifndef cfi_remember_state
+# define cfi_remember_state .cfi_remember_state
+#endif
+
+#ifndef cfi_restore_state
+# define cfi_restore_state .cfi_restore_state
+#endif
+
#ifndef ENTRY
# define ENTRY(name) \
.type name, @function; \
@@ -91,8 +99,7 @@ name: \
#define BLK2 BLK1+4
#define LEN BLK2+4
#define RETURN_END POP (%edi); POP (%esi); POP (%ebx); ret
-#define RETURN RETURN_END; CFI_PUSH (%ebx); CFI_PUSH (%edi); \
- CFI_PUSH (%esi)
+#define RETURN RETURN_END; cfi_restore_state; cfi_remember_state
.section .text.ssse3,"ax",@progbits
ENTRY (MEMCMP)
@@ -131,6 +138,7 @@ L(48bytesormore):
PUSH (%ebx)
PUSH (%esi)
PUSH (%edi)
+ cfi_remember_state
movdqu (%eax), %xmm3
movdqu (%edx), %xmm0
movl %eax, %edi
@@ -211,8 +219,8 @@ L(shr_0):
POP (%esi)
jmp L(less48bytes)
- CFI_PUSH (%esi)
- CFI_PUSH (%edi)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shr_0_gobble):
lea -48(%ecx), %ecx
@@ -257,8 +265,8 @@ L(shr_0_gobble_loop_next):
POP (%esi)
jmp L(less48bytes)
- CFI_PUSH (%esi)
- CFI_PUSH (%edi)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shr_1):
cmp $80, %ecx
@@ -287,8 +295,8 @@ L(shr_1):
POP (%esi)
jmp L(less48bytes)
- CFI_PUSH (%esi)
- CFI_PUSH (%edi)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shr_1_gobble):
sub $32, %ecx
@@ -340,8 +348,8 @@ L(shr_1_gobble_next):
POP (%esi)
jmp L(less48bytes)
- CFI_PUSH (%esi)
- CFI_PUSH (%edi)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shr_2):
cmp $80, %ecx
@@ -370,8 +378,8 @@ L(shr_2):
POP (%esi)
jmp L(less48bytes)
- CFI_PUSH (%esi)
- CFI_PUSH (%edi)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shr_2_gobble):
sub $32, %ecx
@@ -423,8 +431,8 @@ L(shr_2_gobble_next):
POP (%esi)
jmp L(less48bytes)
- CFI_PUSH (%esi)
- CFI_PUSH (%edi)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shr_3):
cmp $80, %ecx
@@ -453,8 +461,8 @@ L(shr_3):
POP (%esi)
jmp L(less48bytes)
- CFI_PUSH (%esi)
- CFI_PUSH (%edi)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shr_3_gobble):
sub $32, %ecx
@@ -506,8 +514,8 @@ L(shr_3_gobble_next):
POP (%esi)
jmp L(less48bytes)
- CFI_PUSH (%esi)
- CFI_PUSH (%edi)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shr_4):
cmp $80, %ecx
@@ -536,8 +544,8 @@ L(shr_4):
POP (%esi)
jmp L(less48bytes)
- CFI_PUSH (%esi)
- CFI_PUSH (%edi)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shr_4_gobble):
sub $32, %ecx
@@ -589,8 +597,8 @@ L(shr_4_gobble_next):
POP (%esi)
jmp L(less48bytes)
- CFI_PUSH (%esi)
- CFI_PUSH (%edi)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shr_5):
cmp $80, %ecx
@@ -619,8 +627,8 @@ L(shr_5):
POP (%esi)
jmp L(less48bytes)
- CFI_PUSH (%esi)
- CFI_PUSH (%edi)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shr_5_gobble):
sub $32, %ecx
@@ -672,8 +680,8 @@ L(shr_5_gobble_next):
POP (%esi)
jmp L(less48bytes)
- CFI_PUSH (%esi)
- CFI_PUSH (%edi)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shr_6):
cmp $80, %ecx
@@ -702,8 +710,8 @@ L(shr_6):
POP (%esi)
jmp L(less48bytes)
- CFI_PUSH (%esi)
- CFI_PUSH (%edi)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shr_6_gobble):
sub $32, %ecx
@@ -755,8 +763,8 @@ L(shr_6_gobble_next):
POP (%esi)
jmp L(less48bytes)
- CFI_PUSH (%esi)
- CFI_PUSH (%edi)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shr_7):
cmp $80, %ecx
@@ -785,8 +793,8 @@ L(shr_7):
POP (%esi)
jmp L(less48bytes)
- CFI_PUSH (%esi)
- CFI_PUSH (%edi)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shr_7_gobble):
sub $32, %ecx
@@ -838,8 +846,8 @@ L(shr_7_gobble_next):
POP (%esi)
jmp L(less48bytes)
- CFI_PUSH (%esi)
- CFI_PUSH (%edi)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shr_8):
cmp $80, %ecx
@@ -868,8 +876,8 @@ L(shr_8):
POP (%esi)
jmp L(less48bytes)
- CFI_PUSH (%esi)
- CFI_PUSH (%edi)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shr_8_gobble):
sub $32, %ecx
@@ -921,8 +929,8 @@ L(shr_8_gobble_next):
POP (%esi)
jmp L(less48bytes)
- CFI_PUSH (%esi)
- CFI_PUSH (%edi)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shr_9):
cmp $80, %ecx
@@ -951,8 +959,8 @@ L(shr_9):
POP (%esi)
jmp L(less48bytes)
- CFI_PUSH (%esi)
- CFI_PUSH (%edi)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shr_9_gobble):
sub $32, %ecx
@@ -1004,8 +1012,8 @@ L(shr_9_gobble_next):
POP (%esi)
jmp L(less48bytes)
- CFI_PUSH (%esi)
- CFI_PUSH (%edi)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shr_10):
cmp $80, %ecx
@@ -1034,8 +1042,8 @@ L(shr_10):
POP (%esi)
jmp L(less48bytes)
- CFI_PUSH (%esi)
- CFI_PUSH (%edi)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shr_10_gobble):
sub $32, %ecx
@@ -1087,8 +1095,8 @@ L(shr_10_gobble_next):
POP (%esi)
jmp L(less48bytes)
- CFI_PUSH (%esi)
- CFI_PUSH (%edi)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shr_11):
cmp $80, %ecx
@@ -1117,8 +1125,8 @@ L(shr_11):
POP (%esi)
jmp L(less48bytes)
- CFI_PUSH (%esi)
- CFI_PUSH (%edi)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shr_11_gobble):
sub $32, %ecx
@@ -1170,8 +1178,8 @@ L(shr_11_gobble_next):
POP (%esi)
jmp L(less48bytes)
- CFI_PUSH (%esi)
- CFI_PUSH (%edi)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shr_12):
cmp $80, %ecx
@@ -1200,8 +1208,8 @@ L(shr_12):
POP (%esi)
jmp L(less48bytes)
- CFI_PUSH (%esi)
- CFI_PUSH (%edi)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shr_12_gobble):
sub $32, %ecx
@@ -1253,8 +1261,8 @@ L(shr_12_gobble_next):
POP (%esi)
jmp L(less48bytes)
- CFI_PUSH (%esi)
- CFI_PUSH (%edi)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shr_13):
cmp $80, %ecx
@@ -1283,8 +1291,8 @@ L(shr_13):
POP (%esi)
jmp L(less48bytes)
- CFI_PUSH (%esi)
- CFI_PUSH (%edi)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shr_13_gobble):
sub $32, %ecx
@@ -1336,8 +1344,8 @@ L(shr_13_gobble_next):
POP (%esi)
jmp L(less48bytes)
- CFI_PUSH (%esi)
- CFI_PUSH (%edi)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shr_14):
cmp $80, %ecx
@@ -1366,8 +1374,8 @@ L(shr_14):
POP (%esi)
jmp L(less48bytes)
- CFI_PUSH (%esi)
- CFI_PUSH (%edi)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shr_14_gobble):
sub $32, %ecx
@@ -1419,8 +1427,8 @@ L(shr_14_gobble_next):
POP (%esi)
jmp L(less48bytes)
- CFI_PUSH (%esi)
- CFI_PUSH (%edi)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shr_15):
cmp $80, %ecx
@@ -1449,8 +1457,8 @@ L(shr_15):
POP (%esi)
jmp L(less48bytes)
- CFI_PUSH (%esi)
- CFI_PUSH (%edi)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shr_15_gobble):
sub $32, %ecx
@@ -1502,8 +1510,8 @@ L(shr_15_gobble_next):
POP (%esi)
jmp L(less48bytes)
- CFI_PUSH (%esi)
- CFI_PUSH (%edi)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(exit):
pmovmskb %xmm1, %ebx
diff --git a/libc/arch-x86/string/ssse3-memcpy5.S b/libc/arch-x86/string/ssse3-memcpy5.S
index 6b9040266..b4773dfb1 100644
--- a/libc/arch-x86/string/ssse3-memcpy5.S
+++ b/libc/arch-x86/string/ssse3-memcpy5.S
@@ -53,13 +53,21 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#endif
#ifndef cfi_restore
-# define cfi_restore(reg) .cfi_restore (reg)
+# define cfi_restore(reg) .cfi_restore reg
#endif
#ifndef cfi_adjust_cfa_offset
# define cfi_adjust_cfa_offset(off) .cfi_adjust_cfa_offset off
#endif
+#ifndef cfi_remember_state
+# define cfi_remember_state .cfi_remember_state
+#endif
+
+#ifndef cfi_restore_state
+# define cfi_restore_state .cfi_restore_state
+#endif
+
#ifndef ENTRY
# define ENTRY(name) \
.type name, @function; \
@@ -118,8 +126,8 @@ name: \
jmp *%ebx
# define BRANCH_TO_JMPTBL_ENTRY_VALUE(TABLE) \
- addl $(TABLE - .), %ebx
-
+ addl $(TABLE - .), %ebx
+
# define BRANCH_TO_JMPTBL_ENTRY_TAIL(TABLE, INDEX, SCALE) \
addl (%ebx,INDEX,SCALE), %ebx; \
/* We loaded the jump table. Go. */ \
@@ -146,7 +154,7 @@ __i686.get_pc_thunk.bx:
# define BRANCH_TO_JMPTBL_ENTRY(TABLE, INDEX, SCALE) \
jmp *TABLE(,INDEX,SCALE)
-# define BRANCH_TO_JMPTBL_ENTRY_VALUE(TABLE)
+# define BRANCH_TO_JMPTBL_ENTRY_VALUE(TABLE)
# define BRANCH_TO_JMPTBL_ENTRY_TAIL(TABLE, INDEX, SCALE) \
jmp *TABLE(,INDEX,SCALE)
@@ -198,6 +206,7 @@ L(48bytesormore):
movl %edx, %edi
and $-16, %edx
PUSH (%esi)
+ cfi_remember_state
add $16, %edx
movl %edi, %esi
sub %edx, %edi
@@ -223,6 +232,8 @@ L(48bytesormore):
BRANCH_TO_JMPTBL_ENTRY (L(shl_table), %edi, 4)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shl_0):
movdqu %xmm0, (%esi)
@@ -270,6 +281,7 @@ L(shl_0_end):
POP (%edi)
BRANCH_TO_JMPTBL_ENTRY (L(table_48bytes_fwd), %ecx, 4)
+ CFI_PUSH (%edi)
L(shl_0_gobble):
#ifdef DATA_CACHE_SIZE_HALF
@@ -419,7 +431,8 @@ L(shl_0_mem_less_16bytes):
add %ecx, %eax
BRANCH_TO_JMPTBL_ENTRY (L(table_48bytes_fwd), %ecx, 4)
-
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shl_1):
BRANCH_TO_JMPTBL_ENTRY_VALUE(L(table_48bytes_fwd))
@@ -463,6 +476,8 @@ L(shl_1_end):
POP (%edi)
BRANCH_TO_JMPTBL_ENTRY_TAIL(L(table_48bytes_fwd), %ecx, 4)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shl_2):
BRANCH_TO_JMPTBL_ENTRY_VALUE(L(table_48bytes_fwd))
@@ -506,6 +521,8 @@ L(shl_2_end):
POP (%edi)
BRANCH_TO_JMPTBL_ENTRY_TAIL(L(table_48bytes_fwd), %ecx, 4)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shl_3):
BRANCH_TO_JMPTBL_ENTRY_VALUE(L(table_48bytes_fwd))
@@ -549,6 +566,8 @@ L(shl_3_end):
POP (%edi)
BRANCH_TO_JMPTBL_ENTRY_TAIL(L(table_48bytes_fwd), %ecx, 4)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shl_4):
BRANCH_TO_JMPTBL_ENTRY_VALUE(L(table_48bytes_fwd))
@@ -592,6 +611,8 @@ L(shl_4_end):
POP (%edi)
BRANCH_TO_JMPTBL_ENTRY_TAIL(L(table_48bytes_fwd), %ecx, 4)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shl_5):
BRANCH_TO_JMPTBL_ENTRY_VALUE(L(table_48bytes_fwd))
@@ -635,7 +656,8 @@ L(shl_5_end):
POP (%edi)
BRANCH_TO_JMPTBL_ENTRY_TAIL(L(table_48bytes_fwd), %ecx, 4)
-
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shl_6):
BRANCH_TO_JMPTBL_ENTRY_VALUE(L(table_48bytes_fwd))
@@ -679,6 +701,8 @@ L(shl_6_end):
POP (%edi)
BRANCH_TO_JMPTBL_ENTRY_TAIL(L(table_48bytes_fwd), %ecx, 4)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shl_7):
BRANCH_TO_JMPTBL_ENTRY_VALUE(L(table_48bytes_fwd))
@@ -722,6 +746,8 @@ L(shl_7_end):
POP (%edi)
BRANCH_TO_JMPTBL_ENTRY_TAIL(L(table_48bytes_fwd), %ecx, 4)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shl_8):
BRANCH_TO_JMPTBL_ENTRY_VALUE(L(table_48bytes_fwd))
@@ -765,6 +791,8 @@ L(shl_8_end):
POP (%edi)
BRANCH_TO_JMPTBL_ENTRY_TAIL(L(table_48bytes_fwd), %ecx, 4)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shl_9):
BRANCH_TO_JMPTBL_ENTRY_VALUE(L(table_48bytes_fwd))
@@ -808,6 +836,8 @@ L(shl_9_end):
POP (%edi)
BRANCH_TO_JMPTBL_ENTRY_TAIL(L(table_48bytes_fwd), %ecx, 4)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shl_10):
BRANCH_TO_JMPTBL_ENTRY_VALUE(L(table_48bytes_fwd))
@@ -851,6 +881,8 @@ L(shl_10_end):
POP (%edi)
BRANCH_TO_JMPTBL_ENTRY_TAIL(L(table_48bytes_fwd), %ecx, 4)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shl_11):
BRANCH_TO_JMPTBL_ENTRY_VALUE(L(table_48bytes_fwd))
@@ -894,6 +926,8 @@ L(shl_11_end):
POP (%edi)
BRANCH_TO_JMPTBL_ENTRY_TAIL(L(table_48bytes_fwd), %ecx, 4)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shl_12):
BRANCH_TO_JMPTBL_ENTRY_VALUE(L(table_48bytes_fwd))
@@ -937,6 +971,8 @@ L(shl_12_end):
POP (%edi)
BRANCH_TO_JMPTBL_ENTRY_TAIL(L(table_48bytes_fwd), %ecx, 4)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shl_13):
BRANCH_TO_JMPTBL_ENTRY_VALUE(L(table_48bytes_fwd))
@@ -980,6 +1016,8 @@ L(shl_13_end):
POP (%edi)
BRANCH_TO_JMPTBL_ENTRY_TAIL(L(table_48bytes_fwd), %ecx, 4)
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shl_14):
BRANCH_TO_JMPTBL_ENTRY_VALUE(L(table_48bytes_fwd))
@@ -1023,7 +1061,8 @@ L(shl_14_end):
POP (%edi)
BRANCH_TO_JMPTBL_ENTRY_TAIL(L(table_48bytes_fwd), %ecx, 4)
-
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(shl_15):
BRANCH_TO_JMPTBL_ENTRY_VALUE(L(table_48bytes_fwd))
@@ -1264,8 +1303,10 @@ L(fwd_write_3bytes):
movl DEST(%esp), %eax
# endif
#endif
- RETURN
+ RETURN_END
+ cfi_restore_state
+ cfi_remember_state
ALIGN (4)
L(large_page):
movdqu (%eax), %xmm1
@@ -1688,6 +1729,7 @@ L(bk_write_less32bytes):
L(bk_write_less32bytes_2):
BRANCH_TO_JMPTBL_ENTRY (L(table_48_bytes_bwd), %ecx, 4)
+ CFI_PUSH (%esi)
ALIGN (4)
L(bk_align):
cmp $8, %ecx
diff --git a/libc/arch-x86/string/ssse3-strcmp.S b/libc/arch-x86/string/ssse3-strcmp-latest.S
index cfb2e9ff2..69c6425be 100644
--- a/libc/arch-x86/string/ssse3-strcmp.S
+++ b/libc/arch-x86/string/ssse3-strcmp-latest.S
@@ -45,13 +45,21 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#endif
#ifndef cfi_restore
-# define cfi_restore(reg) .cfi_restore (reg)
+# define cfi_restore(reg) .cfi_restore reg
#endif
#ifndef cfi_adjust_cfa_offset
# define cfi_adjust_cfa_offset(off) .cfi_adjust_cfa_offset off
#endif
+#ifndef cfi_remember_state
+# define cfi_remember_state .cfi_remember_state
+#endif
+
+#ifndef cfi_restore_state
+# define cfi_restore_state .cfi_restore_state
+#endif
+
#ifndef ENTRY
# define ENTRY(name) \
.type name, @function; \
@@ -201,6 +209,9 @@ L(crosspage):
PUSH (%ebx)
PUSH (%edi)
PUSH (%esi)
+#ifdef USE_AS_STRNCMP
+ cfi_remember_state
+#endif
movl %edx, %edi
movl %eax, %ecx
@@ -1521,17 +1532,18 @@ L(gobble_ashr_12):
sub $0xffff, %esi
jnz L(exit)
+#ifdef USE_AS_STRNCMP
+ cmp $16, %ebp
+ lea -16(%ebp), %ebp
+ jbe L(more8byteseq)
+#endif
+
add $16, %ecx
movdqa %xmm4, %xmm3
add $16, %edi
jg L(nibble_ashr_12)
-#ifdef USE_AS_STRNCMP
- cmp $16, %ebp
- lea -16(%ebp), %ebp
- jbe L(more8byteseq)
-#endif
movdqa (%eax, %ecx), %xmm1
movdqa (%edx, %ecx), %xmm2
movdqa %xmm2, %xmm4
@@ -2087,10 +2099,7 @@ L(neq_bigger):
RETURN
#ifdef USE_AS_STRNCMP
- CFI_PUSH (%ebx)
- CFI_PUSH (%edi)
- CFI_PUSH (%esi)
-
+ cfi_restore_state
.p2align 4
L(more8byteseq):
POP (%esi)
diff --git a/libc/arch-x86/string/strcmp_wrapper.S b/libc/arch-x86/string/strcmp_wrapper.S
index 69b7f0bed..20f3064e5 100644
--- a/libc/arch-x86/string/strcmp_wrapper.S
+++ b/libc/arch-x86/string/strcmp_wrapper.S
@@ -31,7 +31,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#if defined(USE_SSSE3)
# define ssse3_strcmp_latest strcmp
-# include "ssse3-strcmp.S"
+# include "ssse3-strcmp-latest.S"
#else
diff --git a/libc/arch-x86/string/strlen_wrapper.S b/libc/arch-x86/string/strlen_wrapper.S
new file mode 100644
index 000000000..e62786b9d
--- /dev/null
+++ b/libc/arch-x86/string/strlen_wrapper.S
@@ -0,0 +1,40 @@
+/*
+Copyright (c) 2010, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+
+ * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#if defined(USE_SSE2)
+
+# define sse2_strlen_atom strlen
+# include "sse2-strlen-atom.S"
+
+#else
+
+# include "strlen.S"
+
+#endif
diff --git a/libc/arch-x86/string/strncmp_wrapper.S b/libc/arch-x86/string/strncmp_wrapper.S
index 205018420..191d7555e 100644
--- a/libc/arch-x86/string/strncmp_wrapper.S
+++ b/libc/arch-x86/string/strncmp_wrapper.S
@@ -32,7 +32,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# define USE_AS_STRNCMP
# define ssse3_strcmp_latest strncmp
-# include "ssse3-strcmp.S"
+# include "ssse3-strcmp-latest.S"
#else
diff --git a/libc/bionic/cpuacct.c b/libc/bionic/cpuacct.c
index 73170739e..1321d0eec 100644
--- a/libc/bionic/cpuacct.c
+++ b/libc/bionic/cpuacct.c
@@ -30,16 +30,19 @@
#include <errno.h>
#include <sys/stat.h>
#include "cpuacct.h"
+#include <fcntl.h>
int cpuacct_add(uid_t uid)
{
int count;
- FILE *fp;
+ int fd;
char buf[80];
+ ssize_t n;
+ int ret = 0;
count = snprintf(buf, sizeof(buf), "/acct/uid/%d/tasks", uid);
- fp = fopen(buf, "w+");
- if (!fp) {
+ fd = open(buf, O_RDWR | O_CREAT, 0666);
+ if (fd == -1) {
/* Note: sizeof("tasks") returns 6, which includes the NULL char */
buf[count - sizeof("tasks")] = 0;
if (mkdir(buf, 0775) < 0)
@@ -47,14 +50,19 @@ int cpuacct_add(uid_t uid)
/* Note: sizeof("tasks") returns 6, which includes the NULL char */
buf[count - sizeof("tasks")] = '/';
- fp = fopen(buf, "w+");
+ fd = open(buf, O_RDWR | O_CREAT, 0666);
}
- if (!fp)
+ if (fd == -1)
return -errno;
- fprintf(fp, "0");
- if (fclose(fp))
+ n = TEMP_FAILURE_RETRY(write(fd, "0", 1));
+ if (n < 0)
+ ret = -errno;
+ else if (n == 0)
+ ret = -EIO;
+
+ if (TEMP_FAILURE_RETRY(close(fd)) == -1)
return -errno;
- return 0;
+ return ret;
}
diff --git a/libc/bionic/dlmalloc.c b/libc/bionic/dlmalloc.c
index 19fbb7521..ff94e2972 100644
--- a/libc/bionic/dlmalloc.c
+++ b/libc/bionic/dlmalloc.c
@@ -465,6 +465,13 @@ DEFAULT_MMAP_THRESHOLD default: 256K
*/
+#ifdef ANDROID
+#define USE_BUILTIN_FFS 1
+#ifdef __arm__
+#include <machine/cpu-features.h>
+#endif
+#endif /* ANDROID */
+
#ifndef WIN32
#ifdef _WIN32
#define WIN32 1
@@ -2394,6 +2401,17 @@ static size_t traverse_and_check(mstate m);
}
#else /* GNUC */
+#if defined(__ARM_ARCH__) && __ARM_ARCH__ >= 7
+#define compute_bit2idx(X, I) \
+{ \
+ unsigned int J; \
+ __asm__ ("rbit %0, %1\n" \
+ "clz %0, %0" \
+ : "=r" (J) : "r" (X)); \
+ I = (bindex_t) J; \
+}
+
+#else /* ARM_ARCH */
#if USE_BUILTIN_FFS
#define compute_bit2idx(X, I) I = ffs(X)-1
@@ -2409,6 +2427,7 @@ static size_t traverse_and_check(mstate m);
N += K = Y >> (1-0) & 1; Y >>= K;\
I = (bindex_t)(N + Y);\
}
+#endif /* ARM_ARCH */
#endif /* USE_BUILTIN_FFS */
#endif /* GNUC */
@@ -4542,8 +4561,25 @@ void dlmalloc_stats() {
size_t dlmalloc_usable_size(void* mem) {
if (mem != 0) {
mchunkptr p = mem2chunk(mem);
- if (cinuse(p))
- return chunksize(p) - overhead_for(p);
+
+#if FOOTERS
+ mstate fm = get_mstate_for(p);
+ if (!ok_magic(fm)) {
+ USAGE_ERROR_ACTION(fm, p);
+ return;
+ }
+#else /* FOOTERS */
+#define fm gm
+#endif
+
+ if (!PREACTION(fm)) {
+ if (cinuse(p)) {
+ size_t ret = chunksize(p) - overhead_for(p);
+ POSTACTION(fm);
+ return ret;
+ }
+ POSTACTION(fm);
+ }
}
return 0;
}
@@ -5147,32 +5183,35 @@ void dlmalloc_walk_heap(void(*handler)(const void *chunkptr, size_t chunklen,
mstate m = (mstate)gm;
#endif
- s = &m->seg;
- while (s != 0) {
- mchunkptr p = align_as_chunk(s->base);
- while (segment_holds(s, p) &&
- p != m->top && p->head != FENCEPOST_HEAD) {
- void *chunkptr, *userptr;
- size_t chunklen, userlen;
- chunkptr = p;
- chunklen = chunksize(p);
- if (cinuse(p)) {
- userptr = chunk2mem(p);
- userlen = chunklen - overhead_for(p);
+ if (!PREACTION(m)) {
+ s = &m->seg;
+ while (s != 0) {
+ mchunkptr p = align_as_chunk(s->base);
+ while (segment_holds(s, p) &&
+ p != m->top && p->head != FENCEPOST_HEAD) {
+ void *chunkptr, *userptr;
+ size_t chunklen, userlen;
+ chunkptr = p;
+ chunklen = chunksize(p);
+ if (cinuse(p)) {
+ userptr = chunk2mem(p);
+ userlen = chunklen - overhead_for(p);
+ }
+ else {
+ userptr = NULL;
+ userlen = 0;
+ }
+ handler(chunkptr, chunklen, userptr, userlen, harg);
+ p = next_chunk(p);
}
- else {
- userptr = NULL;
- userlen = 0;
+ if (p == m->top) {
+ /* The top chunk is just a big free chunk for our purposes.
+ */
+ handler(m->top, m->topsize, NULL, 0, harg);
}
- handler(chunkptr, chunklen, userptr, userlen, harg);
- p = next_chunk(p);
- }
- if (p == m->top) {
- /* The top chunk is just a big free chunk for our purposes.
- */
- handler(m->top, m->topsize, NULL, 0, harg);
+ s = s->next;
}
- s = s->next;
+ POSTACTION(m);
}
}
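
On the compute_bit2idx change: both the new ARMv7 rbit+clz sequence and the USE_BUILTIN_FFS path compute the index of the least-significant set bit of a nonzero word. A small sanity sketch in plain C (hypothetical test code, not part of the patch):

    #include <stdio.h>
    #include <strings.h>   /* ffs() */

    int main(void) {
        unsigned int x = 0x48;                 /* bits 3 and 6 set */
        printf("%d\n", ffs(x) - 1);            /* 3: the USE_BUILTIN_FFS path */
        printf("%d\n", __builtin_ctz(x));      /* 3: what rbit + clz computes */
        return 0;
    }
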
diff --git a/libc/bionic/libc_init_common.h b/libc/bionic/libc_init_common.h
index 6016d4d2d..35050a1da 100644
--- a/libc/bionic/libc_init_common.h
+++ b/libc/bionic/libc_init_common.h
@@ -35,7 +35,9 @@ typedef struct
void (**preinit_array)(void);
void (**init_array)(void);
void (**fini_array)(void);
+#ifndef __i386__
void (**ctors_array)(void);
+#endif
} structors_array_t;
extern void __libc_init_common(uintptr_t *elfdata);
diff --git a/libc/bionic/libc_init_static.c b/libc/bionic/libc_init_static.c
index 3634c7ba8..a2c11a9d0 100644
--- a/libc/bionic/libc_init_static.c
+++ b/libc/bionic/libc_init_static.c
@@ -75,8 +75,10 @@ __noreturn void __libc_init(uintptr_t *elfdata,
/* pre-init array. */
call_array(structors->preinit_array);
+#ifndef __i386__
/* .ctors section initializers, for non-arm-eabi ABIs */
call_array(structors->ctors_array);
+#endif
// call static constructors
call_array(structors->init_array);
diff --git a/libc/bionic/pthread.c b/libc/bionic/pthread.c
index fcccbed89..6b0183074 100644
--- a/libc/bionic/pthread.c
+++ b/libc/bionic/pthread.c
@@ -68,7 +68,13 @@ int __futex_wait_ex(volatile void *ftx, int pshared, int val, const struct time
#define __likely(cond) __builtin_expect(!!(cond), 1)
#define __unlikely(cond) __builtin_expect(!!(cond), 0)
-void _thread_created_hook(pid_t thread_id) __attribute__((noinline));
+#ifdef __i386__
+#define ATTRIBUTES __attribute__((noinline)) __attribute__((fastcall))
+#else
+#define ATTRIBUTES __attribute__((noinline))
+#endif
+
+void ATTRIBUTES _thread_created_hook(pid_t thread_id);
#define PTHREAD_ATTR_FLAG_DETACHED 0x00000001
#define PTHREAD_ATTR_FLAG_USER_STACK 0x00000002
diff --git a/libc/bionic/ptrace.c b/libc/bionic/ptrace.c
index b1ca00ccd..0bb1acd78 100644
--- a/libc/bionic/ptrace.c
+++ b/libc/bionic/ptrace.c
@@ -1,63 +1,68 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
- * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
- * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-#include <sys/types.h>
-#include <sys/ptrace.h>
-
-extern long __ptrace(int request, pid_t pid, void *addr, void *data);
-
-long ptrace(int request, pid_t pid, void * addr, void * data)
-{
- switch (request) {
- case PTRACE_PEEKUSR:
- case PTRACE_PEEKTEXT:
- case PTRACE_PEEKDATA:
- {
- long word;
- long ret;
-
- ret = __ptrace(request, pid, addr, &word);
- if (ret == 0) {
- return word;
- } else {
- // __ptrace will set errno for us
- return -1;
- }
- }
-
- default:
- return __ptrace(request, pid, addr, data);
- }
-}
-
-/*
- * Hook for gdb to get notified when a thread is created
- */
-void _thread_created_hook(pid_t thread_id) __attribute__((noinline));
-void _thread_created_hook(pid_t thread_id)
-{
-}
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <sys/types.h>
+#include <sys/ptrace.h>
+
+extern long __ptrace(int request, pid_t pid, void *addr, void *data);
+
+long ptrace(int request, pid_t pid, void * addr, void * data)
+{
+ switch (request) {
+ case PTRACE_PEEKUSR:
+ case PTRACE_PEEKTEXT:
+ case PTRACE_PEEKDATA:
+ {
+ long word;
+ long ret;
+
+ ret = __ptrace(request, pid, addr, &word);
+ if (ret == 0) {
+ return word;
+ } else {
+ // __ptrace will set errno for us
+ return -1;
+ }
+ }
+
+ default:
+ return __ptrace(request, pid, addr, data);
+ }
+}
+
+/*
+ * Hook for gdb to get notified when a thread is created
+ */
+#ifdef __i386__
+#define ATTRIBUTES __attribute__((noinline)) __attribute__((fastcall))
+#else
+#define ATTRIBUTES __attribute__((noinline))
+#endif
+
+void ATTRIBUTES _thread_created_hook(pid_t thread_id)
+{
+}
diff --git a/libc/bionic/sha1.c b/libc/bionic/sha1.c
index efa95a55c..a4fbd673b 100644
--- a/libc/bionic/sha1.c
+++ b/libc/bionic/sha1.c
@@ -22,10 +22,7 @@
#include <assert.h>
#include <sha1.h>
#include <string.h>
-
-#if HAVE_NBTOOL_CONFIG_H
-#include "nbtool_config.h"
-#endif
+#include <endian.h>
#if !HAVE_SHA1_H
@@ -36,8 +33,7 @@
* I got the idea of expanding during the round function from SSLeay
*/
#if BYTE_ORDER == LITTLE_ENDIAN
-# define blk0(i) (block->l[i] = (rol(block->l[i],24)&0xFF00FF00) \
- |(rol(block->l[i],8)&0x00FF00FF))
+# define blk0(i) swap32(block->l[i])
#else
# define blk0(i) block->l[i]
#endif
@@ -54,77 +50,17 @@
#define R4(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0xCA62C1D6+rol(v,5);w=rol(w,30);
typedef union {
- u_char c[64];
- u_int l[16];
+ uint8_t c[SHA1_BLOCK_SIZE];
+ uint32_t l[SHA1_BLOCK_SIZE/4];
} CHAR64LONG16;
-/* old sparc64 gcc could not compile this */
-#undef SPARC64_GCC_WORKAROUND
-#if defined(__sparc64__) && defined(__GNUC__) && __GNUC__ < 3
-#define SPARC64_GCC_WORKAROUND
-#endif
-
-#ifdef SPARC64_GCC_WORKAROUND
-void do_R01(u_int32_t *a, u_int32_t *b, u_int32_t *c, u_int32_t *d, u_int32_t *e, CHAR64LONG16 *);
-void do_R2(u_int32_t *a, u_int32_t *b, u_int32_t *c, u_int32_t *d, u_int32_t *e, CHAR64LONG16 *);
-void do_R3(u_int32_t *a, u_int32_t *b, u_int32_t *c, u_int32_t *d, u_int32_t *e, CHAR64LONG16 *);
-void do_R4(u_int32_t *a, u_int32_t *b, u_int32_t *c, u_int32_t *d, u_int32_t *e, CHAR64LONG16 *);
-
-#define nR0(v,w,x,y,z,i) R0(*v,*w,*x,*y,*z,i)
-#define nR1(v,w,x,y,z,i) R1(*v,*w,*x,*y,*z,i)
-#define nR2(v,w,x,y,z,i) R2(*v,*w,*x,*y,*z,i)
-#define nR3(v,w,x,y,z,i) R3(*v,*w,*x,*y,*z,i)
-#define nR4(v,w,x,y,z,i) R4(*v,*w,*x,*y,*z,i)
-
-void
-do_R01(u_int32_t *a, u_int32_t *b, u_int32_t *c, u_int32_t *d, u_int32_t *e, CHAR64LONG16 *block)
-{
- nR0(a,b,c,d,e, 0); nR0(e,a,b,c,d, 1); nR0(d,e,a,b,c, 2); nR0(c,d,e,a,b, 3);
- nR0(b,c,d,e,a, 4); nR0(a,b,c,d,e, 5); nR0(e,a,b,c,d, 6); nR0(d,e,a,b,c, 7);
- nR0(c,d,e,a,b, 8); nR0(b,c,d,e,a, 9); nR0(a,b,c,d,e,10); nR0(e,a,b,c,d,11);
- nR0(d,e,a,b,c,12); nR0(c,d,e,a,b,13); nR0(b,c,d,e,a,14); nR0(a,b,c,d,e,15);
- nR1(e,a,b,c,d,16); nR1(d,e,a,b,c,17); nR1(c,d,e,a,b,18); nR1(b,c,d,e,a,19);
-}
-
-void
-do_R2(u_int32_t *a, u_int32_t *b, u_int32_t *c, u_int32_t *d, u_int32_t *e, CHAR64LONG16 *block)
-{
- nR2(a,b,c,d,e,20); nR2(e,a,b,c,d,21); nR2(d,e,a,b,c,22); nR2(c,d,e,a,b,23);
- nR2(b,c,d,e,a,24); nR2(a,b,c,d,e,25); nR2(e,a,b,c,d,26); nR2(d,e,a,b,c,27);
- nR2(c,d,e,a,b,28); nR2(b,c,d,e,a,29); nR2(a,b,c,d,e,30); nR2(e,a,b,c,d,31);
- nR2(d,e,a,b,c,32); nR2(c,d,e,a,b,33); nR2(b,c,d,e,a,34); nR2(a,b,c,d,e,35);
- nR2(e,a,b,c,d,36); nR2(d,e,a,b,c,37); nR2(c,d,e,a,b,38); nR2(b,c,d,e,a,39);
-}
-
-void
-do_R3(u_int32_t *a, u_int32_t *b, u_int32_t *c, u_int32_t *d, u_int32_t *e, CHAR64LONG16 *block)
-{
- nR3(a,b,c,d,e,40); nR3(e,a,b,c,d,41); nR3(d,e,a,b,c,42); nR3(c,d,e,a,b,43);
- nR3(b,c,d,e,a,44); nR3(a,b,c,d,e,45); nR3(e,a,b,c,d,46); nR3(d,e,a,b,c,47);
- nR3(c,d,e,a,b,48); nR3(b,c,d,e,a,49); nR3(a,b,c,d,e,50); nR3(e,a,b,c,d,51);
- nR3(d,e,a,b,c,52); nR3(c,d,e,a,b,53); nR3(b,c,d,e,a,54); nR3(a,b,c,d,e,55);
- nR3(e,a,b,c,d,56); nR3(d,e,a,b,c,57); nR3(c,d,e,a,b,58); nR3(b,c,d,e,a,59);
-}
-
-void
-do_R4(u_int32_t *a, u_int32_t *b, u_int32_t *c, u_int32_t *d, u_int32_t *e, CHAR64LONG16 *block)
-{
- nR4(a,b,c,d,e,60); nR4(e,a,b,c,d,61); nR4(d,e,a,b,c,62); nR4(c,d,e,a,b,63);
- nR4(b,c,d,e,a,64); nR4(a,b,c,d,e,65); nR4(e,a,b,c,d,66); nR4(d,e,a,b,c,67);
- nR4(c,d,e,a,b,68); nR4(b,c,d,e,a,69); nR4(a,b,c,d,e,70); nR4(e,a,b,c,d,71);
- nR4(d,e,a,b,c,72); nR4(c,d,e,a,b,73); nR4(b,c,d,e,a,74); nR4(a,b,c,d,e,75);
- nR4(e,a,b,c,d,76); nR4(d,e,a,b,c,77); nR4(c,d,e,a,b,78); nR4(b,c,d,e,a,79);
-}
-#endif
-
/*
* Hash a single 512-bit block. This is the core of the algorithm.
*/
-void SHA1Transform(state, buffer)
- u_int32_t state[5];
- const u_char buffer[64];
+void SHA1Transform(uint32_t state[SHA1_DIGEST_LENGTH/4],
+ const uint8_t buffer[SHA1_BLOCK_SIZE])
{
- u_int32_t a, b, c, d, e;
+ uint32_t a, b, c, d, e;
CHAR64LONG16 *block;
#ifdef SHA1HANDSOFF
@@ -136,7 +72,7 @@ void SHA1Transform(state, buffer)
#ifdef SHA1HANDSOFF
block = &workspace;
- (void)memcpy(block, buffer, 64);
+ (void)memcpy(block, buffer, SHA1_BLOCK_SIZE);
#else
block = (CHAR64LONG16 *)(void *)buffer;
#endif
@@ -148,12 +84,6 @@ void SHA1Transform(state, buffer)
d = state[3];
e = state[4];
-#ifdef SPARC64_GCC_WORKAROUND
- do_R01(&a, &b, &c, &d, &e, block);
- do_R2(&a, &b, &c, &d, &e, block);
- do_R3(&a, &b, &c, &d, &e, block);
- do_R4(&a, &b, &c, &d, &e, block);
-#else
/* 4 rounds of 20 operations each. Loop unrolled. */
R0(a,b,c,d,e, 0); R0(e,a,b,c,d, 1); R0(d,e,a,b,c, 2); R0(c,d,e,a,b, 3);
R0(b,c,d,e,a, 4); R0(a,b,c,d,e, 5); R0(e,a,b,c,d, 6); R0(d,e,a,b,c, 7);
@@ -175,7 +105,6 @@ void SHA1Transform(state, buffer)
R4(c,d,e,a,b,68); R4(b,c,d,e,a,69); R4(a,b,c,d,e,70); R4(e,a,b,c,d,71);
R4(d,e,a,b,c,72); R4(c,d,e,a,b,73); R4(b,c,d,e,a,74); R4(a,b,c,d,e,75);
R4(e,a,b,c,d,76); R4(d,e,a,b,c,77); R4(c,d,e,a,b,78); R4(b,c,d,e,a,79);
-#endif
/* Add the working vars back into context.state[] */
state[0] += a;
@@ -192,78 +121,91 @@ void SHA1Transform(state, buffer)
/*
* SHA1Init - Initialize new context
*/
-void SHA1Init(context)
- SHA1_CTX *context;
+void SHA1Init(SHA1_CTX *context)
{
-
assert(context != 0);
/* SHA1 initialization constants */
- context->state[0] = 0x67452301;
- context->state[1] = 0xEFCDAB89;
- context->state[2] = 0x98BADCFE;
- context->state[3] = 0x10325476;
- context->state[4] = 0xC3D2E1F0;
- context->count[0] = context->count[1] = 0;
+ *context = (SHA1_CTX) {
+ .state = {
+ 0x67452301,
+ 0xEFCDAB89,
+ 0x98BADCFE,
+ 0x10325476,
+ 0xC3D2E1F0,
+ },
+ .count = 0,
+ };
}
/*
* Run your data through this.
*/
-void SHA1Update(context, data, len)
- SHA1_CTX *context;
- const u_char *data;
- u_int len;
+void SHA1Update(SHA1_CTX *context, const uint8_t *data, unsigned int len)
{
- u_int i, j;
+ unsigned int i, j;
+ unsigned int partial, done;
+ const uint8_t *src;
assert(context != 0);
assert(data != 0);
- j = context->count[0];
- if ((context->count[0] += len << 3) < j)
- context->count[1] += (len>>29)+1;
- j = (j >> 3) & 63;
- if ((j + len) > 63) {
- (void)memcpy(&context->buffer[j], data, (i = 64-j));
- SHA1Transform(context->state, context->buffer);
- for ( ; i + 63 < len; i += 64)
- SHA1Transform(context->state, &data[i]);
- j = 0;
- } else {
- i = 0;
+ partial = context->count % SHA1_BLOCK_SIZE;
+ context->count += len;
+ done = 0;
+ src = data;
+
+ if ((partial + len) >= SHA1_BLOCK_SIZE) {
+ if (partial) {
+ done = -partial;
+ memcpy(context->buffer + partial, data, done + SHA1_BLOCK_SIZE);
+ src = context->buffer;
+ }
+ do {
+ SHA1Transform(context->state, src);
+ done += SHA1_BLOCK_SIZE;
+ src = data + done;
+ } while (done + SHA1_BLOCK_SIZE <= len);
+ partial = 0;
}
- (void)memcpy(&context->buffer[j], &data[i], len - i);
+ memcpy(context->buffer + partial, src, len - done);
}
/*
* Add padding and return the message digest.
*/
-void SHA1Final(digest, context)
- u_char digest[20];
- SHA1_CTX* context;
+void SHA1Final(uint8_t digest[SHA1_DIGEST_LENGTH], SHA1_CTX *context)
{
- u_int i;
- u_char finalcount[8];
+ uint32_t i, index, pad_len;
+ uint64_t bits;
+ static const uint8_t padding[SHA1_BLOCK_SIZE] = { 0x80, };
assert(digest != 0);
assert(context != 0);
- for (i = 0; i < 8; i++) {
- finalcount[i] = (u_char)((context->count[(i >= 4 ? 0 : 1)]
- >> ((3-(i & 3)) * 8) ) & 255); /* Endian independent */
- }
- SHA1Update(context, (const u_char *)"\200", 1);
- while ((context->count[0] & 504) != 448)
- SHA1Update(context, (const u_char *)"\0", 1);
- SHA1Update(context, finalcount, 8); /* Should cause a SHA1Transform() */
+#if BYTE_ORDER == LITTLE_ENDIAN
+ bits = swap64(context->count << 3);
+#else
+ bits = context->count << 3;
+#endif
+
+ /* Pad out to 56 mod 64 */
+ index = context->count & 0x3f;
+ pad_len = (index < 56) ? (56 - index) : ((64 + 56) - index);
+ SHA1Update(context, padding, pad_len);
+
+ /* Append length */
+ SHA1Update(context, (const uint8_t *)&bits, sizeof(bits));
if (digest) {
- for (i = 0; i < 20; i++)
- digest[i] = (u_char)
- ((context->state[i>>2] >> ((3-(i & 3)) * 8) ) & 255);
+ for (i = 0; i < SHA1_DIGEST_LENGTH/4; i++)
+#if BYTE_ORDER == LITTLE_ENDIAN
+ ((uint32_t *)digest)[i] = swap32(context->state[i]);
+#else
+ ((uint32_t *)digest)[i] = context->state[i];
+#endif
}
}
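
The SHA1 rewrite changes the context layout (a single 64-bit count) and switches the API to fixed-width types, but the call sequence is unchanged. A small usage sketch against the updated <sha1.h> (hypothetical caller, shown only to illustrate the new signatures):

    #include <sha1.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void) {
        SHA1_CTX ctx;
        uint8_t digest[SHA1_DIGEST_LENGTH];
        const char *msg = "abc";

        SHA1Init(&ctx);
        SHA1Update(&ctx, (const uint8_t *)msg, strlen(msg));
        SHA1Final(digest, &ctx);

        for (int i = 0; i < SHA1_DIGEST_LENGTH; i++)
            printf("%02x", digest[i]);          /* a9993e36... for "abc" */
        printf("\n");
        return 0;
    }
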
diff --git a/libc/include/errno.h b/libc/include/errno.h
index 2b2685af4..cae0e3b78 100644
--- a/libc/include/errno.h
+++ b/libc/include/errno.h
@@ -45,6 +45,7 @@ __BEGIN_DECLS
extern int __set_errno(int error);
/* internal function returning the address of the thread-specific errno */
+__attribute__((const))
extern volatile int* __errno(void);
/* a macro expanding to the errno l-value */
diff --git a/libc/include/pthread.h b/libc/include/pthread.h
index f4591f1a0..6407a1991 100644
--- a/libc/include/pthread.h
+++ b/libc/include/pthread.h
@@ -146,6 +146,7 @@ void pthread_exit(void * retval);
int pthread_join(pthread_t thid, void ** ret_val);
int pthread_detach(pthread_t thid);
+__attribute__((const))
pthread_t pthread_self(void);
int pthread_equal(pthread_t one, pthread_t two);
diff --git a/libc/include/resolv.h b/libc/include/resolv.h
index 4247d68b7..7257d343d 100644
--- a/libc/include/resolv.h
+++ b/libc/include/resolv.h
@@ -34,12 +34,13 @@
#include <sys/socket.h>
#include <stdio.h>
#include <arpa/nameser.h>
+#include <netinet/in.h>
__BEGIN_DECLS
struct res_state;
-extern struct __res_state *__res_state(void);
+extern struct __res_state *__res_state(void) __attribute__((const));
#define _res (*__res_state())
/* Base-64 functions - because some code expects it there */
@@ -49,6 +50,21 @@ extern struct __res_state *__res_state(void);
extern int b64_ntop(u_char const *, size_t, char *, size_t);
extern int b64_pton(char const *, u_char *, size_t);
+/* Set name of default interface */
+extern void _resolv_set_default_iface(const char* ifname);
+
+/* set name servers for an interface */
+extern void _resolv_set_nameservers_for_iface(const char* ifname, char** servers, int numservers);
+
+/* tell resolver of the address of an interface */
+extern void _resolv_set_addr_of_iface(const char* ifname, struct in_addr* addr);
+
+/* flush the cache associated with the default interface */
+extern void _resolv_flush_cache_for_default_iface();
+
+/* flush the cache associated with a certain interface */
+extern void _resolv_flush_cache_for_iface(const char* ifname);
+
__END_DECLS
#endif /* _RESOLV_H_ */
diff --git a/libc/include/sha1.h b/libc/include/sha1.h
index f7ada46a5..bc51ac0c2 100644
--- a/libc/include/sha1.h
+++ b/libc/include/sha1.h
@@ -13,19 +13,20 @@
#include <sys/types.h>
#define SHA1_DIGEST_LENGTH 20
-#define SHA1_DIGEST_STRING_LENGTH 41
+#define SHA1_BLOCK_SIZE 64
typedef struct {
- uint32_t state[5];
- uint32_t count[2];
- u_char buffer[64];
+ uint64_t count;
+ uint32_t state[SHA1_DIGEST_LENGTH / 4];
+ uint8_t buffer[SHA1_BLOCK_SIZE];
} SHA1_CTX;
__BEGIN_DECLS
-void SHA1Transform(uint32_t[5], const u_char[64]);
+void SHA1Transform(uint32_t[SHA1_DIGEST_LENGTH/4],
+ const uint8_t[SHA1_BLOCK_SIZE]);
void SHA1Init(SHA1_CTX *);
-void SHA1Update(SHA1_CTX *, const u_char *, u_int);
-void SHA1Final(u_char[SHA1_DIGEST_LENGTH], SHA1_CTX *);
+void SHA1Update(SHA1_CTX *, const uint8_t *, unsigned int);
+void SHA1Final(uint8_t[SHA1_DIGEST_LENGTH], SHA1_CTX *);
__END_DECLS
#endif /* _SYS_SHA1_H_ */
diff --git a/libc/include/sys/_system_properties.h b/libc/include/sys/_system_properties.h
index 42a7f6c0f..95652c322 100644
--- a/libc/include/sys/_system_properties.h
+++ b/libc/include/sys/_system_properties.h
@@ -74,7 +74,8 @@ struct prop_msg
};
#define PROP_MSG_SETPROP 1
-
+#define PROP_MSG_SETPROP_SYNC 2
+
/*
** Rules:
**
diff --git a/libc/kernel/common/linux/if_ether.h b/libc/kernel/common/linux/if_ether.h
index 1ba7a9928..7e235a54d 100644
--- a/libc/kernel/common/linux/if_ether.h
+++ b/libc/kernel/common/linux/if_ether.h
@@ -84,6 +84,7 @@
#define ETH_P_TRAILER 0x001C
#define ETH_P_PHONET 0x00F5
#define ETH_P_IEEE802154 0x00F6
+#define ETH_P_CAIF 0x00F7
struct ethhdr {
unsigned char h_dest[ETH_ALEN];
diff --git a/libc/kernel/common/linux/socket.h b/libc/kernel/common/linux/socket.h
index b578df943..c30dae0d4 100644
--- a/libc/kernel/common/linux/socket.h
+++ b/libc/kernel/common/linux/socket.h
@@ -138,7 +138,8 @@ struct ucred {
#define AF_LLC 26
#define AF_TIPC 30
#define AF_BLUETOOTH 31
-#define AF_MAX 32
+#define AF_CAIF 38
+#define AF_MAX 39
#define PF_UNSPEC AF_UNSPEC
#define PF_UNIX AF_UNIX
@@ -170,6 +171,7 @@ struct ucred {
#define PF_LLC AF_LLC
#define PF_TIPC AF_TIPC
#define PF_BLUETOOTH AF_BLUETOOTH
+#define PF_CAIF AF_CAIF
#define PF_MAX AF_MAX
#define SOMAXCONN 128
diff --git a/libc/kernel/common/linux/tty.h b/libc/kernel/common/linux/tty.h
index b28791c76..b1f2eab83 100644
--- a/libc/kernel/common/linux/tty.h
+++ b/libc/kernel/common/linux/tty.h
@@ -12,4 +12,6 @@
#ifndef _LINUX_TTY_H
#define _LINUX_TTY_H
+#define N_CAIF 20
+
#endif
diff --git a/libc/kernel/common/linux/videodev2.h b/libc/kernel/common/linux/videodev2.h
index 3a91510a2..5e52ce48e 100644
--- a/libc/kernel/common/linux/videodev2.h
+++ b/libc/kernel/common/linux/videodev2.h
@@ -413,6 +413,9 @@ struct v4l2_window {
__u32 clipcount;
void __user *bitmap;
__u8 global_alpha;
+#ifdef OMAP_ENHANCEMENT
+ __u8 zorder;
+#endif
};
struct v4l2_captureparm {
diff --git a/libc/netbsd/net/dnsproxyd_lock.h b/libc/netbsd/net/dnsproxyd_lock.h
new file mode 100644
index 000000000..544b6f8df
--- /dev/null
+++ b/libc/netbsd/net/dnsproxyd_lock.h
@@ -0,0 +1,15 @@
+#ifndef __DNSPROXYD_LOCK
+#define __DNSPROXYD_LOCK
+
+#include <pthread.h>
+
+/*
+ * Multiple processes and threads may call getaddrinfo and gethostbyaddr
+ * concurrently, but they all reach the proxy through the single
+ * /dev/socket/dnsproxyd socket, so this mutex serializes use of that
+ * socket within a process.
+ */
+
+static pthread_mutex_t dnsproxyd_lock = PTHREAD_MUTEX_INITIALIZER;
+
+#endif
diff --git a/libc/netbsd/net/getaddrinfo.c b/libc/netbsd/net/getaddrinfo.c
index e4d8c56a7..bea1c81d8 100644
--- a/libc/netbsd/net/getaddrinfo.c
+++ b/libc/netbsd/net/getaddrinfo.c
@@ -77,10 +77,14 @@
* friends.
*/
+#include <pthread.h>
+#include <fcntl.h>
#include <sys/cdefs.h>
#include <sys/types.h>
+#include <sys/stat.h>
#include <sys/param.h>
#include <sys/socket.h>
+#include <sys/un.h>
#include <net/if.h>
#include <netinet/in.h>
#include <arpa/inet.h>
@@ -99,6 +103,11 @@
#include <syslog.h>
#include <stdarg.h>
#include "nsswitch.h"
+#include "dnsproxyd_lock.h"
+
+#ifdef ANDROID_CHANGES
+#include <sys/system_properties.h>
+#endif /* ANDROID_CHANGES */
typedef union sockaddr_union {
struct sockaddr generic;
@@ -391,6 +400,200 @@ _have_ipv4() {
return _test_connect(PF_INET, &addr.generic, sizeof(addr.in));
}
+// Returns 0 on success, else returns non-zero on error (in which case
+// getaddrinfo should continue as normal)
+static int
+android_getaddrinfo_proxy(
+ const char *hostname, const char *servname,
+ const struct addrinfo *hints, struct addrinfo **res)
+{
+ int sock;
+ const int one = 1;
+ struct sockaddr_un proxy_addr;
+ const char* cache_mode = getenv("ANDROID_DNS_MODE");
+ FILE* proxy = NULL;
+ int success = 0;
+
+ // Clear this at start, as we use its non-NULLness later (in the
+ // error path) to decide if we have to free up any memory we
+ // allocated in the process (before failing).
+ *res = NULL;
+
+ if (cache_mode != NULL && strcmp(cache_mode, "local") == 0) {
+ // Don't use the proxy in local mode. This is used by the
+ // proxy itself.
+ return -1;
+ }
+
+ // Temporary cautious hack to disable the DNS proxy for processes
+ // requesting special treatment. Ideally the DNS proxy should
+ // accommodate these apps, though.
+ char propname[PROP_NAME_MAX];
+ char propvalue[PROP_VALUE_MAX];
+ snprintf(propname, sizeof(propname), "net.dns1.%d", getpid());
+ if (__system_property_get(propname, propvalue) > 0) {
+ return -1;
+ }
+
+ // Bogus things we can't serialize. Don't use the proxy.
+ if ((hostname != NULL &&
+ strcspn(hostname, " \n\r\t^'\"") != strlen(hostname)) ||
+ (servname != NULL &&
+ strcspn(servname, " \n\r\t^'\"") != strlen(servname))) {
+ return -1;
+ }
+
+ // Lock here to prevent android_gethostbyaddr_proxy from trying to
+ // write to the same socket at the same time.
+ pthread_mutex_lock(&dnsproxyd_lock);
+ sock = socket(AF_UNIX, SOCK_STREAM, 0);
+ if (sock < 0) {
+ pthread_mutex_unlock(&dnsproxyd_lock);
+ return -1;
+ }
+
+ if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) < 0) {
+ pthread_mutex_unlock(&dnsproxyd_lock);
+ return -1;
+ }
+
+ memset(&proxy_addr, 0, sizeof(proxy_addr));
+ proxy_addr.sun_family = AF_UNIX;
+ strlcpy(proxy_addr.sun_path, "/dev/socket/dnsproxyd",
+ sizeof(proxy_addr.sun_path));
+ if (connect(sock, (const struct sockaddr*) &proxy_addr,
+ sizeof(proxy_addr)) != 0) {
+ pthread_mutex_unlock(&dnsproxyd_lock);
+ close(sock);
+ return -1;
+ }
+
+ // Send the request.
+ proxy = fdopen(sock, "r+");
+ if (fprintf(proxy, "getaddrinfo %s %s %d %d %d %d",
+ hostname == NULL ? "^" : hostname,
+ servname == NULL ? "^" : servname,
+ hints == NULL ? -1 : hints->ai_flags,
+ hints == NULL ? -1 : hints->ai_family,
+ hints == NULL ? -1 : hints->ai_socktype,
+ hints == NULL ? -1 : hints->ai_protocol) < 0) {
+ goto exit;
+ }
+ // literal NULL byte at end, required by FrameworkListener
+ if (fputc(0, proxy) == EOF ||
+ fflush(proxy) != 0) {
+ goto exit;
+ }
+
+ int remote_rv;
+ if (fread(&remote_rv, sizeof(int), 1, proxy) != 1) {
+ goto exit;
+ }
+
+ if (remote_rv != 0) {
+ goto exit;
+ }
+
+ struct addrinfo* ai = NULL;
+ struct addrinfo** nextres = res;
+ while (1) {
+ uint32_t addrinfo_len;
+ if (fread(&addrinfo_len, sizeof(addrinfo_len),
+ 1, proxy) != 1) {
+ break;
+ }
+ addrinfo_len = ntohl(addrinfo_len);
+ if (addrinfo_len == 0) {
+ success = 1;
+ break;
+ }
+
+ if (addrinfo_len < sizeof(struct addrinfo)) {
+ break;
+ }
+ ai = calloc(1, addrinfo_len +
+ sizeof(struct sockaddr_storage));
+ if (ai == NULL) {
+ break;
+ }
+
+ if (fread(ai, addrinfo_len, 1, proxy) != 1) {
+ // Error; fall through.
+ break;
+ }
+
+ // Zero out the pointer fields we copied which aren't
+ // valid in this address space.
+ ai->ai_addr = NULL;
+ ai->ai_canonname = NULL;
+ ai->ai_next = NULL;
+
+ // struct sockaddr
+ uint32_t addr_len;
+ if (fread(&addr_len, sizeof(addr_len), 1, proxy) != 1) {
+ break;
+ }
+ addr_len = ntohl(addr_len);
+ if (addr_len != 0) {
+ if (addr_len > sizeof(struct sockaddr_storage)) {
+ // Bogus; too big.
+ break;
+ }
+ struct sockaddr* addr = (struct sockaddr*)(ai + 1);
+ if (fread(addr, addr_len, 1, proxy) != 1) {
+ break;
+ }
+ ai->ai_addr = addr;
+ }
+
+ // canonname
+ uint32_t name_len;
+ if (fread(&name_len, sizeof(name_len), 1, proxy) != 1) {
+ break;
+ }
+ name_len = ntohl(name_len);
+ if (name_len != 0) {
+ ai->ai_canonname = (char*) malloc(name_len);
+ if (fread(ai->ai_canonname, name_len, 1, proxy) != 1) {
+ break;
+ }
+ if (ai->ai_canonname[name_len - 1] != '\0') {
+ // The proxy should be returning this
+ // NULL-terminated.
+ break;
+ }
+ }
+
+ *nextres = ai;
+ nextres = &ai->ai_next;
+ ai = NULL;
+ }
+
+ if (ai != NULL) {
+ // Clean up partially-built addrinfo that we never ended up
+ // attaching to the response.
+ freeaddrinfo(ai);
+ }
+exit:
+ pthread_mutex_unlock(&dnsproxyd_lock);
+ if (proxy != NULL) {
+ fclose(proxy);
+ }
+
+ if (success) {
+ return 0;
+ }
+
+ // Proxy failed; fall through to local
+ // resolver case. But first clean up any
+ // memory we might've allocated.
+ if (*res) {
+ freeaddrinfo(*res);
+ *res = NULL;
+ }
+ return -1;
+}
+
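For reference, a minimal sketch of the framing the reader loop above expects from the
proxy side (this is an assumption about dnsproxyd, not part of this patch): the reply
starts with a native-endian int status (0 = success), then a sequence of records, each
a 32-bit network-order length followed by that many bytes, terminated by a zero length.

    #include <arpa/inet.h>   /* htonl */
    #include <netdb.h>       /* struct addrinfo */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical proxy-side writer matching the client parser above. */
    static int write_one_addrinfo(FILE* out, const struct addrinfo* ai)
    {
        uint32_t len = htonl(sizeof(*ai));
        if (fwrite(&len, sizeof(len), 1, out) != 1) return -1;
        if (fwrite(ai, sizeof(*ai), 1, out) != 1) return -1;      /* addrinfo blob */

        uint32_t addr_len = ai->ai_addr ? ai->ai_addrlen : 0;
        len = htonl(addr_len);
        if (fwrite(&len, sizeof(len), 1, out) != 1) return -1;
        if (addr_len && fwrite(ai->ai_addr, addr_len, 1, out) != 1)
            return -1;                                            /* sockaddr */

        uint32_t name_len = ai->ai_canonname ? strlen(ai->ai_canonname) + 1 : 0;
        len = htonl(name_len);
        if (fwrite(&len, sizeof(len), 1, out) != 1) return -1;
        if (name_len && fwrite(ai->ai_canonname, name_len, 1, out) != 1)
            return -1;                                            /* canonname, incl. NUL */
        return 0;
    }
    /* After the last entry the proxy sends a single zero length:
     *     uint32_t end = htonl(0); fwrite(&end, sizeof(end), 1, out);
     */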
int
getaddrinfo(const char *hostname, const char *servname,
const struct addrinfo *hints, struct addrinfo **res)
@@ -537,6 +740,13 @@ getaddrinfo(const char *hostname, const char *servname,
if (pai->ai_flags & AI_NUMERICHOST)
ERR(EAI_NONAME);
+ /*
+ * BEGIN ANDROID CHANGES; proxying to the cache
+ */
+ if (android_getaddrinfo_proxy(hostname, servname, hints, res) == 0) {
+ return 0;
+ }
+
/*
* hostname as alphabetical name.
* we would like to prefer AF_INET6 than AF_INET, so we'll make a
diff --git a/libc/netbsd/net/getnameinfo.c b/libc/netbsd/net/getnameinfo.c
index 36664435d..5479481a8 100644
--- a/libc/netbsd/net/getnameinfo.c
+++ b/libc/netbsd/net/getnameinfo.c
@@ -45,6 +45,7 @@
* beware on merge.
*/
+#include <pthread.h>
#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
__RCSID("$NetBSD: getnameinfo.c,v 1.43 2006/02/17 15:58:26 ginsbach Exp $");
@@ -64,11 +65,17 @@ __RCSID("$NetBSD: getnameinfo.c,v 1.43 2006/02/17 15:58:26 ginsbach Exp $");
#include <netdb.h>
#ifdef ANDROID_CHANGES
#include "resolv_private.h"
+#include <sys/system_properties.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/un.h>
+#include <errno.h>
#else
#include <resolv.h>
#endif
#include <stddef.h>
#include <string.h>
+#include "dnsproxyd_lock.h"
static const struct afd {
int a_af;
@@ -124,7 +131,108 @@ int getnameinfo(const struct sockaddr* sa, socklen_t salen, char* host, size_t h
}
}
+#ifdef ANDROID_CHANGES
+/* On success, the length of the host name is returned. A return
+ * value of 0 means there is no host name associated with
+ * the address. On failure, -1 is returned, in which case the
+ * normal (local resolver) execution flow shall continue.
+ * The caller must supply an hp->h_name buffer of at least MAXDNAME bytes. */
+static int
+android_gethostbyaddr_proxy(struct hostent* hp, const void *addr, socklen_t addrLen, int addrFamily) {
+
+ int sock;
+ const int one = 1;
+ struct sockaddr_un proxy_addr;
+ const char* cache_mode = getenv("ANDROID_DNS_MODE");
+ FILE* proxy = NULL;
+ int result = -1;
+
+ if (cache_mode != NULL && strcmp(cache_mode, "local") == 0) {
+ // Don't use the proxy in local mode. This is used by the
+ // proxy itself.
+ return -1;
+ }
+
+ // Temporary cautious hack to disable the DNS proxy for processes
+ // requesting special treatment. Ideally the DNS proxy should
+	// accommodate these apps, though.
+ char propname[PROP_NAME_MAX];
+ char propvalue[PROP_VALUE_MAX];
+ snprintf(propname, sizeof(propname), "net.dns1.%d", getpid());
+ if (__system_property_get(propname, propvalue) > 0) {
+ return -1;
+ }
+ // create socket
+ // Lock here to prevent android_getaddrinfo_proxy from trying to
+ // write to the same socket at the same time.
+ pthread_mutex_lock(&dnsproxyd_lock);
+ sock = socket(AF_UNIX, SOCK_STREAM, 0);
+ if (sock < 0) {
+ pthread_mutex_unlock(&dnsproxyd_lock);
+ return -1;
+ }
+
+	if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) < 0) {
+		pthread_mutex_unlock(&dnsproxyd_lock);
+		close(sock);
+		return -1;
+	}
+
+ memset(&proxy_addr, 0, sizeof(proxy_addr));
+ proxy_addr.sun_family = AF_UNIX;
+ strlcpy(proxy_addr.sun_path, "/dev/socket/dnsproxyd",
+ sizeof(proxy_addr.sun_path));
+ if (connect(sock, (const struct sockaddr*) (void*) &proxy_addr,
+ sizeof(proxy_addr)) != 0) {
+ pthread_mutex_unlock(&dnsproxyd_lock);
+ close(sock);
+ return -1;
+ }
+
+ // send request to DnsProxyListener
+ proxy = fdopen(sock,"r+");
+ if (proxy == NULL) {
+ goto exit;
+ }
+
+ char buf[INET6_ADDRSTRLEN]; // big enough for IPv4 and IPv6
+	const char* addrStr = inet_ntop(addrFamily, addr, buf, sizeof(buf));
+ if (addrStr == NULL) {
+ goto exit;
+ }
+ if (fprintf(proxy, "gethostbyaddr %s %d %d", addrStr, addrLen, addrFamily) < 0) {
+ goto exit;
+ }
+
+ // literal NULL byte at end, required by FrameworkListener
+ if (fputc(0, proxy) == EOF || fflush(proxy) != 0) {
+ goto exit;
+ }
+
+ result = 0;
+ uint32_t name_len;
+ if (fread(&name_len, sizeof(name_len), 1, proxy) != 1) {
+ goto exit;
+ }
+
+ name_len = ntohl(name_len);
+	if (name_len == 0) {
+ goto exit;
+ }
+
+ if (fread(hp->h_name, name_len, 1, proxy) != 1) {
+ goto exit;
+ }
+
+ result = name_len;
+ exit:
+ pthread_mutex_unlock(&dnsproxyd_lock);
+ if (proxy != NULL) {
+ fclose(proxy);
+ }
+
+ return result;
+}
+#endif
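A minimal usage sketch (illustrative values only; the real caller is getnameinfo_inet()
below, which passes a MAXDNAME-sized buffer exactly as assumed here):

    static void example_reverse_lookup(void)
    {
        struct hostent he;
        char namebuf[MAXDNAME];          /* matches what getnameinfo_inet() passes */
        struct in_addr a;

        inet_aton("8.8.8.8", &a);        /* example address only */
        he.h_name = namebuf;

        int n = android_gethostbyaddr_proxy(&he, &a, sizeof(a), AF_INET);
        if (n > 0) {
            /* he.h_name now holds the NUL-terminated reverse name */
        } else if (n == 0) {
            /* the proxy answered: no name is known for this address */
        } else {
            /* n < 0: proxy unavailable, fall back to gethostbyaddr() */
        }
    }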
/*
* getnameinfo_inet():
* Format an IPv4 or IPv6 sockaddr into a printable string.
@@ -277,7 +385,21 @@ getnameinfo_inet(sa, salen, host, hostlen, serv, servlen, flags)
break;
}
} else {
+#ifdef ANDROID_CHANGES
+ struct hostent android_proxy_hostent;
+ char android_proxy_buf[MAXDNAME];
+ android_proxy_hostent.h_name = android_proxy_buf;
+
+ int hostnamelen = android_gethostbyaddr_proxy(&android_proxy_hostent,
+ addr, afd->a_addrlen, afd->a_af);
+ if (hostnamelen >= 0) {
+ hp = (hostnamelen > 0) ? &android_proxy_hostent : NULL;
+ } else {
+ hp = gethostbyaddr(addr, afd->a_addrlen, afd->a_af);
+ }
+#else
hp = gethostbyaddr(addr, afd->a_addrlen, afd->a_af);
+#endif
if (hp) {
#if 0
diff --git a/libc/netbsd/resolv/res_cache.c b/libc/netbsd/resolv/res_cache.c
index 2621a7bbf..20f37e106 100644
--- a/libc/netbsd/resolv/res_cache.c
+++ b/libc/netbsd/resolv/res_cache.c
@@ -27,17 +27,28 @@
*/
#include "resolv_cache.h"
+#include <resolv.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "pthread.h"
+#include <errno.h>
+#include "arpa_nameser.h"
+#include <sys/system_properties.h>
+#include <net/if.h>
+#include <netdb.h>
+#include <linux/if.h>
+
+#include <arpa/inet.h>
+#include "resolv_private.h"
+
/* This code implements a small and *simple* DNS resolver cache.
*
- * It is only used to cache DNS answers for a maximum of CONFIG_SECONDS seconds
- * in order to reduce DNS traffic. It is not supposed to be a full DNS cache,
- * since we plan to implement that in the future in a dedicated process running
- * on the system.
+ * It is only used to cache DNS answers for a time defined by the smallest TTL
+ * among the answer records in order to reduce DNS traffic. It is not supposed
+ * to be a full DNS cache, since we plan to implement that in the future in a
+ * dedicated process running on the system.
*
* Note that its design is kept simple very intentionally, i.e.:
*
@@ -47,9 +58,8 @@
* (this means that two similar queries that encode the DNS name
* differently will be treated distinctly).
*
- * - the TTLs of answer RRs are ignored. our DNS resolver library does not use
- * them anyway, but it means that records with a TTL smaller than
- * CONFIG_SECONDS will be kept in the cache anyway.
+ *    - the smallest TTL value among the answer records is used as the time
+ *      to keep an answer in the cache.
*
* this is bad, but we absolutely want to avoid parsing the answer packets
* (and should be solved by the later full DNS cache process).
@@ -104,7 +114,7 @@
*/
#define CONFIG_SECONDS (60*10) /* 10 minutes */
-/* maximum number of entries kept in the cache. This value has been
+/* default number of entries kept in the cache. This value has been
* determined by browsing through various sites and counting the number
* of corresponding requests. Keep in mind that our framework is currently
* performing two requests per name lookup (one for IPv4, the other for IPv6)
@@ -123,10 +133,16 @@
* most high-level websites use lots of media/ad servers with different names
* but these are generally reused when browsing through the site.
*
- * As such, a valud of 64 should be relatively conformtable at the moment.
+ * As such, a value of 64 should be relatively comfortable at the moment.
+ *
+ * The system property ro.net.dns_cache_size can be used to override the
+ * default value with a custom one.
*/
#define CONFIG_MAX_ENTRIES 64
+/* name of the system property that can be used to set the cache size */
+#define DNS_CACHE_SIZE_PROP_NAME "ro.net.dns_cache_size"
+
/****************************************************************************/
/****************************************************************************/
/***** *****/
@@ -141,6 +157,7 @@
/* set to 1 to debug query data */
#define DEBUG_DATA 0
+#undef XLOG
#if DEBUG
# include <logd.h>
# define XLOG(...) \
@@ -987,10 +1004,50 @@ typedef struct Entry {
int querylen;
const uint8_t* answer;
int answerlen;
- time_t when; /* time_t when entry was added to table */
- int id; /* for debugging purpose */
+ time_t expires; /* time_t when the entry isn't valid any more */
+ int id; /* for debugging purpose */
} Entry;
+/**
+ * Parse the answer records and find the smallest
+ * TTL among the answer records.
+ *
+ * The returned TTL is the number of seconds to
+ * keep the answer in the cache.
+ *
+ * In case of a parse error, zero (0) is returned, which
+ * indicates that the answer shall not be cached.
+ */
+static u_long
+answer_getTTL(const void* answer, int answerlen)
+{
+ ns_msg handle;
+ int ancount, n;
+ u_long result, ttl;
+ ns_rr rr;
+
+ result = 0;
+ if (ns_initparse(answer, answerlen, &handle) >= 0) {
+ // get number of answer records
+ ancount = ns_msg_count(handle, ns_s_an);
+ for (n = 0; n < ancount; n++) {
+ if (ns_parserr(&handle, ns_s_an, n, &rr) == 0) {
+ ttl = ns_rr_ttl(rr);
+ if (n == 0 || ttl < result) {
+ result = ttl;
+ }
+ } else {
+ XLOG("ns_parserr failed ancount no = %d. errno = %s\n", n, strerror(errno));
+ }
+ }
+ } else {
+        XLOG("ns_initparse failed: %s\n", strerror(errno));
+ }
+
+    XLOG("TTL = %lu\n", result);
+
+ return result;
+}
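For instance, an answer whose records carry TTLs of 300 and 60 seconds yields 60 here;
_resolv_cache_add() further down turns that into an absolute expiry with:

    u_long ttl = answer_getTTL(answer, answerlen);   /* min(300, 60) == 60 */
    if (ttl > 0) {
        e->expires = ttl + _time_now();              /* entry is stale at/after this time */
    }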
static void
entry_free( Entry* e )
@@ -1072,8 +1129,6 @@ entry_alloc( const Entry* init, const void* answer, int answerlen )
memcpy( (char*)e->answer, answer, e->answerlen );
- e->when = _time_now();
-
return e;
}
@@ -1103,17 +1158,25 @@ entry_equals( const Entry* e1, const Entry* e2 )
* for simplicity, the hash-table fields 'hash' and 'hlink' are
* inlined in the Entry structure.
*/
-#define MAX_HASH_ENTRIES (2*CONFIG_MAX_ENTRIES)
typedef struct resolv_cache {
+ int max_entries;
int num_entries;
Entry mru_list;
pthread_mutex_t lock;
unsigned generation;
int last_id;
- Entry* entries[ MAX_HASH_ENTRIES ];
+ Entry* entries;
} Cache;
+typedef struct resolv_cache_info {
+ char ifname[IF_NAMESIZE + 1];
+ struct in_addr ifaddr;
+ Cache* cache;
+ struct resolv_cache_info* next;
+ char* nameservers[MAXNS +1];
+ struct addrinfo* nsaddrinfo[MAXNS + 1];
+} CacheInfo;
#define HTABLE_VALID(x) ((x) != NULL && (x) != HTABLE_DELETED)
@@ -1123,9 +1186,9 @@ _cache_flush_locked( Cache* cache )
int nn;
time_t now = _time_now();
- for (nn = 0; nn < MAX_HASH_ENTRIES; nn++)
+ for (nn = 0; nn < cache->max_entries; nn++)
{
- Entry** pnode = &cache->entries[nn];
+ Entry** pnode = (Entry**) &cache->entries[nn];
while (*pnode != NULL) {
Entry* node = *pnode;
@@ -1143,17 +1206,48 @@ _cache_flush_locked( Cache* cache )
"*************************");
}
-struct resolv_cache*
+/* Return max number of entries allowed in the cache,
+ * i.e. cache size. The cache size is either defined
+ * by system property ro.net.dns_cache_size or by
+ * CONFIG_MAX_ENTRIES if system property not set
+ * or set to invalid value. */
+static int
+_res_cache_get_max_entries( void )
+{
+ int result = -1;
+ char cache_size[PROP_VALUE_MAX];
+
+ if (__system_property_get(DNS_CACHE_SIZE_PROP_NAME, cache_size) > 0) {
+ result = atoi(cache_size);
+ }
+
+ // ro.net.dns_cache_size not set or set to negative value
+ if (result <= 0) {
+ result = CONFIG_MAX_ENTRIES;
+ }
+
+ XLOG("cache size: %d", result);
+ return result;
+}
+
+static struct resolv_cache*
_resolv_cache_create( void )
{
struct resolv_cache* cache;
cache = calloc(sizeof(*cache), 1);
if (cache) {
- cache->generation = ~0U;
- pthread_mutex_init( &cache->lock, NULL );
- cache->mru_list.mru_prev = cache->mru_list.mru_next = &cache->mru_list;
- XLOG("%s: cache created\n", __FUNCTION__);
+ cache->max_entries = _res_cache_get_max_entries();
+ cache->entries = calloc(sizeof(*cache->entries), cache->max_entries);
+ if (cache->entries) {
+ cache->generation = ~0U;
+ pthread_mutex_init( &cache->lock, NULL );
+ cache->mru_list.mru_prev = cache->mru_list.mru_next = &cache->mru_list;
+ XLOG("%s: cache created\n", __FUNCTION__);
+ } else {
+ free(cache);
+ cache = NULL;
+ }
}
return cache;
}
@@ -1183,12 +1277,47 @@ _cache_dump_mru( Cache* cache )
XLOG("%s", temp);
}
+
+static void
+_dump_answer(const void* answer, int answerlen)
+{
+ res_state statep;
+ FILE* fp;
+ char* buf;
+ int fileLen;
+
+ fp = fopen("/data/reslog.txt", "w+");
+ if (fp != NULL) {
+ statep = __res_get_state();
+
+ res_pquery(statep, answer, answerlen, fp);
+
+ //Get file length
+ fseek(fp, 0, SEEK_END);
+ fileLen=ftell(fp);
+ fseek(fp, 0, SEEK_SET);
+ buf = (char *)malloc(fileLen+1);
+ if (buf != NULL) {
+ //Read file contents into buffer
+ fread(buf, fileLen, 1, fp);
+ XLOG("%s\n", buf);
+ free(buf);
+ }
+ fclose(fp);
+ remove("/data/reslog.txt");
+ }
+ else {
+ XLOG("_dump_answer: can't open file\n");
+ }
+}
#endif
#if DEBUG
# define XLOG_QUERY(q,len) _dump_query((q), (len))
+# define XLOG_ANSWER(a, len) _dump_answer((a), (len))
#else
# define XLOG_QUERY(q,len) ((void)0)
+# define XLOG_ANSWER(a,len) ((void)0)
#endif
/* This function tries to find a key within the hash table
@@ -1209,8 +1338,8 @@ static Entry**
_cache_lookup_p( Cache* cache,
Entry* key )
{
- int index = key->hash % MAX_HASH_ENTRIES;
- Entry** pnode = &cache->entries[ key->hash % MAX_HASH_ENTRIES ];
+ int index = key->hash % cache->max_entries;
+ Entry** pnode = (Entry**) &cache->entries[ index ];
while (*pnode != NULL) {
Entry* node = *pnode;
@@ -1322,7 +1451,7 @@ _resolv_cache_lookup( struct resolv_cache* cache,
now = _time_now();
/* remove stale entries here */
- if ( (unsigned)(now - e->when) >= CONFIG_SECONDS ) {
+ if (now >= e->expires) {
XLOG( " NOT IN CACHE (STALE ENTRY %p DISCARDED)", *lookup );
_cache_remove_p(cache, lookup);
goto Exit;
@@ -1363,6 +1492,7 @@ _resolv_cache_add( struct resolv_cache* cache,
Entry key[1];
Entry* e;
Entry** lookup;
+ u_long ttl;
/* don't assume that the query has already been cached
*/
@@ -1375,6 +1505,7 @@ _resolv_cache_add( struct resolv_cache* cache,
XLOG( "%s: query:", __FUNCTION__ );
XLOG_QUERY(query,querylen);
+ XLOG_ANSWER(answer, answerlen);
#if DEBUG_DATA
XLOG( "answer:");
XLOG_BYTES(answer,answerlen);
@@ -1389,7 +1520,7 @@ _resolv_cache_add( struct resolv_cache* cache,
goto Exit;
}
- if (cache->num_entries >= CONFIG_MAX_ENTRIES) {
+ if (cache->num_entries >= cache->max_entries) {
_cache_remove_oldest(cache);
/* need to lookup again */
lookup = _cache_lookup_p(cache, key);
@@ -1401,9 +1532,13 @@ _resolv_cache_add( struct resolv_cache* cache,
}
}
- e = entry_alloc( key, answer, answerlen );
- if (e != NULL) {
- _cache_add_p(cache, lookup, e);
+ ttl = answer_getTTL(answer, answerlen);
+ if (ttl > 0) {
+ e = entry_alloc(key, answer, answerlen);
+ if (e != NULL) {
+ e->expires = ttl + _time_now();
+ _cache_add_p(cache, lookup, e);
+ }
}
#if DEBUG
_cache_dump_mru(cache);
@@ -1420,11 +1555,47 @@ Exit:
/****************************************************************************/
/****************************************************************************/
-static struct resolv_cache* _res_cache;
static pthread_once_t _res_cache_once;
+// Head of the list of caches. Protected by _res_cache_list_lock.
+static struct resolv_cache_info _res_cache_list;
+
+// name of the current default interface
+static char _res_default_ifname[IF_NAMESIZE + 1];
+
+// lock protecting everything in the resolv_cache_info structs (next ptr, etc)
+static pthread_mutex_t _res_cache_list_lock;
+
+
+/* lookup the default interface name */
+static char *_get_default_iface_locked();
+/* insert resolv_cache_info into the list of resolv_cache_infos */
+static void _insert_cache_info_locked(struct resolv_cache_info* cache_info);
+/* creates a resolv_cache_info */
+static struct resolv_cache_info* _create_cache_info( void );
+/* gets cache associated with an interface name, or NULL if none exists */
+static struct resolv_cache* _find_named_cache_locked(const char* ifname);
+/* gets a resolv_cache_info associated with an interface name, or NULL if not found */
+static struct resolv_cache_info* _find_cache_info_locked(const char* ifname);
+/* free dns name server list of a resolv_cache_info structure */
+static void _free_nameservers(struct resolv_cache_info* cache_info);
+/* look up the named cache, creating one if needed */
+static struct resolv_cache* _get_res_cache_for_iface_locked(const char* ifname);
+/* empty the named cache */
+static void _flush_cache_for_iface_locked(const char* ifname);
+/* empty the nameservers set for the named cache */
+static void _free_nameservers_locked(struct resolv_cache_info* cache_info);
+/* lookup the nameserver for the named interface */
+static int _get_nameserver_locked(const char* ifname, int n, char* addr, int addrLen);
+/* lookup the addr of the nameserver for the named interface */
+static struct addrinfo* _get_nameserver_addr_locked(const char* ifname, int n);
+/* lookup the interface's address */
+static struct in_addr* _get_addr_locked(const char * ifname);
+
+
+
static void
-_res_cache_init( void )
+_res_cache_init(void)
{
const char* env = getenv(CONFIG_ENV);
@@ -1433,29 +1604,394 @@ _res_cache_init( void )
return;
}
- _res_cache = _resolv_cache_create();
+ memset(&_res_default_ifname, 0, sizeof(_res_default_ifname));
+ memset(&_res_cache_list, 0, sizeof(_res_cache_list));
+ pthread_mutex_init(&_res_cache_list_lock, NULL);
}
-
struct resolv_cache*
-__get_res_cache( void )
+__get_res_cache(void)
+{
+ struct resolv_cache *cache;
+
+ pthread_once(&_res_cache_once, _res_cache_init);
+
+ pthread_mutex_lock(&_res_cache_list_lock);
+
+ char* ifname = _get_default_iface_locked();
+
+ // if default interface not set then use the first cache
+ // associated with an interface as the default one.
+ if (ifname[0] == '\0') {
+ struct resolv_cache_info* cache_info = _res_cache_list.next;
+ while (cache_info) {
+ if (cache_info->ifname[0] != '\0') {
+ ifname = cache_info->ifname;
+ }
+
+ cache_info = cache_info->next;
+ }
+ }
+ cache = _get_res_cache_for_iface_locked(ifname);
+
+ pthread_mutex_unlock(&_res_cache_list_lock);
+ XLOG("_get_res_cache. default_ifname = %s\n", ifname);
+ return cache;
+}
+
+static struct resolv_cache*
+_get_res_cache_for_iface_locked(const char* ifname)
{
- pthread_once( &_res_cache_once, _res_cache_init );
- return _res_cache;
+ if (ifname == NULL)
+ return NULL;
+
+ struct resolv_cache* cache = _find_named_cache_locked(ifname);
+ if (!cache) {
+ struct resolv_cache_info* cache_info = _create_cache_info();
+ if (cache_info) {
+ cache = _resolv_cache_create();
+ if (cache) {
+ int len = sizeof(cache_info->ifname);
+ cache_info->cache = cache;
+ strncpy(cache_info->ifname, ifname, len - 1);
+ cache_info->ifname[len - 1] = '\0';
+
+ _insert_cache_info_locked(cache_info);
+ } else {
+ free(cache_info);
+ }
+ }
+ }
+ return cache;
}
void
-_resolv_cache_reset( unsigned generation )
+_resolv_cache_reset(unsigned generation)
{
XLOG("%s: generation=%d", __FUNCTION__, generation);
- if (_res_cache == NULL)
+ pthread_once(&_res_cache_once, _res_cache_init);
+ pthread_mutex_lock(&_res_cache_list_lock);
+
+ char* ifname = _get_default_iface_locked();
+ // if default interface not set then use the first cache
+ // associated with an interface as the default one.
+    // Note: this copies the code from __get_res_cache since this
+    // method will be deleted/obsolete once the per-interface cache is
+    // implemented everywhere
+ if (ifname[0] == '\0') {
+ struct resolv_cache_info* cache_info = _res_cache_list.next;
+ while (cache_info) {
+ if (cache_info->ifname[0] != '\0') {
+ ifname = cache_info->ifname;
+ }
+
+ cache_info = cache_info->next;
+ }
+ }
+ struct resolv_cache* cache = _get_res_cache_for_iface_locked(ifname);
+
+ if (cache == NULL) {
+ pthread_mutex_unlock(&_res_cache_list_lock);
return;
+ }
+
+ pthread_mutex_lock( &cache->lock );
+ if (cache->generation != generation) {
+ _cache_flush_locked(cache);
+ cache->generation = generation;
+ }
+ pthread_mutex_unlock( &cache->lock );
+
+ pthread_mutex_unlock(&_res_cache_list_lock);
+}
+
+void
+_resolv_flush_cache_for_default_iface(void)
+{
+ char* ifname;
+
+ pthread_once(&_res_cache_once, _res_cache_init);
+ pthread_mutex_lock(&_res_cache_list_lock);
+
+ ifname = _get_default_iface_locked();
+ _flush_cache_for_iface_locked(ifname);
+
+ pthread_mutex_unlock(&_res_cache_list_lock);
+}
+
+void
+_resolv_flush_cache_for_iface(const char* ifname)
+{
+ pthread_once(&_res_cache_once, _res_cache_init);
+ pthread_mutex_lock(&_res_cache_list_lock);
+
+ _flush_cache_for_iface_locked(ifname);
+
+ pthread_mutex_unlock(&_res_cache_list_lock);
+}
+
+static void
+_flush_cache_for_iface_locked(const char* ifname)
+{
+ struct resolv_cache* cache = _find_named_cache_locked(ifname);
+ if (cache) {
+ pthread_mutex_lock(&cache->lock);
+ _cache_flush_locked(cache);
+ pthread_mutex_unlock(&cache->lock);
+ }
+}
+
+static struct resolv_cache_info*
+_create_cache_info(void)
+{
+ struct resolv_cache_info* cache_info;
+
+ cache_info = calloc(sizeof(*cache_info), 1);
+ return cache_info;
+}
+
+static void
+_insert_cache_info_locked(struct resolv_cache_info* cache_info)
+{
+ struct resolv_cache_info* last;
+
+ for (last = &_res_cache_list; last->next; last = last->next);
+
+ last->next = cache_info;
+
+}
+
+static struct resolv_cache*
+_find_named_cache_locked(const char* ifname) {
+
+ struct resolv_cache_info* info = _find_cache_info_locked(ifname);
+
+ if (info != NULL) return info->cache;
+
+ return NULL;
+}
+
+static struct resolv_cache_info*
+_find_cache_info_locked(const char* ifname)
+{
+ if (ifname == NULL)
+ return NULL;
+
+ struct resolv_cache_info* cache_info = _res_cache_list.next;
+
+ while (cache_info) {
+ if (strcmp(cache_info->ifname, ifname) == 0) {
+ break;
+ }
+
+ cache_info = cache_info->next;
+ }
+ return cache_info;
+}
+
+static char*
+_get_default_iface_locked(void)
+{
+ char* iface = _res_default_ifname;
+
+ return iface;
+}
+
+void
+_resolv_set_default_iface(const char* ifname)
+{
+    XLOG("_resolv_set_default_iface: ifname %s\n", ifname);
+
+ pthread_once(&_res_cache_once, _res_cache_init);
+ pthread_mutex_lock(&_res_cache_list_lock);
+
+ int size = sizeof(_res_default_ifname);
+ memset(_res_default_ifname, 0, size);
+ strncpy(_res_default_ifname, ifname, size - 1);
+ _res_default_ifname[size - 1] = '\0';
+
+ pthread_mutex_unlock(&_res_cache_list_lock);
+}
+
+void
+_resolv_set_nameservers_for_iface(const char* ifname, char** servers, int numservers)
+{
+ int i, rt, index;
+ struct addrinfo hints;
+ char sbuf[NI_MAXSERV];
+
+ pthread_once(&_res_cache_once, _res_cache_init);
+
+ pthread_mutex_lock(&_res_cache_list_lock);
+ // creates the cache if not created
+ _get_res_cache_for_iface_locked(ifname);
+
+ struct resolv_cache_info* cache_info = _find_cache_info_locked(ifname);
+
+ if (cache_info != NULL) {
+ // free current before adding new
+ _free_nameservers_locked(cache_info);
+
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = PF_UNSPEC;
+ hints.ai_socktype = SOCK_DGRAM; /*dummy*/
+ hints.ai_flags = AI_NUMERICHOST;
+ sprintf(sbuf, "%u", NAMESERVER_PORT);
+
+ index = 0;
+ for (i = 0; i < numservers && i < MAXNS; i++) {
+ rt = getaddrinfo(servers[i], sbuf, &hints, &cache_info->nsaddrinfo[index]);
+ if (rt == 0) {
+ cache_info->nameservers[index] = strdup(servers[i]);
+ index++;
+ } else {
+ cache_info->nsaddrinfo[index] = NULL;
+ }
+ }
+ }
+ pthread_mutex_unlock(&_res_cache_list_lock);
+}
+
+static void
+_free_nameservers_locked(struct resolv_cache_info* cache_info)
+{
+ int i;
+ for (i = 0; i <= MAXNS; i++) {
+ free(cache_info->nameservers[i]);
+ cache_info->nameservers[i] = NULL;
+ if (cache_info->nsaddrinfo[i] != NULL) {
+ freeaddrinfo(cache_info->nsaddrinfo[i]);
+ cache_info->nsaddrinfo[i] = NULL;
+ }
+ }
+}
+
+int
+_resolv_cache_get_nameserver(int n, char* addr, int addrLen)
+{
+ char *ifname;
+ int result = 0;
+
+ pthread_once(&_res_cache_once, _res_cache_init);
+ pthread_mutex_lock(&_res_cache_list_lock);
+
+ ifname = _get_default_iface_locked();
+ result = _get_nameserver_locked(ifname, n, addr, addrLen);
+
+ pthread_mutex_unlock(&_res_cache_list_lock);
+ return result;
+}
+
+static int
+_get_nameserver_locked(const char* ifname, int n, char* addr, int addrLen)
+{
+ int len = 0;
+ char* ns;
+ struct resolv_cache_info* cache_info;
+
+ if (n < 1 || n > MAXNS || !addr)
+ return 0;
+
+ cache_info = _find_cache_info_locked(ifname);
+ if (cache_info) {
+ ns = cache_info->nameservers[n - 1];
+ if (ns) {
+ len = strlen(ns);
+ if (len < addrLen) {
+ strncpy(addr, ns, len);
+ addr[len] = '\0';
+ } else {
+ len = 0;
+ }
+ }
+ }
+
+ return len;
+}
+
+struct addrinfo*
+_cache_get_nameserver_addr(int n)
+{
+ struct addrinfo *result;
+ char* ifname;
+
+ pthread_once(&_res_cache_once, _res_cache_init);
+ pthread_mutex_lock(&_res_cache_list_lock);
+
+ ifname = _get_default_iface_locked();
+
+ result = _get_nameserver_addr_locked(ifname, n);
+ pthread_mutex_unlock(&_res_cache_list_lock);
+ return result;
+}
- pthread_mutex_lock( &_res_cache->lock );
- if (_res_cache->generation != generation) {
- _cache_flush_locked(_res_cache);
- _res_cache->generation = generation;
+static struct addrinfo*
+_get_nameserver_addr_locked(const char* ifname, int n)
+{
+ struct addrinfo* ai = NULL;
+ struct resolv_cache_info* cache_info;
+
+ if (n < 1 || n > MAXNS)
+ return NULL;
+
+ cache_info = _find_cache_info_locked(ifname);
+ if (cache_info) {
+ ai = cache_info->nsaddrinfo[n - 1];
+ }
+ return ai;
+}
+
+void
+_resolv_set_addr_of_iface(const char* ifname, struct in_addr* addr)
+{
+ pthread_once(&_res_cache_once, _res_cache_init);
+ pthread_mutex_lock(&_res_cache_list_lock);
+ struct resolv_cache_info* cache_info = _find_cache_info_locked(ifname);
+ if (cache_info) {
+ memcpy(&cache_info->ifaddr, addr, sizeof(*addr));
+
+ if (DEBUG) {
+ char* addr_s = inet_ntoa(cache_info->ifaddr);
+ XLOG("address of interface %s is %s\n", ifname, addr_s);
+ }
+ }
+ pthread_mutex_unlock(&_res_cache_list_lock);
+}
+
+struct in_addr*
+_resolv_get_addr_of_default_iface(void)
+{
+ struct in_addr* ai = NULL;
+ char* ifname;
+
+ pthread_once(&_res_cache_once, _res_cache_init);
+ pthread_mutex_lock(&_res_cache_list_lock);
+ ifname = _get_default_iface_locked();
+ ai = _get_addr_locked(ifname);
+ pthread_mutex_unlock(&_res_cache_list_lock);
+
+ return ai;
+}
+
+struct in_addr*
+_resolv_get_addr_of_iface(const char* ifname)
+{
+ struct in_addr* ai = NULL;
+
+ pthread_once(&_res_cache_once, _res_cache_init);
+ pthread_mutex_lock(&_res_cache_list_lock);
+ ai =_get_addr_locked(ifname);
+ pthread_mutex_unlock(&_res_cache_list_lock);
+ return ai;
+}
+
+static struct in_addr*
+_get_addr_locked(const char * ifname)
+{
+ struct resolv_cache_info* cache_info = _find_cache_info_locked(ifname);
+ if (cache_info) {
+ return &cache_info->ifaddr;
}
- pthread_mutex_unlock( &_res_cache->lock );
+ return NULL;
}
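Taken together, a connectivity daemon would be expected to drive the new per-interface
hooks roughly as follows (interface name and addresses are made-up examples, not values
used anywhere in this change):

    static void example_bring_up_wlan0(void)
    {
        char* dns[2] = { "192.168.1.1", "8.8.8.8" };        /* example servers */
        struct in_addr ifaddr;

        inet_aton("192.168.1.100", &ifaddr);                /* example interface address */

        _resolv_set_nameservers_for_iface("wlan0", dns, 2); /* also creates the wlan0 cache */
        _resolv_set_addr_of_iface("wlan0", &ifaddr);
        _resolv_set_default_iface("wlan0");                 /* lookups now use this cache */

        /* e.g. after a DHCP renew hands out different servers: */
        _resolv_flush_cache_for_iface("wlan0");
    }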
diff --git a/libc/netbsd/resolv/res_init.c b/libc/netbsd/resolv/res_init.c
index 81e570f77..1cdfc534a 100644
--- a/libc/netbsd/resolv/res_init.c
+++ b/libc/netbsd/resolv/res_init.c
@@ -225,6 +225,9 @@ __res_vinit(res_state statp, int preinit) {
char dnsProperty[PROP_VALUE_MAX];
#endif
+ if ((statp->options & RES_INIT) != 0U)
+ res_ndestroy(statp);
+
if (!preinit) {
statp->retrans = RES_TIMEOUT;
statp->retry = RES_DFLRETRY;
@@ -232,9 +235,6 @@ __res_vinit(res_state statp, int preinit) {
statp->id = res_randomid();
}
- if ((statp->options & RES_INIT) != 0U)
- res_ndestroy(statp);
-
memset(u, 0, sizeof(u));
#ifdef USELOOPBACK
u[nserv].sin.sin_addr = inet_makeaddr(IN_LOOPBACKNET, 1);
diff --git a/libc/netbsd/resolv/res_send.c b/libc/netbsd/resolv/res_send.c
index 696f8cfeb..94ba9693f 100644
--- a/libc/netbsd/resolv/res_send.c
+++ b/libc/netbsd/resolv/res_send.c
@@ -413,7 +413,7 @@ res_nsend(res_state statp,
if (EXT(statp).nssocks[ns] == -1)
continue;
peerlen = sizeof(peer);
- if (getsockname(EXT(statp).nssocks[ns],
+ if (getpeername(EXT(statp).nssocks[ns],
(struct sockaddr *)(void *)&peer, &peerlen) < 0) {
needclose++;
break;
diff --git a/libc/netbsd/resolv/res_state.c b/libc/netbsd/resolv/res_state.c
index 3a2301d2e..94c62bfc9 100644
--- a/libc/netbsd/resolv/res_state.c
+++ b/libc/netbsd/resolv/res_state.c
@@ -38,21 +38,32 @@
#define _REALLY_INCLUDE_SYS__SYSTEM_PROPERTIES_H_
#include <sys/_system_properties.h>
+/* Set to 1 to enable debug traces */
+#define DEBUG 0
+
+#if DEBUG
+# include <logd.h>
+# include <unistd.h> /* for gettid() */
+# define D(...) __libc_android_log_print(ANDROID_LOG_DEBUG,"libc", __VA_ARGS__)
+#else
+# define D(...) do{}while(0)
+#endif
+
static pthread_key_t _res_key;
static pthread_once_t _res_once;
typedef struct {
- int _h_errno;
- struct __res_state _nres[1];
- unsigned _serial;
- struct prop_info* _pi;
- struct res_static _rstatic[1];
+ int _h_errno;
+ struct __res_state _nres[1];
+ unsigned _serial;
+ struct prop_info* _pi;
+ struct res_static _rstatic[1];
} _res_thread;
static _res_thread*
_res_thread_alloc(void)
{
- _res_thread* rt = malloc(sizeof(*rt));
+ _res_thread* rt = calloc(1, sizeof(*rt));
if (rt) {
rt->_h_errno = 0;
@@ -62,12 +73,7 @@ _res_thread_alloc(void)
if (rt->_pi) {
rt->_serial = rt->_pi->serial;
}
- if ( res_ninit( rt->_nres ) < 0 ) {
- free(rt);
- rt = NULL;
- } else {
- memset(rt->_rstatic, 0, sizeof rt->_rstatic);
- }
+ memset(rt->_rstatic, 0, sizeof rt->_rstatic);
}
return rt;
}
@@ -91,6 +97,8 @@ _res_thread_free( void* _rt )
{
_res_thread* rt = _rt;
+ D("%s: rt=%p for thread=%d", __FUNCTION__, rt, gettid());
+
_res_static_done(rt->_rstatic);
res_ndestroy(rt->_nres);
free(rt);
@@ -108,29 +116,60 @@ _res_thread_get(void)
_res_thread* rt;
pthread_once( &_res_once, _res_init_key );
rt = pthread_getspecific( _res_key );
- if (rt == NULL) {
- if ((rt = _res_thread_alloc()) == NULL) {
- return NULL;
+
+ if (rt != NULL) {
+ /* We already have one thread-specific DNS state object.
+ * Check the serial value for any changes to net.* properties */
+ D("%s: Called for tid=%d rt=%p rt->pi=%p rt->serial=%d",
+ __FUNCTION__, gettid(), rt, rt->_pi, rt->_serial);
+ if (rt->_pi == NULL) {
+ /* The property wasn't created when _res_thread_get() was
+ * called the last time. This should only happen very
+ * early during the boot sequence. First, let's try to see if it
+ * is here now. */
+ rt->_pi = (struct prop_info*) __system_property_find("net.change");
+ if (rt->_pi == NULL) {
+ /* Still nothing, return current state */
+            D("%s: exiting for tid=%d rt=%p since system property not found",
+ __FUNCTION__, gettid(), rt);
+ return rt;
+ }
}
- rt->_h_errno = 0;
- rt->_serial = 0;
- pthread_setspecific( _res_key, rt );
- }
- /* Check the serial value for any chanes to net.* properties. */
- if (rt->_pi == NULL) {
- rt->_pi = (struct prop_info*) __system_property_find("net.change");
+ if (rt->_serial == rt->_pi->serial) {
+ /* Nothing changed, so return the current state */
+ D("%s: tid=%d rt=%p nothing changed, returning",
+ __FUNCTION__, gettid(), rt);
+ return rt;
+ }
+ /* Update the recorded serial number, and go reset the state */
+ rt->_serial = rt->_pi->serial;
+ goto RESET_STATE;
}
- if (rt->_pi == NULL || rt->_serial == rt->_pi->serial) {
- return rt;
+
+ /* It is the first time this function is called in this thread,
+ * we need to create a new thread-specific DNS resolver state. */
+ rt = _res_thread_alloc();
+ if (rt == NULL) {
+ return NULL;
}
- rt->_serial = rt->_pi->serial;
- /* Reload from system properties. */
+ pthread_setspecific( _res_key, rt );
+ D("%s: tid=%d Created new DNS state rt=%p",
+ __FUNCTION__, gettid(), rt);
+
+RESET_STATE:
+ /* Reset the state, note that res_ninit() can now properly reset
+ * an existing state without leaking memory.
+ */
+ D("%s: tid=%d, rt=%p, resetting DNS state (options RES_INIT=%d)",
+ __FUNCTION__, gettid(), rt, (rt->_nres->options & RES_INIT) != 0);
if ( res_ninit( rt->_nres ) < 0 ) {
- free(rt);
- rt = NULL;
- pthread_setspecific( _res_key, rt );
+ /* This should not happen */
+ D("%s: tid=%d rt=%p, woot, res_ninit() returned < 0",
+ __FUNCTION__, gettid(), rt);
+ _res_thread_free(rt);
+ pthread_setspecific( _res_key, NULL );
+ return NULL;
}
- _resolv_cache_reset(rt->_serial);
return rt;
}
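The net.change system property is the trigger here: whenever the framework updates DNS
configuration it writes net.change, which bumps the property's serial, and the check that
_res_thread_get() performs on every call amounts to this (standalone form is illustrative;
the property name is the one used above):

    #define _REALLY_INCLUDE_SYS__SYSTEM_PROPERTIES_H_
    #include <sys/_system_properties.h>

    /* Returns 1 if net.* configuration changed since *last_serial. */
    static int net_config_changed(unsigned* last_serial)
    {
        const struct prop_info* pi = __system_property_find("net.change");
        if (pi == NULL || pi->serial == *last_serial)
            return 0;
        *last_serial = pi->serial;
        return 1;   /* caller should res_ninit() its per-thread state */
    }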
diff --git a/libc/private/__dso_handle.S b/libc/private/__dso_handle.S
index fcebab642..3e801284f 100644
--- a/libc/private/__dso_handle.S
+++ b/libc/private/__dso_handle.S
@@ -32,6 +32,11 @@
#
.section .bss
.align 4
+
+#ifndef CRT_LEGACY_WORKAROUND
+ .hidden __dso_handle
+#endif
+
.globl __dso_handle
__dso_handle:
.long 0
diff --git a/libc/private/__dso_handle_so.S b/libc/private/__dso_handle_so.S
new file mode 100644
index 000000000..77a5d7fba
--- /dev/null
+++ b/libc/private/__dso_handle_so.S
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+# The __dso_handle global variable is used by static
+# C++ constructors and destructors in the binary.
+# See http://www.codesourcery.com/public/cxx-abi/abi.html#dso-dtor
+#
+ .data
+ .align 4
+ .hidden __dso_handle
+ .globl __dso_handle
+__dso_handle:
+ .long __dso_handle
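In C terms, the reason every shared object carries its own hidden __dso_handle is that
static destructors are registered against it, so the runtime can run exactly this DSO's
destructors when it is unloaded; roughly what the compiler emits per the C++ ABI document
referenced above (sketch, not code from this patch):

    extern void* __dso_handle;                 /* this DSO's copy, defined above */
    extern int __cxa_atexit(void (*dtor)(void*), void* obj, void* dso);

    static struct widget { int id; } the_widget;

    static void widget_dtor(void* p) { (void)p; /* ~widget() for the_widget */ }

    /* called from the object's static-initialization code */
    static void register_widget_dtor(void)
    {
        __cxa_atexit(widget_dtor, &the_widget, &__dso_handle);
    }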
diff --git a/libc/private/resolv_cache.h b/libc/private/resolv_cache.h
index cd876fb28..2a5445355 100644
--- a/libc/private/resolv_cache.h
+++ b/libc/private/resolv_cache.h
@@ -30,13 +30,45 @@
struct resolv_cache; /* forward */
-/* get cache instance, can be NULL if cache is disabled
- * (e.g. through an environment variable) */
+/* gets the cache for the default interface. Might be NULL */
extern struct resolv_cache* __get_res_cache(void);
+/* get the cache for a specified interface. Can be NULL */
+extern struct resolv_cache* __get_res_cache_for_iface(const char* ifname);
+
/* this gets called everytime we detect some changes in the DNS configuration
* and will flush the cache */
-extern void _resolv_cache_reset( unsigned generation );
+extern void _resolv_cache_reset( unsigned generation );
+
+/* Gets the address of the n-th name server for the default interface.
+ * Returns the length of the address on success, else 0.
+ * Note: The first name server is at n = 1 */
+extern int _resolv_cache_get_nameserver(int n, char* addr, int addrLen);
+
+/* Gets the address of the n-th name server for a certain interface.
+ * Returns the length of the address on success, else 0.
+ * Note: The first name server is at n = 1 */
+extern int _resolv_cache_get_nameserver_for_iface(const char* ifname, int n,
+ char* addr, int addrLen);
+
+/* Gets addrinfo of the n-th name server associated with an interface.
+ * NULL is returned if no address is found.
+ * Note: The first name server is at n = 1. */
+extern struct addrinfo* _resolv_cache_get_nameserver_addr_for_iface(const char* ifname, int n);
+
+/* Gets addrinfo of the n-th name server associated with the default interface.
+ * NULL is returned if no address is found.
+ * Note: The first name server is at n = 1. */
+extern struct addrinfo* _resolv_cache_get_nameserver_addr(int n);
+
+/* gets the address associated with the default interface */
+extern struct in_addr* _resolv_get_addr_of_default_iface();
+
+/* gets the address associated with the specified interface */
+extern struct in_addr* _resolv_get_addr_of_iface(const char* ifname);
+
+/* Get name of default interface */
+extern char* _resolv_get_default_iface();
typedef enum {
RESOLV_CACHE_UNSUPPORTED, /* the cache can't handle that kind of queries */
diff --git a/libc/stdlib/atexit.c b/libc/stdlib/atexit.c
index 4ba217732..f4bcab99e 100644
--- a/libc/stdlib/atexit.c
+++ b/libc/stdlib/atexit.c
@@ -104,6 +104,7 @@ unlock:
return (ret);
}
+#ifdef CRT_LEGACY_WORKAROUND
/*
* Register a function to be performed at exit.
*/
@@ -112,6 +113,7 @@ atexit(void (*func)(void))
{
return (__cxa_atexit((void (*)(void *))func, NULL, NULL));
}
+#endif
/*
* Call all handlers registered with __cxa_atexit() for the shared
diff --git a/libc/string/memmove.c b/libc/string/memmove.c
index 072104b6c..7c1e9b2fd 100644
--- a/libc/string/memmove.c
+++ b/libc/string/memmove.c
@@ -32,10 +32,10 @@ void *memmove(void *dst, const void *src, size_t n)
{
const char *p = src;
char *q = dst;
- /* We can use the optimized memcpy if the destination is below the
- * source (i.e. q < p), or if it is completely over it (i.e. q >= p+n).
+ /* We can use the optimized memcpy if the destination is completely below the
+ * source (i.e. q+n <= p), or if it is completely over it (i.e. q >= p+n).
*/
- if (__builtin_expect((q < p) || ((size_t)(q - p) >= n), 1)) {
+ if (__builtin_expect((q + n < p) || (q >= p + n), 1)) {
return memcpy(dst, src, n);
} else {
bcopy(src, dst, n);
diff --git a/libc/tools/checksyscalls.py b/libc/tools/checksyscalls.py
index f6182f368..af2360a35 100755
--- a/libc/tools/checksyscalls.py
+++ b/libc/tools/checksyscalls.py
@@ -87,7 +87,11 @@ def process_nr_line(line,dict):
m = re_arm_nr_line.match(line)
if m:
#print "%s = %s" % (m.group(1), m.group(2))
- dict["ARM_"+m.group(1)] = int(m.group(2)) + 0x0f0000
+ try:
+ dict["ARM_"+m.group(1)] = int(m.group(2)) + 0x0f0000
+        except ValueError:
+            print "EXCEPTION: %s = %s" % (m.group(1), m.group(2))
return
m = re_x86_line.match(line)
@@ -121,7 +125,7 @@ if linux_root[-1] == '/':
if len(linux_root) > 8 and linux_root[-8:] == '/include':
linux_root = linux_root[:-8]
-arm_unistd = linux_root + "/include/asm-arm/unistd.h"
+arm_unistd = linux_root + "/arch/arm/include/asm/unistd.h"
if not os.path.exists(arm_unistd):
print "WEIRD: could not locate the ARM unistd.h header file"
print "tried searching in '%s'" % arm_unistd
@@ -132,17 +136,17 @@ if not os.path.exists(arm_unistd):
# with two distinct unistd_32.h and unistd_64.h definition files.
# take care of this here
#
-x86_unistd = linux_root + "/include/asm-i386/unistd_32.h"
+x86_unistd = linux_root + "/arch/x86/include/asm/unistd_32.h"
if not os.path.exists(x86_unistd):
x86_unistd1 = x86_unistd
- x86_unistd = linux_root + "/include/asm-x86/unistd.h"
+ x86_unistd = linux_root + "/arch/x86/include/asm/unistd.h"
if not os.path.exists(x86_unistd):
print "WEIRD: could not locate the i386/x86 unistd.h header file"
print "tried searching in '%s' and '%s'" % (x86_unistd1, x86_unistd)
print "maybe using a different set of kernel headers might help"
sys.exit(1)
-process_header( linux_root+"/include/asm-arm/unistd.h", arm_dict )
+process_header( linux_root+"/arch/arm/include/asm/unistd.h", arm_dict )
process_header( x86_unistd, x86_dict )
# now perform the comparison
diff --git a/libc/tzcode/localtime.c b/libc/tzcode/localtime.c
index 85a913ea6..34892a62d 100644
--- a/libc/tzcode/localtime.c
+++ b/libc/tzcode/localtime.c
@@ -500,6 +500,7 @@ register const int doextend;
if (lseek(fid, off, SEEK_SET) < 0) {
XLOG(( "tzload: could not seek to %d in '%s'\n", off, DATAFILE ));
+ close(fid);
return -1;
}
}
diff --git a/libc/unistd/exec.c b/libc/unistd/exec.c
index cbb98b366..89396ac31 100644
--- a/libc/unistd/exec.c
+++ b/libc/unistd/exec.c
@@ -194,9 +194,9 @@ execvp(const char *name, char * const *argv)
(void)writev(STDERR_FILENO, iov, 3);
continue;
}
- bcopy(p, buf, lp);
+ memcpy(buf, p, lp);
buf[lp] = '/';
- bcopy(name, buf + lp + 1, ln);
+ memcpy(buf + lp + 1, name, ln);
buf[lp + ln + 1] = '\0';
retry: (void)execve(bp, argv, environ);
@@ -216,7 +216,7 @@ retry: (void)execve(bp, argv, environ);
goto done;
memp[0] = "sh";
memp[1] = bp;
- bcopy(argv + 1, memp + 2, cnt * sizeof(char *));
+ memcpy(memp + 2, argv + 1, cnt * sizeof(char *));
(void)execve(_PATH_BSHELL, memp, environ);
goto done;
case ENOMEM:
diff --git a/libc/unistd/open.c b/libc/unistd/open.c
index e8b1c8975..03cba4520 100644
--- a/libc/unistd/open.c
+++ b/libc/unistd/open.c
@@ -35,9 +35,7 @@ int open(const char *pathname, int flags, ...)
{
mode_t mode = 0;
-#if !defined(__i386__)
flags |= O_LARGEFILE;
-#endif
if (flags & O_CREAT)
{
diff --git a/libc/unistd/openat.c b/libc/unistd/openat.c
index 88b39a411..6b7b36738 100644
--- a/libc/unistd/openat.c
+++ b/libc/unistd/openat.c
@@ -35,9 +35,7 @@ int openat(int fd, const char *pathname, int flags, ...)
{
mode_t mode = 0;
-#if !defined(__i386__)
flags |= O_LARGEFILE;
-#endif
if (flags & O_CREAT)
{
diff --git a/libc/unistd/opendir.c b/libc/unistd/opendir.c
index afa3ea0ee..3aa96c97a 100644
--- a/libc/unistd/opendir.c
+++ b/libc/unistd/opendir.c
@@ -238,6 +238,7 @@ int scandir(const char *dir, struct dirent ***namelist,
de_list = (struct dirent **)
malloc(sizeof(struct dirent *)*de_list_size);
if (de_list == NULL) {
+ closedir(d);
return -1;
}
}
@@ -248,7 +249,12 @@ int scandir(const char *dir, struct dirent ***namelist,
de_list_new = (struct dirent **)
realloc(de_list, sizeof(struct dirent *)*de_list_size);
if (de_list_new == NULL) {
+ int i = 0;
+ for (;i < n_elem; i++) {
+ free(de_list[i]);
+ }
free(de_list);
+ closedir(d);
return -1;
}
de_list = de_list_new;
diff --git a/libc/unistd/sigsetmask.c b/libc/unistd/sigsetmask.c
index b98759577..4f4645865 100644
--- a/libc/unistd/sigsetmask.c
+++ b/libc/unistd/sigsetmask.c
@@ -38,6 +38,8 @@ sigsetmask(int mask)
sigset_t the_sigset;
} in, out;
+ in.the_mask = mask;
+
n = sigprocmask(SIG_SETMASK, &in.the_sigset, &out.the_sigset);
if (n)
return n;
diff --git a/libc/zoneinfo/zoneinfo.dat b/libc/zoneinfo/zoneinfo.dat
index 27ca5d0c6..49b99ecd3 100644
--- a/libc/zoneinfo/zoneinfo.dat
+++ b/libc/zoneinfo/zoneinfo.dat
Binary files differ
diff --git a/libc/zoneinfo/zoneinfo.idx b/libc/zoneinfo/zoneinfo.idx
index 09bd15fab..c9eefc64f 100644
--- a/libc/zoneinfo/zoneinfo.idx
+++ b/libc/zoneinfo/zoneinfo.idx
Binary files differ
diff --git a/libc/zoneinfo/zoneinfo.version b/libc/zoneinfo/zoneinfo.version
index 76dcafb8e..c4f49224b 100644
--- a/libc/zoneinfo/zoneinfo.version
+++ b/libc/zoneinfo/zoneinfo.version
@@ -1 +1 @@
-2010k
+2011n
diff --git a/libm/i387/fenv.c b/libm/i387/fenv.c
index 2794faf81..aabe270d5 100644
--- a/libm/i387/fenv.c
+++ b/libm/i387/fenv.c
@@ -153,7 +153,8 @@ feholdexcept(fenv_t *envp)
int
feupdateenv(const fenv_t *envp)
{
- int mxcsr, status;
+ int mxcsr;
+ short status;
__fnstsw(&status);
if (__HAS_SSE())
diff --git a/libm/i387/fenv.h b/libm/i387/fenv.h
deleted file mode 100644
index b124366ac..000000000
--- a/libm/i387/fenv.h
+++ /dev/null
@@ -1,240 +0,0 @@
-/*-
- * Copyright (c) 2004-2005 David Schultz <das@FreeBSD.ORG>
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $FreeBSD: src/lib/msun/i387/fenv.h,v 1.4 2005/03/17 22:21:46 das Exp $
- */
-
-#ifndef _FENV_H_
-#define _FENV_H_
-
-#include <sys/cdefs.h>
-#include <sys/_types.h>
-
-/*
- * To preserve binary compatibility with FreeBSD 5.3, we pack the
- * mxcsr into some reserved fields, rather than changing sizeof(fenv_t).
- */
-typedef struct {
- __uint16_t __control;
- __uint16_t __mxcsr_hi;
- __uint16_t __status;
- __uint16_t __mxcsr_lo;
- __uint32_t __tag;
- char __other[16];
-} fenv_t;
-
-#define __get_mxcsr(env) (((env).__mxcsr_hi << 16) | \
- ((env).__mxcsr_lo))
-#define __set_mxcsr(env, x) do { \
- (env).__mxcsr_hi = (__uint32_t)(x) >> 16; \
- (env).__mxcsr_lo = (__uint16_t)(x); \
-} while (0)
-
-typedef __uint16_t fexcept_t;
-
-/* Exception flags */
-#define FE_INVALID 0x01
-#define FE_DENORMAL 0x02
-#define FE_DIVBYZERO 0x04
-#define FE_OVERFLOW 0x08
-#define FE_UNDERFLOW 0x10
-#define FE_INEXACT 0x20
-#define FE_ALL_EXCEPT (FE_DIVBYZERO | FE_DENORMAL | FE_INEXACT | \
- FE_INVALID | FE_OVERFLOW | FE_UNDERFLOW)
-
-/* Rounding modes */
-#define FE_TONEAREST 0x0000
-#define FE_DOWNWARD 0x0400
-#define FE_UPWARD 0x0800
-#define FE_TOWARDZERO 0x0c00
-#define _ROUND_MASK (FE_TONEAREST | FE_DOWNWARD | \
- FE_UPWARD | FE_TOWARDZERO)
-
-/*
- * As compared to the x87 control word, the SSE unit's control word
- * has the rounding control bits offset by 3 and the exception mask
- * bits offset by 7.
- */
-#define _SSE_ROUND_SHIFT 3
-#define _SSE_EMASK_SHIFT 7
-
-/* After testing for SSE support once, we cache the result in __has_sse. */
-enum __sse_support { __SSE_YES, __SSE_NO, __SSE_UNK };
-extern enum __sse_support __has_sse;
-int __test_sse(void);
-#ifdef __SSE__
-#define __HAS_SSE() 1
-#else
-#define __HAS_SSE() (__has_sse == __SSE_YES || \
- (__has_sse == __SSE_UNK && __test_sse()))
-#endif
-
-__BEGIN_DECLS
-
-/* Default floating-point environment */
-extern const fenv_t __fe_dfl_env;
-#define FE_DFL_ENV (&__fe_dfl_env)
-
-#define __fldcw(__cw) __asm __volatile("fldcw %0" : : "m" (__cw))
-#define __fldenv(__env) __asm __volatile("fldenv %0" : : "m" (__env))
-#define __fnclex() __asm __volatile("fnclex")
-#define __fnstenv(__env) __asm __volatile("fnstenv %0" : "=m" (*(__env)))
-#define __fnstcw(__cw) __asm __volatile("fnstcw %0" : "=m" (*(__cw)))
-#define __fnstsw(__sw) __asm __volatile("fnstsw %0" : "=am" (*(__sw)))
-#define __fwait() __asm __volatile("fwait")
-#define __ldmxcsr(__csr) __asm __volatile("ldmxcsr %0" : : "m" (__csr))
-#define __stmxcsr(__csr) __asm __volatile("stmxcsr %0" : "=m" (*(__csr)))
-
-static __inline int
-feclearexcept(int __excepts)
-{
- fenv_t __env;
- int __mxcsr;
-
- if (__excepts == FE_ALL_EXCEPT) {
- __fnclex();
- } else {
- __fnstenv(&__env);
- __env.__status &= ~__excepts;
- __fldenv(__env);
- }
- if (__HAS_SSE()) {
- __stmxcsr(&__mxcsr);
- __mxcsr &= ~__excepts;
- __ldmxcsr(__mxcsr);
- }
- return (0);
-}
-
-static __inline int
-fegetexceptflag(fexcept_t *__flagp, int __excepts)
-{
- int __mxcsr, __status;
-
- __fnstsw(&__status);
- if (__HAS_SSE())
- __stmxcsr(&__mxcsr);
- else
- __mxcsr = 0;
- *__flagp = (__mxcsr | __status) & __excepts;
- return (0);
-}
-
-int fesetexceptflag(const fexcept_t *__flagp, int __excepts);
-int feraiseexcept(int __excepts);
-
-static __inline int
-fetestexcept(int __excepts)
-{
- int __mxcsr, __status;
-
- __fnstsw(&__status);
- if (__HAS_SSE())
- __stmxcsr(&__mxcsr);
- else
- __mxcsr = 0;
- return ((__status | __mxcsr) & __excepts);
-}
-
-static __inline int
-fegetround(void)
-{
- int __control;
-
- /*
- * We assume that the x87 and the SSE unit agree on the
- * rounding mode. Reading the control word on the x87 turns
- * out to be about 5 times faster than reading it on the SSE
- * unit on an Opteron 244.
- */
- __fnstcw(&__control);
- return (__control & _ROUND_MASK);
-}
-
-static __inline int
-fesetround(int __round)
-{
- int __mxcsr, __control;
-
- if (__round & ~_ROUND_MASK)
- return (-1);
-
- __fnstcw(&__control);
- __control &= ~_ROUND_MASK;
- __control |= __round;
- __fldcw(__control);
-
- if (__HAS_SSE()) {
- __stmxcsr(&__mxcsr);
- __mxcsr &= ~(_ROUND_MASK << _SSE_ROUND_SHIFT);
- __mxcsr |= __round << _SSE_ROUND_SHIFT;
- __ldmxcsr(__mxcsr);
- }
-
- return (0);
-}
-
-int fegetenv(fenv_t *__envp);
-int feholdexcept(fenv_t *__envp);
-
-static __inline int
-fesetenv(const fenv_t *__envp)
-{
- fenv_t __env = *__envp;
- int __mxcsr;
-
- __mxcsr = __get_mxcsr(__env);
- __set_mxcsr(__env, 0xffffffff);
- __fldenv(__env);
- if (__HAS_SSE())
- __ldmxcsr(__mxcsr);
- return (0);
-}
-
-int feupdateenv(const fenv_t *__envp);
-
-#if __BSD_VISIBLE
-
-int feenableexcept(int __mask);
-int fedisableexcept(int __mask);
-
-static __inline int
-fegetexcept(void)
-{
- int __control;
-
- /*
- * We assume that the masks for the x87 and the SSE unit are
- * the same.
- */
- __fnstcw(&__control);
- return (~__control & FE_ALL_EXCEPT);
-}
-
-#endif /* __BSD_VISIBLE */
-
-__END_DECLS
-
-#endif /* !_FENV_H_ */
diff --git a/libm/include/i387/fenv.h b/libm/include/i387/fenv.h
index b124366ac..4281f10eb 100644
--- a/libm/include/i387/fenv.h
+++ b/libm/include/i387/fenv.h
@@ -102,7 +102,7 @@ extern const fenv_t __fe_dfl_env;
#define __fnclex() __asm __volatile("fnclex")
#define __fnstenv(__env) __asm __volatile("fnstenv %0" : "=m" (*(__env)))
#define __fnstcw(__cw) __asm __volatile("fnstcw %0" : "=m" (*(__cw)))
-#define __fnstsw(__sw) __asm __volatile("fnstsw %0" : "=am" (*(__sw)))
+#define __fnstsw(__sw) __asm __volatile("fnstsw %0" : "=a" (*(__sw)))
#define __fwait() __asm __volatile("fwait")
#define __ldmxcsr(__csr) __asm __volatile("ldmxcsr %0" : : "m" (__csr))
#define __stmxcsr(__csr) __asm __volatile("stmxcsr %0" : "=m" (*(__csr)))
@@ -148,7 +148,8 @@ int feraiseexcept(int __excepts);
static __inline int
fetestexcept(int __excepts)
{
- int __mxcsr, __status;
+ int __mxcsr;
+ short __status;
__fnstsw(&__status);
if (__HAS_SSE())
diff --git a/libm/src/s_logb.c b/libm/src/s_logb.c
index 30edb8749..57f91c44d 100644
--- a/libm/src/s_logb.c
+++ b/libm/src/s_logb.c
@@ -36,9 +36,9 @@ logb(double x)
if(ix>=0x7ff00000) return x*x;
if(ix<0x00100000) {
x *= two54; /* convert subnormal x to normal */
- GET_FLOAT_WORD(ix,x);
+ GET_HIGH_WORD(ix,x);
ix &= 0x7fffffff;
- return (float) ((ix>>20)-1023-54);
+ return (double) ((ix>>20)-1023-54);
} else
return (double) ((ix>>20)-1023);
}
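A quick check of the subnormal path after this fix (worked example, not from the patch):

    /* 0x1p-1040 is subnormal; multiplying by two54 (2**54) renormalizes it to
     * 2**-986, so (ix>>20)-1023-54 == -986-54 == -1040 and
     *     logb(0x1p-1040) == -1040.0
     * Previously the word was extracted with GET_FLOAT_WORD (i.e. from x
     * converted to float) and the result was truncated through a float cast. */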
diff --git a/libm/src/s_remquo.c b/libm/src/s_remquo.c
index eee65df4f..37d55e8f1 100644
--- a/libm/src/s_remquo.c
+++ b/libm/src/s_remquo.c
@@ -5,7 +5,7 @@
*
* Developed at SunSoft, a Sun Microsystems, Inc. business.
* Permission to use, copy, modify, and distribute this
- * software is freely granted, provided that this notice
+ * software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
@@ -29,124 +29,123 @@ static const double Zero[] = {0.0, -0.0,};
double
remquo(double x, double y, int *quo)
{
- int32_t n,hx,hy,hz,ix,iy,sx,i;
- u_int32_t lx,ly,lz,q,sxy;
+ int32_t n,hx,hy,hz,ix,iy,sx,i;
+ u_int32_t lx,ly,lz,q,sxy;
- EXTRACT_WORDS(hx,lx,x);
- EXTRACT_WORDS(hy,ly,y);
- sxy = (hx ^ hy) & 0x80000000;
- sx = hx&0x80000000; /* sign of x */
- hx ^=sx; /* |x| */
- hy &= 0x7fffffff; /* |y| */
+ EXTRACT_WORDS(hx,lx,x);
+ EXTRACT_WORDS(hy,ly,y);
+ sxy = (hx ^ hy) & 0x80000000;
+ sx = hx&0x80000000; /* sign of x */
+ hx ^=sx; /* |x| */
+ hy &= 0x7fffffff; /* |y| */
/* purge off exception values */
- if((hy|ly)==0||(hx>=0x7ff00000)|| /* y=0,or x not finite */
- ((hy|((ly|-ly)>>31))>0x7ff00000)) /* or y is NaN */
- return (x*y)/(x*y);
- if(hx<=hy) {
- if((hx<hy)||(lx<ly)) {
- q = 0;
- goto fixup; /* |x|<|y| return x or x-y */
- }
- if(lx==ly) {
- *quo = 1;
- return Zero[(u_int32_t)sx>>31]; /* |x|=|y| return x*0*/
- }
- }
+ if((hy|ly)==0||(hx>=0x7ff00000)|| /* y=0,or x not finite */
+ ((hy|((ly|-ly)>>31))>0x7ff00000)) /* or y is NaN */
+ return (x*y)/(x*y);
+ if(hx<=hy) {
+ if((hx<hy)||(lx<ly)) {
+ q = 0;
+ goto fixup; /* |x|<|y| return x or x-y */
+ }
+ if(lx==ly) {
+ *quo = (sxy ? -1 : 1);
+ return Zero[(u_int32_t)sx>>31]; /* |x|=|y| return x*0*/
+ }
+ }
/* determine ix = ilogb(x) */
- if(hx<0x00100000) { /* subnormal x */
- if(hx==0) {
- for (ix = -1043, i=lx; i>0; i<<=1) ix -=1;
- } else {
- for (ix = -1022,i=(hx<<11); i>0; i<<=1) ix -=1;
- }
- } else ix = (hx>>20)-1023;
+ if(hx<0x00100000) { /* subnormal x */
+ if(hx==0) {
+ for (ix = -1043, i=lx; i>0; i<<=1) ix -=1;
+ } else {
+ for (ix = -1022,i=(hx<<11); i>0; i<<=1) ix -=1;
+ }
+ } else ix = (hx>>20)-1023;
/* determine iy = ilogb(y) */
- if(hy<0x00100000) { /* subnormal y */
- if(hy==0) {
- for (iy = -1043, i=ly; i>0; i<<=1) iy -=1;
- } else {
- for (iy = -1022,i=(hy<<11); i>0; i<<=1) iy -=1;
- }
- } else iy = (hy>>20)-1023;
+ if(hy<0x00100000) { /* subnormal y */
+ if(hy==0) {
+ for (iy = -1043, i=ly; i>0; i<<=1) iy -=1;
+ } else {
+ for (iy = -1022,i=(hy<<11); i>0; i<<=1) iy -=1;
+ }
+ } else iy = (hy>>20)-1023;
/* set up {hx,lx}, {hy,ly} and align y to x */
- if(ix >= -1022)
- hx = 0x00100000|(0x000fffff&hx);
- else { /* subnormal x, shift x to normal */
- n = -1022-ix;
- if(n<=31) {
- hx = (hx<<n)|(lx>>(32-n));
- lx <<= n;
- } else {
- hx = lx<<(n-32);
- lx = 0;
- }
- }
- if(iy >= -1022)
- hy = 0x00100000|(0x000fffff&hy);
- else { /* subnormal y, shift y to normal */
- n = -1022-iy;
- if(n<=31) {
- hy = (hy<<n)|(ly>>(32-n));
- ly <<= n;
- } else {
- hy = ly<<(n-32);
- ly = 0;
- }
- }
-
+ if(ix >= -1022)
+ hx = 0x00100000|(0x000fffff&hx);
+ else { /* subnormal x, shift x to normal */
+ n = -1022-ix;
+ if(n<=31) {
+ hx = (hx<<n)|(lx>>(32-n));
+ lx <<= n;
+ } else {
+ hx = lx<<(n-32);
+ lx = 0;
+ }
+ }
+ if(iy >= -1022)
+ hy = 0x00100000|(0x000fffff&hy);
+ else { /* subnormal y, shift y to normal */
+ n = -1022-iy;
+ if(n<=31) {
+ hy = (hy<<n)|(ly>>(32-n));
+ ly <<= n;
+ } else {
+ hy = ly<<(n-32);
+ ly = 0;
+ }
+ }
/* fix point fmod */
- n = ix - iy;
- q = 0;
- while(n--) {
- hz=hx-hy;lz=lx-ly; if(lx<ly) hz -= 1;
- if(hz<0){hx = hx+hx+(lx>>31); lx = lx+lx;}
- else {hx = hz+hz+(lz>>31); lx = lz+lz; q++;}
- q <<= 1;
- }
- hz=hx-hy;lz=lx-ly; if(lx<ly) hz -= 1;
- if(hz>=0) {hx=hz;lx=lz;q++;}
-
+ n = ix - iy;
+ q = 0;
+ while(n--) {
+ hz=hx-hy;lz=lx-ly; if(lx<ly) hz -= 1;
+ if(hz<0){hx = hx+hx+(lx>>31); lx = lx+lx;}
+ else {hx = hz+hz+(lz>>31); lx = lz+lz; q++;}
+ q <<= 1;
+ }
+ hz=hx-hy;lz=lx-ly; if(lx<ly) hz -= 1;
+ if(hz>=0) {hx=hz;lx=lz;q++;}
/* convert back to floating value and restore the sign */
- if((hx|lx)==0) { /* return sign(x)*0 */
- *quo = (sxy ? -q : q);
- return Zero[(u_int32_t)sx>>31];
- }
- while(hx<0x00100000) { /* normalize x */
- hx = hx+hx+(lx>>31); lx = lx+lx;
- iy -= 1;
- }
- if(iy>= -1022) { /* normalize output */
- hx = ((hx-0x00100000)|((iy+1023)<<20));
- } else { /* subnormal output */
- n = -1022 - iy;
- if(n<=20) {
- lx = (lx>>n)|((u_int32_t)hx<<(32-n));
- hx >>= n;
- } else if (n<=31) {
- lx = (hx<<(32-n))|(lx>>n); hx = sx;
- } else {
- lx = hx>>(n-32); hx = sx;
- }
- }
+ if((hx|lx)==0) { /* return sign(x)*0 */
+ q &= 0x7fffffff;
+ *quo = (sxy ? -q : q);
+ return Zero[(u_int32_t)sx>>31];
+ }
+ while(hx<0x00100000) { /* normalize x */
+ hx = hx+hx+(lx>>31); lx = lx+lx;
+ iy -= 1;
+ }
+ if(iy>= -1022) { /* normalize output */
+ hx = ((hx-0x00100000)|((iy+1023)<<20));
+ } else { /* subnormal output */
+ n = -1022 - iy;
+ if(n<=20) {
+ lx = (lx>>n)|((u_int32_t)hx<<(32-n));
+ hx >>= n;
+ } else if (n<=31) {
+ lx = (hx<<(32-n))|(lx>>n); hx = 0;
+ } else {
+ lx = hx>>(n-32); hx = 0;
+ }
+ }
fixup:
- INSERT_WORDS(x,hx,lx);
- y = fabs(y);
- if (y < 0x1p-1021) {
- if (x+x>y || (x+x==y && (q & 1))) {
- q++;
- x-=y;
- }
- } else if (x>0.5*y || (x==0.5*y && (q & 1))) {
- q++;
- x-=y;
- }
- GET_HIGH_WORD(hx,x);
- SET_HIGH_WORD(x,hx^sx);
- q &= 0x7fffffff;
- *quo = (sxy ? -q : q);
- return x;
+ INSERT_WORDS(x,hx,lx);
+ y = fabs(y);
+ if (y < 0x1p-1021) {
+ if (x+x>y || (x+x==y && (q & 1))) {
+ q++;
+ x-=y;
+ }
+ } else if (x>0.5*y || (x==0.5*y && (q & 1))) {
+ q++;
+ x-=y;
+ }
+ GET_HIGH_WORD(hx,x);
+ SET_HIGH_WORD(x,hx^sx);
+ q &= 0x7fffffff;
+ *quo = (sxy ? -q : q);
+ return x;
}
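Beyond the reindentation, the double-precision diff makes two substantive changes: the early return for an exactly-zero remainder now masks the accumulated quotient bits with 0x7fffffff before negating, and the subnormal-output path writes 0 instead of sx into hx, so x stays non-negative through the fixup comparisons and the sign is applied once by the final SET_HIGH_WORD(x,hx^sx). A minimal sketch of the remquo() contract these paths implement (illustrative values, per C99 semantics, not taken from the commit):

    #include <math.h>
    #include <stdio.h>

    int main(void) {
        int q;
        /* remquo returns x - n*y with n the integer nearest x/y (ties to even);
         * *quo receives at least the low 3 bits of n, with the sign of x/y. */
        double r = remquo(7.0, 2.0, &q);   /* 7/2 = 3.5 rounds to n = 4: r = -1.0, q = 4 */
        printf("r=%g q=%d\n", r, q);
        r = remquo(-7.0, 2.0, &q);         /* x/y < 0, so the stored quotient is -4 */
        printf("r=%g q=%d\n", r, q);
        return 0;
    }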
diff --git a/libm/src/s_remquof.c b/libm/src/s_remquof.c
index 5d722ceaf..36269a622 100644
--- a/libm/src/s_remquof.c
+++ b/libm/src/s_remquof.c
@@ -5,7 +5,7 @@
*
* Developed at SunSoft, a Sun Microsystems, Inc. business.
* Permission to use, copy, modify, and distribute this
- * software is freely granted, provided that this notice
+ * software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
@@ -29,93 +29,94 @@ static const float Zero[] = {0.0, -0.0,};
float
remquof(float x, float y, int *quo)
{
- int32_t n,hx,hy,hz,ix,iy,sx,i;
- u_int32_t q,sxy;
+ int32_t n,hx,hy,hz,ix,iy,sx,i;
+ u_int32_t q,sxy;
- GET_FLOAT_WORD(hx,x);
- GET_FLOAT_WORD(hy,y);
- sxy = (hx ^ hy) & 0x80000000;
- sx = hx&0x80000000; /* sign of x */
- hx ^=sx; /* |x| */
- hy &= 0x7fffffff; /* |y| */
+ GET_FLOAT_WORD(hx,x);
+ GET_FLOAT_WORD(hy,y);
+ sxy = (hx ^ hy) & 0x80000000;
+ sx = hx&0x80000000; /* sign of x */
+ hx ^=sx; /* |x| */
+ hy &= 0x7fffffff; /* |y| */
/* purge off exception values */
- if(hy==0||hx>=0x7f800000||hy>0x7f800000) /* y=0,NaN;or x not finite */
- return (x*y)/(x*y);
- if(hx<hy) {
- q = 0;
- goto fixup; /* |x|<|y| return x or x-y */
- } else if(hx==hy) {
- *quo = 1;
- return Zero[(u_int32_t)sx>>31]; /* |x|=|y| return x*0*/
- }
+ if(hy==0||hx>=0x7f800000||hy>0x7f800000) /* y=0,NaN;or x not finite */
+ return (x*y)/(x*y);
+ if(hx<hy) {
+ q = 0;
+ goto fixup; /* |x|<|y| return x or x-y */
+ } else if(hx==hy) {
+ *quo = (sxy ? -1 : 1);
+ return Zero[(u_int32_t)sx>>31]; /* |x|=|y| return x*0*/
+ }
/* determine ix = ilogb(x) */
- if(hx<0x00800000) { /* subnormal x */
- for (ix = -126,i=(hx<<8); i>0; i<<=1) ix -=1;
- } else ix = (hx>>23)-127;
+ if(hx<0x00800000) { /* subnormal x */
+ for (ix = -126,i=(hx<<8); i>0; i<<=1) ix -=1;
+ } else ix = (hx>>23)-127;
/* determine iy = ilogb(y) */
- if(hy<0x00800000) { /* subnormal y */
- for (iy = -126,i=(hy<<8); i>0; i<<=1) iy -=1;
- } else iy = (hy>>23)-127;
+ if(hy<0x00800000) { /* subnormal y */
+ for (iy = -126,i=(hy<<8); i>0; i<<=1) iy -=1;
+ } else iy = (hy>>23)-127;
/* set up {hx,lx}, {hy,ly} and align y to x */
- if(ix >= -126)
- hx = 0x00800000|(0x007fffff&hx);
- else { /* subnormal x, shift x to normal */
- n = -126-ix;
- hx <<= n;
- }
- if(iy >= -126)
- hy = 0x00800000|(0x007fffff&hy);
- else { /* subnormal y, shift y to normal */
- n = -126-iy;
- hy <<= n;
- }
+ if(ix >= -126)
+ hx = 0x00800000|(0x007fffff&hx);
+ else { /* subnormal x, shift x to normal */
+ n = -126-ix;
+ hx <<= n;
+ }
+ if(iy >= -126)
+ hy = 0x00800000|(0x007fffff&hy);
+ else { /* subnormal y, shift y to normal */
+ n = -126-iy;
+ hy <<= n;
+ }
/* fix point fmod */
- n = ix - iy;
- q = 0;
- while(n--) {
- hz=hx-hy;
- if(hz<0) hx = hx << 1;
- else {hx = hz << 1; q++;}
- q <<= 1;
- }
- hz=hx-hy;
- if(hz>=0) {hx=hz;q++;}
+ n = ix - iy;
+ q = 0;
+ while(n--) {
+ hz=hx-hy;
+ if(hz<0) hx = hx << 1;
+ else {hx = hz << 1; q++;}
+ q <<= 1;
+ }
+ hz=hx-hy;
+ if(hz>=0) {hx=hz;q++;}
/* convert back to floating value and restore the sign */
- if(hx==0) { /* return sign(x)*0 */
- *quo = (sxy ? -q : q);
- return Zero[(u_int32_t)sx>>31];
- }
- while(hx<0x00800000) { /* normalize x */
- hx <<= 1;
- iy -= 1;
- }
- if(iy>= -126) { /* normalize output */
- hx = ((hx-0x00800000)|((iy+127)<<23));
- } else { /* subnormal output */
- n = -126 - iy;
- hx >>= n;
- }
+ if(hx==0) { /* return sign(x)*0 */
+ q &= 0x7fffffff;
+ *quo = (sxy ? -q : q);
+ return Zero[(u_int32_t)sx>>31];
+ }
+ while(hx<0x00800000) { /* normalize x */
+ hx <<= 1;
+ iy -= 1;
+ }
+ if(iy>= -126) { /* normalize output */
+ hx = ((hx-0x00800000)|((iy+127)<<23));
+ } else { /* subnormal output */
+ n = -126 - iy;
+ hx >>= n;
+ }
fixup:
- SET_FLOAT_WORD(x,hx);
- y = fabsf(y);
- if (y < 0x1p-125f) {
- if (x+x>y || (x+x==y && (q & 1))) {
- q++;
- x-=y;
- }
- } else if (x>0.5f*y || (x==0.5f*y && (q & 1))) {
- q++;
- x-=y;
- }
- GET_FLOAT_WORD(hx,x);
- SET_FLOAT_WORD(x,hx^sx);
- q &= 0x7fffffff;
- *quo = (sxy ? -q : q);
- return x;
+ SET_FLOAT_WORD(x,hx);
+ y = fabsf(y);
+ if (y < 0x1p-125f) {
+ if (x+x>y || (x+x==y && (q & 1))) {
+ q++;
+ x-=y;
+ }
+ } else if (x>0.5f*y || (x==0.5f*y && (q & 1))) {
+ q++;
+ x-=y;
+ }
+ GET_FLOAT_WORD(hx,x);
+ SET_FLOAT_WORD(x,hx^sx);
+ q &= 0x7fffffff;
+ *quo = (sxy ? -q : q);
+ return x;
}
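The float version gets the same restructuring plus one behavioural fix: when |x| == |y| the quotient has magnitude 1, and *quo is now given the sign of x/y (sxy) instead of being hard-coded to 1. A small example of the case the old code answered wrongly (expected values follow from C99 remquof semantics; they are not quoted from the commit):

    #include <math.h>
    #include <stdio.h>

    int main(void) {
        int q;
        float r = remquof(2.0f, -2.0f, &q);   /* x/y == -1, so q should be -1; old code stored +1 */
        printf("r=%g q=%d\n", (double)r, q);  /* r is +0.0 (sign of x), q is -1 */
        return 0;
    }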
diff --git a/libthread_db/Android.mk b/libthread_db/Android.mk
index 922b9cf1b..af506edd9 100644
--- a/libthread_db/Android.mk
+++ b/libthread_db/Android.mk
@@ -22,12 +22,7 @@ include $(CLEAR_VARS)
LOCAL_WHOLE_STATIC_LIBRARIES := libthread_db
LOCAL_MODULE:=libthread_db
LOCAL_SHARED_LIBRARIES := libdl libc
-
-# NOTE: Using --no-undefined results in a missing symbol that is defined inside
-# gdbserver and is resolved at runtime. Since there is no library containing
-# this symbol that we can link against, set LOCAL_ALLOW_UNDEFINED_SYMBOLS so
-# that --no-undefined is removed from the linker flags.
-LOCAL_ALLOW_UNDEFINED_SYMBOLS := true
+LOCAL_ALLOW_UNDEFINED_SYMBOLS := false
LOCAL_SYSTEM_SHARED_LIBRARIES :=
include $(BUILD_SHARED_LIBRARY)
diff --git a/libthread_db/include/sys/procfs.h b/libthread_db/include/sys/procfs.h
new file mode 100644
index 000000000..0ae7a0bd9
--- /dev/null
+++ b/libthread_db/include/sys/procfs.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ */
+
+#ifndef _SYS_PROCFS_H
+#define _SYS_PROCFS_H
+
+#include <sys/types.h>
+
+
+#ifdef __cplusplus
+extern "C"{
+#endif
+
+typedef pid_t lwpid_t;
+typedef void *psaddr_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/libthread_db/include/thread_db.h b/libthread_db/include/thread_db.h
index 1b36cb2b4..1ed8ffca8 100644
--- a/libthread_db/include/thread_db.h
+++ b/libthread_db/include/thread_db.h
@@ -9,9 +9,7 @@
#include <signal.h>
#include <stdint.h>
#include <sys/types.h>
-
-typedef void *psaddr_t;
-typedef pid_t lwpid_t;
+#include <sys/procfs.h>
#define TD_THR_ANY_USER_FLAGS 0xffffffff
#define TD_THR_LOWEST_PRIORITY -20
@@ -151,6 +149,10 @@ extern td_err_e td_thr_event_enable(td_thrhandle_t const * handle,
extern td_err_e td_ta_thr_iter(td_thragent_t const * agent, td_thr_iter_f * func, void * cookie,
td_thr_state_e state, int32_t prio, sigset_t * sigmask, uint32_t user_flags);
+extern td_err_e td_thr_event_enable(td_thrhandle_t const * handle, td_event_e event);
+
+extern td_err_e td_thr_get_info(td_thrhandle_t const * handle, td_thrinfo_t * info);
+
extern char const ** td_symbol_list(void);
extern td_err_e td_thr_tls_get_addr(const td_thrhandle_t * th,
diff --git a/libthread_db/libthread_db.c b/libthread_db/libthread_db.c
index 2cf4d3856..86e1cf443 100644
--- a/libthread_db/libthread_db.c
+++ b/libthread_db/libthread_db.c
@@ -81,6 +81,25 @@ _event_getmsg_helper(td_thrhandle_t const * handle, void * bkpt_addr)
{
void * pc;
+#ifdef __i386__
+ /* Get the eip from offset 12*4 = 48 as defined in the struct
+ * user_regs_struct in user_32.h
+ */
+ pc = (void *)ptrace(PTRACE_PEEKUSR, handle->tid, (void *)48 /* eip */, NULL);
+ /* FIXME - pc is a non-decremented breakpoint address, hence the
+ * addition of 1 on test. This seems to work for the thread hook
+ * function in libc.so but should be properly fixed.
+ */
+ if (pc == ((int)bkpt_addr + 1)) {
+        /* The hook function takes the id of the new thread as its first
+ * param, so grab it from ecx at offset 4 in struct user_regs_struct
+ * (using fastcall convention for x86)
+ */
+ gEventMsgHandle.pid = ptrace(PTRACE_PEEKUSR, handle->tid, (void *)4 /* ecx */, NULL);
+ gEventMsgHandle.tid = gEventMsgHandle.pid;
+ return 0x42;
+ }
+#else
pc = (void *)ptrace(PTRACE_PEEKUSR, handle->tid, (void *)60 /* r15/pc */, NULL);
if (pc == bkpt_addr) {
@@ -90,6 +109,7 @@ _event_getmsg_helper(td_thrhandle_t const * handle, void * bkpt_addr)
gEventMsgHandle.tid = gEventMsgHandle.pid;
return 0x42;
}
+#endif
return 0;
}
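The 48 and 4 in the new __i386__ branch are byte offsets into the 32-bit user_regs_struct: eip is the 13th long (12*4) and ecx the 2nd (1*4), which is what PTRACE_PEEKUSR reads from the user area. A hedged way to sanity-check those offsets, assuming the glibc field names for 32-bit x86 in <sys/user.h>:

    #include <assert.h>
    #include <stddef.h>
    #include <sys/user.h>

    int main(void) {
        /* On 32-bit x86 the saved registers sit at the start of the user area,
         * so these offsetof() values are the PTRACE_PEEKUSR addresses above. */
        assert(offsetof(struct user_regs_struct, ecx) == 4);
        assert(offsetof(struct user_regs_struct, eip) == 48);
        return 0;
    }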
@@ -156,7 +176,7 @@ td_ta_event_addr(td_thragent_t const * agent, td_event_e event, td_notify_t * no
{
int32_t err;
- /*
+ /*
* This is nasty, ps_pglobal_lookup is implemented in gdbserver and looks up
 * the symbol from its cache, which is populated at start time with the
* symbols returned from td_symbol_list via calls back to the host.
diff --git a/linker/Android.mk b/linker/Android.mk
index 5ab48a09c..0cbaf366a 100644
--- a/linker/Android.mk
+++ b/linker/Android.mk
@@ -62,6 +62,7 @@ LOCAL_CFLAGS += -DANDROID_ARM_LINKER
else
ifeq ($(TARGET_ARCH),x86)
LOCAL_CFLAGS += -DANDROID_X86_LINKER
+ LOCAL_CFLAGS += -I$(LOCAL_PATH)/../libc/arch-x86/bionic
else
ifeq ($(TARGET_ARCH),sh)
LOCAL_CFLAGS += -DANDROID_SH_LINKER
diff --git a/linker/arch/x86/begin.S b/linker/arch/x86/begin.S
index d8a39ca32..5be59cb15 100644
--- a/linker/arch/x86/begin.S
+++ b/linker/arch/x86/begin.S
@@ -44,9 +44,5 @@ _start:
popl %esp
jmp *%eax
-.section .ctors, "wa"
-.globl __CTOR_LIST__
-
-__CTOR_LIST__:
- .long -1
+#include "__stack_chk_fail_local.S"
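begin.S drops its local __CTOR_LIST__ stub and instead pulls in __stack_chk_fail_local.S, which the new -I flag added to linker/Android.mk above makes findable. __stack_chk_fail_local is the hidden helper that i386 stack-protector code calls in place of the global __stack_chk_fail, so position-independent code needs no PLT setup on the failure path; in C the usual pattern looks roughly like this (an illustration of the convention, not the contents of the bionic assembly file):

    /* Hidden visibility: each module gets its own copy and the call is direct. */
    extern void __stack_chk_fail(void);

    __attribute__((visibility("hidden")))
    void __stack_chk_fail_local(void)
    {
        __stack_chk_fail();   /* reports the smashed canary and aborts */
    }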
diff --git a/linker/dlfcn.c b/linker/dlfcn.c
index a36b42ccf..429c588f6 100644
--- a/linker/dlfcn.c
+++ b/linker/dlfcn.c
@@ -42,7 +42,7 @@ static const char *dl_errors[] = {
#define likely(expr) __builtin_expect (expr, 1)
#define unlikely(expr) __builtin_expect (expr, 0)
-static pthread_mutex_t dl_lock = PTHREAD_MUTEX_INITIALIZER;
+pthread_mutex_t dl_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER;
static void set_dlerror(int err)
{
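dl_lock loses its static qualifier, presumably so other parts of the dynamic linker can take the same lock, and becomes recursive: the thread already holding it can re-enter the dl* entry points (for instance a constructor run during dlopen that itself calls dlopen or dlsym) without deadlocking. A portable sketch of the same idea using pthread mutex attributes; the diff relies on bionic's static PTHREAD_RECURSIVE_MUTEX_INITIALIZER instead:

    #include <pthread.h>

    static pthread_mutex_t dl_lock;

    static void dl_lock_init(void)
    {
        pthread_mutexattr_t attr;
        pthread_mutexattr_init(&attr);
        /* Recursive: the owning thread may lock again; it must unlock as many
         * times as it locked before other threads can acquire the mutex. */
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
        pthread_mutex_init(&dl_lock, &attr);
        pthread_mutexattr_destroy(&attr);
    }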
diff --git a/linker/linker.c b/linker/linker.c
index bb31703bf..6a80ce7da 100644
--- a/linker/linker.c
+++ b/linker/linker.c
@@ -438,9 +438,16 @@ static unsigned elfhash(const char *_name)
while(*name) {
h = (h << 4) + *name++;
g = h & 0xf0000000;
- h ^= g;
+ /* The hash algorithm in the ELF ABI is as follows:
+ * if (g != 0)
+ * h ^=g >> 24;
+ * h &= ~g;
+ * But we can use the equivalent and faster implementation:
+ */
h ^= g >> 24;
}
+ /* Lift the operation out of the inner loop */
+ h &= 0x0fffffff;
return h;
}
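The elfhash() change leans on the property the new comment states: g only ever holds bits from h's top nibble, so the ABI's h &= ~g just clears that nibble, and since the next iteration's h << 4 discards the top nibble anyway, the clearing can be deferred to one mask after the loop. A hedged side-by-side check of the two formulations (the reference form follows the SysV ABI; names and test strings are illustrative):

    #include <assert.h>

    static unsigned elfhash_ref(const unsigned char *name)  /* SysV ABI reference */
    {
        unsigned h = 0, g;
        while (*name) {
            h = (h << 4) + *name++;
            g = h & 0xf0000000;
            if (g) h ^= g >> 24;
            h &= ~g;
        }
        return h;
    }

    static unsigned elfhash_new(const unsigned char *name)  /* form used by the patch */
    {
        unsigned h = 0, g;
        while (*name) {
            h = (h << 4) + *name++;
            g = h & 0xf0000000;
            h ^= g >> 24;
        }
        return h & 0x0fffffff;
    }

    int main(void) {
        const char *s[] = { "printf", "__libc_init", "dl_iterate_phdr", "" };
        for (int i = 0; i < 4; i++)
            assert(elfhash_ref((const unsigned char *)s[i]) ==
                   elfhash_new((const unsigned char *)s[i]));
        return 0;
    }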