This is the mail archive of the newlib@sourceware.org mailing list for the newlib project.



[PATCH, ARM] memchr for ARM


Please find below an ARM optimised memchr; it needs
Thumb2 and at least ARMv6T2. Where Thumb2 isn't available, or the
architecture is older than that, it simply includes the generic C version.

It's been tested on the arm-sim target, and the same assembly
has also been tested in eglibc on real hardware under Linux.
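
For anyone who wants to reproduce the testing, a check along these
lines (a minimal sketch, not the exact harness used) compares the
routine against a byte-by-byte reference over a range of alignments,
lengths and match positions:

#include <string.h>
#include <stdio.h>

/* Naive byte-by-byte reference to compare against.  */
static void *
ref_memchr (const void *s, int c, size_t n)
{
  const unsigned char *p = s;
  while (n--)
    {
      if (*p == (unsigned char) c)
        return (void *) p;
      p++;
    }
  return NULL;
}

int
main (void)
{
  static unsigned char buf[256];
  size_t align, len, pos;
  void *want, *got;
  int errors = 0;

  for (align = 0; align < 8; align++)
    for (len = 0; len < 64; len++)
      for (pos = 0; pos <= len; pos++)
        {
          memset (buf, 'a', sizeof buf);
          if (pos < len)
            buf[align + pos] = 'b';   /* single match inside the range */
          want = ref_memchr (buf + align, 'b', len);
          got = memchr (buf + align, 'b', len);
          if (want != got)
            {
              printf ("FAIL align=%u len=%u pos=%u\n",
                      (unsigned) align, (unsigned) len, (unsigned) pos);
              errors++;
            }
        }
  if (errors)
    printf ("%d failures\n", errors);
  else
    printf ("all ok\n");
  return errors != 0;
}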

Performance testing (on a 1GHz A9) shows it's faster
than the plain newlib code in most cases, and faster than the xscale
version in almost all cases; see:

https://wiki.linaro.org/WorkingGroups/ToolChain/Benchmarks/InitialMemchr?action=AttachFile&do=view&target=sizes-memchr-08.png

('this' on the graph is the routine attached).
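
If you want to reproduce the shape of that curve on other hardware, a
timing loop along these lines is enough (again only a sketch, with
arbitrary sizes and iteration counts, assuming a Linux host with
clock_gettime; link with -lrt on older glibc):

#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int
main (void)
{
  enum { MAXSIZE = 1 << 20, ITERS = 2000 };
  unsigned char *buf = malloc (MAXSIZE);
  size_t size;

  if (!buf)
    return 1;
  memset (buf, 'a', MAXSIZE);

  /* Put the match at the very end so the whole buffer is scanned on
     every call; that measures throughput rather than the early-match
     path.  */
  for (size = 16; size <= MAXSIZE; size *= 2)
    {
      struct timespec t0, t1;
      double ns;
      int i;

      buf[size - 1] = 'b';
      clock_gettime (CLOCK_MONOTONIC, &t0);
      for (i = 0; i < ITERS; i++)
        if (memchr (buf, 'b', size) == NULL)
          abort ();   /* keeps the call from being optimised away */
      clock_gettime (CLOCK_MONOTONIC, &t1);
      buf[size - 1] = 'a';

      ns = (t1.tv_sec - t0.tv_sec) * 1e9 + (t1.tv_nsec - t0.tv_nsec);
      printf ("%8lu bytes: %6.2f bytes/ns\n",
              (unsigned long) size, (double) size * ITERS / ns);
    }
  free (buf);
  return 0;
}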

I wasn't sure whether the right thing was to include the Makefile.in
diff or just let you regenerate it on release - in the end I included it.

Thanks in advance,

Dave

2011-10-10  Dr David Alan Gilbert  <david.gilbert@linaro.org>
  * libc/machine/arm/Makefile.am (lib_a_SOURCES): Add memchr.c.
  * libc/machine/arm/memchr.c: New file.
  * libc/machine/arm/Makefile.in: Manually add the same change as in Makefile.am.

diff -urN src.orig/newlib/libc/machine/arm/Makefile.am src/newlib/libc/machine/arm/Makefile.am
--- src.orig/newlib/libc/machine/arm/Makefile.am	2009-01-22 00:02:35.000000000 +0000
+++ src/newlib/libc/machine/arm/Makefile.am	2011-10-06 15:48:55.000000000 +0100
@@ -8,7 +8,7 @@
 
 noinst_LIBRARIES = lib.a
 
-lib_a_SOURCES = setjmp.S access.c strlen.c strcmp.c strcpy.c
+lib_a_SOURCES = setjmp.S access.c strlen.c strcmp.c strcpy.c memchr.c
 lib_a_CCASFLAGS=$(AM_CCASFLAGS)
 lib_a_CFLAGS = $(AM_CFLAGS)
 
diff -urN src.orig/newlib/libc/machine/arm/Makefile.in src/newlib/libc/machine/arm/Makefile.in
--- src.orig/newlib/libc/machine/arm/Makefile.in	2010-12-16 21:58:43.000000000 +0000
+++ src/newlib/libc/machine/arm/Makefile.in	2011-10-06 15:57:12.000000000 +0100
@@ -54,7 +54,7 @@
 lib_a_LIBADD =
 am_lib_a_OBJECTS = lib_a-setjmp.$(OBJEXT) lib_a-access.$(OBJEXT) \
 	lib_a-strlen.$(OBJEXT) lib_a-strcmp.$(OBJEXT) \
-	lib_a-strcpy.$(OBJEXT)
+	lib_a-strcpy.$(OBJEXT) lib_a-memchr.$(OBJEXT)
 lib_a_OBJECTS = $(am_lib_a_OBJECTS)
 DEFAULT_INCLUDES = -I.@am__isrc@
 depcomp =
@@ -174,7 +175,7 @@
 INCLUDES = $(NEWLIB_CFLAGS) $(CROSS_CFLAGS) $(TARGET_CFLAGS)
 AM_CCASFLAGS = $(INCLUDES)
 noinst_LIBRARIES = lib.a
-lib_a_SOURCES = setjmp.S access.c strlen.c strcmp.c strcpy.c
+lib_a_SOURCES = setjmp.S access.c strlen.c strcmp.c strcpy.c memchr.c
 lib_a_CCASFLAGS = $(AM_CCASFLAGS)
 lib_a_CFLAGS = $(AM_CFLAGS)
 ACLOCAL_AMFLAGS = -I ../../.. -I ../../../..
@@ -273,6 +274,12 @@
 lib_a-strcpy.obj: strcpy.c
 	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lib_a_CFLAGS) $(CFLAGS) -c -o lib_a-strcpy.obj `if test -f 'strcpy.c'; then $(CYGPATH_W) 'strcpy.c'; else $(CYGPATH_W) '$(srcdir)/strcpy.c'; fi`
 
+lib_a-memchr.o: memchr.c
+	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lib_a_CFLAGS) $(CFLAGS) -c -o lib_a-memchr.o `test -f 'memchr.c' || echo '$(srcdir)/'`memchr.c
+
+lib_a-memchr.obj: memchr.c
+	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lib_a_CFLAGS) $(CFLAGS) -c -o lib_a-memchr.obj `if test -f 'memchr.c'; then $(CYGPATH_W) 'memchr.c'; else $(CYGPATH_W) '$(srcdir)/memchr.c'; fi`
+
 ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
 	list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
 	unique=`for i in $$list; do \
diff -urN src.orig/newlib/libc/machine/arm/memchr.c src/newlib/libc/machine/arm/memchr.c
--- src.orig/newlib/libc/machine/arm/memchr.c	1970-01-01 01:00:00.000000000 +0100
+++ src/newlib/libc/machine/arm/memchr.c	2011-10-10 17:01:00.000000000 +0100
@@ -0,0 +1,172 @@
+/* Copyright (c) 2010-2011, Linaro Limited
+   All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions
+   are met:
+
+      * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+
+      * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in the
+      documentation and/or other materials provided with the distribution.
+
+      * Neither the name of Linaro Limited nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   Written by Dave Gilbert <david.gilbert@linaro.org>
+
+   This memchr routine is optimised on a Cortex-A9 and should work on
+   all ARMv7 processors.   It has a fast path for short sizes, and has
+   an optimised path for large data sets; the worst case is finding the
+   match early in a large data set. */
+
+/* 2011-02-07 david.gilbert@linaro.org
+    Extracted from local git a5b438d861
+   2011-07-14 david.gilbert@linaro.org
+    Import endianness fix from local git ea786f1b
+   2011-10-06 david.gilbert@linaro.org
+    Import into Newlib from cortex-strings bzr rev 63 */
+
+#include "arm_asm.h"
+
+/* The code makes use of cbz, cbnz (in a way that's difficult to ifdef around)
+   so it's thumb2 only, and uses uadd8/sel that are 6T2 (Thumb) and 7a */
+#if !defined( __thumb2__ ) || !(defined(_ISA_ARM_7) || \
+	(defined(__ARM_ARCH_6T2__)))
+  /* Fall back to generic C code */
+#include "../../string/memchr.c"
+#else
+
+
+#include <sys/types.h>
+
+/* this lets us check a flag in a 00/ff byte easily in either endianness */
+#ifdef __ARMEB__
+#define CHARTSTMASK(c) "1<<(31-(" #c "*8))"
+#else
+#define CHARTSTMASK(c) "1<<(" #c "*8)"
+#endif
+
+/* -------------------------------------------------------------------------- */
+void*
+__attribute__((naked)) memchr(const void* s, int c, size_t n)
+{
+  asm(
+	/* r0 = start of memory to scan
+	   r1 = character to look for
+	   r2 = length
+	   returns r0 = pointer to character or NULL if not found */
+	"and	r1,r1,#0xff\n\t"	/* Don't think we can trust the caller
+					   to actually pass a char */
+
+	"cmp	r2,#16\n\t"		/* If it's short don't bother with
+					   anything clever */
+	"blt	20f\n\t"
+
+	"tst	r0, #7\n\t"		/* already aligned? skip the next bit */
+	"beq	10f\n"
+
+	/* Work up to an aligned point */
+"5:\n\t"
+	"ldrb	r3, [r0],#1\n\t"
+	"subs	r2, r2, #1\n\t"
+	"cmp	r3, r1\n\t"
+	"beq	50f\n\t"		/* If it matches exit found */
+	"tst	r0, #7\n\t"
+	"cbz	r2, 40f\n\t"		/* If we run off the end, exit !found */
+	"bne	5b\n"			/* If not aligned yet, do next byte */
+	
+"10:\n\t"
+	/* At this point, we are aligned, we know we have at least 8 bytes
+	   to work with */
+	"push	{r4,r5,r6,r7}\n\t"
+	"orr	r1, r1, r1, lsl #8\n\t"	/* expand the match word to all bytes */
+	"orr	r1, r1, r1, lsl #16\n\t"
+	"bic	r4, r2, #7\n\t"		/* No of double words to work with */
+	"mvns	r7, #0\n\t"		/* all F's */
+	"movs	r3, #0\n"
+	
+"15:\n\t"
+	"ldmia	r0!,{r5,r6}\n\t"
+	"subs	r4, r4, #8\n\t"
+	"eor	r5,r5, r1\n\t"	/* Make r5,r6 00's where bytes match target */
+	"eor	r6,r6, r1\n\t"
+	"uadd8	r5, r5, r7\n\t"	/* Par add 0xff, sets GE bits for bytes!=0 */
+	"sel	r5, r3, r7\n\t"	/* bytes are 00 for non-00 bytes,
+				   or ff for 00 bytes - NOTE INVERSION */
+	"uadd8	r6, r6, r7\n\t"	/* Par add 0xff, sets GE bits for bytes!=0 */
+	"sel	r6, r5, r7\n\t"	/* chained....bytes are 00 for non-00 bytes,
+				   or ff for 00 bytes - NOTE INVERSION */
+	"cbnz	r6, 60f\n\t"
+	"bne	15b\n\t"	/* (Flags from the subs above) */
+
+	"pop	{r4,r5,r6,r7}\n\t"
+	"and	r1,r1,#0xff\n\t" /* r1 back to a single character from above */
+	"and	r2,r2,#7\n"	/* Leave the count remaining as the number
+				   after the double words have been done */
+ 
+"20:\n\t"
+	"cbz	r2, 40f\n\t"	/* 0 length or hit the end already -> !found */
+
+"21:\n\t"  /* Post aligned section, or just a short call */
+	"ldrb	r3,[r0],#1\n\t"
+	"subs	r2,r2,#1\n\t"
+	"eor	r3,r3,r1\n\t"	/* r3 = 0 if match */
+	"cbz	r3, 50f\n\t"
+	"bne	21b\n"		/* on r2 flags */
+
+"40:\n\t"
+	"movs	r0,#0\n\t"	/* not found */
+	"bx	lr\n"
+
+"50:\n\t"
+	"subs	r0,r0,#1\n\t"	/* found */
+	"bx	lr\n"
+
+"60:\n\t"
+	/* We're here because the fast path found a hit - now we have to track
+	   down exactly which word it was
+	   r0 points to the start of the double word after the one that
+		was tested
+	   r5 has the 00/ff pattern for the first word, r6 has the chained
+		value */
+	"cmp	r5, #0\n\t"
+	"itte	eq\n\t"
+	"moveq	r5, r6\n\t"	/* the end is in the 2nd word */
+	"subeq	r0,r0,#3\n\t"	/* Points to 2nd byte of 2nd word */
+	"subne	r0,r0,#7\n\t"	/* or 2nd byte of 1st word */
+
+	/* r0 currently points to the 3rd byte of the word containing the hit */
+	"tst	r5, #" CHARTSTMASK(0) "\n\t"	/* 1st character */
+	"bne	61f\n\t"
+	"adds	r0,r0,#1\n\t"
+	"tst	r5, #" CHARTSTMASK(1) "\n\t"	/* 2nd character */
+	"ittt	eq\n\t"
+	"addeq	r0,r0,#1\n\t"
+	"tsteq	r5, # (3<<15)\n\t"	/* 2nd & 3rd character */
+	/* If not the 3rd must be the last one */
+	"addeq	r0,r0,#1\n"
+
+"61:\n\t"
+	"pop	{r4,r5,r6,r7}\n\t"
+	"subs	r0,r0,#1\n\t"
+	"bx	lr\n"
+  );
+}
+#endif
+
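
For anyone reading the assembly without the ARM ARM to hand: the
eor/uadd8/sel sequence in the main loop is the usual word-at-a-time
byte-match trick. Roughly the same idea in portable C looks like this
(illustration only, not part of the patch; word_has_match and
memchr_sketch are made-up names):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Does any byte of DATA equal C?  Replicate C into every byte lane,
   XOR with the data so matching bytes become 0x00, then apply the
   classic "any zero byte?" test.  The assembly gets the same
   information from uadd8 (adding 0xff per byte sets a GE flag for
   every non-zero byte of the XORed word) followed by sel (which turns
   those flags into a 00/ff per-byte mask).  */
static int
word_has_match (uint32_t data, unsigned char c)
{
  uint32_t rep = c * 0x01010101u;   /* C in every byte lane */
  uint32_t x = data ^ rep;          /* matching bytes -> 0x00 */
  return ((x - 0x01010101u) & ~x & 0x80808080u) != 0;
}

/* Scan a word at a time while aligned, then finish byte by byte to pin
   down the exact position, much as the code above does at label 60
   with its 00/ff mask.  (The real code aligns the pointer first and
   works in doublewords; this sketch just falls back to bytes.)  */
void *
memchr_sketch (const void *s, int c, size_t n)
{
  const unsigned char *p = s;

  while (((uintptr_t) p & 3) == 0 && n >= 4)
    {
      uint32_t w;
      memcpy (&w, p, 4);            /* aligned 4-byte load */
      if (word_has_match (w, (unsigned char) c))
        break;                      /* the match is in this word */
      p += 4;
      n -= 4;
    }
  for (; n != 0; n--, p++)
    if (*p == (unsigned char) c)
      return (void *) p;
  return NULL;
}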

