Check Prefer_ERMS in memmove/memcpy/mempcpy/memset

Although the Enhanced REP MOVSB/STOSB (ERMS) implementations of memmove,
memcpy, mempcpy and memset aren't used on current processors, this patch
adds a Prefer_ERMS check to memmove, memcpy, mempcpy and memset so that
the ERMS variants can be enabled in the future.
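
As a rough, self-contained C sketch of the selection logic (all names below
are illustrative, not glibc internals; the real resolvers are the assembly
IFUNC selectors changed below): when Prefer_ERMS is set, the resolver
returns the ERMS variant before any of the existing AVX512/SSE2 checks are
reached.

/* Hypothetical sketch only -- feature_bits, PREFER_ERMS and the memcpy_*
   stubs stand in for the glibc cpu_features machinery.  */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define PREFER_ERMS (1u << 19)        /* mirrors bit_arch_Prefer_ERMS */

static unsigned int feature_bits;     /* stands in for cpu_features */

static void *
memcpy_erms (void *d, const void *s, size_t n)
{ puts ("ERMS variant"); return memcpy (d, s, n); }

static void *
memcpy_default (void *d, const void *s, size_t n)
{ puts ("default variant"); return memcpy (d, s, n); }

typedef void *(*memcpy_fn) (void *, const void *, size_t);

static memcpy_fn
select_memcpy (void)
{
  /* The new check: with Prefer_ERMS set, pick the REP MOVSB variant
     before the usual AVX512/SSE2 selection would run.  */
  if (feature_bits & PREFER_ERMS)
    return memcpy_erms;
  return memcpy_default;
}

int
main (void)
{
  char dst[8], src[8] = "abcdefg";
  feature_bits |= PREFER_ERMS;        /* pretend a future CPU set it */
  select_memcpy () (dst, src, sizeof src);
  return 0;
}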

	* sysdeps/x86/cpu-features.h (bit_arch_Prefer_ERMS): New.
	(index_arch_Prefer_ERMS): Likewise.
	* sysdeps/x86_64/multiarch/memcpy.S (__new_memcpy): Return
	__memcpy_erms for Prefer_ERMS.
	* sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
	(__memmove_erms): Enabled for libc.a.
	* sysdeps/x86_64/multiarch/memmove.S (__libc_memmove): Return
	__memmove_erms for Prefer_ERMS.
	* sysdeps/x86_64/multiarch/mempcpy.S (__mempcpy): Return
	__mempcpy_erms for Prefer_ERMS.
	* sysdeps/x86_64/multiarch/memset.S (memset): Return
	__memset_erms for Prefer_ERMS.
H.J. Lu  2016-06-30 07:57:07 -07:00
parent 73fb56a4d5
commit 13efa86ece
7 changed files with 35 additions and 1 deletion

ChangeLog

@@ -1,3 +1,18 @@
2016-06-30  H.J. Lu  <hongjiu.lu@intel.com>

	* sysdeps/x86/cpu-features.h (bit_arch_Prefer_ERMS): New.
	(index_arch_Prefer_ERMS): Likewise.
	* sysdeps/x86_64/multiarch/memcpy.S (__new_memcpy): Return
	__memcpy_erms for Prefer_ERMS.
	* sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
	(__memmove_erms): Enabled for libc.a.
	* sysdeps/x86_64/multiarch/memmove.S (__libc_memmove): Return
	__memmove_erms for Prefer_ERMS.
	* sysdeps/x86_64/multiarch/mempcpy.S (__mempcpy): Return
	__mempcpy_erms for Prefer_ERMS.
	* sysdeps/x86_64/multiarch/memset.S (memset): Return
	__memset_erms for Prefer_ERMS.

2016-06-30  Andreas Schwab  <schwab@suse.de>

	[BZ #20262]

sysdeps/x86/cpu-features.h

@@ -36,6 +36,7 @@
#define bit_arch_Prefer_MAP_32BIT_EXEC (1 << 16)
#define bit_arch_Prefer_No_VZEROUPPER (1 << 17)
#define bit_arch_Fast_Unaligned_Copy (1 << 18)
#define bit_arch_Prefer_ERMS (1 << 19)
/* CPUID Feature flags. */
@@ -105,6 +106,7 @@
# define index_arch_Prefer_MAP_32BIT_EXEC FEATURE_INDEX_1*FEATURE_SIZE
# define index_arch_Prefer_No_VZEROUPPER FEATURE_INDEX_1*FEATURE_SIZE
# define index_arch_Fast_Unaligned_Copy FEATURE_INDEX_1*FEATURE_SIZE
# define index_arch_Prefer_ERMS FEATURE_INDEX_1*FEATURE_SIZE
# if defined (_LIBC) && !IS_IN (nonlib)
@@ -274,6 +276,7 @@ extern const struct cpu_features *__get_cpu_features (void)
# define index_arch_Prefer_MAP_32BIT_EXEC FEATURE_INDEX_1
# define index_arch_Prefer_No_VZEROUPPER FEATURE_INDEX_1
# define index_arch_Fast_Unaligned_Copy FEATURE_INDEX_1
# define index_arch_Prefer_ERMS FEATURE_INDEX_1
#endif /* !__ASSEMBLER__ */
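
The bit_arch_*/index_arch_* pair is how a single architecture-feature flag
is addressed: index_arch selects the feature word (always FEATURE_INDEX_1
here; the assembler variant multiplies by FEATURE_SIZE because it needs a
byte offset rather than an array index), and bit_arch selects the bit within
that word. A hedged C sketch of how a check such as
HAS_ARCH_FEATURE (Prefer_ERMS) can be read, with made-up struct and macro
names rather than the real ones from sysdeps/x86/cpu-features.h:

/* Illustrative only -- the real layout and check macros live in glibc.  */
#include <stdio.h>

#define FEATURE_INDEX_1        0
#define FEATURE_INDEX_MAX      1

#define bit_arch_Prefer_ERMS   (1 << 19)
#define index_arch_Prefer_ERMS FEATURE_INDEX_1

struct cpu_features_sketch
{
  unsigned int feature[FEATURE_INDEX_MAX];
};

/* Rough C equivalent of a HAS_ARCH_FEATURE (name) style check.  */
#define HAS_ARCH_FEATURE_SKETCH(cf, name) \
  (((cf)->feature[index_arch_##name] & bit_arch_##name) != 0)

int
main (void)
{
  struct cpu_features_sketch cf = { { 0 } };
  cf.feature[index_arch_Prefer_ERMS] |= bit_arch_Prefer_ERMS;
  printf ("Prefer_ERMS: %d\n",
          HAS_ARCH_FEATURE_SKETCH (&cf, Prefer_ERMS));
  return 0;
}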

sysdeps/x86_64/multiarch/memcpy.S

@@ -29,6 +29,9 @@
ENTRY(__new_memcpy)
.type __new_memcpy, @gnu_indirect_function
LOAD_RTLD_GLOBAL_RO_RDX
lea __memcpy_erms(%rip), %RAX_LP
HAS_ARCH_FEATURE (Prefer_ERMS)
jnz 2f
# ifdef HAVE_AVX512_ASM_SUPPORT
HAS_ARCH_FEATURE (AVX512F_Usable)
jz 1f

sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S

@@ -150,13 +150,15 @@ L(nop):
#if defined USE_MULTIARCH && IS_IN (libc)
END (MEMMOVE_SYMBOL (__memmove, unaligned))
# if VEC_SIZE == 16 && defined SHARED
# if VEC_SIZE == 16
# if defined SHARED
/* Only used to measure performance of REP MOVSB. */
ENTRY (__mempcpy_erms)
movq %rdi, %rax
addq %rdx, %rax
jmp L(start_movsb)
END (__mempcpy_erms)
# endif
ENTRY (__memmove_erms)
movq %rdi, %rax
@@ -181,7 +183,9 @@ L(movsb_backward):
cld
ret
END (__memmove_erms)
# if defined SHARED
strong_alias (__memmove_erms, __memcpy_erms)
# endif
# endif
# ifdef SHARED
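
The __*_erms entry points themselves are tiny: a REP MOVSB over the whole
buffer, with __mempcpy_erms differing only in returning dst + n (it loads
%rdi into %rax and adds %rdx before jumping to the shared MOVSB path), and
__memmove_erms additionally handling overlap by copying backwards. A hedged
C sketch of the forward-copy case only (not the glibc code itself):

/* Sketch only; assumes x86-64 with GCC/Clang inline asm and that the
   direction flag is clear on entry, as the ABI guarantees.  */
#include <stddef.h>

void *
memcpy_erms_sketch (void *dst, const void *src, size_t n)
{
  void *ret = dst;                     /* memcpy/memmove return dst */
  /* REP MOVSB copies %rcx bytes from (%rsi) to (%rdi).  */
  __asm__ volatile ("rep movsb"
                    : "+D" (dst), "+S" (src), "+c" (n)
                    :
                    : "memory");
  return ret;
}

void *
mempcpy_erms_sketch (void *dst, const void *src, size_t n)
{
  /* mempcpy returns dst + n; the assembly computes this up front so the
     rest of the code can be shared with __memmove_erms.  */
  memcpy_erms_sketch (dst, src, n);
  return (char *) dst + n;
}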

sysdeps/x86_64/multiarch/memmove.S

@@ -27,6 +27,9 @@
ENTRY(__libc_memmove)
.type __libc_memmove, @gnu_indirect_function
LOAD_RTLD_GLOBAL_RO_RDX
lea __memmove_erms(%rip), %RAX_LP
HAS_ARCH_FEATURE (Prefer_ERMS)
jnz 2f
# ifdef HAVE_AVX512_ASM_SUPPORT
HAS_ARCH_FEATURE (AVX512F_Usable)
jz 1f

sysdeps/x86_64/multiarch/mempcpy.S

@@ -29,6 +29,9 @@
ENTRY(__mempcpy)
.type __mempcpy, @gnu_indirect_function
LOAD_RTLD_GLOBAL_RO_RDX
lea __mempcpy_erms(%rip), %RAX_LP
HAS_ARCH_FEATURE (Prefer_ERMS)
jnz 2f
# ifdef HAVE_AVX512_ASM_SUPPORT
HAS_ARCH_FEATURE (AVX512F_Usable)
jz 1f

sysdeps/x86_64/multiarch/memset.S

@@ -26,6 +26,9 @@
ENTRY(memset)
.type memset, @gnu_indirect_function
LOAD_RTLD_GLOBAL_RO_RDX
lea __memset_erms(%rip), %RAX_LP
HAS_ARCH_FEATURE (Prefer_ERMS)
jnz 2f
lea __memset_sse2_unaligned_erms(%rip), %RAX_LP
HAS_CPU_FEATURE (ERMS)
jnz 1f
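
__memset_erms (not shown in this hunk) is the same idea with REP STOSB. A
hedged C sketch, again with an illustrative name rather than the glibc
implementation:

/* Sketch only; x86-64 with GCC/Clang inline asm assumed.  */
#include <stddef.h>

void *
memset_erms_sketch (void *dst, int c, size_t n)
{
  void *ret = dst;                       /* memset returns dst */
  unsigned char byte = (unsigned char) c;
  /* REP STOSB stores AL into (%rdi), %rcx times.  */
  __asm__ volatile ("rep stosb"
                    : "+D" (dst), "+c" (n)
                    : "a" (byte)
                    : "memory");
  return ret;
}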