From c6083cd61b5a64a1c73d1634744382f54cb99595 Mon Sep 17 00:00:00 2001
From: David Brownell
Date: Fri, 25 May 2007 18:47:47 -0700
Subject: [PATCH] [AVR32] faster avr32 unaligned access

Use a more conventional implementation for unaligned access, and
include an AT32AP-specific optimization: the CPU will handle unaligned
words. The result is always faster and smaller for 8, 16, and 32 bit
values. For 64 bit quantities, it's presumably larger.

Signed-off-by: David Brownell
Signed-off-by: Haavard Skinnemoen
---
 include/asm-avr32/unaligned.h | 29 ++++++++++++++++++++---------
 1 file changed, 20 insertions(+), 9 deletions(-)

diff --git a/include/asm-avr32/unaligned.h b/include/asm-avr32/unaligned.h
index 3042723fcbf..791361786fc 100644
--- a/include/asm-avr32/unaligned.h
+++ b/include/asm-avr32/unaligned.h
@@ -6,20 +6,31 @@
  * implementation. The AVR32 AP implementation can handle unaligned
  * words, but halfwords must be halfword-aligned, and doublewords must
  * be word-aligned.
- *
- * TODO: Make all this CPU-specific and optimize.
  */
 
-#include <linux/string.h>
+#include <asm-generic/unaligned.h>
 
-/* Use memmove here, so gcc does not insert a __builtin_memcpy. */
+#ifdef CONFIG_CPU_AT32AP7000
 
+/* REVISIT calling memmove() may be smaller for 64-bit values ... */
+
+#undef get_unaligned
 #define get_unaligned(ptr) \
-  ({ __typeof__(*(ptr)) __tmp; memmove(&__tmp, (ptr), sizeof(*(ptr))); __tmp; })
+	___get_unaligned(ptr, sizeof((*ptr)))
+#define ___get_unaligned(ptr, size) \
+	((size == 4) ? *(ptr) : __get_unaligned(ptr, size))
+
+#undef put_unaligned
+#define put_unaligned(val, ptr) \
+	___put_unaligned((__u64)(val), ptr, sizeof((*ptr)))
+#define ___put_unaligned(val, ptr, size)	\
+do {						\
+	if (size == 4)				\
+		*(ptr) = (val);			\
+	else					\
+		__put_unaligned(val, ptr, size);	\
+} while (0)
 
-#define put_unaligned(val, ptr) \
-  ({ __typeof__(*(ptr)) __tmp = (val); \
-     memmove((ptr), &__tmp, sizeof(*(ptr))); \
-     (void)0; })
+#endif
 
 #endif /* __ASM_AVR32_UNALIGNED_H */
-- 
2.41.1
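
Not part of the patch itself, for context: the gain comes from sizeof(*(ptr)) being a
compile-time constant, so the (size == 4) test folds away and 32-bit get/put become a
single word load or store, which the AT32AP core accepts even at unaligned addresses,
while every other size still goes through the byte-wise __get_unaligned()/__put_unaligned()
helpers from <asm-generic/unaligned.h>. Below is a minimal userspace sketch of the same
dispatch-on-sizeof idea; demo_get_unaligned() is an invented name, memcpy() merely stands
in for the kernel's byte-wise fallback, and it assumes gcc statement expressions plus a
CPU (AVR32 AP, x86, ...) that tolerates unaligned 32-bit loads.

/* Sketch only, not kernel code. Build with: gcc -Wall -O2 demo.c -o demo */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Mirrors ___get_unaligned() above: sizeof() is a compile-time constant,
 * so the compiler folds the branch. The 4-byte case is a plain, possibly
 * unaligned, word load; other sizes take the byte-safe memcpy() path.
 */
#define demo_get_unaligned(ptr)                                         \
        (sizeof(*(ptr)) == 4                                            \
                ? *(ptr)                                                \
                : ({ __typeof__(*(ptr)) __v;                            \
                     memcpy(&__v, (ptr), sizeof(*(ptr)));               \
                     __v; }))

int main(void)
{
        unsigned char buf[16];
        uint32_t v32 = 0x12345678;
        uint64_t v64 = 0x1122334455667788ULL;

        /* Store both values at odd offsets so they are unaligned. */
        memcpy(buf + 1, &v32, sizeof(v32));
        memcpy(buf + 7, &v64, sizeof(v64));

        /* 32-bit read: reduces to a direct load through the pointer. */
        printf("u32: 0x%08x\n",
               (unsigned)demo_get_unaligned((uint32_t *)(buf + 1)));

        /* 64-bit read: keeps the byte-safe fallback path. */
        printf("u64: 0x%016llx\n",
               (unsigned long long)demo_get_unaligned((uint64_t *)(buf + 7)));
        return 0;
}

Inspecting the optimized assembly shows the 4-byte arm collapsing to one load while the
8-byte arm retains the copy, which parallels how the #ifdef block in the patch leaves
everything except 32-bit accesses to the generic helpers.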