path: root/arch/sh/include/asm/unaligned-sh4a.h
author	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-11 10:08:33 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-11 10:08:33 -0700
commit	d3d07d941fd80c173b6d690ded00ee5fb8302e06 (patch)
tree	f1a82c956e393df9933c8544bb564ef1735384ee /arch/sh/include/asm/unaligned-sh4a.h
parent	6cd8e300b49332eb9eeda45816c711c198d31505 (diff)
parent	54ff328b46e58568c4b3350c2fa3223ef862e5a4 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6: (266 commits)
  sh: Tie sparseirq in to Kconfig.
  sh: Wire up sys_rt_tgsigqueueinfo.
  sh: Fix sys_pwritev() syscall table entry for sh32.
  sh: Fix sh4a llsc-based cmpxchg()
  sh: sh7724: Add JPU support
  sh: sh7724: INTC setting update
  sh: sh7722 clock framework rewrite
  sh: sh7366 clock framework rewrite
  sh: sh7343 clock framework rewrite
  sh: sh7724 clock framework rewrite V3
  sh: sh7723 clock framework rewrite V2
  sh: add enable()/disable()/set_rate() to div6 code
  sh: add AP325RXA mode pin configuration
  sh: add Migo-R mode pin configuration
  sh: sh7722 mode pin definitions
  sh: sh7724 mode pin comments
  sh: sh7723 mode pin V2
  sh: rework mode pin code
  sh: clock div6 helper code
  sh: clock div4 frequency table offset fix
  ...
Diffstat (limited to 'arch/sh/include/asm/unaligned-sh4a.h')
-rw-r--r--	arch/sh/include/asm/unaligned-sh4a.h	10
1 file changed, 5 insertions, 5 deletions
diff --git a/arch/sh/include/asm/unaligned-sh4a.h b/arch/sh/include/asm/unaligned-sh4a.h
index d8f89770275..9f4dd252c98 100644
--- a/arch/sh/include/asm/unaligned-sh4a.h
+++ b/arch/sh/include/asm/unaligned-sh4a.h
@@ -3,9 +3,9 @@
/*
* SH-4A has support for unaligned 32-bit loads, and 32-bit loads only.
- * Support for 16 and 64-bit accesses are done through shifting and
- * masking relative to the endianness. Unaligned stores are not supported
- * by the instruction encoding, so these continue to use the packed
+ * Support for 64-bit accesses are done through shifting and masking
+ * relative to the endianness. Unaligned stores are not supported by the
+ * instruction encoding, so these continue to use the packed
* struct.
*
* The same note as with the movli.l/movco.l pair applies here, as long
@@ -41,9 +41,9 @@ struct __una_u64 { u64 x __attribute__((packed)); };
static inline u16 __get_unaligned_cpu16(const u8 *p)
{
#ifdef __LITTLE_ENDIAN
- return __get_unaligned_cpu32(p) & 0xffff;
+ return p[0] | p[1] << 8;
#else
- return __get_unaligned_cpu32(p) >> 16;
+ return p[0] << 8 | p[1];
#endif
}
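
The comment change in the first hunk narrows the shift-and-mask note to 64-bit accesses only, since 16-bit reads are no longer routed through the 32-bit loader. As an illustration of what such a composed 64-bit getter looks like, here is a minimal sketch built on __get_unaligned_cpu32(), with the halves ordered by endianness; the exact body is an assumption, not a verbatim quote of this header:

/*
 * Sketch: build a 64-bit unaligned load from two unaligned 32-bit
 * loads, taking the low word first on little-endian and last on
 * big-endian.
 */
static inline u64 __get_unaligned_cpu64(const u8 *p)
{
#ifdef __LITTLE_ENDIAN
	return (u64)__get_unaligned_cpu32(p + 4) << 32 |
		    __get_unaligned_cpu32(p);
#else
	return (u64)__get_unaligned_cpu32(p) << 32 |
		    __get_unaligned_cpu32(p + 4);
#endif
}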
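
In the second hunk, __get_unaligned_cpu16() stops going through __get_unaligned_cpu32() and instead assembles the halfword from the two bytes that were actually requested. The likely motivation is that the old path loaded a full 32 bits for a 16-bit access, touching two bytes beyond the caller's data, which can fault if those bytes land on an unmapped page. For context, the 32-bit loader that the removed lines relied on maps onto the SH-4A movua.l instruction; the snippet below is a sketch of that pattern, not a verbatim quote of the header:

/* Sketch: SH-4A movua.l performs the unaligned 32-bit load in hardware. */
static inline u32 __get_unaligned_cpu32(const u8 *p)
{
	unsigned long value;

	__asm__ __volatile__ (
		"movua.l	@%1, %0\n\t"
		: "=r" (value)
		: "r" (p));

	return value;
}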