Diffstat (limited to 'arch/ia64/include/asm/sn/bte.h')
-rw-r--r--  arch/ia64/include/asm/sn/bte.h  | 13 +++++++------
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/arch/ia64/include/asm/sn/bte.h b/arch/ia64/include/asm/sn/bte.h
index a0d214f4311..cc6c4dbf53a 100644
--- a/arch/ia64/include/asm/sn/bte.h
+++ b/arch/ia64/include/asm/sn/bte.h
@@ -39,7 +39,7 @@
/* BTE status register only supports 16 bits for length field */
#define BTE_LEN_BITS (16)
#define BTE_LEN_MASK ((1 << BTE_LEN_BITS) - 1)
-#define BTE_MAX_XFER ((1 << BTE_LEN_BITS) * L1_CACHE_BYTES)
+#define BTE_MAX_XFER (BTE_LEN_MASK << L1_CACHE_SHIFT)
/* Define hardware */
@@ -216,17 +216,18 @@ extern void bte_error_handler(unsigned long);
bte_copy(0, dest, len, ((mode) | BTE_ZERO_FILL), notification)
/*
- * The following is the prefered way of calling bte_unaligned_copy
+ * The following is the preferred way of calling bte_unaligned_copy
* If the copy is fully cache line aligned, then bte_copy is
* used instead. Since bte_copy is inlined, this saves a call
* stack. NOTE: bte_copy is called synchronously and does block
* until the transfer is complete. In order to get the asynch
* version of bte_copy, you must perform this check yourself.
*/
-#define BTE_UNALIGNED_COPY(src, dest, len, mode) \
- (((len & L1_CACHE_MASK) || (src & L1_CACHE_MASK) || \
- (dest & L1_CACHE_MASK)) ? \
- bte_unaligned_copy(src, dest, len, mode) : \
+#define BTE_UNALIGNED_COPY(src, dest, len, mode) \
+ (((len & (L1_CACHE_BYTES - 1)) || \
+ (src & (L1_CACHE_BYTES - 1)) || \
+ (dest & (L1_CACHE_BYTES - 1))) ? \
+ bte_unaligned_copy(src, dest, len, mode) : \
bte_copy(src, dest, len, mode, NULL))
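
For reference, the first hunk trims BTE_MAX_XFER by one cache line: the 16-bit length field in the BTE status register can encode at most BTE_LEN_MASK (65535) cache lines, so the old expression (1 << BTE_LEN_BITS) * L1_CACHE_BYTES overstated the limit by exactly one cache line. The stand-alone sketch below (not part of the patch) re-declares the constants locally and assumes the 128-byte SN2 cache line (L1_CACHE_SHIFT = 7) so it compiles on its own; it prints the old and new limits side by side.

/* Sketch only: local copies of the bte.h constants, assuming
 * L1_CACHE_SHIFT = 7 (128-byte lines) as on ia64 SN2. */
#include <stdio.h>

#define L1_CACHE_SHIFT   7
#define L1_CACHE_BYTES   (1 << L1_CACHE_SHIFT)

#define BTE_LEN_BITS     (16)
#define BTE_LEN_MASK     ((1 << BTE_LEN_BITS) - 1)

#define BTE_MAX_XFER_OLD ((1 << BTE_LEN_BITS) * L1_CACHE_BYTES)
#define BTE_MAX_XFER_NEW (BTE_LEN_MASK << L1_CACHE_SHIFT)

int main(void)
{
	/* The length field holds at most BTE_LEN_MASK (65535) cache
	 * lines, so the old value is one cache line too large. */
	printf("old max: %d bytes (%d lines)\n",
	       BTE_MAX_XFER_OLD, BTE_MAX_XFER_OLD / L1_CACHE_BYTES);
	printf("new max: %d bytes (%d lines)\n",
	       BTE_MAX_XFER_NEW, BTE_MAX_XFER_NEW / L1_CACHE_BYTES);
	return 0;
}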
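
The second hunk only rewrites how the alignment test is spelled, open-coding (L1_CACHE_BYTES - 1) in place of L1_CACHE_MASK; the dispatch logic itself is unchanged. A minimal user-space sketch of that logic follows, with bte_copy()/bte_unaligned_copy() replaced by hypothetical stubs and a 128-byte cache line assumed.

/* Sketch only: stand-in stubs for the real SN2 BTE routines. */
#include <stdio.h>
#include <stdint.h>

#define L1_CACHE_BYTES 128

static void bte_copy_stub(uint64_t src, uint64_t dest, uint64_t len)
{
	printf("fast path: bte_copy(%#lx, %#lx, %lu)\n",
	       (unsigned long)src, (unsigned long)dest, (unsigned long)len);
}

static void bte_unaligned_copy_stub(uint64_t src, uint64_t dest, uint64_t len)
{
	printf("slow path: bte_unaligned_copy(%#lx, %#lx, %lu)\n",
	       (unsigned long)src, (unsigned long)dest, (unsigned long)len);
}

/* Mirrors BTE_UNALIGNED_COPY: any misaligned length or address falls
 * back to the unaligned routine; a fully cache-line-aligned request
 * goes straight to bte_copy. */
static void bte_unaligned_copy_dispatch(uint64_t src, uint64_t dest,
					uint64_t len)
{
	if ((len & (L1_CACHE_BYTES - 1)) ||
	    (src & (L1_CACHE_BYTES - 1)) ||
	    (dest & (L1_CACHE_BYTES - 1)))
		bte_unaligned_copy_stub(src, dest, len);
	else
		bte_copy_stub(src, dest, len);
}

int main(void)
{
	bte_unaligned_copy_dispatch(0x10000, 0x20000, 4096); /* aligned */
	bte_unaligned_copy_dispatch(0x10008, 0x20000, 4096); /* misaligned src */
	return 0;
}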