 arch/tile/lib/atomic_asm_32.S | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/arch/tile/lib/atomic_asm_32.S b/arch/tile/lib/atomic_asm_32.S
index 5a5514b77e7..6bda3132cd6 100644
--- a/arch/tile/lib/atomic_asm_32.S
+++ b/arch/tile/lib/atomic_asm_32.S
@@ -14,7 +14,7 @@
* Support routines for atomic operations. Each function takes:
*
* r0: address to manipulate
- * r1: pointer to atomic lock guarding this operation (for FUTEX_LOCK_REG)
+ * r1: pointer to atomic lock guarding this operation (for ATOMIC_LOCK_REG)
* r2: new value to write, or for cmpxchg/add_unless, value to compare against
* r3: (cmpxchg/xchg_add_unless) new value to write or add;
* (atomic64 ops) high word of value to write
@@ -59,7 +59,7 @@
* bad kernel addresses).
*
* Note that if the value we would store is the same as what we
- * loaded, we bypass the load. Other platforms with true atomics can
+ * loaded, we bypass the store. Other platforms with true atomics can
* make the guarantee that a non-atomic __clear_bit(), for example,
* can safely race with an atomic test_and_set_bit(); this example is
* from bit_spinlock.h in slub_lock() / slub_unlock(). We can't do
@@ -70,7 +70,7 @@
*/

#include <linux/linkage.h>
-#include <asm/atomic.h>
+#include <asm/atomic_32.h>
#include <asm/page.h>
#include <asm/processor.h>

@@ -164,6 +164,7 @@ STD_ENTRY_SECTION(__atomic\name, .text.atomic)
STD_ENDPROC(__atomic\name)
.ifc \bitwidth,32
.pushsection __ex_table,"a"
+ .align 4
.word 1b, __atomic\name
.word 2b, __atomic\name
.word __atomic\name, __atomic_bad_address
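
As a footnote to the first hunk: the register contract documented there (r0
address, r1 lock pointer per ATOMIC_LOCK_REG, r2/r3 operands) is easiest to
see in C. The sketch below is a hypothetical rendering of the 32-bit cmpxchg
path, assuming the lock-guarded load/store scheme the file's comments
describe; the function name is invented and GCC __sync builtins stand in for
the real lock handling, which is hand-written asm.

/*
 * Hypothetical C view of the register contract for 32-bit cmpxchg:
 * r0 = address, r1 = atomic lock, r2 = compare value, r3 = new value.
 */
static int atomic_cmpxchg_sketch(int *addr,	/* r0 */
				 int *lock,	/* r1 */
				 int oldval,	/* r2 */
				 int newval)	/* r3 */
{
	int prior;

	while (__sync_lock_test_and_set(lock, 1))	/* take the lock */
		;
	prior = *addr;
	if (prior == oldval && newval != prior)
		*addr = newval;		/* bypass the store if unchanged */
	__sync_lock_release(lock);
	return prior;			/* old value handed back in r0 */
}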
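The second hunk's corrected sentence ("we bypass the store") is the key to
the guarantee the comment says this platform cannot make. With lock-based
atomics, an atomic read-modify-write that stores a stale word can undo a
concurrent non-atomic __clear_bit(); skipping the store when the value is
unchanged narrows that window but cannot close it. Below is a minimal
single-threaded replay of the losing interleaving; all names are local to
the example.

#include <stdio.h>

int main(void)
{
	unsigned long w = 0x2;		/* bit 1 set, bit 0 clear */

	/* CPU 0: lock-based test_and_set_bit(0, &w) loads the word... */
	unsigned long old = w;

	/* CPU 1: non-atomic __clear_bit(1, &w) races in between... */
	w &= ~(1UL << 1);

	/* CPU 0: ...then writes back old | 1.  The store is not
	 * bypassed here (old | 1 != old), so stale bit 1 reappears. */
	w = old | (1UL << 0);

	printf("final word: %#lx (expected 0x1; the clear of bit 1 was lost)\n", w);
	return 0;
}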
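On the last hunk: each pair of .word directives in __ex_table is consumed as
a two-word entry mapping a faulting instruction to its fixup, so the added
.align 4 keeps entries word-aligned when the linker concatenates the pushed
section fragments; .word alone does not force alignment. A sketch of the
consumer side follows, with field and function names drawn from the generic
kernel pattern rather than the tile headers, which may differ in detail.

struct exception_table_entry {
	unsigned long insn;	/* address of the faulting instruction */
	unsigned long fixup;	/* resume target, e.g. __atomic_bad_address */
};

/* Minimal linear lookup, standing in for the kernel's search of the
 * sorted table between __start___ex_table and __stop___ex_table. */
static const struct exception_table_entry *
find_fixup(const struct exception_table_entry *start,
	   const struct exception_table_entry *end,
	   unsigned long faulting_pc)
{
	const struct exception_table_entry *e;

	for (e = start; e < end; e++)
		if (e->insn == faulting_pc)
			return e;
	return 0;	/* no entry: the fault is a genuine bug */
}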