; RUN: llc -show-mc-encoding -march=arm -mcpu=cortex-a8 -mattr=+neon < %s | FileCheck %s

; FIXME: The following instructions still require testing:
;  - vand with immediate, vorr with immediate (a hedged sketch follows the
;    vorr register tests below)
;  - both vbit and vbif (a hedged sketch follows the vbsl tests below)

; CHECK: vand_8xi8
define <8 x i8> @vand_8xi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
; CHECK: vand	d16, d17, d16           @ encoding: [0xb0,0x01,0x41,0xf2]
	%tmp3 = and <8 x i8> %tmp1, %tmp2
	ret <8 x i8> %tmp3
}

; CHECK: vand_16xi8
define <16 x i8> @vand_16xi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
	%tmp1 = load <16 x i8>* %A
	%tmp2 = load <16 x i8>* %B
; CHECK: vand	q8, q8, q9              @ encoding: [0xf2,0x01,0x40,0xf2]
	%tmp3 = and <16 x i8> %tmp1, %tmp2
	ret <16 x i8> %tmp3
}

; CHECK: veor_8xi8
define <8 x i8> @veor_8xi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
; CHECK: veor	d16, d17, d16           @ encoding: [0xb0,0x01,0x41,0xf3]
	%tmp3 = xor <8 x i8> %tmp1, %tmp2
	ret <8 x i8> %tmp3
}

; CHECK: veor_16xi8
define <16 x i8> @veor_16xi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
	%tmp1 = load <16 x i8>* %A
	%tmp2 = load <16 x i8>* %B
; CHECK: veor	q8, q8, q9              @ encoding: [0xf2,0x01,0x40,0xf3]
	%tmp3 = xor <16 x i8> %tmp1, %tmp2
	ret <16 x i8> %tmp3
}

; CHECK: vorr_8xi8
define <8 x i8> @vorr_8xi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
; CHECK: vorr	d16, d17, d16           @ encoding: [0xb0,0x01,0x61,0xf2]
	%tmp3 = or <8 x i8> %tmp1, %tmp2
	ret <8 x i8> %tmp3
}

; CHECK: vorr_16xi8
define <16 x i8> @vorr_16xi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
	%tmp1 = load <16 x i8>* %A
	%tmp2 = load <16 x i8>* %B
; CHECK: vorr	q8, q8, q9              @ encoding: [0xf2,0x01,0x60,0xf2]
	%tmp3 = or <16 x i8> %tmp1, %tmp2
	ret <16 x i8> %tmp3
}
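
; A hedged sketch for the first FIXME item above (vorr with immediate); a
; vand-with-immediate test would take the same shape with 'and' and an
; inverted mask. The constant 16777216 (0x01000000 in every lane) is assumed
; to be representable as a NEON modified immediate, so that codegen could in
; principle fold the splat into a "vorr.i32" with an immediate operand.
; Neither the selected mnemonic nor its encoding bytes have been verified
; against an actual llc run, so only the function label is checked here; the
; instruction/encoding CHECK line should be filled in once verified.
; CHECK: vorr_imm_4xi32
define <4 x i32> @vorr_imm_4xi32(<4 x i32>* %A) nounwind {
	%tmp1 = load <4 x i32>* %A
	%tmp2 = or <4 x i32> %tmp1, < i32 16777216, i32 16777216, i32 16777216, i32 16777216 >
	ret <4 x i32> %tmp2
}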

; CHECK: vbic_8xi8
define <8 x i8> @vbic_8xi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
; CHECK: vbic	d16, d17, d16           @ encoding: [0xb0,0x01,0x51,0xf2]
	%tmp3 = xor <8 x i8> %tmp2, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
	%tmp4 = and <8 x i8> %tmp1, %tmp3
	ret <8 x i8> %tmp4
}

; CHECK: vbic_16xi8
define <16 x i8> @vbic_16xi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
	%tmp1 = load <16 x i8>* %A
	%tmp2 = load <16 x i8>* %B
; CHECK: vbic	q8, q8, q9              @ encoding: [0xf2,0x01,0x50,0xf2]
	%tmp3 = xor <16 x i8> %tmp2, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
	%tmp4 = and <16 x i8> %tmp1, %tmp3
	ret <16 x i8> %tmp4
}

; CHECK: vorn_8xi8
define <8 x i8> @vorn_8xi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
; CHECK: vorn	d16, d17, d16           @ encoding: [0xb0,0x01,0x71,0xf2]
	%tmp3 = xor <8 x i8> %tmp2, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
	%tmp4 = or <8 x i8> %tmp1, %tmp3
	ret <8 x i8> %tmp4
}

; CHECK: vorn_16xi8
define <16 x i8> @vorn_16xi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
	%tmp1 = load <16 x i8>* %A
	%tmp2 = load <16 x i8>* %B
; CHECK: vorn	q8, q8, q9              @ encoding: [0xf2,0x01,0x70,0xf2]
	%tmp3 = xor <16 x i8> %tmp2, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
	%tmp4 = or <16 x i8> %tmp1, %tmp3
	ret <16 x i8> %tmp4
}

; CHECK: vmvn_8xi8
define <8 x i8> @vmvn_8xi8(<8 x i8>* %A) nounwind {
	%tmp1 = load <8 x i8>* %A
; CHECK: vmvn	d16, d16                @ encoding: [0xa0,0x05,0xf0,0xf3]
	%tmp2 = xor <8 x i8> %tmp1, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
	ret <8 x i8> %tmp2
}

; CHECK: vmvn_16xi8
define <16 x i8> @vmvn_16xi8(<16 x i8>* %A) nounwind {
	%tmp1 = load <16 x i8>* %A
; CHECK: vmvn	q8, q8                  @ encoding: [0xe0,0x05,0xf0,0xf3]
	%tmp2 = xor <16 x i8> %tmp1, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
	ret <16 x i8> %tmp2
}

; CHECK: vbsl_8xi8
define <8 x i8> @vbsl_8xi8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = load <8 x i8>* %C
; CHECK: vbsl	d18, d17, d16           @ encoding: [0xb0,0x21,0x51,0xf3]
	%tmp4 = and <8 x i8> %tmp1, %tmp2
	%tmp5 = xor <8 x i8> %tmp1, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
	%tmp6 = and <8 x i8> %tmp5, %tmp3
	%tmp7 = or <8 x i8> %tmp4, %tmp6
	ret <8 x i8> %tmp7
}

; CHECK: vbsl_16xi8
define <16 x i8> @vbsl_16xi8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind {
	%tmp1 = load <16 x i8>* %A
	%tmp2 = load <16 x i8>* %B
	%tmp3 = load <16 x i8>* %C
; CHECK: vbsl	q8, q10, q9             @ encoding: [0xf2,0x01,0x54,0xf3]
	%tmp4 = and <16 x i8> %tmp1, %tmp2
	%tmp5 = xor <16 x i8> %tmp1, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
	%tmp6 = and <16 x i8> %tmp5, %tmp3
	%tmp7 = or <16 x i8> %tmp4, %tmp6
	ret <16 x i8> %tmp7
}
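
; A hedged sketch for the second FIXME item above (vbit/vbif). The IR below
; computes (%B & %C) | (%A & ~%C), i.e. "insert the bits of %B into %A
; wherever the mask %C is set", which is the vbit operation (vbif is the same
; with the mask sense inverted). Whether instruction selection actually emits
; vbit or vbif rather than plain vbsl depends on which operand register
; allocation lets the instruction clobber, so only the function label is
; checked; the mnemonic and encoding CHECK should be added once verified
; against an llc run.
; CHECK: vbit_8xi8
define <8 x i8> @vbit_8xi8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = load <8 x i8>* %C
	%tmp4 = and <8 x i8> %tmp2, %tmp3
	%tmp5 = xor <8 x i8> %tmp3, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
	%tmp6 = and <8 x i8> %tmp1, %tmp5
	%tmp7 = or <8 x i8> %tmp4, %tmp6
	ret <8 x i8> %tmp7
}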