//===- README_X86_64.txt - Notes for X86-64 code gen ----------------------===//

AMD64 Optimization Manual 8.2 has some nice information about optimizing integer
multiplication by a constant. How much of it applies to Intel's X86-64
implementation? There are definite trade-offs to consider: latency vs. register
pressure vs. code size.
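
As a hypothetical illustration (not taken from the manual), a multiply by a
small constant such as 45 can be lowered either as a single imul or as a short
LEA sequence, trading latency and register pressure against code size:

/* Illustrative only; the exact lowering is up to the backend. */
long mul45(long x) {
  /* Option 1: a single imulq $45, %rdi, %rax -- compact, ~3-cycle latency.
     Option 2: two LEAs, since 45 = 5 * 9:
         leaq (%rdi,%rdi,4), %rax    # x * 5
         leaq (%rax,%rax,8), %rax    # (x * 5) * 9
     which can be cheaper in latency but costs more instruction bytes. */
  return x * 45;
}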

//===---------------------------------------------------------------------===//

Are we better off using branches instead of cmov to implement FP to
unsigned i64 conversion?

_conv:
	ucomiss	LC0(%rip), %xmm0
	cvttss2siq	%xmm0, %rdx
	jb	L3
	subss	LC0(%rip), %xmm0
	movabsq	$-9223372036854775808, %rax
	cvttss2siq	%xmm0, %rdx
	xorq	%rax, %rdx
L3:
	movq	%rdx, %rax
	ret

instead of

_conv:
	movss LCPI1_0(%rip), %xmm1
	cvttss2siq %xmm0, %rcx
	movaps %xmm0, %xmm2
	subss %xmm1, %xmm2
	cvttss2siq %xmm2, %rax
	movabsq $-9223372036854775808, %rdx
	xorq %rdx, %rax
	ucomiss %xmm1, %xmm0
	cmovb %rcx, %rax
	ret

The jb branch seems highly likely to be taken, in which case the branching
version would have saved a few instructions.
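
For reference, the code above presumably comes from a conversion along these
lines (an assumption; the original C is not shown in this note):

unsigned long long conv(float x) {
  return (unsigned long long)x;
}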

//===---------------------------------------------------------------------===//

Poor codegen:

#include <string.h>

int X[2];
int b;
void test(void) {
  memset(X, b, 2*sizeof(X[0]));
}

llc:
	movq _b@GOTPCREL(%rip), %rax
	movzbq (%rax), %rax
	movq %rax, %rcx
	shlq $8, %rcx
	orq %rax, %rcx
	movq %rcx, %rax
	shlq $16, %rax
	orq %rcx, %rax
	movq %rax, %rcx
	shlq $32, %rcx
	movq _X@GOTPCREL(%rip), %rdx
	orq %rax, %rcx
	movq %rcx, (%rdx)
	ret

gcc:
	movq	_b@GOTPCREL(%rip), %rax
	movabsq	$72340172838076673, %rdx
	movzbq	(%rax), %rax
	imulq	%rdx, %rax
	movq	_X@GOTPCREL(%rip), %rdx
	movq	%rax, (%rdx)
	ret
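
The magic constant gcc loads is 0x0101010101010101 (72340172838076673):
multiplying the zero-extended fill byte by it replicates the byte into every
byte of a 64-bit word, replacing the shift/or chain llc emits.  A minimal
sketch of the trick (illustrative, not the suggested implementation):

#include <stdint.h>

/* b * 0x0101010101010101 == b | b<<8 | b<<16 | ... | b<<56 */
static inline uint64_t splat_byte(uint8_t b) {
  return (uint64_t)b * 0x0101010101010101ULL;
}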

And the codegen is even worse for the following
(from http://gcc.gnu.org/bugzilla/show_bug.cgi?id=33103):
  void fill1(char *s, int a)
  {
    __builtin_memset(s, a, 15);
  }

For this version, we duplicate the computation of the constant to store.

//===---------------------------------------------------------------------===//

It's not possible to reference the AH, BH, CH, and DH registers in an
instruction requiring a REX prefix. However, divb and mulb both produce
results in AH. If isel emitted a CopyFromReg straight from AH, it would be
turned into a movb whose other operand could be allocated to r8b - r15b,
which cannot be encoded together with AH.

To get around this, isel emits a CopyFromReg from AX, then right-shifts it
by 8 and truncates it. It's not pretty, but it works. We need some register
allocation magic to make the hack go away (e.g. putting additional
constraints on the result of the movb).
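
A minimal example that runs into this (illustrative, not from the original
note): the remainder of an 8-bit division comes back in AH.

unsigned char rem8(unsigned char a, unsigned char b) {
  return a % b;   /* divb leaves the quotient in AL and the remainder in AH */
}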

//===---------------------------------------------------------------------===//

The x86-64 ABI for hidden-argument struct returns requires that the
incoming value of %rdi be copied into %rax by the callee upon return.

The idea is that it saves callers from having to remember this value,
which would often require a callee-saved register. Callees usually
need to keep this value live for most of their body anyway, so it
doesn't add a significant burden on them.

We currently implement this in codegen; however, this is suboptimal because
it makes it quite awkward to implement the corresponding optimization for
callers.

A better implementation would be to relax the LLVM IR rules for sret
arguments to allow a function with an sret argument to have a non-void
return type, and to have the front-end set up the sret argument value
as the return value of the function. The front-end could then emit uses
of the returned struct value in terms of the function's lowered return
value, and it would free non-C front-ends from a complication only
required by a C-based ABI.
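
A small C illustration of the ABI rule (the struct and the names here are
hypothetical): make() receives a hidden destination pointer in %rdi and must
copy it into %rax before returning, so a caller such as use() could consume
the %rax copy instead of keeping the pointer live in a callee-saved register
across the call.

struct big { long a, b, c, d; };
struct big make(void);             /* returned through a hidden sret pointer */

long use(struct big *dst) {
  *dst = make();                   /* hidden pointer to the result in %rdi */
  return dst->a;                   /* could be read through the %rax copy */
}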

//===---------------------------------------------------------------------===//

We get a redundant zero extension for code like this:

int mask[1000];
int foo(unsigned x) {
 if (x < 10)
   x = x * 45;
 else
   x = x * 78;
 return mask[x];
}

_foo:
LBB1_0:	## entry
	cmpl	$9, %edi
	jbe	LBB1_3	## bb
LBB1_1:	## bb1
	imull	$78, %edi, %eax
LBB1_2:	## bb2
	movl	%eax, %eax                    <----
	movq	_mask@GOTPCREL(%rip), %rcx
	movl	(%rcx,%rax,4), %eax
	ret
LBB1_3:	## bb
	imull	$45, %edi, %eax
	jmp	LBB1_2	## bb2
  
Before regalloc, we have:

        %reg1025<def> = IMUL32rri8 %reg1024, 45, %EFLAGS<imp-def>
        JMP mbb<bb2,0x203afb0>
    Successors according to CFG: 0x203afb0 (#3)

bb1: 0x203af60, LLVM BB @0x1e02310, ID#2:
    Predecessors according to CFG: 0x203aec0 (#0)
        %reg1026<def> = IMUL32rri8 %reg1024, 78, %EFLAGS<imp-def>
    Successors according to CFG: 0x203afb0 (#3)

bb2: 0x203afb0, LLVM BB @0x1e02340, ID#3:
    Predecessors according to CFG: 0x203af10 (#1) 0x203af60 (#2)
        %reg1027<def> = PHI %reg1025, mbb<bb,0x203af10>,
                            %reg1026, mbb<bb1,0x203af60>
        %reg1029<def> = MOVZX64rr32 %reg1027

so we'd have to know that IMUL32rri8 leaves the upper 32 bits of the 64-bit
register zeroed and be able to recognize that the zero extend is therefore
redundant.  This could also presumably be implemented if we had
whole-function SelectionDAGs.
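
A small illustration of the property in question (an assumption about the
intended fix, not part of the original note): on x86-64, any 32-bit operation
already zeroes the upper 32 bits of its destination register, so the extra
movl %eax, %eax above is a no-op.

unsigned long widen(unsigned x) {
  return (unsigned long)(x * 45);   /* the imull by itself already zero-extends */
}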

//===---------------------------------------------------------------------===//

Take the following C code
(from http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43640):

struct u1
{
        float x;
        float y;
};

float foo(struct u1 u)
{
        return u.x + u.y;
}

Optimizes to the following IR:
define float @foo(double %u.0) nounwind readnone {
entry:
  %tmp8 = bitcast double %u.0 to i64              ; <i64> [#uses=2]
  %tmp6 = trunc i64 %tmp8 to i32                  ; <i32> [#uses=1]
  %tmp7 = bitcast i32 %tmp6 to float              ; <float> [#uses=1]
  %tmp2 = lshr i64 %tmp8, 32                      ; <i64> [#uses=1]
  %tmp3 = trunc i64 %tmp2 to i32                  ; <i32> [#uses=1]
  %tmp4 = bitcast i32 %tmp3 to float              ; <float> [#uses=1]
  %0 = fadd float %tmp7, %tmp4                    ; <float> [#uses=1]
  ret float %0
}

And current llvm-gcc/clang output:
	movd	%xmm0, %rax
	movd	%eax, %xmm1
	shrq	$32, %rax
	movd	%eax, %xmm0
	addss	%xmm1, %xmm0
	ret

We really shouldn't move the floats to RAX only to move them straight back
to the XMM registers.

There really isn't any good way to handle this purely in the IR optimizers;
it could possibly be handled by changing the output of the front-end, though.
It would also be feasible to add an x86-specific DAGCombine to optimize the
bitcast+trunc+(lshr+)bitcast combination.

//===---------------------------------------------------------------------===//

Take the following code
(from http://gcc.gnu.org/bugzilla/show_bug.cgi?id=34653):
extern unsigned long table[];
unsigned long foo(unsigned char *p) {
  unsigned long tag = *p;
  return table[tag >> 4] + table[tag & 0xf];
}

Current code generated:
	movzbl	(%rdi), %eax
	movq	%rax, %rcx
	andq	$240, %rcx
	shrq	%rcx
	andq	$15, %rax
	movq	table(,%rax,8), %rax
	addq	table(%rcx), %rax
	ret

Issues:
1. First movq should be movl; saves a byte.
2. Both andq's should be andl; saves another two bytes.  I think this was
   implemented at one point, but subsequently regressed.
3. shrq should be shrl; saves another byte.
4. The first andq can be completely eliminated by using a slightly more
   expensive addressing mode.

//===---------------------------------------------------------------------===//

Consider the following (contrived testcase, but contains common factors):

#include <stdarg.h>
int test(int x, ...) {
  int sum = 0, i;
  va_list l;
  va_start(l, x);
  for (i = 0; i < x; i++)
    sum += va_arg(l, int);
  va_end(l);
  return sum;
}

Testcase given in C because fixing it will likely involve changing the IR
generated for it.  The primary issue with the result is that it doesn't do any
of the optimizations which are possible if we know the address of a va_list
in the current function is never taken:
1. We shouldn't spill the XMM registers because we only call va_arg with "int".
2. It would be nice if we could scalarrepl the va_list.
3. Probably overkill, but it'd be cool if we could peel off the first five
iterations of the loop.

Other optimizations apply to functions which use va_arg on floats and don't
have the address of a va_list taken:
1. Conversely to the above, we shouldn't spill the general-purpose registers
   if we only call va_arg on "double" (see the sketch after this list).
2. If we know that nothing wider than 64 bits is read from the XMM registers,
   we can change the spilling code to halve the amount of stack used.
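
A sketch of the "double"-only case from item 1 (the function here is
illustrative):

#include <stdarg.h>

double sumd(int n, ...) {
  double s = 0.0;
  int i;
  va_list l;
  va_start(l, n);
  for (i = 0; i < n; i++)
    s += va_arg(l, double);   /* only the XMM argument registers are read */
  va_end(l);
  return s;
}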

//===---------------------------------------------------------------------===//