author    Chris Lattner <sabre@nondot.org>  2007-10-13 06:35:54 +0000
committer Chris Lattner <sabre@nondot.org>  2007-10-13 06:35:54 +0000
commit    2b4c279a8e203fa8b13adf7ba3a5d4c8bef70df3 (patch)
tree      22ff6683e796ecc8eba8f949304a24e376528fa2 /lib/CodeGen/SelectionDAG/DAGCombiner.cpp
parent    d1a23d573d5408be1cf52358fe03df0bfa07e16e (diff)
Add a simple optimization to simplify the input to truncate and truncstore
instructions, based on the knowledge that they don't demand the top bits.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@42952 91177308-0d34-0410-b5e6-96231b3b80d8
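To see why the truncate fold in this patch is sound, here is a minimal standalone C++ demonstration (an illustration only, not LLVM code): when only the low 8 bits of (x << 8) | y are demanded, the shifted term contributes nothing, so truncating the whole expression is the same as truncating y.

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t x = 0xDEADBEEFu, y = 0x12345678u;
      uint32_t wide = (x << 8) | y;    // "or (shl x, 8), y"
      uint8_t narrow = (uint8_t)wide;  // "trunc ... to i8"
      assert(narrow == (uint8_t)y);    // the patch folds this to "trunc y"
      return 0;
    }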
Diffstat (limited to 'lib/CodeGen/SelectionDAG/DAGCombiner.cpp')
-rw-r--r-- lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 42
1 file changed, 42 insertions, 0 deletions
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index ebda6d1210..ced48166da 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -295,6 +295,8 @@ namespace {
SDNode *MatchRotate(SDOperand LHS, SDOperand RHS);
SDOperand ReduceLoadWidth(SDNode *N);
+ SDOperand GetDemandedBits(SDOperand V, uint64_t Mask);
+
/// GatherAllAliases - Walk up chain skipping non-aliasing memory nodes,
/// looking for aliasing nodes and adding them to the Aliases vector.
void GatherAllAliases(SDNode *N, SDOperand OriginalChain,
@@ -2793,6 +2795,24 @@ SDOperand DAGCombiner::visitANY_EXTEND(SDNode *N) {
return SDOperand();
}
+/// GetDemandedBits - See if the specified operand can be simplified with the
+/// knowledge that only the bits specified by Mask are used. If so, return the
+/// simpler operand, otherwise return a null SDOperand.
+SDOperand DAGCombiner::GetDemandedBits(SDOperand V, uint64_t Mask) {
+ switch (V.getOpcode()) {
+ default: break;
+ case ISD::OR:
+ case ISD::XOR:
+ // If the LHS or RHS doesn't contribute bits to the or/xor, drop it.
+ if (DAG.MaskedValueIsZero(V.getOperand(0), Mask))
+ return V.getOperand(1);
+ if (DAG.MaskedValueIsZero(V.getOperand(1), Mask))
+ return V.getOperand(0);
+ break;
+ }
+ return SDOperand();
+}
+
/// ReduceLoadWidth - If the result of a wider load is shifted right by N
/// bits and then truncated to a narrower type, where N is a multiple of
/// the number of bits of the narrower type, transform it to a narrower load
@@ -2986,6 +3006,13 @@ SDOperand DAGCombiner::visitTRUNCATE(SDNode *N) {
return N0.getOperand(0);
}
+ // See if we can simplify the input to this truncate through knowledge that
+ // only the low bits are being used. For example "trunc (or (shl x, 8), y)"
+ // -> trunc y
+ SDOperand Shorter = GetDemandedBits(N0, MVT::getIntVTBitMask(VT));
+ if (Shorter.Val)
+ return DAG.getNode(ISD::TRUNCATE, VT, Shorter);
+
// fold (truncate (load x)) -> (smaller load x)
// fold (truncate (srl (load x), c)) -> (smaller load (x+c/evtbits))
return ReduceLoadWidth(N);
@@ -4000,6 +4027,21 @@ SDOperand DAGCombiner::visitSTORE(SDNode *N) {
if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
return SDOperand(N, 0);
+ // FIXME: is there such a thing as a truncating indexed store?
+ if (ST->isTruncatingStore() && ST->getAddressingMode() == ISD::UNINDEXED &&
+ MVT::isInteger(Value.getValueType())) {
+ // See if we can simplify the input to this truncstore with knowledge that
+ // only the low bits are being used. For example:
+ // "truncstore (or (shl x, 8), y), i8" -> "truncstore y, i8"
+ SDOperand Shorter =
+ GetDemandedBits(Value, MVT::getIntVTBitMask(ST->getStoredVT()));
+ AddToWorkList(Value.Val);
+ if (Shorter.Val)
+ return DAG.getTruncStore(Chain, Shorter, Ptr, ST->getSrcValue(),
+ ST->getSrcValueOffset(), ST->getStoredVT(),
+ ST->isVolatile(), ST->getAlignment());
+ }
+
return SDOperand();
}
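
As a further illustration, the following self-contained C++ sketch mirrors the demanded-bits idea from GetDemandedBits over a toy expression node. All names and types here are hypothetical stand-ins, not LLVM's: knownZeroBits plays the role of DAG.MaskedValueIsZero, and getDemandedBits drops an or/xor operand that is known zero in every demanded bit, which is exactly what lets "truncstore (or (shl x, 8), y), i8" become "truncstore y, i8".

    #include <cstdint>
    #include <cstdio>

    struct Node {
      enum Kind { Const, Shl, Or, Xor } kind;
      uint64_t value = 0;             // payload for Const (and shift amounts)
      Node *lhs = nullptr, *rhs = nullptr;
    };

    // Conservatively compute the bits known to be zero in a node's value
    // (a toy stand-in for SelectionDAG's MaskedValueIsZero machinery).
    static uint64_t knownZeroBits(const Node *n) {
      switch (n->kind) {
      case Node::Const: return ~n->value;
      case Node::Shl:   // shl by a constant zeroes the low bits
        if (n->rhs->kind == Node::Const)
          return (knownZeroBits(n->lhs) << n->rhs->value) |
                 ((1ULL << n->rhs->value) - 1);
        return 0;
      case Node::Or:
      case Node::Xor:   // a bit is zero only if it is zero in both operands
        return knownZeroBits(n->lhs) & knownZeroBits(n->rhs);
      }
      return 0;
    }

    // Mirror of the patch's GetDemandedBits: if one operand of an or/xor is
    // zero in every demanded bit, the other operand alone is equivalent.
    static const Node *getDemandedBits(const Node *v, uint64_t mask) {
      if (v->kind == Node::Or || v->kind == Node::Xor) {
        if ((knownZeroBits(v->lhs) & mask) == mask) return v->rhs;
        if ((knownZeroBits(v->rhs) & mask) == mask) return v->lhs;
      }
      return nullptr;                 // no simplification found
    }

    int main() {
      Node x{Node::Const, 0xABCD};
      Node eight{Node::Const, 8};
      Node shl{Node::Shl, 0, &x, &eight};  // shl x, 8
      Node y{Node::Const, 0x12345678};
      Node orNode{Node::Or, 0, &shl, &y};  // or (shl x, 8), y
      // An i8 truncate or truncstore demands only the low 8 bits.
      const Node *simpler = getDemandedBits(&orNode, 0xFF);
      std::printf("simplified to y: %s\n", simpler == &y ? "yes" : "no");
      return 0;
    }

Run against the example from the commit message, this prints "simplified to y: yes", matching the fold the patch performs on the DAG.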