source: clamav/trunk/libclamav/c++/llvm/lib/CodeGen/CalcSpillWeights.cpp @ 319

Last change on this file was r319, checked in by Yuri Dario, 14 years ago

clamav: update trunk to 0.97.

//===------------------------ CalcSpillWeights.cpp ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "calcspillweights"

#include "llvm/Function.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;

char CalculateSpillWeights::ID = 0;
INITIALIZE_PASS(CalculateSpillWeights, "calcspillweights",
                "Calculate spill weights", false, false);
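
// (Note: a register allocator that wants these precomputed weights would
// typically declare a dependency on this pass in its own getAnalysisUsage(),
// e.g. AU.addRequired<CalculateSpillWeights>(); this is only a sketch of
// typical PassManager usage, not code taken from this file.)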

void CalculateSpillWeights::getAnalysisUsage(AnalysisUsage &au) const {
  au.addRequired<LiveIntervals>();
  au.addRequired<MachineLoopInfo>();
  au.setPreservesAll();
  MachineFunctionPass::getAnalysisUsage(au);
}

bool CalculateSpillWeights::runOnMachineFunction(MachineFunction &fn) {

  DEBUG(dbgs() << "********** Compute Spill Weights **********\n"
               << "********** Function: "
               << fn.getFunction()->getName() << '\n');

  LiveIntervals &lis = getAnalysis<LiveIntervals>();
  VirtRegAuxInfo vrai(fn, lis, getAnalysis<MachineLoopInfo>());
  for (LiveIntervals::iterator I = lis.begin(), E = lis.end(); I != E; ++I) {
    LiveInterval &li = *I->second;
    if (TargetRegisterInfo::isVirtualRegister(li.reg))
      vrai.CalculateWeightAndHint(li);
  }
  return false;
}

// Return the preferred allocation register for reg, given a COPY instruction.
static unsigned copyHint(const MachineInstr *mi, unsigned reg,
                         const TargetRegisterInfo &tri,
                         const MachineRegisterInfo &mri) {
  unsigned sub, hreg, hsub;
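  // A COPY's operand 0 is the destination and operand 1 the source; whichever
  // side is not 'reg' becomes the hint candidate (hreg), together with any
  // sub-register indices involved.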
  if (mi->getOperand(0).getReg() == reg) {
    sub = mi->getOperand(0).getSubReg();
    hreg = mi->getOperand(1).getReg();
    hsub = mi->getOperand(1).getSubReg();
  } else {
    sub = mi->getOperand(1).getSubReg();
    hreg = mi->getOperand(0).getReg();
    hsub = mi->getOperand(0).getSubReg();
  }

  if (!hreg)
    return 0;

  if (TargetRegisterInfo::isVirtualRegister(hreg))
    return sub == hsub ? hreg : 0;

  const TargetRegisterClass *rc = mri.getRegClass(reg);

  // Only allow physreg hints in rc.
  if (sub == 0)
    return rc->contains(hreg) ? hreg : 0;

  // reg:sub should match the physreg hreg.
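  // (For example, on an x86-like target, if reg:sub_8bit is copied from AL and
  // rc is GR32, the matching super-register hint would be EAX. Illustrative
  // only; the register and sub-register names depend on the target.)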
  return tri.getMatchingSuperReg(hreg, sub, rc);
}

void VirtRegAuxInfo::CalculateWeightAndHint(LiveInterval &li) {
  MachineRegisterInfo &mri = mf_.getRegInfo();
  const TargetRegisterInfo &tri = *mf_.getTarget().getRegisterInfo();
  MachineBasicBlock *mbb = 0;
  MachineLoop *loop = 0;
  unsigned loopDepth = 0;
  bool isExiting = false;
  float totalWeight = 0;
  SmallPtrSet<MachineInstr*, 8> visited;

  // Find the best physreg hint and the best virtreg hint.
  float bestPhys = 0, bestVirt = 0;
  unsigned hintPhys = 0, hintVirt = 0;

  // Don't recompute a target specific hint.
  bool noHint = mri.getRegAllocationHint(li.reg).first != 0;

  for (MachineRegisterInfo::reg_iterator I = mri.reg_begin(li.reg);
       MachineInstr *mi = I.skipInstruction();) {
    if (mi->isIdentityCopy() || mi->isImplicitDef() || mi->isDebugValue())
      continue;
    if (!visited.insert(mi))
      continue;

    // Get loop info for mi.
    if (mi->getParent() != mbb) {
      mbb = mi->getParent();
      loop = loops_.getLoopFor(mbb);
      loopDepth = loop ? loop->getLoopDepth() : 0;
      isExiting = loop ? loop->isLoopExiting(mbb) : false;
    }

    // Calculate instr weight.
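    // (In this version of LLVM, getSpillWeight is roughly
    // (reads + writes) * 10^loopDepth, so uses inside deeply nested loops
    // dominate the total; stated here as a rough characterization, not a
    // definition from this file.)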
    bool reads, writes;
    tie(reads, writes) = mi->readsWritesVirtualRegister(li.reg);
    float weight = LiveIntervals::getSpillWeight(writes, reads, loopDepth);

    // Give extra weight to what looks like a loop induction variable update.
    if (writes && isExiting && lis_.isLiveOutOfMBB(li, mbb))
      weight *= 3;

    totalWeight += weight;

    // Get allocation hints from copies.
    if (noHint || !mi->isCopy())
      continue;
    unsigned hint = copyHint(mi, li.reg, tri, mri);
    if (!hint)
      continue;
    float hweight = hint_[hint] += weight;
    if (TargetRegisterInfo::isPhysicalRegister(hint)) {
      if (hweight > bestPhys && lis_.isAllocatable(hint))
        bestPhys = hweight, hintPhys = hint;
    } else {
      if (hweight > bestVirt)
        bestVirt = hweight, hintVirt = hint;
    }
  }

  hint_.clear();

  // Always prefer the physreg hint.
  if (unsigned hint = hintPhys ? hintPhys : hintVirt) {
    mri.setRegAllocationHint(li.reg, 0, hint);
    // Weakly boost the spill weight of hinted registers.
    totalWeight *= 1.01F;
  }

  // Mark li as unspillable if all live ranges are tiny.
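  // (markNotSpillable() gives the interval an effectively infinite weight,
  // HUGE_VALF in this version of LLVM, so the allocator never picks it as a
  // spill candidate.)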
  if (li.isZeroLength()) {
    li.markNotSpillable();
    return;
  }

  // If all of the definitions of the interval are re-materializable,
  // it is a preferred candidate for spilling. If none of the defs are
  // loads, then it's potentially very cheap to re-materialize.
  // FIXME: this gets much more complicated once we support non-trivial
  // re-materialization.
  bool isLoad = false;
  SmallVector<LiveInterval*, 4> spillIs;
  if (lis_.isReMaterializable(li, spillIs, isLoad)) {
    if (isLoad)
      totalWeight *= 0.9F;
    else
      totalWeight *= 0.5F;
  }

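  // (normalizeSpillWeight then rescales the accumulated weight by the size of
  // the live interval, so that a long interval with few uses comes out cheaper
  // to spill than a short, heavily used one; this is a rough description, see
  // LiveIntervalAnalysis for the exact formula.)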
  li.weight = totalWeight;
  lis_.normalizeSpillWeight(li);
}

void VirtRegAuxInfo::CalculateRegClass(unsigned reg) {
  MachineRegisterInfo &mri = mf_.getRegInfo();
  const TargetRegisterInfo *tri = mf_.getTarget().getRegisterInfo();
  const TargetRegisterClass *orc = mri.getRegClass(reg);
  SmallPtrSet<const TargetRegisterClass*,8> rcs;

  for (MachineRegisterInfo::reg_nodbg_iterator I = mri.reg_nodbg_begin(reg),
       E = mri.reg_nodbg_end(); I != E; ++I) {
    // The targets don't have accurate enough regclass descriptions that we can
    // handle subregs. We need something similar to
    // TRI::getMatchingSuperRegClass, but returning a super class instead of a
    // sub class.
    if (I.getOperand().getSubReg()) {
      DEBUG(dbgs() << "Cannot handle subregs: " << I.getOperand() << '\n');
      return;
    }
    if (const TargetRegisterClass *rc =
            I->getDesc().getRegClass(I.getOperandNo(), tri))
      rcs.insert(rc);
  }

  // If we found no regclass constraints, just leave reg as is.
  // In theory, we could inflate to the largest superclass of reg's existing
  // class, but that might not be legal for the current cpu setting.
  // This could happen if reg is only used by COPY instructions, so we may need
  // to improve on this.
  if (rcs.empty()) {
    return;
  }

  // Compute the intersection of all classes in rcs.
  // This ought to be independent of iteration order, but if the target register
  // classes don't form a proper algebra, it is possible to get different
  // results. The solution is to make sure the intersection of any two register
  // classes is also a register class or the null set.
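  // (For instance, on an x86-like target the intersection of GR32 and its
  // subclass GR32_ABCD is GR32_ABCD; illustrative only, the class names are
  // target specific.)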
  const TargetRegisterClass *rc = 0;
  for (SmallPtrSet<const TargetRegisterClass*,8>::iterator I = rcs.begin(),
         E = rcs.end(); I != E; ++I) {
    rc = rc ? getCommonSubClass(rc, *I) : *I;
    assert(rc && "Incompatible regclass constraints found");
  }

  if (rc == orc)
    return;
  DEBUG(dbgs() << "Inflating " << orc->getName() << ":%reg" << reg << " to "
               << rc->getName() << ".\n");
  mri.setRegClass(reg, rc);
}