Commit 6831ec0a authored by Kevin Modzelewski

Switch all graph work to be based on Blocks, not their indices

This means we can have non-consecutive block indices, which
is a prereq for things like merging blocks.
Not sure how worth it this was
parent e94df26f
......@@ -43,8 +43,8 @@ typename BBAnalyzer<T>::AllMap computeFixedPoint(CFG* cfg, const BBAnalyzer<T> &
std::vector<CFGBlock*> q;
states.insert(make_pair(cfg->blocks[0], Map()));
q.push_back(cfg->blocks[0]);
states.insert(make_pair(cfg->getStartingBlock(), Map()));
q.push_back(cfg->getStartingBlock());
while (q.size()) {
CFGBlock *block = q.back();
......
......@@ -120,9 +120,10 @@ class DefinednessBBAnalyzer : public BBAnalyzer<DefinednessAnalysis::DefinitionL
private:
typedef DefinednessAnalysis::DefinitionLevel DefinitionLevel;
CFG* cfg;
AST_arguments* arguments;
public:
DefinednessBBAnalyzer(AST_arguments* arguments) : arguments(arguments) {
DefinednessBBAnalyzer(CFG* cfg, AST_arguments* arguments) : cfg(cfg), arguments(arguments) {
}
virtual DefinitionLevel merge(DefinitionLevel from, DefinitionLevel into) const {
......@@ -225,7 +226,7 @@ void DefinednessBBAnalyzer::processBB(Map &starting, CFGBlock *block) const {
for (int i = 0; i < block->body.size(); i++) {
block->body[i]->accept(&visitor);
}
if (block->idx == 0 && arguments) {
if (block == cfg->getStartingBlock() && arguments) {
arguments->accept(&visitor);
}
......@@ -238,7 +239,7 @@ void DefinednessBBAnalyzer::processBB(Map &starting, CFGBlock *block) const {
}
DefinednessAnalysis::DefinednessAnalysis(AST_arguments *args, CFG* cfg, ScopeInfo *scope_info) : scope_info(scope_info) {
results = computeFixedPoint(cfg, DefinednessBBAnalyzer(args), false);
results = computeFixedPoint(cfg, DefinednessBBAnalyzer(cfg, args), false);
for (auto p : results) {
RequiredSet required;
......@@ -267,9 +268,7 @@ const DefinednessAnalysis::RequiredSet& DefinednessAnalysis::getDefinedNamesAt(C
PhiAnalysis::PhiAnalysis(AST_arguments* args, CFG* cfg, LivenessAnalysis *liveness, ScopeInfo *scope_info) :
definedness(args, cfg, scope_info), liveness(liveness) {
for (int i = 0; i < cfg->blocks.size(); i++) {
CFGBlock *block = cfg->blocks[i];
for (CFGBlock *block : cfg->blocks) {
RequiredSet required;
if (block->predecessors.size() < 2)
continue;
......
......@@ -73,7 +73,7 @@ static BoxedClass* simpleCallSpeculation(AST_Call* node, CompilerType* rtn_type,
}
typedef std::unordered_map<std::string, CompilerType*> TypeMap;
typedef std::unordered_map<int, TypeMap> AllTypeMap;
typedef std::unordered_map<CFGBlock*, TypeMap> AllTypeMap;
typedef std::unordered_map<AST_expr*, CompilerType*> ExprTypeMap;
typedef std::unordered_map<AST_expr*, BoxedClass*> TypeSpeculations;
class BasicBlockTypePropagator : public ExprVisitor, public StmtVisitor {
......@@ -479,7 +479,7 @@ class PropagatingTypeAnalysis : public TypeAnalysis {
return getTypeAtBlockStart(name, block->successors[0]);
}
virtual ConcreteCompilerType* getTypeAtBlockStart(const std::string &name, CFGBlock* block) {
CompilerType *base = starting_types[block->idx][name];
CompilerType *base = starting_types[block][name];
ASSERT(base != NULL, "%s %d", name.c_str(), block->idx);
ConcreteCompilerType *rtn = base->getConcreteType();
......@@ -538,7 +538,7 @@ class PropagatingTypeAnalysis : public TypeAnalysis {
assert(arg_names.size() == arg_types.size());
{
TypeMap &initial_types = starting_types[0];
TypeMap &initial_types = starting_types[cfg->getStartingBlock()];
for (int i = 0; i < arg_names.size(); i++) {
AST_expr* arg = arg_names[i];
assert(arg->type == AST_TYPE::Name);
......@@ -547,36 +547,34 @@ class PropagatingTypeAnalysis : public TypeAnalysis {
}
}
std::unordered_set<int> in_queue;
std::deque<int> queue;
queue.push_back(0);
std::unordered_set<CFGBlock*> in_queue;
std::deque<CFGBlock*> queue;
queue.push_back(cfg->getStartingBlock());
while (queue.size()) {
int block_id = queue.front();
CFGBlock *block = queue.front();
queue.pop_front();
in_queue.erase(block_id);
CFGBlock *block = cfg->blocks[block_id];
in_queue.erase(block);
TypeMap ending;
if (VERBOSITY("types")) {
printf("processing types for block %d\n", block_id);
printf("processing types for block %d\n", block->idx);
}
if (VERBOSITY("types") >= 2) {
printf("before:\n");
TypeMap &starting = starting_types[block_id];
TypeMap &starting = starting_types[block];
for (auto p : starting) {
ASSERT(p.second, "%s", p.first.c_str());
printf("%s: %s\n", p.first.c_str(), p.second->debugName().c_str());
}
}
BasicBlockTypePropagator::propagate(block, starting_types[block_id], ending, expr_types, type_speculations, speculation, scope_info);
BasicBlockTypePropagator::propagate(block, starting_types[block], ending, expr_types, type_speculations, speculation, scope_info);
if (VERBOSITY("types") >= 2) {
printf("before (after):\n");
TypeMap &starting = starting_types[block_id];
TypeMap &starting = starting_types[block];
for (auto p : starting) {
ASSERT(p.second, "%s", p.first.c_str());
printf("%s: %s\n", p.first.c_str(), p.second->debugName().c_str());
......@@ -589,21 +587,20 @@ class PropagatingTypeAnalysis : public TypeAnalysis {
}
for (int i = 0; i < block->successors.size(); i++) {
int next_id = block->successors[i]->idx;
bool first = (starting_types.count(next_id) == 0);
bool changed = merge(ending, starting_types[next_id]);
if ((first || changed) && in_queue.insert(next_id).second) {
queue.push_back(next_id);
CFGBlock *next_block = block->successors[i];
bool first = (starting_types.count(next_block) == 0);
bool changed = merge(ending, starting_types[next_block]);
if ((first || changed) && in_queue.insert(next_block).second) {
queue.push_back(next_block);
}
}
}
if (VERBOSITY("types") >= 2) {
for (int i = 0; i < cfg->blocks.size(); i++) {
printf("Types at beginning of block %d:\n", i);
CFGBlock *b = cfg->blocks[i];
for (CFGBlock *b : cfg->blocks) {
printf("Types at beginning of block %d:\n", b->idx);
TypeMap &starting = starting_types[i];
TypeMap &starting = starting_types[b];
for (auto p : starting) {
ASSERT(p.second, "%s", p.first.c_str());
printf("%s: %s\n", p.first.c_str(), p.second->debugName().c_str());
......
This diff is collapsed.
......@@ -163,7 +163,7 @@ class IRGeneratorImpl : public IRGenerator {
IREmitterImpl emitter;
SymbolTable symbol_table;
std::vector<llvm::BasicBlock*> &entry_blocks;
std::unordered_map<CFGBlock*, llvm::BasicBlock*> &entry_blocks;
llvm::BasicBlock *curblock;
CFGBlock *myblock;
TypeAnalysis *types;
......@@ -178,8 +178,8 @@ class IRGeneratorImpl : public IRGenerator {
} state;
public:
IRGeneratorImpl(IRGenState *irstate, std::vector<llvm::BasicBlock*> &entry_blocks, CFGBlock *myblock, TypeAnalysis *types, GuardList &out_guards, const GuardList &in_guards, bool is_partial) : irstate(irstate), emitter(irstate), entry_blocks(entry_blocks), myblock(myblock), types(types), out_guards(out_guards), in_guards(in_guards), state(is_partial ? PARTIAL : RUNNING) {
llvm::BasicBlock* entry_block = entry_blocks[myblock->idx];
IRGeneratorImpl(IRGenState *irstate, std::unordered_map<CFGBlock*, llvm::BasicBlock*> &entry_blocks, CFGBlock *myblock, TypeAnalysis *types, GuardList &out_guards, const GuardList &in_guards, bool is_partial) : irstate(irstate), emitter(irstate), entry_blocks(entry_blocks), myblock(myblock), types(types), out_guards(out_guards), in_guards(in_guards), state(is_partial ? PARTIAL : RUNNING) {
llvm::BasicBlock* entry_block = entry_blocks[myblock];
emitter.getBuilder()->SetInsertPoint(entry_block);
curblock = entry_block;
}
......@@ -1286,8 +1286,8 @@ class IRGeneratorImpl : public IRGenerator {
val->decvref(emitter);
llvm::Value *llvm_nonzero = nonzero->getValue();
llvm::BasicBlock *iftrue = entry_blocks[node->iftrue->idx];
llvm::BasicBlock *iffalse = entry_blocks[node->iffalse->idx];
llvm::BasicBlock *iftrue = entry_blocks[node->iftrue];
llvm::BasicBlock *iffalse = entry_blocks[node->iffalse];
nonzero->decvref(emitter);
......@@ -1461,7 +1461,7 @@ class IRGeneratorImpl : public IRGenerator {
endBlock(FINISHED);
llvm::BasicBlock *target = entry_blocks[node->target->idx];
llvm::BasicBlock *target = entry_blocks[node->target];
if (ENABLE_OSR && node->target->idx < myblock->idx && irstate->getEffortLevel() < EffortLevel::MAXIMAL) {
assert(node->target->predecessors.size() > 1);
......@@ -1714,7 +1714,7 @@ class IRGeneratorImpl : public IRGenerator {
};
IRGenerator *createIRGenerator(IRGenState *irstate, std::vector<llvm::BasicBlock*> &entry_blocks, CFGBlock *myblock, TypeAnalysis *types, GuardList &out_guards, const GuardList &in_guards, bool is_partial) {
IRGenerator *createIRGenerator(IRGenState *irstate, std::unordered_map<CFGBlock*, llvm::BasicBlock*> &entry_blocks, CFGBlock *myblock, TypeAnalysis *types, GuardList &out_guards, const GuardList &in_guards, bool is_partial) {
return new IRGeneratorImpl(irstate, entry_blocks, myblock, types, out_guards, in_guards, is_partial);
}
......
......@@ -198,7 +198,7 @@ class IRGenerator {
};
IREmitter *createIREmitter(IRGenState *irstate);
IRGenerator *createIRGenerator(IRGenState *irstate, std::vector<llvm::BasicBlock*> &entry_blocks, CFGBlock *myblock, TypeAnalysis *types, GuardList &out_guards, const GuardList &in_guards, bool is_partial);
IRGenerator *createIRGenerator(IRGenState *irstate, std::unordered_map<CFGBlock*, llvm::BasicBlock*> &entry_blocks, CFGBlock *myblock, TypeAnalysis *types, GuardList &out_guards, const GuardList &in_guards, bool is_partial);
}
......
......@@ -63,7 +63,7 @@ class BufferedReader {
uint8_t readByte() {
ensure(1);
assert(end > start);
assert(end > start && "premature eof");
if (VERBOSITY("parsing") >= 2)
printf("readByte, now %d %d\n", start+1, end);
return buf[start++];
......
......@@ -29,10 +29,20 @@ void CFGBlock::connectTo(CFGBlock *successor, bool allow_backedge) {
assert(this->idx >= 0);
ASSERT(successor->idx == -1 || successor->idx > this->idx, "edge from %d to %d", this->idx, successor->idx);
}
//assert(successors.count(successor) == 0);
//assert(successor->predecessors.count(this) == 0);
successors.push_back(successor);
successor->predecessors.push_back(this);
}
void CFGBlock::unconnectFrom(CFGBlock *successor) {
//assert(successors.count(successor));
//assert(successor->predecessors.count(this));
successors.erase(std::remove(successors.begin(), successors.end(), successor), successors.end());
successor->predecessors.erase(std::remove(successor->predecessors.begin(), successor->predecessors.end(), this), successor->predecessors.end());
}
class CFGVisitor : public ASTVisitor {
private:
AST_TYPE::AST_TYPE root_type;
......@@ -664,7 +674,7 @@ class CFGVisitor : public ASTVisitor {
virtual bool visit_functiondef(AST_FunctionDef* node) { push_back(node); return true; }
virtual bool visit_global(AST_Global* node) { push_back(node); return true; }
virtual bool visit_import(AST_Import* node) { push_back(node); return true; }
virtual bool visit_pass(AST_Pass* node) { push_back(node); return true; }
virtual bool visit_pass(AST_Pass* node) { return true; }
virtual bool visit_assign(AST_Assign* node) {
AST_Assign* remapped = new AST_Assign();
......@@ -1184,8 +1194,8 @@ void CFG::print() {
printf("%ld blocks\n", blocks.size());
PrintVisitor *pv = new PrintVisitor(4);
for (int i = 0; i < blocks.size(); i++) {
printf("Block %d", i);
CFGBlock *b = blocks[i];
printf("Block %d", b->idx);
if (b->info)
printf(" '%s'", b->info);
......@@ -1228,9 +1238,7 @@ CFG* computeCFG(AST_TYPE::AST_TYPE root_type, std::vector<AST_stmt*> body) {
////
// Check some properties expected by later stages:
// Block 0 is hard-coded to be the entry block, and shouldn't have any
// predecessors:
assert(rtn->blocks[0]->predecessors.size() == 0);
assert(rtn->getStartingBlock()->predecessors.size() == 0);
// We need to generate the CFG in a way that doesn't have any critical edges,
// since the ir generation requires that.
......@@ -1268,6 +1276,40 @@ CFG* computeCFG(AST_TYPE::AST_TYPE root_type, std::vector<AST_stmt*> body) {
assert(rtn->blocks[i]->predecessors[0]->idx < i);
}
// Prune unnecessary blocks from the CFG.
// Not strictly necessary, but makes the output easier to look at,
// and can make the analyses more efficient.
// The extra blocks would get merged by LLVM passes, so I'm not sure
// how much overall improvement there is.
for (CFGBlock* b : rtn->blocks) {
while (b->successors.size() == 1) {
CFGBlock *b2 = b->successors[0];
if (b2->predecessors.size() != 1)
break;
if (VERBOSITY()) {
//rtn->print();
printf("Joining blocks %d and %d\n", b->idx, b2->idx);
}
assert(b->body[b->body.size()-1]->type == AST_TYPE::Jump);
b->body.pop_back();
b->body.insert(b->body.end(), b2->body.begin(), b2->body.end());
b->unconnectFrom(b2);
for (CFGBlock *b3 : b2->successors) {
b->connectTo(b3, true);
b2->unconnectFrom(b3);
}
rtn->blocks.erase(std::remove(rtn->blocks.begin(), rtn->blocks.end(), b2), rtn->blocks.end());
delete b2;
}
}
assert(rtn->getStartingBlock()->idx == 0);
/*
// I keep on going back and forth about whether or not it's ok to reuse AST nodes.
// On the one hand, it's nice to say that an AST* pointer uniquely identifies a spot
......
......@@ -51,6 +51,7 @@ class CFGBlock {
}
void connectTo(CFGBlock *successor, bool allow_backedge=false);
void unconnectFrom(CFGBlock *successor);
void push_back(AST_stmt* node) {
body.push_back(node);
......@@ -60,11 +61,19 @@ class CFGBlock {
// Control Flow Graph
class CFG {
private:
int next_idx;
public:
std::vector<CFGBlock*> blocks;
CFG() : next_idx(0) {}
CFGBlock* getStartingBlock() {
return blocks[0];
}
CFGBlock* addBlock() {
int idx = blocks.size();
int idx = next_idx;
next_idx++;
CFGBlock* block = new CFGBlock(this, idx);
blocks.push_back(block);
......@@ -78,7 +87,8 @@ class CFG {
void placeBlock(CFGBlock *block) {
assert(block->idx == -1);
block->idx = blocks.size();
block->idx = next_idx;
next_idx++;
blocks.push_back(block);
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment