flow.c 19 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818
  1. /*
  2. * Flow - walk the linearized flowgraph, simplifying it as we
  3. * go along.
  4. *
  5. * Copyright (C) 2004 Linus Torvalds
  6. */
  7. #include <string.h>
  8. #include <stdarg.h>
  9. #include <stdlib.h>
  10. #include <stdio.h>
  11. #include <stddef.h>
  12. #include <assert.h>
  13. #include "parse.h"
  14. #include "expression.h"
  15. #include "linearize.h"
  16. #include "flow.h"
  17. #include "target.h"
  18. unsigned long bb_generation;
/*
 * Dammit, if we have a phi-node followed by a conditional
 * branch on that phi-node, we should damn well be able to
 * do something about the source. Maybe.
 */
/*
 * Redirect one branch edge of "bb": if *ptr currently points at "old",
 * make it point at "new" and fix up the parent/child lists on all three
 * blocks to match.  Returns 1 if the edge was rewritten, 0 if *ptr did
 * not match "old", the rewrite would be a no-op, or "bb" is detached
 * (no entrypoint).
 */
static int rewrite_branch(struct basic_block *bb,
	struct basic_block **ptr,
	struct basic_block *old,
	struct basic_block *new)
{
	if (*ptr != old || new == old || !bb->ep)
		return 0;

	/* We might find new if-conversions or non-dominating CSEs */
	/* we may also create new dead cycles */
	repeat_phase |= REPEAT_CSE | REPEAT_CFG_CLEANUP;
	*ptr = new;
	/* Keep the CFG edge lists consistent with the rewritten pointer */
	replace_bb_in_list(&bb->children, old, new, 1);
	remove_bb_from_list(&old->parents, bb, 1);
	add_bb(&new->parents, bb);
	return 1;
}
  40. /*
  41. * Return the known truth value of a pseudo, or -1 if
  42. * it's not known.
  43. */
  44. static int pseudo_truth_value(pseudo_t pseudo)
  45. {
  46. switch (pseudo->type) {
  47. case PSEUDO_VAL:
  48. return !!pseudo->value;
  49. case PSEUDO_REG: {
  50. struct instruction *insn = pseudo->def;
  51. /* A symbol address is always considered true.. */
  52. if (insn->opcode == OP_SYMADDR && insn->target == pseudo)
  53. return 1;
  54. }
  55. /* Fall through */
  56. default:
  57. return -1;
  58. }
  59. }
/*
 * Does a basic block depend on the pseudos that "src" defines?
 *
 * Returns 1 if any pseudo defined in "src" appears in the liveness
 * "needs" set of "target", 0 otherwise.
 */
static int bb_depends_on(struct basic_block *target, struct basic_block *src)
{
	pseudo_t pseudo;

	FOR_EACH_PTR(src->defines, pseudo) {
		if (pseudo_in_list(target->needs, pseudo))
			return 1;
	} END_FOR_EACH_PTR(pseudo);
	return 0;
}
/*
 * This really should be handled by bb_depends_on()
 * which efficiently check the dependence using the
 * defines - needs liveness info. Problem is that
 * there is no liveness done on OP_PHI & OP_PHISRC.
 *
 * This function adds the missing dependency checks:
 * it returns 1 if "target" needs the result of any live
 * OP_PHI instruction in "src", 0 otherwise.
 */
static int bb_depends_on_phi(struct basic_block *target, struct basic_block *src)
{
	struct instruction *insn;

	FOR_EACH_PTR(src->insns, insn) {
		if (!insn->bb)		/* instruction already dead */
			continue;
		if (insn->opcode != OP_PHI)
			continue;
		if (pseudo_in_list(target->needs, insn->target))
			return 1;
	} END_FOR_EACH_PTR(insn);
	return 0;
}
/*
 * When we reach here, we have:
 *  - a basic block that ends in a conditional branch and
 *    that has no side effects apart from the pseudos it
 *    may change.
 *  - the phi-node that the conditional branch depends on
 *  - full pseudo liveness information
 *
 * We need to check if any of the _sources_ of the phi-node
 * may be constant, and not actually need this block at all.
 *
 * "first" is the OP_PHI definition, "second" is the OP_CBR that
 * branches on it.  Returns nonzero if any edge was rewritten.
 */
static int try_to_simplify_bb(struct basic_block *bb, struct instruction *first, struct instruction *second)
{
	int changed = 0;
	pseudo_t phi;
	int bogus;

	/*
	 * This is due to improper dominance tracking during
	 * simplify_symbol_usage()/conversion to SSA form.
	 * No sane simplification can be done when we have this.
	 */
	bogus = bb_list_size(bb->parents) != pseudo_list_size(first->phi_list);

	FOR_EACH_PTR(first->phi_list, phi) {
		struct instruction *def = phi->def;	/* the OP_PHISOURCE feeding this entry */
		struct basic_block *source, *target;
		pseudo_t pseudo;
		struct instruction *br;
		int cond;

		if (!def)
			continue;
		source = def->bb;	/* predecessor that supplies this phi value */
		pseudo = def->src1;	/* the value it supplies */
		if (!pseudo || !source)
			continue;
		br = last_instruction(source->insns);
		if (!br)
			continue;
		if (br->opcode != OP_CBR && br->opcode != OP_BR)
			continue;
		/* Can we statically tell which way "second" would branch? */
		cond = pseudo_truth_value(pseudo);
		if (cond < 0)
			continue;
		target = cond ? second->bb_true : second->bb_false;
		/* Only safe if the chosen target doesn't consume anything bb defines */
		if (bb_depends_on(target, bb))
			continue;
		if (bb_depends_on_phi(target, bb))
			continue;
		/* Route the predecessor straight to the known destination */
		changed |= rewrite_branch(source, &br->bb_true, bb, target);
		changed |= rewrite_branch(source, &br->bb_false, bb, target);
		/* The phi entry is now unused - but only if the list wasn't bogus */
		if (changed && !bogus)
			kill_use(THIS_ADDRESS(phi));
	} END_FOR_EACH_PTR(phi);
	return changed;
}
/*
 * Does this basic block contain any instruction with side effects
 * beyond writing its own pseudos (calls, stores, volatile loads,
 * context changes, asm)?  Dead instructions (insn->bb == NULL) are
 * ignored.
 */
static int bb_has_side_effects(struct basic_block *bb)
{
	struct instruction *insn;

	FOR_EACH_PTR(bb->insns, insn) {
		if (!insn->bb)
			continue;
		switch (insn->opcode) {
		case OP_CALL:
			/* FIXME! This should take "const" etc into account */
			return 1;

		case OP_LOAD:
			/* A typeless load is treated conservatively */
			if (!insn->type)
				return 1;
			if (insn->is_volatile)
				return 1;
			continue;

		case OP_STORE:
		case OP_CONTEXT:
			return 1;

		case OP_ASM:
			/* FIXME! This should take "volatile" etc into account */
			return 1;

		default:
			continue;
		}
	} END_FOR_EACH_PTR(insn);
	return 0;
}
  175. static int simplify_phi_branch(struct basic_block *bb, struct instruction *br)
  176. {
  177. pseudo_t cond = br->cond;
  178. struct instruction *def;
  179. if (cond->type != PSEUDO_REG)
  180. return 0;
  181. def = cond->def;
  182. if (def->bb != bb || def->opcode != OP_PHI)
  183. return 0;
  184. if (bb_has_side_effects(bb))
  185. return 0;
  186. return try_to_simplify_bb(bb, def, br);
  187. }
/*
 * Simplify a branch (from "bb", through *target_p) whose destination
 * itself ends in a conditional branch on the *same* condition: the
 * second branch's outcome is already known, so we can often jump
 * straight to the final destination.  "bb_true" selects which arm of
 * the second branch is taken.  Returns nonzero on change.
 */
static int simplify_branch_branch(struct basic_block *bb, struct instruction *br,
	struct basic_block **target_p, int bb_true)
{
	struct basic_block *target = *target_p, *final;
	struct instruction *insn;
	int retval;

	/* Self-loop: nothing sensible to do */
	if (target == bb)
		return 0;
	insn = last_instruction(target->insns);
	if (!insn || insn->opcode != OP_CBR || insn->cond != br->cond)
		return 0;
	/*
	 * Ahhah! We've found a branch to a branch on the same conditional!
	 * Now we just need to see if we can rewrite the branch..
	 */
	retval = 0;
	final = bb_true ? insn->bb_true : insn->bb_false;
	if (bb_has_side_effects(target))
		goto try_to_rewrite_target;
	if (bb_depends_on(final, target))
		goto try_to_rewrite_target;
	if (bb_depends_on_phi(final, target))
		return 0;
	/* Safe to skip "target" entirely */
	return rewrite_branch(bb, target_p, target, final);

try_to_rewrite_target:
	/*
	 * If we're the only parent, at least we can rewrite the
	 * now-known second branch.
	 */
	if (bb_list_size(target->parents) != 1)
		return retval;
	insert_branch(target, insn, final);
	return 1;
}
  222. static int simplify_one_branch(struct basic_block *bb, struct instruction *br)
  223. {
  224. if (simplify_phi_branch(bb, br))
  225. return 1;
  226. return simplify_branch_branch(bb, br, &br->bb_true, 1) |
  227. simplify_branch_branch(bb, br, &br->bb_false, 0);
  228. }
/*
 * Walk every basic block of the entrypoint and try to simplify any
 * block ending in a conditional branch.  Returns nonzero if any
 * simplification was performed.
 */
static int simplify_branch_nodes(struct entrypoint *ep)
{
	int changed = 0;
	struct basic_block *bb;

	FOR_EACH_PTR(ep->bbs, bb) {
		struct instruction *br = last_instruction(bb->insns);

		if (!br || br->opcode != OP_CBR)
			continue;
		changed |= simplify_one_branch(bb, br);
	} END_FOR_EACH_PTR(bb);
	return changed;
}
/*
 * This is called late - when we have intra-bb liveness information..
 *
 * Entry point of this pass: returns nonzero if the flow graph changed.
 */
int simplify_flow(struct entrypoint *ep)
{
	return simplify_branch_nodes(ep);
}
/*
 * Append all entries of "src" onto the user list at *dst.
 * Thin typed wrapper around the generic ptr-list copy.
 */
static inline void concat_user_list(struct pseudo_user_list *src, struct pseudo_user_list **dst)
{
	copy_ptr_list((struct ptr_list **)dst, (struct ptr_list *)src);
}
/*
 * Replace every use of insn's target pseudo with "src", transferring
 * the user list so that src's users stay accurate.
 */
void convert_instruction_target(struct instruction *insn, pseudo_t src)
{
	pseudo_t target;
	struct pseudo_user *pu;

	/*
	 * Go through the "insn->users" list and replace them all..
	 */
	target = insn->target;
	if (target == src)
		return;
	FOR_EACH_PTR(target->users, pu) {
		if (*pu->userp != VOID) {
			assert(*pu->userp == target);
			*pu->userp = src;
		}
	} END_FOR_EACH_PTR(pu);
	/* VOID and other non-tracked pseudos have no use list to extend */
	if (has_use_list(src))
		concat_user_list(target->users, &src->users);
	target->users = NULL;
}
/*
 * A load has been proven redundant: rewrite all users of its target
 * to use "src" instead, then kill the load itself and request a
 * symbol-cleanup repeat pass.
 */
void convert_load_instruction(struct instruction *insn, pseudo_t src)
{
	convert_instruction_target(insn, src);
	kill_instruction(insn);
	repeat_phase |= REPEAT_SYMBOL_CLEANUP;
}
  278. static int overlapping_memop(struct instruction *a, struct instruction *b)
  279. {
  280. unsigned int a_start = bytes_to_bits(a->offset);
  281. unsigned int b_start = bytes_to_bits(b->offset);
  282. unsigned int a_size = a->size;
  283. unsigned int b_size = b->size;
  284. if (a_size + a_start <= b_start)
  285. return 0;
  286. if (b_size + b_start <= a_start)
  287. return 0;
  288. return 1;
  289. }
  290. static inline int same_memop(struct instruction *a, struct instruction *b)
  291. {
  292. return a->offset == b->offset && a->size == b->size;
  293. }
  294. static inline int distinct_symbols(pseudo_t a, pseudo_t b)
  295. {
  296. if (a->type != PSEUDO_SYM)
  297. return 0;
  298. if (b->type != PSEUDO_SYM)
  299. return 0;
  300. return a->sym != b->sym;
  301. }
/*
 * Return 1 if "dom" dominates the access to "pseudo"
 * in "insn".
 *
 * Return 0 if it doesn't, and -1 if you don't know.
 *
 * "local" means the pseudo's address never escapes, so calls and
 * unrelated accesses cannot alias it.
 */
int dominates(pseudo_t pseudo, struct instruction *insn, struct instruction *dom, int local)
{
	int opcode = dom->opcode;

	/* A call (or function entry) may touch anything non-local */
	if (opcode == OP_CALL || opcode == OP_ENTRY)
		return local ? 0 : -1;
	/* Only memory operations can dominate a memory access */
	if (opcode != OP_LOAD && opcode != OP_STORE)
		return 0;
	if (dom->src != pseudo) {
		if (local)
			return 0;
		/* We don't think two explicitly different symbols ever alias */
		if (distinct_symbols(insn->src, dom->src))
			return 0;
		/* We could try to do some alias analysis here */
		return -1;
	}
	if (!same_memop(insn, dom)) {
		/* A load of a different slice tells us nothing */
		if (dom->opcode == OP_LOAD)
			return 0;
		if (!overlapping_memop(insn, dom))
			return 0;
		/* Partially-overlapping store: unresolvable here */
		return -1;
	}
	return 1;
}
/*
 * We should probably sort the phi list just to make it easier to compare
 * later for equality.
 */
/*
 * Replace a dominated load either by the single pseudo all dominating
 * stores agree on (simple case), or by turning the load into an OP_PHI
 * over the dominators (complex case).
 */
void rewrite_load_instruction(struct instruction *insn, struct pseudo_list *dominators)
{
	pseudo_t new, phi;

	/*
	 * Check for somewhat common case of duplicate
	 * phi nodes.
	 */
	new = first_pseudo(dominators)->def->phi_src;
	FOR_EACH_PTR(dominators, phi) {
		if (new != phi->def->phi_src)
			goto complex_phi;
		/* Keep a name for the pseudo if any source had one */
		new->ident = new->ident ? : phi->ident;
	} END_FOR_EACH_PTR(phi);

	/*
	 * All the same pseudo - mark the phi-nodes unused
	 * and convert the load into a LNOP and replace the
	 * pseudo.
	 */
	convert_load_instruction(insn, new);
	FOR_EACH_PTR(dominators, phi) {
		kill_instruction(phi->def);
	} END_FOR_EACH_PTR(phi);
	goto end;

complex_phi:
	/* We leave symbol pseudos with a bogus usage list here */
	if (insn->src->type != PSEUDO_SYM)
		kill_use(&insn->src);
	/* Reuse the load instruction itself as the phi node */
	insn->opcode = OP_PHI;
	insn->phi_list = dominators;

end:
	repeat_phase |= REPEAT_SYMBOL_CLEANUP;
}
/* Kill a pseudo that is dead on exit from the bb */
// The context is:
// * the variable is not global but may have its address used (local/non-local)
// * the stores are only needed by other functions, which would do some
//   loads via the escaped address
// We start at the terminating BBs (normal exit BB + no-return/unreachable)
// We walk the BB's instructions backward:
// * we're only concerned by loads, stores & calls
// * if we reach a call -> we have to stop if var is non-local
// * if we reach a load of our var -> we have to stop
// * if we reach a store of our var -> we can kill it, it's dead
// * we can ignore other stores & loads if the var is local
// * if we reach another store or load done via non-symbol access
//   (so done via some address calculation) -> we have to stop
// If we reach the top of the BB we can recurse into the parent BBs.
static void kill_dead_stores_bb(pseudo_t pseudo, unsigned long generation, struct basic_block *bb, int local)
{
	struct instruction *insn;
	struct basic_block *parent;

	/* Generation counter guards against revisiting (and CFG cycles) */
	if (bb->generation == generation)
		return;
	bb->generation = generation;
	FOR_EACH_PTR_REVERSE(bb->insns, insn) {
		if (!insn->bb)
			continue;
		switch (insn->opcode) {
		case OP_LOAD:
			if (insn->src == pseudo)
				return;
			break;
		case OP_STORE:
			if (insn->src == pseudo) {
				kill_instruction_force(insn);
				continue;
			}
			break;
		case OP_CALL:
			if (!local)
				return;
			/* local var: a call can't see it, fall through & ignore */
		default:
			continue;
		}
		/* Load/store through a computed (non-symbol) address: give up
		 * unless the variable is provably local */
		if (!local && insn->src->type != PSEUDO_SYM)
			return;
	} END_FOR_EACH_PTR_REVERSE(insn);

	FOR_EACH_PTR(bb->parents, parent) {
		/* Only recurse into parents whose stores reach nowhere else */
		if (bb_list_size(parent->children) > 1)
			continue;
		kill_dead_stores_bb(pseudo, generation, parent, local);
	} END_FOR_EACH_PTR(parent);
}
  420. void check_access(struct instruction *insn)
  421. {
  422. pseudo_t pseudo = insn->src;
  423. if (insn->bb && pseudo->type == PSEUDO_SYM) {
  424. int offset = insn->offset, bit = bytes_to_bits(offset) + insn->size;
  425. struct symbol *sym = pseudo->sym;
  426. if (sym->bit_size > 0 && (offset < 0 || bit > sym->bit_size)) {
  427. if (insn->tainted)
  428. return;
  429. warning(insn->pos, "invalid access %s '%s' (%d %d)",
  430. offset < 0 ? "below" : "past the end of",
  431. show_ident(sym->ident), offset,
  432. bits_to_bytes(sym->bit_size));
  433. insn->tainted = 1;
  434. }
  435. }
  436. }
/*
 * Return the first non-NULL entry of a pseudo's user list,
 * or NULL if it has no users.
 */
static struct pseudo_user *first_user(pseudo_t p)
{
	struct pseudo_user *pu;

	FOR_EACH_PTR(p->users, pu) {
		if (!pu)	/* deleted entries may be left as NULL holes */
			continue;
		return pu;
	} END_FOR_EACH_PTR(pu);
	return NULL;
}
/*
 * Remove stores to "addr" that can never be observed.
 * Fast path: a local with exactly one user that is a store can be
 * killed outright.  Otherwise walk backward from each terminating
 * basic block (no children) killing dead stores along the way.
 */
void kill_dead_stores(struct entrypoint *ep, pseudo_t addr, int local)
{
	unsigned long generation;
	struct basic_block *bb;

	switch (pseudo_user_list_size(addr->users)) {
	case 0:
		return;
	case 1:
		if (local) {
			struct pseudo_user *pu = first_user(addr);
			struct instruction *insn = pu->insn;

			if (insn->opcode == OP_STORE) {
				kill_instruction_force(insn);
				return;
			}
		}
		/* fall through: single non-store user needs the full walk */
	default:
		break;
	}

	generation = ++bb_generation;
	FOR_EACH_PTR(ep->bbs, bb) {
		/* Start only from terminating blocks */
		if (bb->children)
			continue;
		kill_dead_stores_bb(addr, generation, bb, local);
	} END_FOR_EACH_PTR(bb);
}
/*
 * Depth-first walk from "bb", stamping each reachable block with
 * "generation".  The stamp also serves as the visited marker.
 */
static void mark_bb_reachable(struct basic_block *bb, unsigned long generation)
{
	struct basic_block *child;

	if (bb->generation == generation)
		return;
	bb->generation = generation;
	FOR_EACH_PTR(bb->children, child) {
		mark_bb_reachable(child, generation);
	} END_FOR_EACH_PTR(child);
}
  483. static void kill_defs(struct instruction *insn)
  484. {
  485. pseudo_t target = insn->target;
  486. if (!has_use_list(target))
  487. return;
  488. if (target->def != insn)
  489. return;
  490. convert_instruction_target(insn, VOID);
  491. }
/*
 * Completely dismantle a basic block: force-kill every instruction
 * (and redirect users of their targets to VOID), then unlink the
 * block from its children's parent lists and its parents' child lists.
 */
void kill_bb(struct basic_block *bb)
{
	struct instruction *insn;
	struct basic_block *child, *parent;

	FOR_EACH_PTR(bb->insns, insn) {
		if (!insn->bb)
			continue;
		kill_instruction_force(insn);
		kill_defs(insn);
		/*
		 * We kill unreachable instructions even if they
		 * otherwise aren't "killable" (e.g. volatile loads)
		 */
	} END_FOR_EACH_PTR(insn);
	bb->insns = NULL;

	FOR_EACH_PTR(bb->children, child) {
		remove_bb_from_list(&child->parents, bb, 0);
	} END_FOR_EACH_PTR(child);
	bb->children = NULL;

	FOR_EACH_PTR(bb->parents, parent) {
		remove_bb_from_list(&parent->children, bb, 0);
	} END_FOR_EACH_PTR(parent);
	bb->parents = NULL;
}
/*
 * Mark everything reachable from the entry block, then kill and
 * delete every block that wasn't reached, compacting the bb list.
 */
void kill_unreachable_bbs(struct entrypoint *ep)
{
	struct basic_block *bb;
	unsigned long generation = ++bb_generation;

	mark_bb_reachable(ep->entry->bb, generation);
	FOR_EACH_PTR(ep->bbs, bb) {
		if (bb->generation == generation)
			continue;
		/* Mark it as being dead */
		kill_bb(bb);
		bb->ep = NULL;
		/* Safe in-iteration removal; list is repacked below */
		DELETE_CURRENT_PTR(bb);
	} END_FOR_EACH_PTR(bb);
	PACK_PTR_LIST(&ep->bbs);
}
/*
 * In parent block "bb", rewrite every branch edge that points to
 * "old" so that it points to "new" instead.  Handles conditional
 * branches, plain branches and switches.  Returns nonzero on change;
 * a CBR/BR/SWITCH parent is asserted to have had at least one
 * matching edge.
 */
static int rewrite_parent_branch(struct basic_block *bb, struct basic_block *old, struct basic_block *new)
{
	int changed = 0;
	struct instruction *insn = last_instruction(bb->insns);

	if (!insn)
		return 0;

	/* Infinite loops: let's not "optimize" them.. */
	if (old == new)
		return 0;

	switch (insn->opcode) {
	case OP_CBR:
		changed |= rewrite_branch(bb, &insn->bb_false, old, new);
		/* fall through */
	case OP_BR:
		changed |= rewrite_branch(bb, &insn->bb_true, old, new);
		assert(changed);
		return changed;
	case OP_SWITCH: {
		struct multijmp *jmp;

		FOR_EACH_PTR(insn->multijmp_list, jmp) {
			changed |= rewrite_branch(bb, &jmp->target, old, new);
		} END_FOR_EACH_PTR(jmp);
		assert(changed);
		return changed;
	}
	default:
		return 0;
	}
}
/*
 * "bb" consists of nothing but a branch (BR, or CBR with a constant
 * condition): compute its unconditional destination and redirect all
 * parents straight to it.  Returns the destination on full success,
 * NULL if the block can't be bypassed (unknown condition, or a parent
 * whose terminator can't be rewritten).
 */
static struct basic_block * rewrite_branch_bb(struct basic_block *bb, struct instruction *br)
{
	struct basic_block *parent;
	struct basic_block *target = br->bb_true;

	if (br->opcode == OP_CBR) {
		pseudo_t cond = br->cond;

		/* Only a constant condition makes the destination known */
		if (cond->type != PSEUDO_VAL)
			return NULL;
		target = cond->value ? target : br->bb_false;
	}

	/*
	 * We can't do FOR_EACH_PTR() here, because the parent list
	 * may change when we rewrite the parent.
	 */
	while ((parent = first_basic_block(bb->parents)) != NULL) {
		if (!rewrite_parent_branch(parent, bb, target))
			return NULL;
	}
	return target;
}
  580. static void vrfy_bb_in_list(struct basic_block *bb, struct basic_block_list *list)
  581. {
  582. if (bb) {
  583. struct basic_block *tmp;
  584. int no_bb_in_list = 0;
  585. FOR_EACH_PTR(list, tmp) {
  586. if (bb == tmp)
  587. return;
  588. } END_FOR_EACH_PTR(tmp);
  589. assert(no_bb_in_list);
  590. }
  591. }
/* Verify that every parent of "bb" also lists "bb" as a child. */
static void vrfy_parents(struct basic_block *bb)
{
	struct basic_block *tmp;

	FOR_EACH_PTR(bb->parents, tmp) {
		vrfy_bb_in_list(bb, tmp->children);
	} END_FOR_EACH_PTR(tmp);
}
/*
 * Verify that every destination of bb's terminating branch appears in
 * bb's child list, and that every child lists "bb" as a parent.
 */
static void vrfy_children(struct basic_block *bb)
{
	struct basic_block *tmp;
	struct instruction *br = last_instruction(bb->insns);

	/* No terminator means the block must have no successors */
	if (!br) {
		assert(!bb->children);
		return;
	}

	switch (br->opcode) {
		struct multijmp *jmp;
	case OP_CBR:
		vrfy_bb_in_list(br->bb_false, bb->children);
		/* fall through */
	case OP_BR:
		vrfy_bb_in_list(br->bb_true, bb->children);
		break;
	case OP_SWITCH:
	case OP_COMPUTEDGOTO:
		FOR_EACH_PTR(br->multijmp_list, jmp) {
			vrfy_bb_in_list(jmp->target, bb->children);
		} END_FOR_EACH_PTR(jmp);
		break;
	default:
		break;
	}

	FOR_EACH_PTR(bb->children, tmp) {
		vrfy_bb_in_list(bb, tmp->parents);
	} END_FOR_EACH_PTR(tmp);
}
/* Verify both edge directions (children and parents) of one block. */
static void vrfy_bb_flow(struct basic_block *bb)
{
	vrfy_children(bb);
	vrfy_parents(bb);
}
/*
 * Sanity-check the whole flow graph: every block's edges must be
 * consistent, and the entry block must be present in ep->bbs
 * (entry is cleared to NULL when seen; the final assert catches
 * a missing entry).
 */
void vrfy_flow(struct entrypoint *ep)
{
	struct basic_block *bb;
	struct basic_block *entry = ep->entry->bb;

	FOR_EACH_PTR(ep->bbs, bb) {
		if (bb == entry)
			entry = NULL;
		vrfy_bb_flow(bb);
	} END_FOR_EACH_PTR(bb);
	assert(!entry);
}
/*
 * Compact the flow graph: bypass/kill blocks that are nothing but a
 * branch, and merge each block that has a unique parent into that
 * parent when the parent has no other children.
 */
void pack_basic_blocks(struct entrypoint *ep)
{
	struct basic_block *bb;

	/* See if we can merge a bb into another one.. */
	FOR_EACH_PTR(ep->bbs, bb) {
		struct instruction *first, *insn;
		struct basic_block *parent, *child, *last;

		if (!bb_reachable(bb))
			continue;

		/*
		 * Just a branch?
		 */
		FOR_EACH_PTR(bb->insns, first) {
			if (!first->bb)
				continue;
			switch (first->opcode) {
			case OP_NOP:
			case OP_INLINED_CALL:
				continue;
			case OP_CBR:
			case OP_BR: {
				/* First live insn is the branch itself:
				 * the block is empty, try to bypass it */
				struct basic_block *replace;

				replace = rewrite_branch_bb(bb, first);
				if (replace) {
					kill_bb(bb);
					goto no_merge;
				}
			}
				/* fallthrough */
			default:
				goto out;
			}
		} END_FOR_EACH_PTR(first);

out:
		/*
		 * See if we only have one parent..
		 */
		last = NULL;
		FOR_EACH_PTR(bb->parents, parent) {
			if (last) {
				if (last != parent)
					goto no_merge;
				continue;
			}
			last = parent;
		} END_FOR_EACH_PTR(parent);

		parent = last;
		if (!parent || parent == bb)
			continue;

		/*
		 * Goodie. See if the parent can merge..
		 * (it must have no children other than bb)
		 */
		FOR_EACH_PTR(parent->children, child) {
			if (child != bb)
				goto no_merge;
		} END_FOR_EACH_PTR(child);

		/*
		 * Merge the two.
		 */
		repeat_phase |= REPEAT_CFG_CLEANUP;

		/* bb's successors become the parent's successors */
		parent->children = bb->children;
		bb->children = NULL;
		bb->parents = NULL;

		FOR_EACH_PTR(parent->children, child) {
			replace_bb_in_list(&child->parents, bb, parent, 0);
		} END_FOR_EACH_PTR(child);

		/* Drop the parent's now-redundant branch into bb */
		kill_instruction(delete_last_instruction(&parent->insns));

		/* Move bb's live instructions into the parent */
		FOR_EACH_PTR(bb->insns, insn) {
			if (!insn->bb)
				continue;
			assert(insn->bb == bb);
			insn->bb = parent;
			add_instruction(&parent->insns, insn);
		} END_FOR_EACH_PTR(insn);
		bb->insns = NULL;

	no_merge:
		/* nothing to do */;
	} END_FOR_EACH_PTR(bb);
}