// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#include "lower.h" // for LowerRange()
// Flowgraph Optimization
//------------------------------------------------------------------------
// fgComputeReturnBlocks: Compute the set of BBJ_RETURN blocks.
//
// Initialize `fgReturnBlocks` to a list of the BBJ_RETURN blocks in the function.
//
void Compiler::fgComputeReturnBlocks()
{
fgReturnBlocks = nullptr;
for (BasicBlock* const block : Blocks())
{
// If this is a BBJ_RETURN block, add it to our list of all BBJ_RETURN blocks. This list is only
// used to find return blocks.
if (block->KindIs(BBJ_RETURN))
{
fgReturnBlocks = new (this, CMK_Reachability) BasicBlockList(block, fgReturnBlocks);
}
}
#ifdef DEBUG
if (verbose)
{
printf("Return blocks:");
if (fgReturnBlocks == nullptr)
{
printf(" NONE");
}
else
{
for (const BasicBlockList* bl = fgReturnBlocks; bl != nullptr; bl = bl->next)
{
printf(" " FMT_BB, bl->block->bbNum);
}
}
printf("\n");
}
#endif // DEBUG
}
//------------------------------------------------------------------------
// fgRemoveUnreachableBlocks: Remove unreachable blocks.
//
// Some blocks (marked with BBF_DONT_REMOVE) can't be removed even if unreachable, in which case they
// are converted to `throw` blocks. Internal throw helper blocks and the single return block (if any)
// are never considered unreachable.
//
// Arguments:
// canRemoveBlock - Method that determines whether a block can be removed. In earlier phases, it
// relies on the reachability sets. During the final phase, it depends on the DFS walk of the flowgraph,
// treating blocks that were not visited as unreachable.
//
// Return Value:
// Return true if changes were made that may cause additional blocks to be removable.
//
// Notes:
// Unreachable blocks removal phase happens twice.
//
// During early phases (e.g., RecomputeLoopInfo), whether a block is reachable is determined
// from the reachability sets, which must therefore be computed and valid.
//
// During the late phase, all blocks reachable from fgFirstBB are traversed and everything
// else is marked as unreachable (with the exception of handler/filter blocks). As such, this
// does not depend on the validity of the reachability sets.
//
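// Illustrative sketch (an assumed caller pattern, not code from this function): a
// late-phase caller can pass a DFS-based predicate that treats any block not
// contained in the DFS tree as unreachable:
//
//   auto canRemoveBlock = [this](BasicBlock* block) -> bool {
//       return !m_dfsTree->Contains(block);
//   };
//   fgRemoveUnreachableBlocks(canRemoveBlock);
//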
template <typename CanRemoveBlockBody>
bool Compiler::fgRemoveUnreachableBlocks(CanRemoveBlockBody canRemoveBlock)
{
bool hasUnreachableBlocks = false;
bool changed = false;
// Mark unreachable blocks with BBF_REMOVED.
for (BasicBlock* const block : Blocks())
{
// Internal throw blocks are always reachable.
if (fgIsThrowHlpBlk(block))
{
continue;
}
else if (block == genReturnBB)
{
// Don't remove statements for the genReturnBB block, as we might have special hookups there.
// For example, the profiler hookup needs to have the "void GT_RETURN" statement
// to properly set the info.compProfilerCallback flag.
continue;
}
else if (block->HasFlag(BBF_DONT_REMOVE) && block->isEmpty() && block->KindIs(BBJ_THROW))
{
// We already converted a non-removable block to a throw; don't bother processing it again.
continue;
}
else if (!canRemoveBlock(block))
{
continue;
}
// Remove all the code for the block
fgUnreachableBlock(block);
// Make sure that the block was marked as removed
noway_assert(block->HasFlag(BBF_REMOVED));
if (block->HasFlag(BBF_DONT_REMOVE))
{
// Unmark the block as removed, clear BBF_INTERNAL, and set BBF_IMPORTED
JITDUMP("Converting BBF_DONT_REMOVE block " FMT_BB " to BBJ_THROW\n", block->bbNum);
// If the CALLFINALLY is being replaced by a throw, then the CALLFINALLYRET is unreachable.
if (block->isBBCallFinallyPair())
{
BasicBlock* const leaveBlock = block->Next();
fgPrepareCallFinallyRetForRemoval(leaveBlock);
}
// The successors may be unreachable after this change.
changed |= block->NumSucc() > 0;
block->RemoveFlags(BBF_REMOVED | BBF_INTERNAL);
block->SetFlags(BBF_IMPORTED);
block->SetKindAndTargetEdge(BBJ_THROW);
block->bbSetRunRarely();
}
else
{
/* We have to call fgRemoveBlock next */
hasUnreachableBlocks = true;
changed = true;
}
}
if (hasUnreachableBlocks)
{
// Now remove the unreachable blocks: if we marked a block with BBF_REMOVED then we need to
// call fgRemoveBlock() on it.
BasicBlock* bNext;
for (BasicBlock* block = fgFirstBB; block != nullptr; block = bNext)
{
if (block->HasFlag(BBF_REMOVED))
{
bNext = fgRemoveBlock(block, /* unreachable */ true);
}
else
{
bNext = block->Next();
}
}
}
return changed;
}
//-------------------------------------------------------------
// fgComputeDominators: Compute dominators
//
// Returns:
// Suitable phase status.
//
PhaseStatus Compiler::fgComputeDominators()
{
if (m_dfsTree == nullptr)
{
m_dfsTree = fgComputeDfs();
}
if (m_domTree == nullptr)
{
m_domTree = FlowGraphDominatorTree::Build(m_dfsTree);
}
bool anyHandlers = false;
for (EHblkDsc* const HBtab : EHClauses(this))
{
if (HBtab->HasFilter())
{
BasicBlock* filter = HBtab->ebdFilter;
if (m_dfsTree->Contains(filter))
{
filter->SetDominatedByExceptionalEntryFlag();
anyHandlers = true;
}
}
BasicBlock* handler = HBtab->ebdHndBeg;
if (m_dfsTree->Contains(handler))
{
handler->SetDominatedByExceptionalEntryFlag();
anyHandlers = true;
}
}
if (anyHandlers)
{
assert(m_dfsTree->GetPostOrder(m_dfsTree->GetPostOrderCount() - 1) == fgFirstBB);
// Now propagate dominator flag in reverse post-order, skipping first BB.
// (This could walk the dominator tree instead, but this linear order
// is more efficient to visit and still guarantees we see the
// dominators before the dominated blocks).
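// For example, if a handler entry H is the bbIDom of B1, and B1 is the bbIDom of B2,
// this pass flags B1 first (its bbIDom H is already flagged) and then B2 (whose bbIDom
// B1 has just been flagged), so a single linear pass suffices.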
for (unsigned i = m_dfsTree->GetPostOrderCount() - 1; i != 0; i--)
{
BasicBlock* block = m_dfsTree->GetPostOrder(i - 1);
assert(block->bbIDom != nullptr);
if (block->bbIDom->IsDominatedByExceptionalEntryFlag())
{
block->SetDominatedByExceptionalEntryFlag();
}
}
}
return PhaseStatus::MODIFIED_NOTHING;
}
//-------------------------------------------------------------
// fgInitBlockVarSets: Initialize the per-block variable sets (used for liveness analysis).
//
// Notes:
// Initializes:
// bbVarUse, bbVarDef, bbLiveIn, bbLiveOut,
// bbMemoryUse, bbMemoryDef, bbMemoryLiveIn, bbMemoryLiveOut,
// bbScope
//
void Compiler::fgInitBlockVarSets()
{
for (BasicBlock* const block : Blocks())
{
block->InitVarSets(this);
}
fgBBVarSetsInited = true;
}
//------------------------------------------------------------------------
// fgPostImportationCleanups: clean up flow graph after importation
//
// Returns:
// suitable phase status
//
// Notes:
//
// Find and remove any basic blocks that are useless (e.g. they have not been
// imported because they are not reachable, or they have been optimized away).
//
// Remove try regions where no blocks in the try were imported.
// Update the end of try and handler regions where trailing blocks were not imported.
// Update the start of try regions that were partially imported (OSR)
//
// For OSR, add "step blocks" and conditional logic to ensure the path from
// method entry to the OSR logical entry point always flows through the first
// block of any enclosing try.
//
// In particular, given a method like
//
// S0;
// try {
// S1;
// try {
// S2;
// for (...) {} // OSR logical entry here
// }
// }
//
// Where the Sn are arbitrary hammocks of code, the OSR logical entry point
// would be in the middle of a nested try. We can't branch there directly
// from the OSR method entry. So we transform the flow to:
//
// _firstCall = 0;
// goto pt1;
// S0;
// pt1:
// try {
// if (_firstCall == 0) goto pt2;
// S1;
// pt2:
// try {
// if (_firstCall == 0) goto pp;
// S2;
// pp:
// _firstCall = 1;
// for (...)
// }
// }
//
// where the "state variable" _firstCall guides execution appropriately
// from OSR method entry, and flow always enters the try blocks at the
// first block of the try.
//
PhaseStatus Compiler::fgPostImportationCleanup()
{
// Bail, if this is a failed inline
//
if (compDonotInline())
{
return PhaseStatus::MODIFIED_NOTHING;
}
if (compIsForInlining())
{
// Update type of return spill temp if we have gathered
// better info when importing the inlinee, and the return
// spill temp is single def.
if (fgNeedReturnSpillTemp())
{
CORINFO_CLASS_HANDLE retExprClassHnd = impInlineInfo->retExprClassHnd;
if (retExprClassHnd != nullptr)
{
LclVarDsc* returnSpillVarDsc = lvaGetDesc(lvaInlineeReturnSpillTemp);
if ((returnSpillVarDsc->lvType == TYP_REF) && returnSpillVarDsc->lvSingleDef)
{
lvaUpdateClass(lvaInlineeReturnSpillTemp, retExprClassHnd, impInlineInfo->retExprClassHndIsExact);
}
}
}
}
BasicBlock* cur;
BasicBlock* nxt;
// If we remove any blocks, we'll have to do additional work
unsigned removedBlks = 0;
for (cur = fgFirstBB; cur != nullptr; cur = nxt)
{
// Get hold of the next block (in case we delete 'cur')
nxt = cur->Next();
// Should this block be removed?
if (!cur->HasFlag(BBF_IMPORTED))
{
noway_assert(cur->isEmpty());
if (ehCanDeleteEmptyBlock(cur))
{
JITDUMP(FMT_BB " was not imported, marking as removed (%d)\n", cur->bbNum, removedBlks);
// Notify all successors that cur is no longer a pred.
//
// This may not be necessary once we have pred lists built before importation.
// When we alter flow in the importer branch opts, we should be able to make
// suitable updates there for blocks that we plan to keep.
//
for (BasicBlock* succ : cur->Succs(this))
{
fgRemoveAllRefPreds(succ, cur);
}
cur->SetFlags(BBF_REMOVED);
removedBlks++;
// Drop the block from the list.
//
// We rely on the fact that this does not clear out
// cur->bbNext or cur->bbPrev in the code that
// follows.
fgUnlinkBlockForRemoval(cur);
}
else
{
// We were prevented from deleting this block by EH
// normalization. Mark the block as imported.
cur->SetFlags(BBF_IMPORTED);
}
}
}
// If no blocks were removed, we're done.
// Unless we are an OSR method with a try entry.
//
if ((removedBlks == 0) && !(opts.IsOSR() && fgOSREntryBB->hasTryIndex()))
{
return PhaseStatus::MODIFIED_NOTHING;
}
// Update all references in the exception handler table.
//
// We may have made the entire try block unreachable.
// Check for this case and remove the entry from the EH table.
//
// For OSR, just the initial part of a try range may become
// unreachable; if so we need to shrink the try range down
// to the portion that was imported.
unsigned XTnum;
EHblkDsc* HBtab;
unsigned delCnt = 0;
// Walk the EH regions from inner to outer
for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
{
AGAIN:
// If start of a try region was not imported, then we either
// need to trim the region extent, or remove the region
// entirely.
//
// In normal importation, it is not valid to jump into the
// middle of a try, so if the try entry was not imported, the
// entire try can be removed.
//
// In OSR importation the entry patchpoint may be in the
// middle of a try, and we need to determine how much of the
// try ended up getting imported. Because of backwards
// branches we may end up importing the entire try even though
// execution starts in the middle.
//
// Note it is common in both cases for the ends of trys (and
// associated handlers) to end up not getting imported, so if
// the try region is not removed, we always check if we need
// to trim the ends.
//
if (HBtab->ebdTryBeg->HasFlag(BBF_REMOVED))
{
// Usual case is that the entire try can be removed.
bool removeTryRegion = true;
if (opts.IsOSR())
{
// For OSR we may need to trim the try region start.
//
// We rely on the fact that removed blocks have been snipped from
// the main block list, but that those removed blocks have kept
// their bbprev (and bbnext) links.
//
// Find the first unremoved block before the try entry block.
//
BasicBlock* const oldTryEntry = HBtab->ebdTryBeg;
BasicBlock* tryEntryPrev = oldTryEntry->Prev();
assert(tryEntryPrev != nullptr);
while (tryEntryPrev->HasFlag(BBF_REMOVED))
{
tryEntryPrev = tryEntryPrev->Prev();
// Because we've added an unremovable scratch block as
// fgFirstBB, this backwards walk should always find
// some block.
assert(tryEntryPrev != nullptr);
}
// If there is a next block of this prev block, and that block is
// contained in the current try, we'd like to make that block
// the new start of the try, and keep the region.
BasicBlock* newTryEntry = tryEntryPrev->Next();
bool updateTryEntry = false;
if ((newTryEntry != nullptr) && bbInTryRegions(XTnum, newTryEntry))
{
// We want to trim the begin extent of the current try region to newTryEntry.
//
// This method is invoked after EH normalization, so we may need to ensure all
// try regions begin at blocks that are not the start or end of some other try.
//
// So, see if this block is already the start or end of some other EH region.
if (bbIsTryBeg(newTryEntry))
{
// We've already end-trimmed the inner try. Do the same now for the
// current try, so it is easier to detect when they mutually protect.
// (we will call this again later, which is harmless).
fgSkipRmvdBlocks(HBtab);
// If this try and the inner try form a "mutually protected try region"
// then we must continue to share the try entry block.
EHblkDsc* const HBinner = ehGetBlockTryDsc(newTryEntry);
assert(HBinner->ebdTryBeg == newTryEntry);
if (HBtab->ebdTryLast != HBinner->ebdTryLast)
{
updateTryEntry = true;
}
}
// Also, a try and handler cannot start at the same block
else if (bbIsHandlerBeg(newTryEntry))
{
updateTryEntry = true;
}
if (updateTryEntry)
{
// We need to trim the current try to begin at a different block. Normally
// this would be problematic as we don't have enough context to redirect
// all the incoming edges, but we know oldTryEntry is unreachable.
// So there are no incoming edges to worry about.
//
assert(!tryEntryPrev->bbFallsThrough());
// What follows is similar to fgNewBBInRegion, but we can't call that
// here as the oldTryEntry is no longer in the main bb list.
newTryEntry = BasicBlock::New(this);
newTryEntry->SetFlags(BBF_IMPORTED | BBF_INTERNAL);
newTryEntry->bbRefs = 0;
// Set the right EH region indices on this new block.
//
// Patchpoints currently cannot be inside handler regions,
// and so likewise the old and new try region entries.
assert(!oldTryEntry->hasHndIndex());
newTryEntry->setTryIndex(XTnum);
newTryEntry->clearHndIndex();
fgInsertBBafter(tryEntryPrev, newTryEntry);
// Generally this (unreachable) empty new try entry block can fall through
// to the next block, but in cases where there's a nested try with an
// out of order handler, the next block may be a handler. So even though
// this new try entry block is unreachable, we need to give it a
// plausible flow target. Simplest is to just mark it as a throw.
if (bbIsHandlerBeg(newTryEntry->Next()))
{
newTryEntry->SetKindAndTargetEdge(BBJ_THROW);
}
else
{
FlowEdge* const newEdge = fgAddRefPred(newTryEntry->Next(), newTryEntry);
newTryEntry->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge);
}
JITDUMP("OSR: changing start of try region #%u from " FMT_BB " to new " FMT_BB "\n",
XTnum + delCnt, oldTryEntry->bbNum, newTryEntry->bbNum);
}
else
{
// We can just trim the try to newTryEntry as it is not part of some inner try or handler.
JITDUMP("OSR: changing start of try region #%u from " FMT_BB " to " FMT_BB "\n", XTnum + delCnt,
oldTryEntry->bbNum, newTryEntry->bbNum);
}
// Update the handler table
fgSetTryBeg(HBtab, newTryEntry);
// Try entry blocks get specially marked and have special protection.
HBtab->ebdTryBeg->SetFlags(BBF_DONT_REMOVE);
// We are keeping this try region
removeTryRegion = false;
}
}
if (removeTryRegion)
{
// In the dump, refer to the region by its original index.
JITDUMP("Try region #%u (" FMT_BB " -- " FMT_BB ") not imported, removing try from the EH table\n",
XTnum + delCnt, HBtab->ebdTryBeg->bbNum, HBtab->ebdTryLast->bbNum);
delCnt++;
fgRemoveEHTableEntry(XTnum);
if (XTnum < compHndBBtabCount)
{
// There are more entries left to process, so do more. Note that
// HBtab now points to the next entry, that we copied down to the
// current slot. XTnum also stays the same.
goto AGAIN;
}
// no more entries (we deleted the last one), so exit the loop
break;
}
}
// If we get here, the try entry block was not removed.
// Check some invariants.
assert(HBtab->ebdTryBeg->HasFlag(BBF_IMPORTED));
assert(HBtab->ebdTryBeg->HasFlag(BBF_DONT_REMOVE));
assert(HBtab->ebdHndBeg->HasFlag(BBF_IMPORTED));
assert(HBtab->ebdHndBeg->HasFlag(BBF_DONT_REMOVE));
if (HBtab->HasFilter())
{
assert(HBtab->ebdFilter->HasFlag(BBF_IMPORTED));
assert(HBtab->ebdFilter->HasFlag(BBF_DONT_REMOVE));
}
// Finally, do region end trimming -- update try and handler ends to reflect removed blocks.
fgSkipRmvdBlocks(HBtab);
}
// If this is OSR, and the OSR entry was mid-try or in a nested try entry,
// add the appropriate step block logic.
//
unsigned addedBlocks = 0;
bool addedTemps = false;
if (opts.IsOSR())
{
BasicBlock* const osrEntry = fgOSREntryBB;
BasicBlock* entryJumpTarget = osrEntry;
if (osrEntry->hasTryIndex())
{
EHblkDsc* enclosingTry = ehGetBlockTryDsc(osrEntry);
BasicBlock* tryEntry = enclosingTry->ebdTryBeg;
bool const inNestedTry = (enclosingTry->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX);
bool const osrEntryMidTry = (osrEntry != tryEntry);
if (inNestedTry || osrEntryMidTry)
{
JITDUMP("OSR Entry point at IL offset 0x%0x (" FMT_BB ") is %s%s try region EH#%u\n", info.compILEntry,
osrEntry->bbNum, osrEntryMidTry ? "within " : "at the start of ", inNestedTry ? "nested" : "",
osrEntry->getTryIndex());
// We'll need a state variable to control the branching.
//
// It will be initialized to zero when the OSR method is entered and set to one
// once flow reaches the osrEntry.
//
unsigned const entryStateVar = lvaGrabTemp(false DEBUGARG("OSR entry state var"));
lvaTable[entryStateVar].lvType = TYP_INT;
addedTemps = true;
// Zero the entry state at method entry.
//
GenTree* const initEntryState = gtNewTempStore(entryStateVar, gtNewZeroConNode(TYP_INT));
fgNewStmtAtBeg(fgFirstBB, initEntryState);
// Set the state variable once control flow reaches the OSR entry.
//
GenTree* const setEntryState = gtNewTempStore(entryStateVar, gtNewOneConNode(TYP_INT));
fgNewStmtAtBeg(osrEntry, setEntryState);
// Helper method to add flow
//
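// Conceptually, for a given (fromBlock, toBlock) pair this rewrites
//
//   fromBlock: S; ...
//
// into
//
//   fromBlock: if (entryStateVar == 0) goto toBlock;    // now BBJ_COND
//   newBlock:  S; ...                                    // original contents of fromBlock
//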
auto addConditionalFlow = [this, entryStateVar, &entryJumpTarget, &addedBlocks](BasicBlock* fromBlock,
BasicBlock* toBlock) {
BasicBlock* const newBlock = fgSplitBlockAtBeginning(fromBlock);
newBlock->inheritWeight(fromBlock);
fromBlock->SetFlags(BBF_INTERNAL);
newBlock->RemoveFlags(BBF_DONT_REMOVE);
addedBlocks++;
FlowEdge* const normalTryEntryEdge = fromBlock->GetTargetEdge();
GenTree* const entryStateLcl = gtNewLclvNode(entryStateVar, TYP_INT);
GenTree* const compareEntryStateToZero =
gtNewOperNode(GT_EQ, TYP_INT, entryStateLcl, gtNewZeroConNode(TYP_INT));
GenTree* const jumpIfEntryStateZero = gtNewOperNode(GT_JTRUE, TYP_VOID, compareEntryStateToZero);
fgNewStmtAtBeg(fromBlock, jumpIfEntryStateZero);
FlowEdge* const osrTryEntryEdge = fgAddRefPred(toBlock, fromBlock);
fromBlock->SetCond(osrTryEntryEdge, normalTryEntryEdge);
if (fgHaveProfileWeights())
{
// We are adding a path from (ultimately) the method entry to "fromBlock"
// Update the profile weight.
//
weight_t const entryWeight = fgFirstBB->bbWeight;
JITDUMP("Updating block weight for now-reachable try entry " FMT_BB " via " FMT_BB "\n",
fromBlock->bbNum, fgFirstBB->bbNum);
fromBlock->increaseBBProfileWeight(entryWeight);
// We updated the weight of fromBlock above.
//
// Set the likelihoods such that the additional weight flows to toBlock
// (and so the "normal path" profile out of fromBlock to newBlock is unaltered)
//
// In some stress cases we may have a zero-weight OSR entry.
// Tolerate this by capping the fromToLikelihood.
//
weight_t const fromWeight = fromBlock->bbWeight;
weight_t const fromToLikelihood = min(1.0, entryWeight / fromWeight);
osrTryEntryEdge->setLikelihood(fromToLikelihood);
normalTryEntryEdge->setLikelihood(1.0 - fromToLikelihood);
}
else
{
// Just set likelihoods arbitrarily
//
osrTryEntryEdge->setLikelihood(0.9);
normalTryEntryEdge->setLikelihood(0.1);
}
entryJumpTarget = fromBlock;
};
// If this is a mid-try entry, add a conditional branch from the start of the try to osr entry point.
//
if (osrEntryMidTry)
{
addConditionalFlow(tryEntry, osrEntry);
}
// Add conditional branches for each successive enclosing try with a distinct
// entry block.
//
while (enclosingTry->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX)
{
EHblkDsc* const nextTry = ehGetDsc(enclosingTry->ebdEnclosingTryIndex);
BasicBlock* const nextTryEntry = nextTry->ebdTryBeg;
// We don't need to add flow for mutual-protect regions
// (multiple tries that all share the same entry block).
//
if (nextTryEntry != tryEntry)
{
addConditionalFlow(nextTryEntry, tryEntry);
}
enclosingTry = nextTry;
tryEntry = nextTryEntry;
}
// Transform the method entry flow, if necessary.
//
// Note even if the OSR is in a nested try, if it's a mutual protect try
// it can be reached directly from "outside".
//
assert(fgFirstBB->TargetIs(osrEntry));
assert(fgFirstBB->KindIs(BBJ_ALWAYS));
if (entryJumpTarget != osrEntry)
{
fgRedirectTargetEdge(fgFirstBB, entryJumpTarget);
JITDUMP("OSR: redirecting flow from method entry " FMT_BB " to OSR entry " FMT_BB
" via step blocks.\n",
fgFirstBB->bbNum, fgOSREntryBB->bbNum);
}
else
{
JITDUMP("OSR: leaving direct flow from method entry " FMT_BB " to OSR entry " FMT_BB
", no step blocks needed.\n",
fgFirstBB->bbNum, fgOSREntryBB->bbNum);
}
}
else
{
// If OSR entry is the start of an un-nested try, no work needed.
//
// We won't hit this case today as we don't allow the try entry to be the target of a backedge,
// and currently patchpoints only appear at targets of backedges.
//
JITDUMP("OSR Entry point at IL offset 0x%0x (" FMT_BB
") is start of an un-nested try region, no step blocks needed.\n",
info.compILEntry, osrEntry->bbNum);
assert(entryJumpTarget == osrEntry);
assert(fgOSREntryBB == osrEntry);
}
}
else
{
// If OSR entry is not within a try, no work needed.
//
JITDUMP("OSR Entry point at IL offset 0x%0x (" FMT_BB ") is not in a try region, no step blocks needed.\n",
info.compILEntry, osrEntry->bbNum);
assert(entryJumpTarget == osrEntry);
assert(fgOSREntryBB == osrEntry);
}
}
#ifdef DEBUG
fgVerifyHandlerTab();
#endif // DEBUG
// Did we make any changes?
//
const bool madeChanges = (addedBlocks > 0) || (delCnt > 0) || (removedBlks > 0) || addedTemps;
// Note that we have now run post importation cleanup,
// so we can enable more stringent checking.
//
compPostImportationCleanupDone = true;
return madeChanges ? PhaseStatus::MODIFIED_EVERYTHING : PhaseStatus::MODIFIED_NOTHING;
}
//-------------------------------------------------------------
// fgCanCompactBlock: Determine if a BBJ_ALWAYS block and its target can be compacted.
//
// Arguments:
// block - BBJ_ALWAYS block to check
//
// Returns:
// true if compaction is allowed
//
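// For example, given
//
//   BB01: ... (BBJ_ALWAYS -> BB02)
//   BB02: ... (single predecessor: BB01)
//
// the two blocks can typically be merged into BB01, subject to the checks below
// (same EH region, no special blocks, no switch predecessors, etc.).
//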
bool Compiler::fgCanCompactBlock(BasicBlock* block)
{
assert(block != nullptr);
if (!block->KindIs(BBJ_ALWAYS) || block->HasFlag(BBF_KEEP_BBJ_ALWAYS))
{
return false;
}
BasicBlock* const target = block->GetTarget();
if (block == target)
{
return false;
}
if (target->IsFirst() || (target == fgEntryBB) || (target == fgOSREntryBB))
{
return false;
}
// Don't bother compacting a call-finally pair if it doesn't immediately follow block
//
if (target->isBBCallFinallyPair() && !block->NextIs(target))
{
return false;
}
// If target has multiple incoming edges, we can still compact if block is empty.
// However, not if block is the beginning of a handler.
//
if (target->countOfInEdges() != 1 &&
(!block->isEmpty() || block->HasFlag(BBF_FUNCLET_BEG) || (block->bbCatchTyp != BBCT_NONE)))
{
return false;
}
if (target->HasFlag(BBF_DONT_REMOVE))
{
return false;
}
// Ensure we leave a valid init BB around.
//
if ((block == fgFirstBB) && !fgCanCompactInitBlock())
{
return false;
}
// We cannot compact two blocks in different EH regions.
//
if (!BasicBlock::sameEHRegion(block, target))
{
return false;
}
// If there is a switch predecessor don't bother because we'd have to update the uniquesuccs as well
// (if they are valid).
//
for (BasicBlock* const predBlock : target->PredBlocks())
{
if (predBlock->KindIs(BBJ_SWITCH))
{
return false;
}
}
return true;
}
//-------------------------------------------------------------
// fgCanCompactInitBlock: Check if the first BB (the init BB) can be compacted
// into its target.
//
// Returns:
// true if compaction is allowed
//
bool Compiler::fgCanCompactInitBlock()
{
assert(fgFirstBB->KindIs(BBJ_ALWAYS));
BasicBlock* target = fgFirstBB->GetTarget();
if (target->hasTryIndex())
{
// Inside a try region
return false;
}
assert(target->bbPreds != nullptr);
if (target->bbPreds->getNextPredEdge() != nullptr)
{
// Multiple preds
return false;
}
if (opts.compDbgCode && !target->HasFlag(BBF_INTERNAL))
{
// Init BB must be internal for debug code to avoid conflating
// JIT-inserted code with user code.
return false;
}
return true;
}
//-------------------------------------------------------------
// fgCompactBlock: Compact BBJ_ALWAYS block and its target into one.
//
// Requires that all necessary checks have been performed, i.e. fgCanCompactBlock returns true.
//
// Use this function whenever we change links, insert blocks, etc.
// It will keep the flowgraph data in sync - bbNum, bbRefs, bbPreds
//
// Arguments:
// block - move all code into this block from its target.
//
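// Illustrative usage (a sketch of a typical call site, not code from this file):
//
//   if (fgCanCompactBlock(block))
//   {
//       fgCompactBlock(block);
//   }
//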
void Compiler::fgCompactBlock(BasicBlock* block)
{
assert(fgCanCompactBlock(block));
// We shouldn't churn the flowgraph after doing hot/cold splitting
assert(fgFirstColdBlock == nullptr);
BasicBlock* const target = block->GetTarget();
JITDUMP("\nCompacting " FMT_BB " into " FMT_BB ":\n", target->bbNum, block->bbNum);
fgRemoveRefPred(block->GetTargetEdge());
if (target->countOfInEdges() > 0)
{
JITDUMP("Second block has %u other incoming edges\n", target->countOfInEdges());
assert(block->isEmpty());
// Retarget all the other edges incident on target
for (BasicBlock* const predBlock : target->PredBlocksEditing())
{
fgReplaceJumpTarget(predBlock, target, block);
}
}
assert(target->countOfInEdges() == 0);
assert(target->bbPreds == nullptr);
/* Start compacting - move all the statements in the second block to the first block */
// First move any phi definitions of the second block after the phi defs of the first.
// TODO-CQ: This may be the wrong thing to do. If we're compacting blocks, it's because a
// control-flow choice was constant-folded away. So probably phi's need to go away,
// as well, in favor of one of the incoming branches. Or at least be modified.
assert(block->IsLIR() == target->IsLIR());
if (block->IsLIR())
{
LIR::Range& blockRange = LIR::AsRange(block);
LIR::Range& targetRange = LIR::AsRange(target);
// Does target have any phis?
GenTree* targetNode = targetRange.FirstNode();
// Does the block have any code?
if (targetNode != nullptr)
{
LIR::Range targetNodes = targetRange.Remove(targetNode, targetRange.LastNode());
blockRange.InsertAtEnd(std::move(targetNodes));
}
}
else
{
Statement* blkNonPhi1 = block->FirstNonPhiDef();
Statement* targetNonPhi1 = target->FirstNonPhiDef();
Statement* blkFirst = block->firstStmt();
Statement* targetFirst = target->firstStmt();
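// Note: in a block's statement list, the first statement's GetPrevStmt() points at the
// last statement, while the last statement's GetNextStmt() is nullptr (see the assert
// below). The splicing logic that follows relies on this shape.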
// Does the second have any phis?
if ((targetFirst != nullptr) && (targetFirst != targetNonPhi1))
{
Statement* targetLast = targetFirst->GetPrevStmt();
assert(targetLast->GetNextStmt() == nullptr);
// Does "blk" have phis?
if (blkNonPhi1 != blkFirst)
{
// Yes, has phis.
// Insert after the last phi of "block."
// First, targetPhis after last phi of block.
Statement* blkLastPhi = (blkNonPhi1 != nullptr) ? blkNonPhi1->GetPrevStmt() : blkFirst->GetPrevStmt();
blkLastPhi->SetNextStmt(targetFirst);
targetFirst->SetPrevStmt(blkLastPhi);
// Now, rest of "block" after last phi of "target".
Statement* targetLastPhi =
(targetNonPhi1 != nullptr) ? targetNonPhi1->GetPrevStmt() : targetFirst->GetPrevStmt();
targetLastPhi->SetNextStmt(blkNonPhi1);
if (blkNonPhi1 != nullptr)
{
blkNonPhi1->SetPrevStmt(targetLastPhi);
}
else
{
// block has no non phis, so make the last statement be the last added phi.
blkFirst->SetPrevStmt(targetLastPhi);
}
// Now update the bbStmtList of "target".
target->bbStmtList = targetNonPhi1;
if (targetNonPhi1 != nullptr)
{
targetNonPhi1->SetPrevStmt(targetLast);
}
}
else
{
if (blkFirst != nullptr) // If "block" has no statements, fusion will work fine...
{
// First, targetPhis at start of block.
Statement* blkLast = blkFirst->GetPrevStmt();
block->bbStmtList = targetFirst;