Skip to content
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.

Commit 036d2de

Browse files
committedAug 13, 2024
comments
1 parent 569d014 commit 036d2de

File tree

1 file changed

+57
-11
lines changed
  • compiler/rustc_type_ir/src/search_graph

1 file changed

+57
-11
lines changed
 

‎compiler/rustc_type_ir/src/search_graph/mod.rs

+57-11
Original file line numberDiff line numberDiff line change
@@ -179,7 +179,9 @@ impl AvailableDepth {
179179
}
180180
}
181181

182-
/// All cycle heads a given goal depends on.
182+
/// All cycle heads a given goal depends on, ordered by their stack depth.
183+
///
184+
/// We therefore pop the cycle heads from highest to lowest.
183185
#[derive(Clone, Debug, PartialEq, Eq, Default)]
184186
struct CycleHeads {
185187
heads: BTreeSet<StackDepth>,
@@ -217,6 +219,9 @@ impl CycleHeads {
217219
}
218220
}
219221

222+
/// Update the cycle heads of a goal at depth `this` given the cycle heads
223+
/// of a nested goal. This merges the heads after filtering the parent goal
224+
/// itself.
220225
fn extend_from_child(&mut self, this: StackDepth, child: &CycleHeads) {
221226
for &head in child.heads.iter() {
222227
match head.cmp(&this) {
@@ -264,6 +269,12 @@ impl<X: Cx> NestedGoals<X> {
264269
}
265270
}
266271

272+
/// Adds the nested goals of a nested goal, given the path `step_kind` from this goal
273+
/// to the nested goal.
274+
///
275+
/// If the path from this goal to the nested goal is inductive, the paths from this goal
276+
/// to all nested goals of that nested goal are also inductive. Otherwise the paths are
277+
/// the same as for the child.
267278
fn extend_from_child(&mut self, step_kind: PathKind, nested_goals: &NestedGoals<X>) {
268279
#[allow(rustc::potential_query_instability)]
269280
for (input, path_from_entry) in nested_goals.iter() {
@@ -332,8 +343,13 @@ struct StackEntry<X: Cx> {
332343
/// goals still on the stack.
333344
#[derive_where(Debug; X: Cx)]
334345
struct ProvisionalCacheEntry<X: Cx> {
346+
/// Whether evaluating the goal encountered overflow. This is used to
347+
/// disable the cache entry except if the last goal on the stack is
348+
/// already involved in this cycle.
335349
encountered_overflow: bool,
350+
/// All cycle heads this cache entry depends on.
336351
heads: CycleHeads,
352+
/// The path from the highest cycle head to this goal.
337353
path_from_head: PathKind,
338354
nested_goals: NestedGoals<X>,
339355
result: X::Result,
@@ -345,6 +361,10 @@ pub struct SearchGraph<D: Delegate<Cx = X>, X: Cx = <D as Delegate>::Cx> {
345361
///
346362
/// An element is *deeper* in the stack if its index is *lower*.
347363
stack: IndexVec<StackDepth, StackEntry<X>>,
364+
/// The provisional cache contains entries for already computed goals which
365+
/// still depend on goals higher-up in the stack. We don't move them to the
366+
/// global cache and track them locally instead. A provisional cache entry
367+
/// is only valid until the result of one of its cycle heads changes.
348368
provisional_cache: HashMap<X::Input, Vec<ProvisionalCacheEntry<X>>>,
349369

350370
_marker: PhantomData<D>,
@@ -585,6 +605,17 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
585605
/// provisional cache entry is involved in would stay the same when computing the
586606
/// goal without its cycle head on the stack. For more details, see the relevant
587607
/// [rustc-dev-guide chapter](https://rustc-dev-guide.rust-lang.org/solve/caching.html).
608+
///
609+
/// This can be thought of as rotating the sub-tree of this provisional result and changing
610+
/// its entry point while making sure that all paths through this sub-tree stay the same.
611+
///
612+
///
613+
/// In case the popped cycle head failed to reach a fixpoint, anything which depends on
614+
/// its provisional result is invalid. Actually discarding provisional cache entries in
615+
/// this case would cause hangs, so we instead change the result of dependent provisional
616+
/// cache entries to also be ambiguous. This causes some undesirable ambiguity for nested
617+
/// goals whose result doesn't actually depend on this cycle head, but that's acceptable
618+
/// to me.
588619
fn rebase_provisional_cache_entries(
589620
&mut self,
590621
cx: X,
@@ -610,31 +641,37 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
610641
// to the cache entry is not coinductive or if the path from
611642
// the cache entry to the current head is not coinductive.
612643
//
613-
// Both of these constraints could be lowered, but by only
644+
// Both of these constraints could be weakened, but by only
614645
// accepting coinductive paths we don't have to worry about
615646
// changing the cycle kind of the remaining cycles. We can
616647
// extend this in the future once there's a known issue
617648
// caused by it.
618-
if *path_from_head != PathKind::Coinductive {
619-
return false;
620-
}
621-
622-
if nested_goals.get(stack_entry.input).unwrap()
623-
!= UsageKind::Single(PathKind::Coinductive)
649+
if *path_from_head != PathKind::Coinductive
650+
|| nested_goals.get(stack_entry.input).unwrap()
651+
!= UsageKind::Single(PathKind::Coinductive)
624652
{
625653
return false;
626654
}
627655

656+
// Merge the cycle heads of the provisional cache entry and the
657+
// popped head. If the popped cycle head was a root, discard all
658+
// provisional cache entries which depend on it.
628659
heads.remove_highest_cycle_head();
629660
heads.merge(&stack_entry.heads);
630-
// If the popped cycle head was a root, discard all provisional
631-
// cache entries.
632661
let Some(head) = heads.opt_highest_cycle_head() else {
633662
return false;
634663
};
635664

665+
// As we've made sure that the path from the new highest cycle
666+
// head to the uses of the popped cycle head are fully coinductive,
667+
// we can be sure that the paths to all nested goals of the popped
668+
// cycle head remain the same. We can simply merge them.
636669
nested_goals.merge(&stack_entry.nested_goals);
670+
// We now care about the path from the next highest cycle head to the
671+
// provisional cache entry.
637672
*path_from_head = Self::stack_path_kind(cx, &self.stack, head);
673+
// Mutate the result of the provisional cache entry in case we did
674+
// not reach a fixpoint.
638675
*result = mutate_result(input, *result);
639676
true
640677
});
@@ -673,6 +710,8 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
673710
}
674711
}
675712

713+
/// A provisional cache entry is only valid if the current path from its
714+
/// highest cycle head to the goal is the same.
676715
if path_from_head == Self::stack_path_kind(cx, &self.stack, head) {
677716
// While we don't have to track the full depth of the provisional cache entry,
678717
// we do have to increment the required depth by one as we'd have already failed
@@ -714,11 +753,15 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
714753
return true;
715754
}
716755

756+
// If a nested goal of the global cache entry is on the stack, we would
757+
// definitely encounter a cycle.
717758
if stack.iter().any(|e| nested_goals.contains(e.input)) {
718759
debug!("cache entry not applicable due to stack");
719760
return false;
720761
}
721762

763+
// The global cache entry is also invalid if there's a provisional cache entry
764+
// which would apply for any of its nested goals.
722765
#[allow(rustc::potential_query_instability)]
723766
for (input, path_from_global_entry) in nested_goals.iter() {
724767
let Some(entries) = provisional_cache.get(&input) else {
@@ -742,6 +785,8 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
742785
continue;
743786
}
744787

788+
// A provisional cache entry only applies if the path from its highest head
789+
// matches the path when encountering the goal.
745790
let head = heads.highest_cycle_head();
746791
let full_path = match Self::stack_path_kind(cx, stack, head) {
747792
PathKind::Coinductive => path_from_global_entry,
@@ -813,7 +858,8 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
813858
// its state is the same regardless of whether we've used the
814859
// global cache or not.
815860
let reached_depth = self.stack.next_index().plus(additional_depth);
816-
// We don't move cycle participants to the global cache.
861+
// We don't move cycle participants to the global cache, so the
862+
// cycle heads are always empty.
817863
let heads = Default::default();
818864
Self::update_parent_goal(
819865
cx,

0 commit comments

Comments
 (0)
Failed to load comments.