@@ -179,7 +179,9 @@ impl AvailableDepth {
     }
 }
 
-/// All cycle heads a given goal depends on.
+/// All cycle heads a given goal depends on, ordered by their stack depth.
+///
+/// We therefore pop the cycle heads from highest to lowest.
 #[derive(Clone, Debug, PartialEq, Eq, Default)]
 struct CycleHeads {
     heads: BTreeSet<StackDepth>,
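
To make the ordering guarantee concrete, here is a minimal, self-contained sketch of how a `BTreeSet` yields the heads from highest to lowest. This is illustrative only: `StackDepth` is simplified to a plain `usize` (in the actual code it is an index newtype), and the method bodies are assumptions modeled on the identifiers used later in this diff.

```rust
use std::collections::BTreeSet;

// Assumption: `StackDepth` is an index newtype in the real code; a plain
// `usize` is enough to demonstrate the ordering behavior.
type StackDepth = usize;

#[derive(Clone, Debug, PartialEq, Eq, Default)]
struct CycleHeads {
    heads: BTreeSet<StackDepth>,
}

impl CycleHeads {
    // `BTreeSet` iterates in ascending order, so the last element is
    // always the highest cycle head.
    fn highest_cycle_head(&self) -> StackDepth {
        *self.heads.last().unwrap()
    }

    fn opt_highest_cycle_head(&self) -> Option<StackDepth> {
        self.heads.last().copied()
    }

    fn remove_highest_cycle_head(&mut self) {
        let highest = self.highest_cycle_head();
        self.heads.remove(&highest);
    }
}

fn main() {
    let mut heads = CycleHeads { heads: BTreeSet::from([0, 2, 5]) };
    // Popping proceeds from the highest head (5) down to the lowest (0).
    while let Some(head) = heads.opt_highest_cycle_head() {
        println!("popping cycle head at depth {head}");
        heads.remove_highest_cycle_head();
    }
}
```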
@@ -217,6 +219,9 @@ impl CycleHeads {
         }
     }
 
+    /// Update the cycle heads of a goal at depth `this` given the cycle heads
+    /// of a nested goal. This merges the heads after filtering the parent goal
+    /// itself.
     fn extend_from_child(&mut self, this: StackDepth, child: &CycleHeads) {
         for &head in child.heads.iter() {
             match head.cmp(&this) {
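
The filtering behavior can be sketched as follows, reusing the simplified `CycleHeads` from the previous example. The exact match arms are an assumption based on the doc comment: heads below the parent are kept, the parent itself is dropped, and a head above the parent would be a bug.

```rust
use std::cmp::Ordering;

impl CycleHeads {
    fn extend_from_child(&mut self, this: StackDepth, child: &CycleHeads) {
        for &head in child.heads.iter() {
            match head.cmp(&this) {
                // The nested goal depends on a goal deeper in the stack,
                // so the parent now transitively depends on it as well.
                Ordering::Less => {
                    self.heads.insert(head);
                }
                // The head is the parent goal itself: this cycle is
                // resolved at the parent and must not propagate further.
                Ordering::Equal => {}
                // A nested goal cannot depend on a goal above its parent.
                Ordering::Greater => unreachable!(),
            }
        }
    }
}
```

For example, a parent at depth 1 absorbing the child heads `{0, 1}` ends up depending only on `{0}`.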
@@ -264,6 +269,12 @@ impl<X: Cx> NestedGoals<X> {
         }
     }
 
+    /// Adds the nested goals of a nested goal, given the path `step_kind` from
+    /// this goal to the nested goal.
+    ///
+    /// If the path from this goal to the nested goal is inductive, the paths from this goal
+    /// to all nested goals of that nested goal are also inductive. Otherwise the paths are
+    /// the same as for the child.
     fn extend_from_child(&mut self, step_kind: PathKind, nested_goals: &NestedGoals<X>) {
         #[allow(rustc::potential_query_instability)]
         for (input, path_from_entry) in nested_goals.iter() {
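
The composition rule in this comment boils down to "inductive steps are absorbing". A small self-contained sketch; the shapes of `PathKind` and `UsageKind` are assumptions modeled on the identifiers appearing later in this diff.

```rust
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum PathKind {
    Inductive,
    Coinductive,
}

// How a nested goal was reached: via a single kind of path, or via both.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum UsageKind {
    Single(PathKind),
    Mixed,
}

// An inductive step from this goal to the nested goal makes every path
// through it inductive; a coinductive step keeps the child's recorded
// paths unchanged.
fn extend_path(step_kind: PathKind, path_from_entry: UsageKind) -> UsageKind {
    match step_kind {
        PathKind::Inductive => UsageKind::Single(PathKind::Inductive),
        PathKind::Coinductive => path_from_entry,
    }
}
```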
@@ -332,8 +343,13 @@ struct StackEntry<X: Cx> {
 /// goals still on the stack.
 #[derive_where(Debug; X: Cx)]
 struct ProvisionalCacheEntry<X: Cx> {
+    /// Whether evaluating the goal encountered overflow. This is used to
+    /// disable the cache entry unless the last goal on the stack is already
+    /// involved in this cycle.
     encountered_overflow: bool,
+    /// All cycle heads this cache entry depends on.
     heads: CycleHeads,
+    /// The path from the highest cycle head to this goal.
     path_from_head: PathKind,
     nested_goals: NestedGoals<X>,
     result: X::Result,
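
How these fields gate a cache hit can be sketched roughly as below. The helper name and parameters are hypothetical, and `PathKind` is the simplified enum from the earlier sketch; the actual checks live in `lookup_provisional_cache` further down.

```rust
// Hypothetical helper illustrating the field docs above.
fn entry_applies(
    encountered_overflow: bool,
    stack_top_is_in_cycle: bool,
    recorded_path_from_head: PathKind,
    current_path_from_head: PathKind,
) -> bool {
    // Overflow disables the entry unless the goal on top of the stack is
    // already involved in this cycle.
    if encountered_overflow && !stack_top_is_in_cycle {
        return false;
    }
    // The path from the highest cycle head must be unchanged.
    recorded_path_from_head == current_path_from_head
}
```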
@@ -345,6 +361,10 @@ pub struct SearchGraph<D: Delegate<Cx = X>, X: Cx = <D as Delegate>::Cx> {
     ///
     /// An element is *deeper* in the stack if its index is *lower*.
     stack: IndexVec<StackDepth, StackEntry<X>>,
+    /// The provisional cache contains entries for already computed goals which
+    /// still depend on goals higher up in the stack. Instead of moving them to
+    /// the global cache, we track them locally. A provisional cache entry is
+    /// only valid until the result of one of its cycle heads changes.
     provisional_cache: HashMap<X::Input, Vec<ProvisionalCacheEntry<X>>>,
 
     _marker: PhantomData<D>,
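
A hedged sketch of the invalidation rule stated in the last sentence of that doc comment, with `X::Input` simplified to `u64` and a trimmed-down entry type (both assumptions, not the real types):

```rust
use std::collections::{BTreeSet, HashMap};

struct ProvisionalCacheEntrySketch {
    // All cycle heads (stack depths) this entry depends on.
    heads: BTreeSet<usize>,
}

// Once the provisional result of the cycle head at `head` changes, every
// entry depending on it is no longer valid and has to be dropped.
fn invalidate_entries_depending_on(
    provisional_cache: &mut HashMap<u64, Vec<ProvisionalCacheEntrySketch>>,
    head: usize,
) {
    provisional_cache.retain(|_, entries| {
        entries.retain(|entry| !entry.heads.contains(&head));
        !entries.is_empty()
    });
}
```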
@@ -585,6 +605,16 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
     /// provisional cache entry is involved in would stay the same when computing the
     /// goal without its cycle head on the stack. For more details, see the relevant
     /// [rustc-dev-guide chapter](https://rustc-dev-guide.rust-lang.org/solve/caching.html).
+    ///
+    /// This can be thought of as rotating the sub-tree of this provisional result and changing
+    /// its entry point while making sure that all paths through this sub-tree stay the same.
+    ///
+    /// In case the popped cycle head failed to reach a fixpoint, anything which depends on
+    /// its provisional result is invalid. Actually discarding provisional cache entries in
+    /// this case would cause hangs, so we instead change the result of dependent provisional
+    /// cache entries to also be ambiguous. This causes some undesirable ambiguity for nested
+    /// goals whose result doesn't actually depend on this cycle head, but that's acceptable
+    /// to me.
     fn rebase_provisional_cache_entries(
         &mut self,
         cx: X,
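
The "force dependent results to ambiguity instead of discarding them" idea might look as follows. Everything here is illustrative: the result enum and the function are stand-ins for the actual `X::Result` and the `mutate_result` callback used below, not the real API.

```rust
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum SolverResultSketch {
    Yes,
    Ambiguous,
    No,
}

// Stand-in for the `mutate_result` callback: if the popped head reached a
// fixpoint its dependents keep their results; otherwise they are forced to
// ambiguity rather than being discarded, since discarding could cause hangs.
fn rebase_result(
    reached_fixpoint: bool,
    result: SolverResultSketch,
) -> SolverResultSketch {
    if reached_fixpoint { result } else { SolverResultSketch::Ambiguous }
}
```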
@@ -610,31 +641,37 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
             // to the cache entry is not coinductive or if the path from
             // the cache entry to the current head is not coinductive.
             //
-            // Both of these constraints could be lowered, but by only
+            // Both of these constraints could be weakened, but by only
             // accepting coinductive paths we don't have to worry about
             // changing the cycle kind of the remaining cycles. We can
             // extend this in the future once there's a known issue
             // caused by it.
-            if *path_from_head != PathKind::Coinductive {
-                return false;
-            }
-
-            if nested_goals.get(stack_entry.input).unwrap()
-                != UsageKind::Single(PathKind::Coinductive)
+            if *path_from_head != PathKind::Coinductive
+                || nested_goals.get(stack_entry.input).unwrap()
+                    != UsageKind::Single(PathKind::Coinductive)
             {
                 return false;
             }
 
+            // Merge the cycle heads of the provisional cache entry and the
+            // popped head. If the popped cycle head was a root, discard all
+            // provisional cache entries which depend on it.
             heads.remove_highest_cycle_head();
             heads.merge(&stack_entry.heads);
-            // If the popped cycle head was a root, discard all provisional
-            // cache entries.
             let Some(head) = heads.opt_highest_cycle_head() else {
                 return false;
             };
 
+            // As we've made sure that the paths from the new highest cycle
+            // head to the uses of the popped cycle head are fully coinductive,
+            // we can be sure that the paths to all nested goals of the popped
+            // cycle head remain the same. We can simply merge them.
             nested_goals.merge(&stack_entry.nested_goals);
+            // We now care about the path from the next highest cycle head to the
+            // provisional cache entry.
             *path_from_head = Self::stack_path_kind(cx, &self.stack, head);
+            // Mutate the result of the provisional cache entry in case we did
+            // not reach a fixpoint.
             *result = mutate_result(input, *result);
             true
         });
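
A worked example of the head rebasing above, reusing the simplified `CycleHeads` from the first sketch; `merge` is approximated by a set union here, which matches the described behavior:

```rust
use std::collections::BTreeSet;

fn main() {
    // A cache entry depends on cycle heads at depths 0 and 3.
    let mut heads = CycleHeads { heads: BTreeSet::from([0, 3]) };
    // The goal at depth 3 gets popped; it itself depended on a head at depth 1.
    let popped = CycleHeads { heads: BTreeSet::from([1]) };

    heads.remove_highest_cycle_head();
    heads.heads.extend(popped.heads.iter().copied()); // i.e. `heads.merge(&popped)`

    // The entry now depends on {0, 1}; its new highest cycle head is 1, so
    // `path_from_head` has to be recomputed relative to depth 1.
    assert_eq!(heads.heads, BTreeSet::from([0, 1]));
    assert_eq!(heads.highest_cycle_head(), 1);
}
```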
@@ -673,6 +710,8 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
             }
         }
 
+        // A provisional cache entry is only valid if the current path from its
+        // highest cycle head to the goal matches the recorded `path_from_head`.
         if path_from_head == Self::stack_path_kind(cx, &self.stack, head) {
             // While we don't have to track the full depth of the provisional cache entry,
             // we do have to increment the required depth by one as we'd have already failed
@@ -714,11 +753,15 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
             return true;
         }
 
+        // If a nested goal of the global cache entry is on the stack, we would
+        // definitely encounter a cycle.
         if stack.iter().any(|e| nested_goals.contains(e.input)) {
             debug!("cache entry not applicable due to stack");
             return false;
         }
 
+        // The global cache entry is also invalid if there's a provisional cache
+        // entry which would apply to any of its nested goals.
         #[allow(rustc::potential_query_instability)]
         for (input, path_from_global_entry) in nested_goals.iter() {
             let Some(entries) = provisional_cache.get(&input) else {
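
Both invalidation conditions combined, as a deliberately conservative self-contained sketch with illustrative types; the real check is finer-grained and additionally compares path kinds, as the next hunk shows:

```rust
use std::collections::HashSet;

// Conservative sketch: a global cache entry is unusable if any of its
// nested goals is currently on the stack (a cycle is guaranteed) or has a
// provisional cache entry (the provisional result might differ).
fn global_cache_entry_applicable(
    stack_inputs: &HashSet<u64>,
    provisional_cache_inputs: &HashSet<u64>,
    nested_goals: &[u64],
) -> bool {
    nested_goals.iter().all(|input| {
        !stack_inputs.contains(input) && !provisional_cache_inputs.contains(input)
    })
}
```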
@@ -742,6 +785,8 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
                 continue;
             }
 
+            // A provisional cache entry only applies if the path from its highest
+            // cycle head matches the path when encountering the goal.
             let head = heads.highest_cycle_head();
             let full_path = match Self::stack_path_kind(cx, stack, head) {
                 PathKind::Coinductive => path_from_global_entry,
@@ -813,7 +858,8 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
         // its state is the same regardless of whether we've used the
         // global cache or not.
         let reached_depth = self.stack.next_index().plus(additional_depth);
-        // We don't move cycle participants to the global cache.
+        // We don't move cycle participants to the global cache, so the
+        // cycle heads are always empty.
         let heads = Default::default();
         Self::update_parent_goal(
             cx,