2 | 2 | //! generate the actual methods on tcx which find and execute the provider, |
3 | 3 | //! manage the caches, and so forth. |
4 | 4 |
5 | | -use crate::dep_graph; |
| 5 | +use crate::dep_graph::{self, DepKind, DepNode, DepNodeExt, DepNodeIndex, SerializedDepNodeIndex}; |
6 | 6 | use crate::ty::query::{on_disk_cache, Queries, Query}; |
7 | 7 | use crate::ty::tls::{self, ImplicitCtxt}; |
8 | 8 | use crate::ty::{self, TyCtxt}; |
@@ -72,6 +72,95 @@ impl QueryContext for QueryCtxt<'tcx> { |
72 | 72 | (dep_node.kind.try_load_from_on_disk_cache)(*self, dep_node) |
73 | 73 | } |
74 | 74 |
| 75 | + fn try_force_from_dep_node(&self, dep_node: &DepNode) -> bool { |
| 76 | + // FIXME: This match is just a workaround for incremental bugs and should |
| 77 | + // be removed. https://github.com/rust-lang/rust/issues/62649 is one such |
| 78 | + // bug that must be fixed before removing this. |
| 79 | + match dep_node.kind { |
| 80 | + DepKind::hir_owner | DepKind::hir_owner_nodes => { |
| 81 | + if let Some(def_id) = dep_node.extract_def_id(**self) { |
| 82 | + let def_id = def_id.expect_local(); |
| 83 | + let hir_id = self.tcx.hir().local_def_id_to_hir_id(def_id); |
| 84 | + if def_id != hir_id.owner { |
| 85 | + // This `DefPath` does not have a |
| 86 | + // corresponding `DepNode` (e.g. a |
| 87 | + // struct field), and the `DefPath` |
| 88 | + // collided with the `DefPath` of a |
| 89 | + // proper item that existed in the |
| 90 | + // previous compilation session. |
| 91 | + // |
| 92 | + // Since the given `DefPath` does not |
| 93 | + // denote the item that previously |
| 94 | + // existed, we just fail to mark green. |
| 95 | + return false; |
| 96 | + } |
| 97 | + } else { |
| 98 | + // If the node does not exist anymore, we |
| 99 | + // just fail to mark green. |
| 100 | + return false; |
| 101 | + } |
| 102 | + } |
| 103 | + _ => { |
| 104 | + // For other kinds of nodes it's OK to |
| 105 | + // force them. |
| 106 | + } |
| 107 | + } |
| 108 | + |
| 109 | + debug!("try_force_from_dep_node({:?}) --- trying to force", dep_node); |
| 110 | + |
| 111 | + // We must avoid ever having to call `force_from_dep_node()` for a |
| 112 | + // `DepNode::codegen_unit`: |
| 113 | + // Since we cannot reconstruct the query key of a `DepNode::codegen_unit`, we |
| 114 | + // would always end up having to evaluate the first caller of the |
| 115 | + // `codegen_unit` query that *is* reconstructible. This might very well be |
| 116 | + // the `compile_codegen_unit` query, thus re-codegenning the whole CGU just |
| 117 | + // to re-trigger calling the `codegen_unit` query with the right key. At |
| 118 | + // that point we would already have re-done all the work we are trying to |
| 119 | + // avoid doing in the first place. |
| 120 | + // The solution is simple: Just explicitly call the `codegen_unit` query for |
| 121 | + // each CGU, right after partitioning. This way `try_mark_green` will always |
| 122 | + // hit the cache instead of having to go through `force_from_dep_node`. |
| 123 | + // This assertion makes sure we actually keep applying the solution above. |
| 124 | + debug_assert!( |
| 125 | + dep_node.kind != DepKind::codegen_unit, |
| 126 | + "calling force_from_dep_node() on DepKind::codegen_unit" |
| 127 | + ); |
| 128 | + |
| 129 | + (dep_node.kind.force_from_dep_node)(**self, dep_node) |
| 130 | + } |
| 131 | + |
| 132 | + fn has_errors_or_delayed_span_bugs(&self) -> bool { |
| 133 | + self.sess.has_errors_or_delayed_span_bugs() |
| 134 | + } |
| 135 | + |
| 136 | + fn diagnostic(&self) -> &rustc_errors::Handler { |
| 137 | + self.sess.diagnostic() |
| 138 | + } |
| 139 | + |
| 140 | + // Interactions with on_disk_cache |
| 141 | + fn load_diagnostics(&self, prev_dep_node_index: SerializedDepNodeIndex) -> Vec<Diagnostic> { |
| 142 | + self.on_disk_cache |
| 143 | + .as_ref() |
| 144 | + .map(|c| c.load_diagnostics(**self, prev_dep_node_index)) |
| 145 | + .unwrap_or_default() |
| 146 | + } |
| 147 | + |
| 148 | + fn store_diagnostics(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec<Diagnostic>) { |
| 149 | + if let Some(c) = self.on_disk_cache.as_ref() { |
| 150 | + c.store_diagnostics(dep_node_index, diagnostics) |
| 151 | + } |
| 152 | + } |
| 153 | + |
| 154 | + fn store_diagnostics_for_anon_node( |
| 155 | + &self, |
| 156 | + dep_node_index: DepNodeIndex, |
| 157 | + diagnostics: ThinVec<Diagnostic>, |
| 158 | + ) { |
| 159 | + if let Some(c) = self.on_disk_cache.as_ref() { |
| 160 | + c.store_diagnostics_for_anon_node(dep_node_index, diagnostics) |
| 161 | + } |
| 162 | + } |
| 163 | + |
75 | 164 | /// Executes a job by changing the `ImplicitCtxt` to point to the |
76 | 165 | /// new query job while it executes. It returns the diagnostics |
77 | 166 | /// captured during execution and the actual result. |
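
The `codegen_unit` comment in `try_force_from_dep_node` above refers to a fix that lives on the codegen side rather than in this file: right after partitioning, the `codegen_unit` query is invoked once per CGU so its cache is populated and green-marking never has to force a `DepKind::codegen_unit` node. A minimal sketch of that pre-seeding, reconstructed from memory (the function name, the `CodegenUnit` import, and the `ensure()` usage are assumptions for illustration, not part of this diff):

```rust
use rustc_middle::mir::mono::CodegenUnit;
use rustc_middle::ty::TyCtxt;

/// Sketch only: pre-seed the `codegen_unit` query cache for every CGU right
/// after partitioning, so later `try_mark_green` calls hit the cache and never
/// reach `force_from_dep_node` (which would trip the `debug_assert!` above).
fn preseed_codegen_unit_queries<'tcx>(tcx: TyCtxt<'tcx>, codegen_units: &[CodegenUnit<'tcx>]) {
    if tcx.dep_graph.is_fully_enabled() {
        for cgu in codegen_units {
            // `ensure()` runs the query for its caching side effect without
            // returning the result.
            tcx.ensure().codegen_unit(cgu.name());
        }
    }
}
```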