Commit b7a6d74

Auto merge of #107071 - Mark-Simulacrum:beta-next, r=Mark-Simulacrum

[beta] backport rollup

* Revert "Make nested RPITIT inherit the parent opaque's generics." #106759
* Fix mpsc::SyncSender spinning behavior #106701
* rustdoc: fix outdated lint section of the book #106605
* Do not filter substs in remap_generic_params_to_declaration_params. #106503
* Correct detection of elided lifetimes in impl-trait. #106501
* Bump rust-installer #106196
* Don't panic on stable since miri is not available there #105901

2 parents 7a9ae0c + 8669f7e, commit b7a6d74

18 files changed: +127 -71 lines changed


‎compiler/rustc_borrowck/src/region_infer/opaque_types.rs

Lines changed: 1 addition & 1 deletion

@@ -250,7 +250,7 @@ impl<'tcx> InferCtxtExt<'tcx> for InferCtxt<'tcx> {
         }

         let definition_ty = instantiated_ty
-            .remap_generic_params_to_declaration_params(opaque_type_key, self.tcx, false, origin)
+            .remap_generic_params_to_declaration_params(opaque_type_key, self.tcx, false)
             .ty;

         if !check_opaque_type_parameter_valid(

‎compiler/rustc_hir_analysis/src/collect/generics_of.rs

Lines changed: 15 additions & 1 deletion

@@ -4,6 +4,7 @@ use hir::{
     GenericParamKind, HirId, Node,
 };
 use rustc_hir as hir;
+use rustc_hir::def::DefKind;
 use rustc_hir::def_id::DefId;
 use rustc_middle::ty::{self, TyCtxt};
 use rustc_session::lint;
@@ -142,7 +143,20 @@ pub(super) fn generics_of(tcx: TyCtxt<'_>, def_id: DefId) -> ty::Generics {
             Some(tcx.typeck_root_def_id(def_id))
         }
         Node::Item(item) => match item.kind {
-            ItemKind::OpaqueTy(hir::OpaqueTy { .. }) => {
+            ItemKind::OpaqueTy(hir::OpaqueTy {
+                origin:
+                    hir::OpaqueTyOrigin::FnReturn(fn_def_id) | hir::OpaqueTyOrigin::AsyncFn(fn_def_id),
+                in_trait,
+                ..
+            }) => {
+                if in_trait {
+                    assert!(matches!(tcx.def_kind(fn_def_id), DefKind::AssocFn))
+                } else {
+                    assert!(matches!(tcx.def_kind(fn_def_id), DefKind::AssocFn | DefKind::Fn))
+                }
+                Some(fn_def_id.to_def_id())
+            }
+            ItemKind::OpaqueTy(hir::OpaqueTy { origin: hir::OpaqueTyOrigin::TyAlias, .. }) => {
                 let parent_id = tcx.hir().get_parent_item(hir_id);
                 assert_ne!(parent_id, hir::CRATE_OWNER_ID);
                 debug!("generics_of: parent of opaque ty {:?} is {:?}", def_id, parent_id);
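
For context, not part of the diff above: the new `FnReturn`/`AsyncFn` arms make the opaque type's `generics_of` parent the enclosing function, which is why a return-position `impl Trait` can mention the function's own generic parameters. A minimal, hypothetical user-level sketch (the function name is invented for illustration):

    // The opaque type behind `impl Iterator<Item = T> + 'a` inherits its
    // generics (`'a`, `T`) from the parent function rather than declaring its own.
    fn cloned_iter<'a, T: Clone + 'a>(slice: &'a [T]) -> impl Iterator<Item = T> + 'a {
        slice.iter().cloned()
    }

    fn main() {
        let v = vec![1, 2, 3];
        let doubled: Vec<i32> = cloned_iter(&v).map(|x| x * 2).collect();
        assert_eq!(doubled, vec![2, 4, 6]);
    }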

‎compiler/rustc_hir_analysis/src/collect/lifetimes.rs

Lines changed: 4 additions & 2 deletions

@@ -1195,8 +1195,10 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> {
         // Fresh lifetimes in APIT used to be allowed in async fns and forbidden in
         // regular fns.
         if let Some(hir::PredicateOrigin::ImplTrait) = where_bound_origin
-            && let hir::LifetimeName::Param(_) = lifetime_ref.res
-            && lifetime_ref.is_anonymous()
+            && let hir::LifetimeName::Param(param_id) = lifetime_ref.res
+            && let Some(generics) = self.tcx.hir().get_generics(self.tcx.local_parent(param_id))
+            && let Some(param) = generics.params.iter().find(|p| p.def_id == param_id)
+            && param.is_elided_lifetime()
             && let hir::IsAsync::NotAsync = self.tcx.asyncness(lifetime_ref.hir_id.owner.def_id)
             && !self.tcx.features().anonymous_lifetime_in_impl_trait
         {
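
For context, not part of the diff above: this check gates elided lifetimes inside argument-position `impl Trait` (APIT) in non-async functions behind the `anonymous_lifetime_in_impl_trait` feature; the fix detects elision via the generic parameter rather than the lifetime reference. A hedged sketch of the two shapes involved, with an invented function name:

    // Rejected on stable: the lifetime in APIT is elided (anonymous lifetimes
    // in `impl Trait` are unstable without the feature gate).
    // fn print_all(iter: impl Iterator<Item = &u32>) { /* ... */ }

    // Naming the lifetime on the function compiles on stable:
    fn print_all<'a>(iter: impl Iterator<Item = &'a u32>) {
        for item in iter {
            println!("{item}");
        }
    }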

‎compiler/rustc_hir_typeck/src/writeback.rs

Lines changed: 0 additions & 1 deletion

@@ -565,7 +565,6 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
             opaque_type_key,
             self.fcx.infcx.tcx,
             true,
-            decl.origin,
         );

         self.typeck_results.concrete_opaque_types.insert(opaque_type_key.def_id, hidden_type);

‎compiler/rustc_middle/src/ty/mod.rs

Lines changed: 1 addition & 26 deletions

@@ -28,7 +28,6 @@ use crate::ty::util::Discr;
 pub use adt::*;
 pub use assoc::*;
 pub use generics::*;
-use hir::OpaqueTyOrigin;
 use rustc_ast as ast;
 use rustc_ast::node_id::NodeMap;
 use rustc_attr as attr;
@@ -1287,7 +1286,6 @@
         tcx: TyCtxt<'tcx>,
         // typeck errors have subpar spans for opaque types, so delay error reporting until borrowck.
         ignore_errors: bool,
-        origin: OpaqueTyOrigin,
     ) -> Self {
         let OpaqueTypeKey { def_id, substs } = opaque_type_key;

@@ -1303,30 +1301,7 @@
         // This zip may have several times the same lifetime in `substs` paired with a different
         // lifetime from `id_substs`. Simply `collect`ing the iterator is the correct behaviour:
        // it will pick the last one, which is the one we introduced in the impl-trait desugaring.
-        let map = substs.iter().zip(id_substs);
-
-        let map: FxHashMap<GenericArg<'tcx>, GenericArg<'tcx>> = match origin {
-            // HACK: The HIR lowering for async fn does not generate
-            // any `+ Captures<'x>` bounds for the `impl Future<...>`, so all async fns with lifetimes
-            // would now fail to compile. We should probably just make hir lowering fill this in properly.
-            OpaqueTyOrigin::AsyncFn(_) => map.collect(),
-            OpaqueTyOrigin::FnReturn(_) | OpaqueTyOrigin::TyAlias => {
-                // Opaque types may only use regions that are bound. So for
-                // ```rust
-                // type Foo<'a, 'b, 'c> = impl Trait<'a> + 'b;
-                // ```
-                // we may not use `'c` in the hidden type.
-                let variances = tcx.variances_of(def_id);
-                debug!(?variances);
-
-                map.filter(|(_, v)| {
-                    let ty::GenericArgKind::Lifetime(lt) = v.unpack() else { return true };
-                    let ty::ReEarlyBound(ebr) = lt.kind() else { bug!() };
-                    variances[ebr.index as usize] == ty::Variance::Invariant
-                })
-                .collect()
-            }
-        };
+        let map = substs.iter().zip(id_substs).collect();
         debug!("map = {:#?}", map);

         // Convert the type from the function into a type valid outside

‎library/std/src/sync/mpmc/array.rs

Lines changed: 7 additions & 7 deletions

@@ -168,7 +168,7 @@ impl<T> Channel<T> {
                     return true;
                 }
                 Err(_) => {
-                    backoff.spin();
+                    backoff.spin_light();
                     tail = self.tail.load(Ordering::Relaxed);
                 }
             }
@@ -182,11 +182,11 @@ impl<T> Channel<T> {
                     return false;
                 }

-                backoff.spin();
+                backoff.spin_light();
                 tail = self.tail.load(Ordering::Relaxed);
             } else {
                 // Snooze because we need to wait for the stamp to get updated.
-                backoff.snooze();
+                backoff.spin_heavy();
                 tail = self.tail.load(Ordering::Relaxed);
             }
         }
@@ -251,7 +251,7 @@ impl<T> Channel<T> {
                     return true;
                 }
                 Err(_) => {
-                    backoff.spin();
+                    backoff.spin_light();
                     head = self.head.load(Ordering::Relaxed);
                 }
             }
@@ -273,11 +273,11 @@ impl<T> Channel<T> {
                     }
                 }

-                backoff.spin();
+                backoff.spin_light();
                 head = self.head.load(Ordering::Relaxed);
             } else {
                 // Snooze because we need to wait for the stamp to get updated.
-                backoff.snooze();
+                backoff.spin_heavy();
                 head = self.head.load(Ordering::Relaxed);
             }
         }
@@ -330,7 +330,7 @@ impl<T> Channel<T> {
             if backoff.is_completed() {
                 break;
             } else {
-                backoff.spin();
+                backoff.spin_light();
             }
         }
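
For context, not part of the diff above: this array-based channel backs bounded `std::sync::mpsc::sync_channel` endpoints with nonzero capacity, and the `spin_light`/`spin_heavy` split is part of the `SyncSender` spinning fix (#106701). A small usage sketch of the public API whose blocking behavior is affected:

    use std::sync::mpsc::sync_channel;
    use std::thread;

    fn main() {
        // Bounded channel with room for two messages; once the buffer is full,
        // further sends block (eventually parking) instead of spinning forever.
        let (tx, rx) = sync_channel::<i32>(2);
        let producer = thread::spawn(move || {
            for i in 0..10 {
                tx.send(i).unwrap();
            }
        });
        // Receiving drains the buffer and unblocks the producer; the loop ends
        // when the sender is dropped.
        for msg in rx {
            println!("received {msg}");
        }
        producer.join().unwrap();
    }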

‎library/std/src/sync/mpmc/list.rs

Lines changed: 8 additions & 8 deletions

@@ -46,7 +46,7 @@ impl<T> Slot<T> {
     fn wait_write(&self) {
         let backoff = Backoff::new();
         while self.state.load(Ordering::Acquire) & WRITE == 0 {
-            backoff.snooze();
+            backoff.spin_heavy();
         }
     }
 }
@@ -82,7 +82,7 @@ impl<T> Block<T> {
             if !next.is_null() {
                 return next;
             }
-            backoff.snooze();
+            backoff.spin_heavy();
         }
     }

@@ -191,7 +191,7 @@ impl<T> Channel<T> {

             // If we reached the end of the block, wait until the next one is installed.
             if offset == BLOCK_CAP {
-                backoff.snooze();
+                backoff.spin_heavy();
                 tail = self.tail.index.load(Ordering::Acquire);
                 block = self.tail.block.load(Ordering::Acquire);
                 continue;
@@ -247,7 +247,7 @@ impl<T> Channel<T> {
                     return true;
                 },
                 Err(_) => {
-                    backoff.spin();
+                    backoff.spin_light();
                     tail = self.tail.index.load(Ordering::Acquire);
                     block = self.tail.block.load(Ordering::Acquire);
                 }
@@ -286,7 +286,7 @@ impl<T> Channel<T> {

             // If we reached the end of the block, wait until the next one is installed.
             if offset == BLOCK_CAP {
-                backoff.snooze();
+                backoff.spin_heavy();
                 head = self.head.index.load(Ordering::Acquire);
                 block = self.head.block.load(Ordering::Acquire);
                 continue;
@@ -320,7 +320,7 @@ impl<T> Channel<T> {
             // The block can be null here only if the first message is being sent into the channel.
             // In that case, just wait until it gets initialized.
             if block.is_null() {
-                backoff.snooze();
+                backoff.spin_heavy();
                 head = self.head.index.load(Ordering::Acquire);
                 block = self.head.block.load(Ordering::Acquire);
                 continue;
@@ -351,7 +351,7 @@ impl<T> Channel<T> {
                     return true;
                 },
                 Err(_) => {
-                    backoff.spin();
+                    backoff.spin_light();
                     head = self.head.index.load(Ordering::Acquire);
                     block = self.head.block.load(Ordering::Acquire);
                 }
@@ -542,7 +542,7 @@ impl<T> Channel<T> {
                         // New updates to tail will be rejected by MARK_BIT and aborted unless it's
                         // at boundary. We need to wait for the updates take affect otherwise there
                         // can be memory leaks.
-                        backoff.snooze();
+                        backoff.spin_heavy();
                         tail = self.tail.index.load(Ordering::Acquire);
                     }

‎library/std/src/sync/mpmc/mod.rs

Lines changed: 1 addition & 1 deletion

@@ -43,7 +43,7 @@ mod zero;
 use crate::fmt;
 use crate::panic::{RefUnwindSafe, UnwindSafe};
 use crate::time::{Duration, Instant};
-use error::*;
+pub use error::*;

 /// Creates a channel of unbounded capacity.
 ///

‎library/std/src/sync/mpmc/utils.rs

Lines changed: 15 additions & 16 deletions

@@ -91,9 +91,8 @@ impl<T> DerefMut for CachePadded<T> {
 }

 const SPIN_LIMIT: u32 = 6;
-const YIELD_LIMIT: u32 = 10;

-/// Performs exponential backoff in spin loops.
+/// Performs quadratic backoff in spin loops.
 pub struct Backoff {
     step: Cell<u32>,
 }
@@ -104,25 +103,27 @@ impl Backoff {
         Backoff { step: Cell::new(0) }
     }

-    /// Backs off in a lock-free loop.
+    /// Backs off using lightweight spinning.
     ///
-    /// This method should be used when we need to retry an operation because another thread made
-    /// progress.
+    /// This method should be used for:
+    /// - Retrying an operation because another thread made progress. i.e. on CAS failure.
+    /// - Waiting for an operation to complete by spinning optimistically for a few iterations
+    ///   before falling back to parking the thread (see `Backoff::is_completed`).
     #[inline]
-    pub fn spin(&self) {
+    pub fn spin_light(&self) {
         let step = self.step.get().min(SPIN_LIMIT);
         for _ in 0..step.pow(2) {
             crate::hint::spin_loop();
         }

-        if self.step.get() <= SPIN_LIMIT {
-            self.step.set(self.step.get() + 1);
-        }
+        self.step.set(self.step.get() + 1);
     }

-    /// Backs off in a blocking loop.
+    /// Backs off using heavyweight spinning.
+    ///
+    /// This method should be used in blocking loops where parking the thread is not an option.
     #[inline]
-    pub fn snooze(&self) {
+    pub fn spin_heavy(&self) {
         if self.step.get() <= SPIN_LIMIT {
             for _ in 0..self.step.get().pow(2) {
                 crate::hint::spin_loop()
@@ -131,14 +132,12 @@ impl Backoff {
             crate::thread::yield_now();
         }

-        if self.step.get() <= YIELD_LIMIT {
-            self.step.set(self.step.get() + 1);
-        }
+        self.step.set(self.step.get() + 1);
     }

-    /// Returns `true` if exponential backoff has completed and blocking the thread is advised.
+    /// Returns `true` if quadratic backoff has completed and parking the thread is advised.
     #[inline]
     pub fn is_completed(&self) -> bool {
-        self.step.get() > YIELD_LIMIT
+        self.step.get() > SPIN_LIMIT
     }
 }
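
For context, not part of the diff above: `Backoff` is private to the channel internals, so the following is only a standalone sketch of the same spin-then-park pattern, with an invented `wait_until_ready` helper. It spins quadratically for up to `SPIN_LIMIT` steps, then stops spinning; a real caller would park the thread once backoff is completed, whereas this sketch merely yields.

    use std::hint;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::thread;

    const SPIN_LIMIT: u32 = 6;

    // Quadratic backoff: the amount of spinning grows with the square of the
    // step count, capped at SPIN_LIMIT, after which we stop burning CPU.
    fn wait_until_ready(flag: &AtomicBool) {
        let mut step: u32 = 0;
        while !flag.load(Ordering::Acquire) {
            if step > SPIN_LIMIT {
                // Backoff "completed": a real implementation would park here.
                thread::yield_now();
                continue;
            }
            for _ in 0..step.pow(2) {
                hint::spin_loop();
            }
            step += 1;
        }
    }

    fn main() {
        static READY: AtomicBool = AtomicBool::new(false);
        let waiter = thread::spawn(|| wait_until_ready(&READY));
        READY.store(true, Ordering::Release);
        waiter.join().unwrap();
    }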

‎library/std/src/sync/mpmc/zero.rs

Lines changed: 1 addition & 1 deletion

@@ -57,7 +57,7 @@ impl<T> Packet<T> {
     fn wait_ready(&self) {
         let backoff = Backoff::new();
         while !self.ready.load(Ordering::Acquire) {
-            backoff.snooze();
+            backoff.spin_heavy();
         }
     }
 }
