diff --git a/Cargo.lock b/Cargo.lock index de14121..013f5d7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -218,6 +218,12 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb" +[[package]] +name = "glob" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" + [[package]] name = "half" version = "2.7.1" @@ -246,6 +252,16 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" +[[package]] +name = "indexmap" +version = "2.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" +dependencies = [ + "equivalent", + "hashbrown", +] + [[package]] name = "is-terminal" version = "0.4.17" @@ -325,6 +341,7 @@ dependencies = [ "oscars_derive", "rustc-hash", "thin-vec", + "trybuild", ] [[package]] @@ -497,6 +514,15 @@ dependencies = [ "zmij", ] +[[package]] +name = "serde_spanned" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "876ac351060d4f882bb1032b6369eb0aef79ad9df1ea8bc404874d8cc3d0cd98" +dependencies = [ + "serde_core", +] + [[package]] name = "syn" version = "2.0.117" @@ -519,6 +545,21 @@ dependencies = [ "syn", ] +[[package]] +name = "target-triple" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "591ef38edfb78ca4771ee32cf494cb8771944bee237a9b91fc9c1424ac4b777b" + +[[package]] +name = "termcolor" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" +dependencies = [ + "winapi-util", +] + [[package]] name = "thin-vec" version = "0.2.14" @@ -535,6 
+576,60 @@ dependencies = [ "serde_json", ] +[[package]] +name = "toml" +version = "1.1.0+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8195ca05e4eb728f4ba94f3e3291661320af739c4e43779cbdfae82ab239fcc" +dependencies = [ + "indexmap", + "serde_core", + "serde_spanned", + "toml_datetime", + "toml_parser", + "toml_writer", + "winnow", +] + +[[package]] +name = "toml_datetime" +version = "1.1.0+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97251a7c317e03ad83774a8752a7e81fb6067740609f75ea2b585b569a59198f" +dependencies = [ + "serde_core", +] + +[[package]] +name = "toml_parser" +version = "1.1.0+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2334f11ee363607eb04df9b8fc8a13ca1715a72ba8662a26ac285c98aabb4011" +dependencies = [ + "winnow", +] + +[[package]] +name = "toml_writer" +version = "1.1.0+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d282ade6016312faf3e41e57ebbba0c073e4056dab1232ab1cb624199648f8ed" + +[[package]] +name = "trybuild" +version = "1.0.116" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47c635f0191bd3a2941013e5062667100969f8c4e9cd787c14f977265d73616e" +dependencies = [ + "glob", + "serde", + "serde_derive", + "serde_json", + "target-triple", + "termcolor", + "toml", +] + [[package]] name = "unicode-ident" version = "1.0.22" @@ -630,6 +725,12 @@ dependencies = [ "windows-link", ] +[[package]] +name = "winnow" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a90e88e4667264a994d34e6d1ab2d26d398dcdca8b7f52bec8668957517fc7d8" + [[package]] name = "zerocopy" version = "0.8.40" diff --git a/notes/api-redesign-prototype/api_redesign_proposal.md b/notes/api-redesign-prototype/api_redesign_proposal.md new file mode 100644 index 0000000..d70c852 --- /dev/null +++ 
b/notes/api-redesign-prototype/api_redesign_proposal.md @@ -0,0 +1,169 @@ +# GC API Redesign Proposal + +**Status**: RFC + +## Problem + +Current `boa_gc` uses implicit rooting via `Clone`/`Drop` on `Gc`. Every clone touches root counts, adding overhead in hot VM paths. It also needs `thread_local`, blocking `no_std`. + +This proposes lifetime-branded `Gc<'gc, T>` for zero cost pointers and explicit `Root` for persistence. + +## Core API + +### Gc Pointer + +```rust +pub struct Gc<'gc, T: Trace + ?Sized + 'gc> { + ptr: NonNull>, + _marker: PhantomData<(&'gc T, *const ())>, +} + +impl<'gc, T: Trace + ?Sized + 'gc> Copy for Gc<'gc, T> {} +``` + +### Mutability via GcRefCell +```rust +pub struct GcRefCell { + inner: RefCell, +} +``` +`GcRefCell` safely traces internal values statically behind a dynamically borrowed `RefCell`, providing `GcRef` and `GcRefMut` access similar to native `Rc/RefCell` combinations. Allows internal JavaScript arrays and objects to be mutated during the GC trace safely. + +### Weak Reference Separation +```rust +pub struct WeakGc { + ptr: NonNull>, +} + +impl WeakGc { + pub fn upgrade<'gc>(&self, cx: &MutationContext<'gc>) -> Option> { ... } +} +``` +Weak references drop their tie to the single `'gc` lifetime. Instead, they are upgraded back into strong `Gc` pointers only when explicitly bound against an active safe `MutationContext<'gc>`. + +The `'gc` lifetime ties the pointer to its collector. Copying is free, no root count manipulation. + +### Root for Persistence + +```rust +pub struct Root { + link: RootLink, // Intrusive list node (prev/next only), at offset 0 so bare link* == Root* + gc_ptr: NonNull>, // T: Sized keeps this thin for type erased offset_of! + /// Cross collector misuse detection only, plays no role in unlinking. + collector_id: u64, + _marker: PhantomData<*const ()>, +} + +impl Root { + pub fn get<'gc>(&self, cx: &MutationContext<'gc>) -> Gc<'gc, T> { + assert_eq!(self.collector_id, cx.collector.id); + // ... 
+ } +} + +impl Drop for Root { + fn drop(&mut self) { + // O(1) self unlink: splice prev/next together, no list reference needed + if self.link.is_linked() { + unsafe { + RootLink::unlink(NonNull::from(&self.link)); + } + } + } +} +``` + +`Root` escapes the `'gc` lifetime. Returned as `Pin>>` for stable addresses (required by the intrusive list). Stores `collector_id` to catch cross-collector misuse at runtime — it is **not** used during unlink; `Drop` only touches the embedded `prev`/`next` pointers. + +**No `Rc` required.** A root only needs its own embedded `prev`/`next` pointers to remove itself from the list. The `Collector` owns a **sentinel** node; insertion and removal are pure pointer surgery with no allocation and no reference counting. + +### MutationContext + +```rust +pub struct MutationContext<'gc> { + collector: &'gc Collector, +} + +impl<'gc> MutationContext<'gc> { + pub fn alloc(&self, value: T) -> Gc<'gc, T> { ... } + pub fn root(&self, gc: Gc<'gc, T>) -> Pin>> { ... } + pub fn collect(&self) { ... } +} +``` + +Uses `&self` with `RefCell` inside for multiple concurrent allocations. + +### Sentinel Node & Root Traversal + +The `Collector` owns one **pinned sentinel** `RootLink` (a bare link node with no payload): + +```text +Collector::sentinel -> root_a.link -> root_b.link -> root_c.link -> None +``` + +Roots insert themselves immediately after the sentinel via `RootLink::link_after`. During collection, `RootLink::iter_from_sentinel(sentinel)` starts from `sentinel.next`, so the sentinel itself is never yielded. For each link, `gc_ptr` is recovered via `offset_of!(Root, gc_ptr)` and used to mark the allocation. + +### Entry Point + +```rust +pub struct GcContext { + collector: Collector, +} + +impl GcContext { + pub fn new() -> Self { ... } + pub fn mutate(&self, f: impl for<'gc> FnOnce(&MutationContext<'gc>) -> R) -> R { ... } +} +``` + +By owning the `Collector`, `GcContext` defines the entire host timeline. 
The `for<'gc>` pattern from gc-arena creates a unique lifetime isolating active context mutations per arena. + +### Tracing Mechanism +```rust +pub trait Trace { + fn trace(&mut self, tracer: &mut Tracer); +} + +pub trait Finalize { + fn finalize(&self) {} +} +``` +Note: `trace` takes `&mut self` instead of `&self`, ensuring that potential moving collectors have exclusive layout rights during traces. + +## vs Current Oscars + +| | Current | Proposed | +|---|---------|----------| +| **Pointer** | `Gc` | `Gc<'gc, T>` | +| **Lifetime** | `'static` + `extend_lifetime()` | `'gc` branded | +| **Rooting** | Implicit (inc/dec on clone/drop) | Explicit (`Root`) | +| **Copy cost** | Cell write | Zero | +| **Drop cost** | TLS access (futex lock) | Zero (Copy type) | +| **Isolation** | Runtime only | Compile-time + runtime validation | + +## Why This Works + +**no_std Compatible**: No `thread_local` needed. + +**Performance**: `Gc` copying is just memcpy, no root count overhead. + +**Allocation**: Uses `mempool3::PoolAllocator` with size-class pooling instead of individual `Box` allocations, avoiding fragmentation. 
+ +**Safety**: +- Cross-context caught at compile time for `Gc` +- Cross-collector caught at runtime for `Root` +- Explicit `!Send`/`!Sync` prevents threading bugs +- Intrusive sentinel based linked list for O(1) insertion and self-unlink +- `Root` holds **no `Rc`**, unlink is pure pointer surgery on embedded `prev`/`next` +- `Pin>>` guarantees stable node addresses while linked + +## Open Questions + +- FFI boundaries (native functions receiving `Gc` pointers) +- Migration path (thousands of `Gc` uses in Boa) +- Real benchmark numbers + +## References + +- gc-arena: https://github.com/kyren/gc-arena +- boa#2631: https://github.com/boa-dev/boa/issues/2631 diff --git a/notes/api-redesign-prototype/prototype_findings.md b/notes/api-redesign-prototype/prototype_findings.md new file mode 100644 index 0000000..93b6c1a --- /dev/null +++ b/notes/api-redesign-prototype/prototype_findings.md @@ -0,0 +1,211 @@ +# Prototype Findings + +Prototyping lifetime-branded GC API for Boa. Testing if `Gc<'gc, T>` + `Root` is viable. + +Works, but migration will be challenging. + +## Current Oscars Model + +```rust +// Clone/Drop touch root_count +impl Clone for Gc { + fn clone(&self) -> Self { + self.inner_ptr().as_inner_ref().inc_roots(); + // ... + } +} +``` + +Every clone/drop modifies root count. Adds up in hot loops. + +## Proposed Alternative + +```rust +impl<'gc, T: Trace> Copy for Gc<'gc, T> {} +``` + +Zero cost. Lifetime proves validity. + +## Design Decisions + +### Lifetime Branding + +**Runtime check**: `assert_eq!(self.context_id, CURRENT.get())` - cost on every access + +**Lifetime**: `Gc<'gc, T>` - compiler enforces, zero runtime cost + +### Interior Mutability + +`&mut dyn GarbageCollector` breaks: + +```rust +let a = cx.allocate(1); // cx borrowed mutably +let b = cx.allocate(2); // ERROR: still borrowed +``` + +Fix: `RefCell` inside collector, take `&self`. + +### Explicit Rooting + +`'gc` lifetime must end. 
Long-lived refs need escape hatch: + +```rust +struct JsContext { + global_object: Root, // escapes 'gc +} +``` + +Root re-enters via `root.get(&cx)`. + +### Collector ID Validation + +Problem: `Root` from collector A used with context B → dangling pointer. + +Solution: Each collector gets unique ID, `Root` validates: + +```rust +impl Root { + pub fn get<'gc>(&self, cx: &MutationContext<'gc>) -> Gc<'gc, T> { + assert_eq!(self.collector_id, cx.collector.id); + // ... + } +} +``` + +Catches cross-collector misuse where lifetimes can't help. + +### Gc Access Safety + +**Q**: How do we prevent `Gc::get()` from accessing dead allocations? + +Lifetime branding: `Gc<'gc, T>` can only exist within a `mutate()` closure and collection happens in the same scope via `cx.collect()`. The `'gc` lifetime ensures we can't hold a `Gc` pointer across a collection point. The compiler statically guarantees that all live `Gc<'gc, T>` values are on the stack during the `'gc` lifetime, so no runtime checks are needed in `Gc::get()` + +```rust +ctx.mutate(|cx| { + let obj = cx.alloc(JsObject { ... }); // Gc<'gc, JsObject> + cx.collect(); + obj.get() // Safe! 'gc lifetime proves it survived collection +}); +// obj is gone here - 'gc lifetime ended +``` + +See compile-fail tests in `examples/api_prototype/tests/ui/` for examples of what the compiler prevents (escaping mutate(), cross context usage). + +### Root Cleanup - `intrusive_collections` Design + +Problem: Root registered but never removed -> memory leak. Collector dropped before root -> UAF if roots were a raw pointer. + +Taking the `intrusive_collections` crate as inspiration, here is what we adopted and why: + +#### What we adopted + +1. **Pure Link Type (`RootLink`)**: Contains only `prev` and `next` pointers. No payload. +2. **O(1) Self Removal**: `unlink` drops nodes safely without a reference to the `Collector`. +3. **Double Unlink Protection**: `is_linked()` enforces safe dropping. +4. 
**Sentinel Node**: `Collector` owns a pinned `RootLink` as the list head. +5. **Type Erased Marking**: `Root` is `#[repr(C)]` with `link` at offset 0. The GC walks the links and recovers pointers using `offset_of!`. No `Trace` bound is needed. + +#### Evolution of approaches + +| Approach | Problem | +|---|---| +| `Vec` + `retain` | O(n^2) worst case to drop n roots | +| `Rc>` | Extra allocation and `Rc` clone per root | +| Impure link with `gc_ptr` inside | Mixes list logic with payload data | +| **Current: Pure `RootLink`** | O(1) operations, zero `Rc`, clean separation | + + +### Allocation Strategy + +Prototype now uses `mempool3::PoolAllocator`: + +- Size-class pooling with slot reuse +- O(1) allocation with cached slot pools +- O(log n) deallocation via sorted range index +- Arena recycling reduces OS allocation pressure +- Uses `try_alloc_bytes` for layout based allocation to support `'gc` lifetimes in user types + + +### !Send/!Sync + +Single threaded GC. Explicit bounds prevent cross thread bugs. + +## Validated + +**Compile-time isolation**: Borrow checker prevents mixing `Gc` from different contexts. + +**Runtime cross-collector detection**: `Root::get()` panics on wrong collector. + +**Root cleanup**: Drop removes from root list. + +**Interior Mutability Tracing**: Using `GcRefCell` allows `RefCell` semantics to persist efficiently while fulfilling `Trace` safety requirements without borrowing errors. + +**Scopeless Weak Binding**: `WeakGc` survives successfully unbranded and can trace/upgrade against an arbitrary temporal `MutationContext` when actively touched again. + +**Functional Builtin Prototyping**: Explicit tests matching exactly against definitions like `Array.prototype.push` (taking a `&Gc<'gc, GcRefCell>>` + `arg` buffer bound to `_cx: &MutationContext<'gc>`) compiled gracefully and safely. 
+ +### Performance + +| Operation | Current | Proposed | +|-----------|---------|----------| +| `Gc::clone()` | Cell write | memcpy | +| `Gc::drop()` | Cell write | nothing | +| Root creation | N/A | O(1) | +| Root drop | N/A | O(1) | + +## Challenges + +**Collection timing**: When can GC run safely? Safe because all `Gc<'gc, T>` are on stack. Lifetime ensures no use after collection. + +**FFI**: Native functions receive values but lifetimes don't cross FFI. Need handle scopes or root at boundary. + +**Migration**: Boa has thousands of `Gc` uses. Need to add `'gc` everywhere. Phasing gradually starting with isolated systems can be done + +### `Pin<&mut Root>` for Escaping Roots + +Raised during review: could we use `Pin<&mut Root>` instead of `Pin>>` to avoid a heap allocation per root? + +**No, not for escaping roots.** Stack allocation fails because: + +1. `Root` is created inside `mutate()`. +2. Escaping roots must outlive `mutate()`. +3. `Pin<&mut>` requires a stable address. + +We cannot move a `&mut` out of its closure frame without changing its address and violating `Pin` + +`Pin>` fixes this: the pointer moves out, but the heap allocation stays fixed. Cost belongs to one `Box` per root. + +#### Workaround: `root_in_place` + +Zero allocation is possible if the caller pre-allocates the `Root` slot on the outer stack: + +```rust +let mut slot = std::mem::MaybeUninit::>::uninit(); + +ctx.mutate(|cx| { + let obj = cx.alloc(JsObject { name: "global".into(), value: 0 }); + let root = cx.root_in_place(&mut slot, obj); +}); + +let root = unsafe { slot.assume_init_ref() }; +``` + +`root_in_place` writes into the slot, pins it, links it and returns `Pin<&mut Root>`. This matches V8's `HandleScope`: no allocation, O(1) creation. + +**Reasons to skip this for now:** +1. Caller must know `T` upfront to size the `MaybeUninit` slot. +2. Requires `unsafe` to read the slot later. +3. `Pin>` is simpler and safer for validating the core API right now. 
+ +*We can prototype this later if needed.* + + +## Conclusion + +`Gc<'gc, T>` + `Root` is: +- **Sound**: Compile-time catches misuse +- **Runtime-safe**: Collector ID validation catches Root misuse +- **Fast**: Zero cost transient pointers +- **Feasible**: Can coexist with current API + +Main risk is migration effort, we can go with the phased approach diff --git a/oscars/Cargo.toml b/oscars/Cargo.toml index 1f5ff97..afe9ddf 100644 --- a/oscars/Cargo.toml +++ b/oscars/Cargo.toml @@ -11,6 +11,7 @@ thin-vec = { version = "0.2", optional = true } [dev-dependencies] criterion = { version = "0.5", features = ["html_reports"] } +trybuild = "1.0" boa_gc = { git = "https://github.com/boa-dev/boa", branch = "main" } diff --git a/oscars/examples/api_prototype/cell.rs b/oscars/examples/api_prototype/cell.rs new file mode 100644 index 0000000..a7f37d4 --- /dev/null +++ b/oscars/examples/api_prototype/cell.rs @@ -0,0 +1,50 @@ +use crate::trace::{Finalize, Trace, Tracer}; +use core::cell::{Ref, RefCell, RefMut}; + +pub struct GcRefCell { + inner: RefCell, +} + +impl GcRefCell { + pub fn new(value: T) -> Self { + Self { + inner: RefCell::new(value), + } + } + pub fn borrow(&self) -> GcRef<'_, T> { + GcRef(self.inner.borrow()) + } + pub fn borrow_mut(&self) -> GcRefMut<'_, T> { + GcRefMut(self.inner.borrow_mut()) + } +} + +pub struct GcRef<'a, T: Trace>(Ref<'a, T>); +impl<'a, T: Trace> core::ops::Deref for GcRef<'a, T> { + type Target = T; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +pub struct GcRefMut<'a, T: Trace>(RefMut<'a, T>); +impl<'a, T: Trace> core::ops::Deref for GcRefMut<'a, T> { + type Target = T; + fn deref(&self) -> &Self::Target { + &self.0 + } +} +impl<'a, T: Trace> core::ops::DerefMut for GcRefMut<'a, T> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl Trace for GcRefCell { + fn trace(&mut self, tracer: &mut Tracer) { + if let Ok(mut inner) = self.inner.try_borrow_mut() { + inner.trace(tracer); + } + } +} +impl Finalize 
for GcRefCell {} diff --git a/oscars/examples/api_prototype/gc.rs b/oscars/examples/api_prototype/gc.rs new file mode 100644 index 0000000..bc64cd1 --- /dev/null +++ b/oscars/examples/api_prototype/gc.rs @@ -0,0 +1,307 @@ +use crate::root_list::RootLink; +use crate::trace::{Finalize, Trace}; +use crate::weak::WeakGc; +use core::alloc::Layout; +use core::cell::{Cell, RefCell}; +use core::marker::PhantomData; +use core::mem::offset_of; +use core::pin::Pin; +use core::ptr::NonNull; +use oscars::alloc::mempool3::PoolAllocator; +use std::sync::atomic::{AtomicU64, Ordering}; + +static NEXT_COLLECTOR_ID: AtomicU64 = AtomicU64::new(1); + +pub(crate) struct GcBox { + pub(crate) marked: Cell, + pub(crate) value: T, +} + +#[derive(Debug)] +pub struct Gc<'gc, T: Trace + ?Sized + 'gc> { + pub(crate) ptr: NonNull>, + pub(crate) _marker: PhantomData<(&'gc T, *const ())>, +} + +impl<'gc, T: Trace + ?Sized + 'gc> Copy for Gc<'gc, T> {} +impl<'gc, T: Trace + ?Sized + 'gc> Clone for Gc<'gc, T> { + fn clone(&self) -> Self { + *self + } +} + +impl<'gc, T: Trace + 'gc> Gc<'gc, T> { + pub fn get(&self) -> &T { + unsafe { &(*self.ptr.as_ptr()).value } + } +} + +/// Pinned root handle that keeps a GC allocation live across `mutate()` boundaries. +/// +/// Uses an intrusive linked list. `#[repr(C)]` with `link` first allows +/// casting `*mut RootLink` directly to `*mut Root` without pointer math. +/// +/// `Pin>>` keeps the list link stable in memory. +/// `T: Sized` ensures `gc_ptr` is a single word thin pointer, making +/// type-erased `*mut u8` collector reads sound +#[must_use = "roots must be kept alive to prevent collection"] +#[repr(C)] +pub struct Root { + /// Intrusive list node. Placed at offset 0 for direct base pointer casting. + pub(crate) link: RootLink, + /// GC allocation pointer. `T: Sized` ensures this is a thin pointer + pub(crate) gc_ptr: NonNull>, + /// ID of the `Collector` that owns this root (for misuse detection). 
+ pub(crate) collector_id: u64, + pub(crate) _marker: PhantomData<*const ()>, +} + +impl Root { + pub fn get<'gc>(&self, cx: &MutationContext<'gc>) -> Gc<'gc, T> { + assert_eq!( + self.collector_id, cx.collector.id, + "root from different collector" + ); + Gc { + ptr: self.gc_ptr, + _marker: PhantomData, + } + } + + pub fn belongs_to(&self, cx: &MutationContext<'_>) -> bool { + self.collector_id == cx.collector.id + } +} + +impl Drop for Root { + fn drop(&mut self) { + // SAFETY: Node address is stable, neighbors outlive this node. + // Unlinking touches only the embedded prev/next pointers. + if self.link.is_linked() { + unsafe { + RootLink::unlink(NonNull::from(&self.link)); + } + } + } +} + +// `link` is at offset 0, so its pointer represents the entire `Root`. +// To find `gc_ptr`, we just add its expected offset. `T: Sized` ensures +// `gc_ptr` is a thin pointer, making the raw `*mut u8` read safe. + +/// Gets gc_ptr from a RootLink pointer without knowing the generic type `T` +/// +/// # Safety +/// * `link` must point to a real Root for some `T: Trace + Sized` +/// * `gc_ptr_offset` must be the exact distance to `gc_ptr` for that `T` +#[inline(always)] +unsafe fn gc_ptr_from_link(link: NonNull, gc_ptr_offset: usize) -> *mut u8 { + // Both point to the exact same memory address + let gc_ptr_field = unsafe { (link.as_ptr() as *mut u8).add(gc_ptr_offset) }; + unsafe { *(gc_ptr_field as *const *mut u8) } +} + +struct PoolEntry { + ptr: NonNull, + drop_fn: unsafe fn(&mut PoolAllocator<'static>, NonNull), +} + +/// Owns the sentinel node that heads the intrusive root list. +/// +/// ```text +/// sentinel -> root_a.link -> root_b.link -> None +/// ``` +/// +/// Roots insert after the sentinel on creation and self-unlink on drop (both O(1)). +/// Marking walks the chain and reads `gc_ptr` via `offset_of!`. +pub struct Collector { + pub(crate) id: u64, + pool: RefCell>, + pool_entries: RefCell>, + /// Pinned sentinel: pure `RootLink`, no payload. 
Head of the root chain. + pub(crate) sentinel: Pin>, + allocation_count: Cell, +} + +impl Collector { + pub fn new() -> Self { + Self { + id: NEXT_COLLECTOR_ID.fetch_add(1, Ordering::Relaxed), + pool: RefCell::new(PoolAllocator::default()), + pool_entries: RefCell::new(Vec::new()), + sentinel: Box::pin(RootLink::new()), + allocation_count: Cell::new(0), + } + } + + pub(crate) fn alloc<'gc, T: Trace + Finalize + 'gc>(&'gc self, value: T) -> Gc<'gc, T> { + let gcbox = GcBox { + marked: Cell::new(false), + value, + }; + + let layout = Layout::new::>(); + let slot = self + .pool + .borrow_mut() + .try_alloc_bytes(layout) + .expect("pool allocation failed"); + + // SAFETY: slot has correct layout and alignment for GcBox + unsafe { + let ptr = slot.cast::>(); + ptr.as_ptr().write(gcbox); + + unsafe fn drop_and_free( + pool: &mut PoolAllocator<'static>, + ptr: NonNull, + ) { + unsafe { + core::ptr::drop_in_place(ptr.cast::>().as_ptr()); + pool.dealloc_bytes(ptr); + } + } + + self.pool_entries.borrow_mut().push(PoolEntry { + ptr: ptr.cast::(), + drop_fn: drop_and_free::, + }); + + self.allocation_count.set(self.allocation_count.get() + 1); + Gc { + ptr, + _marker: PhantomData, + } + } + } + + pub(crate) fn collect(&self) { + // SAFETY: sentinel is pinned and valid for the lifetime of Self. + let sentinel_ptr = unsafe { + NonNull::new_unchecked( + self.sentinel.as_ref().get_ref() as *const RootLink as *mut RootLink + ) + }; + + // `link` is at offset 0, so a pointer to `link` is a pointer to the start of the struct. + // `gc_ptr` comes right after it. Because `T` is `Sized`, the distance to `gc_ptr` + // is exactly the same no matter what generic type `T` is. We use `Root` as a + // dummy type just to calculate this fixed offset. 
+ let gc_ptr_offset = offset_of!(Root, gc_ptr); + + let root_count = RootLink::iter_from_sentinel(sentinel_ptr).count(); + println!( + "Collecting garbage: {} objects, {} roots", + self.allocation_count.get(), + root_count, + ); + + for link_ptr in RootLink::iter_from_sentinel(sentinel_ptr) { + // SAFETY: + // * link_ptr points to root.link inside a live Root. + // * link is at offset 0 (repr(C)), so link_ptr == root_ptr. + // * gc_ptr is thin (T: Sized); the *mut u8 read is sound. + // * GcBox.marked is at offset 0, so the cast to GcBox<()> is safe. + unsafe { + let raw_gc_ptr = gc_ptr_from_link(link_ptr, gc_ptr_offset); + let gcbox = raw_gc_ptr as *mut GcBox<()>; + (*gcbox).marked.set(true); + } + } + } +} + +impl Default for Collector { + fn default() -> Self { + Self::new() + } +} + +impl Drop for Collector { + fn drop(&mut self) { + let mut pool = self.pool.borrow_mut(); + for entry in self.pool_entries.borrow().iter() { + unsafe { + (entry.drop_fn)(&mut pool, entry.ptr); + } + } + } +} + +pub struct GcContext { + collector: Collector, +} + +impl GcContext { + pub fn new() -> Self { + Self { + collector: Collector::new(), + } + } + pub fn mutate(&self, f: impl for<'gc> FnOnce(&MutationContext<'gc>) -> R) -> R { + let cx = MutationContext { + collector: &self.collector, + _marker: PhantomData, + }; + f(&cx) + } +} + +impl Default for GcContext { + fn default() -> Self { + Self::new() + } +} + +pub struct MutationContext<'gc> { + pub(crate) collector: &'gc Collector, + pub(crate) _marker: PhantomData<*const ()>, +} + +impl<'gc> MutationContext<'gc> { + pub fn alloc(&self, value: T) -> Gc<'gc, T> { + self.collector.alloc(value) + } + + pub fn alloc_weak(&self, value: T) -> WeakGc { + let gc = self.alloc(value); + WeakGc { ptr: gc.ptr } + } + + /// Roots a `Gc<'gc, T>` and returns a `Pin>>`. + /// + /// `Pin` is required to keep the link address stable while in the list. + /// Inserts after the sentinel (O(1)), self-unlinks on drop (O(1)). 
+ pub fn root(&self, gc: Gc<'gc, T>) -> Pin>> { + let gc_ptr = gc.ptr; + + let root = Box::pin(Root { + link: RootLink::new(), + gc_ptr, + collector_id: self.collector.id, + _marker: PhantomData, + }); + + // SAFETY: + // * root is pinned: address is stable for its lifetime. + // * sentinel is pinned in Collector: outlives all roots. + // * Insertion only touches sentinel.next and root.link.prev/next. + unsafe { + let sentinel_ptr = NonNull::new_unchecked(self.collector.sentinel.as_ref().get_ref() + as *const RootLink + as *mut RootLink); + let link_ptr = NonNull::from(&root.link); + RootLink::link_after(sentinel_ptr, link_ptr); + } + + root + } + + pub fn collector_id(&self) -> u64 { + self.collector.id + } + + pub fn collect(&self) { + self.collector.collect(); + } +} diff --git a/oscars/examples/api_prototype/main.rs b/oscars/examples/api_prototype/main.rs new file mode 100644 index 0000000..4a21341 --- /dev/null +++ b/oscars/examples/api_prototype/main.rs @@ -0,0 +1,327 @@ +//! GC API prototype based on gc-arena's lifetime pattern +//! +//! key change: `Gc<'gc, T>` is Copy (zero overhead) vs current `Gc` (inc/dec on clone/drop) +//! +//! 
Run: `cargo run --example api_prototype` + +#![allow(dead_code)] + +mod cell; +mod gc; +mod root_list; +mod trace; +mod weak; + +use cell::GcRefCell; +use gc::Gc; +use gc::{GcContext, MutationContext}; +use trace::{Finalize, Trace, Tracer}; + +struct JsObject { + name: String, + value: i32, +} + +impl Trace for JsObject { + fn trace(&mut self, tracer: &mut Tracer) { + self.name.trace(tracer); + self.value.trace(tracer); + } +} +impl Finalize for JsObject {} + +struct JsArray<'gc> { + elements: Vec>, +} + +impl<'gc> Trace for JsArray<'gc> { + fn trace(&mut self, tracer: &mut Tracer) { + for elem in &mut self.elements { + tracer.mark(elem); + } + } +} +impl<'gc> Finalize for JsArray<'gc> {} + +/// Replica of Boa Builtin Function: Array.prototype.push +/// This fully proves that standalone builtin functions can accept the `'gc` +/// context bounded pointers without lifetime errors or borrow checking issues +fn array_push<'gc>( + this: &Gc<'gc, GcRefCell>>, + args: &[Gc<'gc, JsObject>], + _cx: &MutationContext<'gc>, +) -> usize { + let mut array = this.get().borrow_mut(); + + for arg in args { + array.elements.push(*arg); + } + + array.elements.len() +} + +fn main() { + println!("GC API Prototype Example (Redesign Additions)\n"); + + let ctx = GcContext::new(); + + // example 1: boa array migration + println!("1. Boa Array Migration Example:\n"); + ctx.mutate(|cx| { + let val1 = cx.alloc(JsObject { + name: "item1".to_string(), + value: 42, + }); + let val2 = cx.alloc(JsObject { + name: "item2".to_string(), + value: 43, + }); + + let array = cx.alloc(GcRefCell::new(JsArray { + elements: Vec::new(), + })); + + println!(" Calling array_push built-in replica:"); + let new_len = array_push(&array, &[val1, val2], cx); + + println!(" Returned length: {}", new_len); + println!( + " First element value: {}\n", + array.get().borrow().elements[0].get().value + ); + }); + + // example 2: weak refs + println!("2. 
// Unit tests for the prototype GC API: allocation, rooting, weak pointers,
// and the compile-time `'gc` lifetime invariant.
#[cfg(test)]
mod tests {
    use super::*;

    // Smoke test: allocation inside a mutation scope is readable.
    #[test]
    fn context_mutate() {
        let ctx = GcContext::new();
        ctx.mutate(|cx| {
            let a = cx.alloc(42i32);
            assert_eq!(*a.get(), 42);
        });
    }

    // A root created in a mutation scope resolves back to its allocation.
    #[test]
    fn root_works_in_context() {
        let ctx = GcContext::new();
        ctx.mutate(|cx| {
            let obj = cx.alloc(123i32);
            let root = cx.root(obj);
            let gc = root.get(cx);
            assert_eq!(*gc.get(), 123);
        });
    }

    // Resolving a root against a different collector's context must panic
    // (roots are tied to the collector that created them).
    #[test]
    fn root_rejects_different_collector() {
        let ctx1 = GcContext::new();
        let ctx2 = GcContext::new();

        let root = ctx1.mutate(|cx| {
            let obj = cx.alloc(123i32);
            cx.root(obj)
        });

        let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
            ctx2.mutate(|cx| {
                let _gc = root.get(cx);
            });
        }));
        assert!(result.is_err());
    }

    // Interior mutability through GcRefCell round-trips a write.
    #[test]
    fn refcell_trace() {
        let ctx = GcContext::new();
        ctx.mutate(|cx| {
            let cell = cx.alloc(GcRefCell::new(100i32));
            *cell.get().borrow_mut() = 200;
            assert_eq!(*cell.get().borrow(), 200);
        });
    }

    #[test]
    fn root_outlives_context() {
        // Ensures escaping roots do not trigger UAF after collector drops.
        let escaped_root = {
            let ctx = GcContext::new();
            ctx.mutate(|cx| cx.root(cx.alloc(555i32)))
        };
        drop(escaped_root);
    }

    // Weak pointers: unrooted allocations are swept; rooted ones survive.
    #[test]
    fn weak_upgrade() {
        let ctx = GcContext::new();
        ctx.mutate(|cx| {
            let obj = cx.alloc(JsObject {
                name: "test".into(),
                value: 42,
            });
            let weak = cx.alloc_weak(JsObject {
                name: "weak".into(),
                value: 10,
            });

            // Sweep unrooted weak pointers.
            cx.collect();
            assert!(weak.upgrade(cx).is_none());

            // Rooted objects remain alive.
            let root = cx.root(obj);
            cx.collect();
            let _ = root.get(cx);
        });
    }

    // Allocate many unrooted objects; dropping the context must reclaim them.
    #[test]
    fn bulk_allocation_cleanup() {
        let ctx = GcContext::new();
        ctx.mutate(|cx| {
            for i in 0..100 {
                cx.alloc(JsObject {
                    name: "bulk".into(),
                    value: i,
                });
            }
        });
        // Deallocates out of scope without leaking
    }

    // trybuild compile-fail tests; skipped in release builds
    // (presumably to keep release CI fast — confirm intent).
    #[test]
    #[cfg_attr(not(debug_assertions), ignore)]
    fn compile_fail_tests() {
        let t = trybuild::TestCases::new();
        t.compile_fail("examples/api_prototype/tests/ui/*.rs");
    }

    // Tests verifying the lifetime-bounded `'gc` invariant prevents UAF
    #[test]
    fn unrooted_allocs_are_collected() {
        let ctx = GcContext::new();
        ctx.mutate(|cx| {
            let weak = cx.alloc_weak(JsObject {
                name: "ephemeral".into(),
                value: 999,
            });

            cx.collect();
            assert!(weak.upgrade(cx).is_none());
        });
    }

    #[test]
    fn pinned_root_keeps_gc_alive() {
        // Ensures Pin<Box<Root<T>>> keeps allocations alive.
        let ctx = GcContext::new();
        ctx.mutate(|cx| {
            let obj = cx.alloc(JsObject {
                name: "pinned".into(),
                value: 42,
            });

            let pinned_root = cx.root(obj);
            cx.collect();

            let gc = pinned_root.get(cx);
            assert_eq!(gc.get().value, 42);
            assert_eq!(gc.get().name, "pinned");
        });
    }

    // Dropping one root must not invalidate another.
    #[test]
    fn multiple_roots_are_independent() {
        let ctx = GcContext::new();
        ctx.mutate(|cx| {
            let obj1 = cx.alloc(100i32);
            let obj2 = cx.alloc(200i32);

            let root1 = cx.root(obj1);
            let root2 = cx.root(obj2);

            cx.collect();

            assert_eq!(*root1.get(cx).get(), 100);
            assert_eq!(*root2.get(cx).get(), 200);

            drop(root1);
            cx.collect();

            assert_eq!(*root2.get(cx).get(), 200);
        });
    }

    #[test]
    fn root_get_requires_mut_ctx() {
        // Ensures Root::get() requires a valid MutationContext<'gc>.
        let ctx = GcContext::new();

        let root = ctx.mutate(|cx| {
            let obj = cx.alloc(JsObject {
                name: "escaped".into(),
                value: 123,
            });
            cx.root(obj)
        });

        ctx.mutate(|cx| {
            let gc = root.get(cx);
            assert_eq!(gc.get().value, 123);
        });
    }

    #[test]
    fn gc_lifetime_tied_to_mut_ctx() {
        // Ensures Gc<'gc, T> cannot outlive the mutation phase.
        let ctx = GcContext::new();
        ctx.mutate(|cx| {
            let gc = cx.alloc(42i32);
            assert_eq!(*gc.get(), 42);
        });
    }

    // A root made in one mutation scope stays valid in a later scope,
    // independent of new allocations made there.
    #[test]
    fn seq_mutations_independent() {
        let ctx = GcContext::new();

        let root = ctx.mutate(|cx| {
            let obj = cx.alloc(1i32);
            cx.root(obj)
        });

        ctx.mutate(|cx| {
            let new_obj = cx.alloc(2i32);

            assert_eq!(*root.get(cx).get(), 1);
            assert_eq!(*new_obj.get(), 2);
        });
    }
}
+ pub(crate) unsafe fn link_after(anchor: NonNull, node: NonNull) { + unsafe { + let anchor_ref = anchor.as_ref(); + let node_ref = node.as_ref(); + let old_next = anchor_ref.next.get(); + + node_ref.prev.set(Some(anchor)); + node_ref.next.set(old_next); + anchor_ref.next.set(Some(node)); + + if let Some(next) = old_next { + next.as_ref().prev.set(Some(node)); + } + } + } + + /// Removes the node from the list in O(1). Sets `is_linked()` to false. + /// + /// # Safety + /// `node` must currently be linked. + pub(crate) unsafe fn unlink(node: NonNull) { + unsafe { + let node_ref = node.as_ref(); + let prev = node_ref.prev.get(); + let next = node_ref.next.get(); + + // Re-wire neighbours around this node. + if let Some(p) = prev { + p.as_ref().next.set(next); + } + if let Some(n) = next { + n.as_ref().prev.set(prev); + } + + // Clear to make is_linked() == false and catch double-unlink bugs. + node_ref.prev.set(None); + node_ref.next.set(None); + } + } + + /// Iterates all nodes after the sentinel. Skips the sentinel itself. + /// Caller uses `offset_of!` to get the `Root` from each yielded link. + pub(crate) fn iter_from_sentinel( + sentinel: NonNull, + ) -> impl Iterator> { + struct Iter { + current: Option>, + } + + impl Iterator for Iter { + type Item = NonNull; + + fn next(&mut self) -> Option { + let node = self.current?; + // SAFETY: nodes are pinned and valid during iteration. + unsafe { + self.current = node.as_ref().next.get(); + } + Some(node) + } + } + + // SAFETY: sentinel is pinned and owned by Collector. + let first = unsafe { sentinel.as_ref().next.get() }; + Iter { current: first } + } +} diff --git a/oscars/examples/api_prototype/tests/ui/gc_cannot_escape_mutate.rs b/oscars/examples/api_prototype/tests/ui/gc_cannot_escape_mutate.rs new file mode 100644 index 0000000..b8165de --- /dev/null +++ b/oscars/examples/api_prototype/tests/ui/gc_cannot_escape_mutate.rs @@ -0,0 +1,30 @@ +//! 
Compile fail test: Gc<'gc, T> cannot escape the mutate() closure +//! +//! The safety is enforced at compile time via the 'gc lifetime. + +struct GcContext; +struct MutationContext<'gc>(&'gc ()); +struct Gc<'gc, T>(&'gc T); + +impl GcContext { + fn new() -> Self { GcContext } + fn mutate(&self, f: impl for<'gc> FnOnce(&MutationContext<'gc>) -> R) -> R { + f(&MutationContext(&())) + } +} + +impl<'gc> MutationContext<'gc> { + fn alloc(&self, _v: T) -> Gc<'gc, T> { todo!() } +} + +fn main() { + let ctx = GcContext::new(); + + // This MUST fail to compile: Gc cannot escape mutate() + let escaped = ctx.mutate(|cx| { + cx.alloc(42i32) // Gc<'gc, i32> tied to closure lifetime + }); + + // If this compiled, we could access dead memory + let _ = escaped; +} diff --git a/oscars/examples/api_prototype/tests/ui/gc_cannot_escape_mutate.stderr b/oscars/examples/api_prototype/tests/ui/gc_cannot_escape_mutate.stderr new file mode 100644 index 0000000..7a938d7 --- /dev/null +++ b/oscars/examples/api_prototype/tests/ui/gc_cannot_escape_mutate.stderr @@ -0,0 +1,9 @@ +error: lifetime may not live long enough + --> examples/api_prototype/tests/ui/gc_cannot_escape_mutate.rs:25:9 + | +24 | let escaped = ctx.mutate(|cx| { + | --- return type of closure is Gc<'2, i32> + | | + | has type `&MutationContext<'1>` +25 | cx.alloc(42i32) // Gc<'gc, i32> tied to closure lifetime + | ^^^^^^^^^^^^^^^ returning this value requires that `'1` must outlive `'2` diff --git a/oscars/examples/api_prototype/tests/ui/gc_cannot_store_outer_scope.rs b/oscars/examples/api_prototype/tests/ui/gc_cannot_store_outer_scope.rs new file mode 100644 index 0000000..559471f --- /dev/null +++ b/oscars/examples/api_prototype/tests/ui/gc_cannot_store_outer_scope.rs @@ -0,0 +1,34 @@ +//! Compile-fail test: Gc reference cannot be stored beyond 'gc lifetime +//! +//! This demonstrates that the 'gc lifetime prevents storing Gc pointers +//! in locations that outlive the mutation context. 
+ +struct GcContext; +struct MutationContext<'gc>(&'gc ()); +struct Gc<'gc, T>(&'gc T); + +impl GcContext { + fn new() -> Self { GcContext } + fn mutate(&self, f: impl for<'gc> FnOnce(&MutationContext<'gc>) -> R) -> R { + f(&MutationContext(&())) + } +} + +impl<'gc> MutationContext<'gc> { + fn alloc(&self, _v: T) -> Gc<'gc, T> { todo!() } +} + +struct Holder<'a> { + gc: Gc<'a, i32>, +} + +fn main() { + let ctx = GcContext::new(); + let mut holder: Option> = None; + + // This MUST fail: can't store Gc in outer scope + ctx.mutate(|cx| { + let gc = cx.alloc(42); + holder = Some(Holder { gc }); // ERROR: 'gc does not live long enough + }); +} diff --git a/oscars/examples/api_prototype/tests/ui/gc_cannot_store_outer_scope.stderr b/oscars/examples/api_prototype/tests/ui/gc_cannot_store_outer_scope.stderr new file mode 100644 index 0000000..d5a0d4a --- /dev/null +++ b/oscars/examples/api_prototype/tests/ui/gc_cannot_store_outer_scope.stderr @@ -0,0 +1,11 @@ +error[E0521]: borrowed data escapes outside of closure + --> examples/api_prototype/tests/ui/gc_cannot_store_outer_scope.rs:32:9 + | +27 | let mut holder: Option> = None; + | ---------- `holder` declared here, outside of the closure body +... 
+30 | ctx.mutate(|cx| { + | -- `cx` is a reference that is only valid in the closure body +31 | let gc = cx.alloc(42); +32 | holder = Some(Holder { gc }); // ERROR: 'gc does not live long enough + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `cx` escapes the closure body here diff --git a/oscars/examples/api_prototype/trace.rs b/oscars/examples/api_prototype/trace.rs new file mode 100644 index 0000000..b2d32e6 --- /dev/null +++ b/oscars/examples/api_prototype/trace.rs @@ -0,0 +1,28 @@ +use crate::gc::Gc; +use core::marker::PhantomData; + +pub trait Finalize { + fn finalize(&self) {} +} + +pub trait Trace { + fn trace(&mut self, tracer: &mut Tracer); +} + +pub struct Tracer<'a> { + pub(crate) _marker: PhantomData<&'a ()>, +} + +impl Tracer<'_> { + pub fn mark(&mut self, _gc: &mut Gc<'_, T>) {} +} + +impl Trace for i32 { + fn trace(&mut self, _: &mut Tracer) {} +} +impl Finalize for i32 {} + +impl Trace for String { + fn trace(&mut self, _: &mut Tracer) {} +} +impl Finalize for String {} diff --git a/oscars/examples/api_prototype/weak.rs b/oscars/examples/api_prototype/weak.rs new file mode 100644 index 0000000..e8e3f47 --- /dev/null +++ b/oscars/examples/api_prototype/weak.rs @@ -0,0 +1,47 @@ +use crate::gc::{Gc, GcBox, MutationContext}; +use crate::trace::{Finalize, Trace, Tracer}; +use core::marker::PhantomData; +use core::ptr::NonNull; + +pub struct WeakGc { + pub(crate) ptr: NonNull>, +} + +impl WeakGc { + pub fn upgrade<'gc>(&self, _cx: &MutationContext<'gc>) -> Option> { + unsafe { + let marked = (*self.ptr.as_ptr()).marked.get(); + if marked { + Some(Gc { + ptr: self.ptr, + _marker: PhantomData, + }) + } else { + None + } + } + } +} + +impl Clone for WeakGc { + fn clone(&self) -> Self { + Self { ptr: self.ptr } + } +} + +pub struct WeakMap { + _marker: PhantomData<(K, V)>, +} + +impl WeakMap { + pub fn new() -> Self { + Self { + _marker: PhantomData, + } + } +} + +impl Trace for WeakMap { + fn trace(&mut self, _tracer: &mut Tracer) {} +} +impl Finalize for WeakMap {} diff 
    /// Extend the lifetime to `'static` for collector-owned pools.
    ///
    /// # Safety
    /// Caller must ensure the pool outlives any usage of the returned pointer.
    /// Made public to support GC collectors outside the oscars crate that own
    /// a `PoolAllocator` and need to store pointers with extended lifetimes.
    pub unsafe fn extend_lifetime(self) -> PoolPointer<'static, T> {
        PoolPointer(self.0, PhantomData)
    }