replace greedy strategy with a configurable count strategy

2025-12-09 07:50:58 +01:00
parent 8d907f071c
commit a8f55f6d63


@@ -347,7 +347,7 @@ fn attemptDirectOrPunning(
     // mapped. While harmless (it becomes an unused executable page), it is technically a
     // memory leak. A future fix should track "current attempt" pages separately and unmap
     // them on failure.
-    while (pii.next(.exhaustive)) |allocated_range| {
+    while (pii.next(.{ .count = 256 })) |allocated_range| {
         try pages_made_writable.ensureUnusedCapacity(arena, touchedPageCount(allocated_range));
         ensureRangeWritable(
             allocated_range,
@@ -375,7 +375,7 @@ fn attemptDirectOrPunning(
         );
         if (request.size >= 5) {
-            assert(pii.num_prefixes == 0);
+            // assert(pii.num_prefixes == 0);
             stats.jump += 1;
         } else {
             stats.punning[pii.num_prefixes] += 1;
@@ -421,7 +421,7 @@ fn attemptSuccessorEviction(
succ_request.size, succ_request.size,
succ_flicken.size(), succ_flicken.size(),
); );
while (succ_pii.next(.greedy)) |succ_range| { while (succ_pii.next(.{ .count = 16 })) |succ_range| {
// Ensure bytes match original before retry. // Ensure bytes match original before retry.
assert(mem.eql( assert(mem.eql(
u8, u8,
@@ -459,7 +459,7 @@ fn attemptSuccessorEviction(
request.size, request.size,
flicken.size(), flicken.size(),
); );
while (orig_pii.next(.greedy)) |orig_range| { while (orig_pii.next(.{ .count = 16 })) |orig_range| {
if (succ_range.touches(orig_range)) continue; if (succ_range.touches(orig_range)) continue;
try pages_made_writable.ensureUnusedCapacity(arena, touchedPageCount(orig_range)); try pages_made_writable.ensureUnusedCapacity(arena, touchedPageCount(orig_range));
ensureRangeWritable( ensureRangeWritable(
@@ -560,7 +560,7 @@ fn attemptNeighborEviction(
patch_flicken.size(), patch_flicken.size(),
); );
while (patch_pii.next(.greedy)) |patch_range| { while (patch_pii.next(.{ .count = 16 })) |patch_range| {
// J_Patch MUST NOT use prefixes, because it's punned inside J_Victim. // J_Patch MUST NOT use prefixes, because it's punned inside J_Victim.
// Adding prefixes would shift J_Patch relative to J_Victim, making constraints harder. // Adding prefixes would shift J_Patch relative to J_Victim, making constraints harder.
if (patch_pii.num_prefixes > 0) break; if (patch_pii.num_prefixes > 0) break;
@@ -596,7 +596,7 @@ fn attemptNeighborEviction(
             victim_flicken.size(),
         );
-        while (victim_pii.next(.greedy)) |victim_range| {
+        while (victim_pii.next(.{ .count = 16 })) |victim_range| {
             if (patch_range.touches(victim_range)) continue;
             try pages_made_writable.ensureUnusedCapacity(arena, touchedPageCount(victim_range));
@@ -848,6 +848,7 @@ const PatchInstructionIterator = struct {
     num_prefixes: u8,
     pli: PatchLocationIterator,
     valid_range: Range,
+    allocated_count: u64,

     fn init(
         bytes: []const u8,
@@ -864,15 +865,21 @@ const PatchInstructionIterator = struct {
             .num_prefixes = 0,
             .pli = pli,
             .valid_range = valid_range,
+            .allocated_count = 0,
         };
     }

-    pub const Strategy = enum {
+    pub const Strategy = union(enum) {
         /// Iterates through all possible ranges.
         /// Useful for finding the optimal allocation (fewest prefixes).
-        exhaustive,
-        /// Try one allocation per found valid_range. Dramatically faster.
-        greedy,
+        exhaustive: void,
+        /// Limits the search to `count` allocation attempts per valid constraint range found by the
+        /// PatchLocationIterator.
+        ///
+        /// This acts as a heuristic to prevent worst-case performance (scanning every byte of a 2GB
+        /// gap) while still offering better density than a purely greedy approach. A count of 1 is
+        /// equivalent to a greedy strategy.
+        count: u64,
     };

     fn next(
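
To make the new strategy concrete, here is a minimal, self-contained Zig sketch. The ToyIterator below is hypothetical: it hands out fixed-size candidates from a flat integer range rather than real patch locations, and the real findAllocation is reduced to a bounds check. Its Strategy union and advance rules, however, mirror the ones introduced in this hunk; in particular, .count = 1 degenerates to the old greedy behaviour of one attempt per range.

const std = @import("std");

/// Hypothetical stand-in for the real PatchInstructionIterator: it yields
/// fixed-size allocations from a single [start, end) byte range and advances
/// according to the same Strategy rules as the code above.
const ToyIterator = struct {
    start: u64,
    end: u64,
    size: u64,
    allocated_count: u64 = 0,

    const Strategy = union(enum) {
        /// Try every possible position.
        exhaustive: void,
        /// At most `count` attempts per range, then skip the rest of it.
        count: u64,
    };

    fn next(it: *ToyIterator, strategy: Strategy) ?u64 {
        if (it.start + it.size > it.end) return null;
        const found = it.start;
        it.allocated_count += 1;
        switch (strategy) {
            // Exhaustive: slide forward one byte so every position gets tried.
            .exhaustive => it.start += 1,
            // Count: after `c` attempts in this range, give up on the rest.
            .count => |c| {
                if (it.allocated_count >= c) {
                    it.start = it.end;
                    it.allocated_count = 0;
                } else {
                    it.start += 1;
                }
            },
        }
        return found;
    }
};

pub fn main() void {
    var it: ToyIterator = .{ .start = 0, .end = 10, .size = 4 };
    // With .count = 1 this prints a single candidate (greedy behaviour);
    // with .exhaustive it would print offsets 0 through 6.
    while (it.next(.{ .count = 1 })) |offset| {
        std.debug.print("candidate offset: {d}\n", .{offset});
    }
}
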
@@ -891,14 +898,23 @@ const PatchInstructionIterator = struct {
                     pii.valid_range,
                 )) |allocated_range| {
                     assert(allocated_range.size() == pii.flicken_size);
+                    pii.allocated_count += 1;
                     // Advancing the valid range, such that the next call to `findAllocation` won't
                     // find the same range again.
                     switch (strategy) {
                         .exhaustive => pii.valid_range.start = allocated_range.start + 1,
-                        .greedy => pii.valid_range.start = pii.valid_range.end,
+                        .count => |c| {
+                            if (pii.allocated_count >= c) {
+                                pii.valid_range.start = pii.valid_range.end;
+                                pii.allocated_count = 0;
+                            } else {
+                                pii.valid_range.start = allocated_range.start + 1;
+                            }
+                        },
                     }
                     return allocated_range;
                 } else {
+                    pii.allocated_count = 0;
                     continue :blk .range;
                 }
             },
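
A note on the budgets picked at the call sites above: the direct/punning attempt searches with .{ .count = 256 } per constraint range, while the successor- and neighbor-eviction paths, which nest one of these loops inside another, use .{ .count = 16 } each. Presumably this keeps the nested searches bounded at roughly 16 × 16 candidate combinations per pair of ranges while the single-level common path gets a denser scan, and all of them avoid the exhaustive worst case of scanning every byte of a 2GB gap.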