convert Patcher to a global singleton

Migrates Patcher state to global variables and uses std.once for initialization.
This prepares for future syscall tracing, which requires the patching context to
be statically accessible across the runtime so that flicken can reach it.
2025-12-09 07:07:22 +01:00
parent 9d4f325a2c
commit 8d907f071c
2 changed files with 58 additions and 68 deletions
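
For context, a minimal, self-contained sketch of the std.once singleton pattern this commit adopts. The gpa/init/initInner names mirror the diff below; the main function and its allocation are illustrative only, not part of the real Patcher API.

const std = @import("std");

// Global state standing in for the Patcher's former per-instance fields.
pub var gpa: std.mem.Allocator = undefined;

// std.once runs initInner at most once, even if init() is called repeatedly
// or from several threads.
var init_once = std.once(initInner);

pub fn init() void {
    init_once.call();
}

fn initInner() void {
    gpa = std.heap.page_allocator;
}

pub fn main() !void {
    init();
    init(); // second call is a no-op; initInner already ran
    const buf = try gpa.alloc(u8, 16);
    defer gpa.free(buf);
    std.debug.print("allocated {d} bytes via the global allocator\n", .{buf.len});
}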

View File

@@ -17,15 +17,11 @@ const Range = @import("Range.zig");
 const assert = std.debug.assert;
-const page_size = 4096;
+const page_size = std.heap.pageSize();
 const jump_rel32: u8 = 0xe9;
 const jump_rel32_size = 5;
 const jump_rel8: u8 = 0xeb;
 const jump_rel8_size = 2;
-const max_ins_bytes = 15;
-// Based on the paper 'x86-64 Instruction Usage among C/C++ Applications' by 'Akshintala et al.'
-// it's '4.25' bytes, so 4 is good enough. (https://oscarlab.github.io/papers/instrpop-systor19.pdf)
-const avg_ins_bytes = 4;
 // TODO: Find an invalid instruction to use.
 // const invalid: u8 = 0xaa;
@@ -33,42 +29,43 @@ const int3: u8 = 0xcc;
 const nop: u8 = 0x90;
 // Prefixes for Padded Jumps (Tactic T1)
-const prefix_fs: u8 = 0x64;
-const prefix_gs: u8 = 0x65;
-const prefix_ss: u8 = 0x36;
-const prefixes = [_]u8{ prefix_fs, prefix_gs, prefix_ss };
+const prefixes = [_]u8{
+    // prefix_fs,
+    0x64,
+    // prefix_gs,
+    0x65,
+    // prefix_ss,
+    0x36,
+};
-const Patcher = @This();
-gpa: mem.Allocator,
-flicken: std.StringArrayHashMapUnmanaged(Flicken) = .empty,
-address_allocator: AddressAllocator = .empty,
+pub var gpa: mem.Allocator = undefined;
+pub var flicken_templates: std.StringArrayHashMapUnmanaged(Flicken) = .empty;
+pub var address_allocator: AddressAllocator = .empty;
 /// Tracks the base addresses of pages we have mmap'd for Flicken.
-allocated_pages: std.AutoHashMapUnmanaged(u64, void) = .empty,
+pub var allocated_pages: std.AutoHashMapUnmanaged(u64, void) = .empty;
-pub fn init(gpa: mem.Allocator) !Patcher {
-    var flicken: std.StringArrayHashMapUnmanaged(Flicken) = .empty;
-    try flicken.ensureTotalCapacity(gpa, 8);
-    flicken.putAssumeCapacity("nop", .{ .name = "nop", .bytes = &.{} });
-    return .{
-        .gpa = gpa,
-        .flicken = flicken,
-    };
+var init_once = std.once(initInner);
+pub fn init() void {
+    init_once.call();
 }
-pub fn deinit(patcher: *Patcher) void {
-    _ = patcher;
+fn initInner() void {
+    gpa = std.heap.page_allocator;
+    flicken_templates.ensureTotalCapacity(
+        std.heap.page_allocator,
+        page_size / @sizeOf(Flicken),
+    ) catch @panic("failed initializing patcher");
+    flicken_templates.putAssumeCapacity("nop", .{ .name = "nop", .bytes = &.{} });
 }
 /// Flicken name and bytes have to be valid for the lifetime it's used. If a trampoline with the
 /// name is already registered it gets overwritten.
 /// NOTE: The name "nop" is reserved and always has the ID 0.
-pub fn addFlicken(patcher: *Patcher, trampoline: Flicken) !FlickenId {
+pub fn addFlicken(trampoline: Flicken) !FlickenId {
     assert(!mem.eql(u8, "nop", trampoline.name));
-    try patcher.flicken.ensureUnusedCapacity(patcher.gpa, 1);
+    try flicken_templates.ensureUnusedCapacity(gpa, 1);
     errdefer comptime unreachable;
-    const gop = patcher.flicken.getOrPutAssumeCapacity(trampoline.name);
+    const gop = flicken_templates.getOrPutAssumeCapacity(trampoline.name);
     if (gop.found_existing) {
         log.warn("addTrampoline: Overwriting existing trampoline: {s}", .{trampoline.name});
     }
@@ -174,18 +171,18 @@ pub const Statistics = struct {
 ///
 /// The region is processed Back-to-Front to ensure that modifications (punning) only
 /// constrain instructions that have already been processed or are locked.
-pub fn patchRegion(patcher: *Patcher, region: []align(page_size) u8) !void {
+pub fn patchRegion(region: []align(page_size) u8) !void {
     {
         // Block the region, such that we don't try to allocate there anymore.
         const start: i64 = @intCast(@intFromPtr(region.ptr));
-        try patcher.address_allocator.block(
-            patcher.gpa,
+        try address_allocator.block(
+            gpa,
             .{ .start = start, .end = start + @as(i64, @intCast(region.len)) },
             page_size,
         );
     }
-    var arena_impl = std.heap.ArenaAllocator.init(patcher.gpa);
+    var arena_impl = std.heap.ArenaAllocator.init(gpa);
     const arena = arena_impl.allocator();
     defer arena_impl.deinit();
@@ -239,7 +236,7 @@ pub fn patchRegion(patcher: *Patcher, region: []align(page_size) u8) !void {
         }
         last_offset = request.offset;
-        if (@as(u64, @intFromEnum(request.flicken)) >= patcher.flicken.count()) {
+        if (@as(u64, @intFromEnum(request.flicken)) >= flicken_templates.count()) {
             const fmt = dis.formatBytes(request.bytes[0..request.size]);
             log.err(
                 "patchRegion: Usage of undefined flicken in request {f} for instruction: {s}",
@@ -274,7 +271,7 @@ pub fn patchRegion(patcher: *Patcher, region: []align(page_size) u8) !void {
             }
         }
-        if (try patcher.attemptDirectOrPunning(
+        if (try attemptDirectOrPunning(
             request,
             arena,
             &locked_bytes,
@@ -284,7 +281,7 @@ pub fn patchRegion(patcher: *Patcher, region: []align(page_size) u8) !void {
             continue :requests;
         }
-        if (try patcher.attemptSuccessorEviction(
+        if (try attemptSuccessorEviction(
             request,
             arena,
             &locked_bytes,
@@ -294,7 +291,7 @@ pub fn patchRegion(patcher: *Patcher, region: []align(page_size) u8) !void {
             continue :requests;
         }
-        if (try patcher.attemptNeighborEviction(
+        if (try attemptNeighborEviction(
             request,
             arena,
             &locked_bytes,
@@ -328,7 +325,6 @@ pub fn patchRegion(patcher: *Patcher, region: []align(page_size) u8) !void {
 }
 fn attemptDirectOrPunning(
-    patcher: *Patcher,
     request: PatchRequest,
     arena: mem.Allocator,
     locked_bytes: *std.DynamicBitSetUnmanaged,
@@ -338,7 +334,7 @@ fn attemptDirectOrPunning(
     const flicken: Flicken = if (request.flicken == .nop)
         .{ .name = "nop", .bytes = request.bytes[0..request.size] }
     else
-        patcher.flicken.entries.get(@intFromEnum(request.flicken)).value;
+        flicken_templates.entries.get(@intFromEnum(request.flicken)).value;
     var pii = PatchInstructionIterator.init(
         request.bytes,
@@ -351,9 +347,9 @@ fn attemptDirectOrPunning(
     // mapped. While harmless (it becomes an unused executable page), it is technically a
     // memory leak. A future fix should track "current attempt" pages separately and unmap
     // them on failure.
-    while (pii.next(&patcher.address_allocator, .exhaustive)) |allocated_range| {
+    while (pii.next(.exhaustive)) |allocated_range| {
         try pages_made_writable.ensureUnusedCapacity(arena, touchedPageCount(allocated_range));
-        patcher.ensureRangeWritable(
+        ensureRangeWritable(
             allocated_range,
             pages_made_writable,
         ) catch |err| switch (err) {
@@ -371,7 +367,7 @@ fn attemptDirectOrPunning(
             else => return err,
         };
-        try patcher.address_allocator.block(patcher.gpa, allocated_range, 0);
+        try address_allocator.block(gpa, allocated_range, 0);
         const lock_size = jump_rel32_size + pii.num_prefixes;
         locked_bytes.setRangeValue(
             .{ .start = request.offset, .end = request.offset + lock_size },
@@ -390,7 +386,6 @@ fn attemptDirectOrPunning(
 }
 fn attemptSuccessorEviction(
-    patcher: *Patcher,
     request: PatchRequest,
     arena: mem.Allocator,
     locked_bytes: *std.DynamicBitSetUnmanaged,
@@ -426,7 +421,7 @@ fn attemptSuccessorEviction(
         succ_request.size,
         succ_flicken.size(),
     );
-    while (succ_pii.next(&patcher.address_allocator, .greedy)) |succ_range| {
+    while (succ_pii.next(.greedy)) |succ_range| {
         // Ensure bytes match original before retry.
         assert(mem.eql(
             u8,
@@ -435,7 +430,7 @@ fn attemptSuccessorEviction(
         ));
         try pages_made_writable.ensureUnusedCapacity(arena, touchedPageCount(succ_range));
-        patcher.ensureRangeWritable(
+        ensureRangeWritable(
             succ_range,
             pages_made_writable,
         ) catch |err| switch (err) {
@@ -457,17 +452,17 @@ fn attemptSuccessorEviction(
         const flicken: Flicken = if (request.flicken == .nop)
             .{ .name = "nop", .bytes = request.bytes[0..request.size] }
         else
-            patcher.flicken.entries.get(@intFromEnum(request.flicken)).value;
+            flicken_templates.entries.get(@intFromEnum(request.flicken)).value;
         var orig_pii = PatchInstructionIterator.init(
             request.bytes,
             request.size,
             flicken.size(),
         );
-        while (orig_pii.next(&patcher.address_allocator, .greedy)) |orig_range| {
+        while (orig_pii.next(.greedy)) |orig_range| {
             if (succ_range.touches(orig_range)) continue;
             try pages_made_writable.ensureUnusedCapacity(arena, touchedPageCount(orig_range));
-            patcher.ensureRangeWritable(
+            ensureRangeWritable(
                 orig_range,
                 pages_made_writable,
             ) catch |err| switch (err) {
@@ -485,8 +480,8 @@ fn attemptSuccessorEviction(
                 else => return err,
             };
-            try patcher.address_allocator.block(patcher.gpa, succ_range, 0);
-            try patcher.address_allocator.block(patcher.gpa, orig_range, 0);
+            try address_allocator.block(gpa, succ_range, 0);
+            try address_allocator.block(gpa, orig_range, 0);
             const lock_size = request.size + jump_rel32_size + succ_pii.num_prefixes;
             locked_bytes.setRangeValue(
                 .{ .start = request.offset, .end = request.offset + lock_size },
@@ -506,7 +501,6 @@ fn attemptSuccessorEviction(
 }
 fn attemptNeighborEviction(
-    patcher: *Patcher,
     request: PatchRequest,
     arena: mem.Allocator,
     locked_bytes: *std.DynamicBitSetUnmanaged,
@@ -555,7 +549,7 @@ fn attemptNeighborEviction(
     const patch_flicken: Flicken = if (request.flicken == .nop)
         .{ .name = "nop", .bytes = request.bytes[0..request.size] }
     else
-        patcher.flicken.entries.get(@intFromEnum(request.flicken)).value;
+        flicken_templates.entries.get(@intFromEnum(request.flicken)).value;
     // Constraints for J_Patch:
     // Bytes [0 .. victim_size - k] are free (inside victim).
@@ -566,13 +560,13 @@ fn attemptNeighborEviction(
         patch_flicken.size(),
     );
-    while (patch_pii.next(&patcher.address_allocator, .greedy)) |patch_range| {
+    while (patch_pii.next(.greedy)) |patch_range| {
         // J_Patch MUST NOT use prefixes, because it's punned inside J_Victim.
         // Adding prefixes would shift J_Patch relative to J_Victim, making constraints harder.
         if (patch_pii.num_prefixes > 0) break;
         try pages_made_writable.ensureUnusedCapacity(arena, touchedPageCount(patch_range));
-        patcher.ensureRangeWritable(patch_range, pages_made_writable) catch |err| switch (err) {
+        ensureRangeWritable(patch_range, pages_made_writable) catch |err| switch (err) {
             error.MappingAlreadyExists => continue,
             else => return err,
         };
@@ -602,11 +596,11 @@ fn attemptNeighborEviction(
             victim_flicken.size(),
         );
-        while (victim_pii.next(&patcher.address_allocator, .greedy)) |victim_range| {
+        while (victim_pii.next(.greedy)) |victim_range| {
             if (patch_range.touches(victim_range)) continue;
             try pages_made_writable.ensureUnusedCapacity(arena, touchedPageCount(victim_range));
-            patcher.ensureRangeWritable(victim_range, pages_made_writable) catch |err| switch (err) {
+            ensureRangeWritable(victim_range, pages_made_writable) catch |err| switch (err) {
                 error.MappingAlreadyExists => continue,
                 else => return err,
             };
@@ -667,8 +661,8 @@ fn attemptNeighborEviction(
             }
             // 5. Locking
-            try patcher.address_allocator.block(patcher.gpa, patch_range, 0);
-            try patcher.address_allocator.block(patcher.gpa, victim_range, 0);
+            try address_allocator.block(gpa, patch_range, 0);
+            try address_allocator.block(gpa, victim_range, 0);
             locked_bytes.setRangeValue(
                 .{ .start = request.offset, .end = request.offset + request.size },
@@ -800,7 +794,6 @@ fn touchedPageCount(range: Range) u32 {
 /// Ensure `range` is mapped R|W. Assumes `pages_made_writable` has enough free capacity.
 fn ensureRangeWritable(
-    patcher: *Patcher,
     range: Range,
     pages_made_writable: *std.AutoHashMapUnmanaged(u64, void),
 ) !void {
@@ -812,7 +805,7 @@ fn ensureRangeWritable(
         // If the page is already writable, skip it.
         if (pages_made_writable.get(page_addr)) |_| continue;
         // If we mapped it already we have to do mprotect, else mmap.
-        const gop = try patcher.allocated_pages.getOrPut(patcher.gpa, page_addr);
+        const gop = try allocated_pages.getOrPut(gpa, page_addr);
         if (gop.found_existing) {
             const ptr: [*]align(page_size) u8 = @ptrFromInt(page_addr);
             try posix.mprotect(ptr[0..page_addr], protection);
@@ -830,8 +823,8 @@ fn ensureRangeWritable(
             // (executable, OS, dynamic loader,...) allocated something there.
             // We block this so we don't try this page again in the future,
             // saving a bunch of syscalls.
-            try patcher.address_allocator.block(
-                patcher.gpa,
+            try address_allocator.block(
+                gpa,
                 .{ .start = @intCast(page_addr), .end = @intCast(page_addr + page_size) },
                 page_size,
             );
@@ -884,7 +877,6 @@ const PatchInstructionIterator = struct {
     fn next(
         pii: *PatchInstructionIterator,
-        address_allocator: *AddressAllocator,
         strategy: Strategy,
     ) ?Range {
         const State = enum {

View File

@@ -32,8 +32,6 @@ const help =
 const UnfinishedReadError = error{UnfinishedRead};
-var patcher: Patcher = undefined;
 pub fn main() !void {
     // Parse arguments
     var arg_index: u64 = 1; // Skip own name
@@ -52,10 +50,10 @@ pub fn main() !void {
     }
     // Initialize patcher
-    patcher = try Patcher.init(std.heap.page_allocator); // TODO: allocator
+    Patcher.init();
     // Block the first 64k to avoid mmap_min_addr (EPERM) issues on Linux.
     // TODO: read it from `/proc/sys/vm/mmap_min_addr` instead.
-    try patcher.address_allocator.block(patcher.gpa, .{ .start = 0, .end = 0x10000 }, 0);
+    try Patcher.address_allocator.block(Patcher.gpa, .{ .start = 0, .end = 0x10000 }, 0);
     // Map file into memory
     const file = try lookupFile(mem.sliceTo(std.os.argv[arg_index], 0));
@@ -207,7 +205,7 @@ fn loadStaticElf(ehdr: elf.Header, file_reader: *std.fs.File.Reader) !usize {
         const protections = elfToMmapProt(phdr.p_flags);
         if (protections & posix.PROT.EXEC > 0) {
             log.info("Patching executable segment", .{});
-            try patcher.patchRegion(ptr);
+            try Patcher.patchRegion(ptr);
         }
         try posix.mprotect(ptr, protections);
     }